From b00d0641a673a8a76e51b3641357d71e2ffb74b5 Mon Sep 17 00:00:00 2001
From: vlad-perevezentsev
Date: Wed, 25 Feb 2026 00:27:47 +0100
Subject: [PATCH 01/11] Extend `._tensor_impl` with remaining functions used by dpnp (#2758)

This PR extends `_tensor_impl` in `dpctl_ext.tensor` with the remaining
functions that are explicitly used in `dpnp` implementations (`_take`,
`_full_usm_ndarray`, `_zeros_usm_ndarray`, `_triu`), enabling a complete
switch to `dpctl_ext.tensor._tensor_impl` instead of
`dpctl.tensor._tensor_impl`.

It also adds `take()`, `put()`, `full()`, `tril()` and `triu()` to
`dpctl_ext.tensor` and updates the corresponding dpnp functions to use
these implementations internally.

---
 dpctl_ext/tensor/CMakeLists.txt | 8 +-
 dpctl_ext/tensor/__init__.py | 19 +
 dpctl_ext/tensor/_ctors.py | 326 +++
 dpctl_ext/tensor/_indexing_functions.py | 329 +++
 dpctl_ext/tensor/_numpy_helper.py | 45 +
 .../include/kernels/constructors.hpp | 303 +++
 .../kernels/integer_advanced_indexing.hpp | 418 +++
 .../tensor/libtensor/source/full_ctor.cpp | 311 +++
 .../tensor/libtensor/source/full_ctor.hpp | 57 ++
 .../source/integer_advanced_indexing.cpp | 817 ++++++++++++++++++
 .../source/integer_advanced_indexing.hpp | 71 ++
 .../tensor/libtensor/source/tensor_ctors.cpp | 144 ++-
 .../tensor/libtensor/source/triul_ctor.cpp | 246 ++++++
 .../tensor/libtensor/source/triul_ctor.hpp | 58 ++
 .../tensor/libtensor/source/zeros_ctor.cpp | 161 ++++
 .../tensor/libtensor/source/zeros_ctor.hpp | 54 ++
 dpnp/dpnp_algo/dpnp_fill.py | 9 +-
 dpnp/dpnp_container.py | 7 +-
 dpnp/dpnp_iface.py | 1 +
 dpnp/dpnp_iface_indexing.py | 11 +-
 dpnp/fft/dpnp_utils_fft.py | 14 +-
 dpnp/linalg/dpnp_utils_linalg.py | 5 +-
 dpnp/scipy/linalg/_utils.py | 1 +
 23 files changed, 3325 insertions(+), 90 deletions(-)
 create mode 100644 dpctl_ext/tensor/_ctors.py
 create mode 100644 dpctl_ext/tensor/_indexing_functions.py
 create mode 100644 dpctl_ext/tensor/_numpy_helper.py
 create mode 100644 dpctl_ext/tensor/libtensor/include/kernels/constructors.hpp
 create mode 100644 dpctl_ext/tensor/libtensor/include/kernels/integer_advanced_indexing.hpp
 create mode 100644 dpctl_ext/tensor/libtensor/source/full_ctor.cpp
 create mode 100644 dpctl_ext/tensor/libtensor/source/full_ctor.hpp
 create mode 100644 dpctl_ext/tensor/libtensor/source/integer_advanced_indexing.cpp
 create mode 100644 dpctl_ext/tensor/libtensor/source/integer_advanced_indexing.hpp
 create mode 100644 dpctl_ext/tensor/libtensor/source/triul_ctor.cpp
 create mode 100644 dpctl_ext/tensor/libtensor/source/triul_ctor.hpp
 create mode 100644 dpctl_ext/tensor/libtensor/source/zeros_ctor.cpp
 create mode 100644 dpctl_ext/tensor/libtensor/source/zeros_ctor.hpp

diff --git a/dpctl_ext/tensor/CMakeLists.txt b/dpctl_ext/tensor/CMakeLists.txt
index ed69b4f10cba..fd781a9f9586 100644
--- a/dpctl_ext/tensor/CMakeLists.txt
+++ b/dpctl_ext/tensor/CMakeLists.txt
@@ -52,12 +52,12 @@ set(_tensor_impl_sources
 # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/copy_for_reshape.cpp
 # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/copy_for_roll.cpp
 # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/linear_sequences.cpp
- # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/integer_advanced_indexing.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/integer_advanced_indexing.cpp
 # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/boolean_advanced_indexing.cpp
 # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/eye_ctor.cpp
- # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/full_ctor.cpp
- # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/zeros_ctor.cpp
- # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/triul_ctor.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/full_ctor.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/zeros_ctor.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/triul_ctor.cpp # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/where.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/device_support_queries.cpp # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/repeat.cpp diff --git a/dpctl_ext/tensor/__init__.py b/dpctl_ext/tensor/__init__.py index a71324cb88d8..3c6939eff7a0 100644 --- a/dpctl_ext/tensor/__init__.py +++ b/dpctl_ext/tensor/__init__.py @@ -25,3 +25,22 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF # THE POSSIBILITY OF SUCH DAMAGE. # ***************************************************************************** + + +from dpctl_ext.tensor._ctors import ( + full, + tril, + triu, +) +from dpctl_ext.tensor._indexing_functions import ( + put, + take, +) + +__all__ = [ + "full", + "put", + "take", + "tril", + "triu", +] diff --git a/dpctl_ext/tensor/_ctors.py b/dpctl_ext/tensor/_ctors.py new file mode 100644 index 000000000000..a0e7b28e66ff --- /dev/null +++ b/dpctl_ext/tensor/_ctors.py @@ -0,0 +1,326 @@ +# ***************************************************************************** +# Copyright (c) 2026, Intel Corporation +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# - Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# - Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# - Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +# THE POSSIBILITY OF SUCH DAMAGE. +# ***************************************************************************** + +import operator +from numbers import Number + +import dpctl +import dpctl.tensor as dpt +import dpctl.utils +import numpy as np +from dpctl.tensor._data_types import _get_dtype +from dpctl.tensor._device import normalize_queue_device + +import dpctl_ext.tensor._tensor_impl as ti + + +def _cast_fill_val(fill_val, dt): + """ + Casts the Python scalar `fill_val` to another Python type coercible to the + requested data type `dt`, if necessary. 
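+
+    For example, ``_cast_fill_val(2.5, dpt.int32)`` returns the Python
+    ``int`` ``2``, while ``_cast_fill_val(1 + 0j, dpt.float64)`` returns
+    ``1.0``.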
+ """ + val_type = type(fill_val) + if val_type in [float, complex] and np.issubdtype(dt, np.integer): + return int(fill_val.real) + elif val_type is complex and np.issubdtype(dt, np.floating): + return fill_val.real + elif val_type is int and np.issubdtype(dt, np.integer): + return _to_scalar(fill_val, dt) + else: + return fill_val + + +def _to_scalar(obj, sc_ty): + """A way to convert object to NumPy scalar type. + Raises OverflowError if obj can not be represented + using the requested scalar type. + """ + zd_arr = np.asarray(obj, dtype=sc_ty) + return zd_arr[()] + + +def _validate_fill_value(fill_val): + """Validates that `fill_val` is a numeric or boolean scalar.""" + # TODO: verify if `np.True_` and `np.False_` should be instances of + # Number in NumPy, like other NumPy scalars and like Python bools + # check for `np.bool_` separately as NumPy<2 has no `np.bool` + if not isinstance(fill_val, Number) and not isinstance(fill_val, np.bool_): + raise TypeError( + f"array cannot be filled with scalar of type {type(fill_val)}" + ) + + +def full( + shape, + fill_value, + *, + dtype=None, + order="C", + device=None, + usm_type=None, + sycl_queue=None, +): + """ + Returns a new :class:`dpctl.tensor.usm_ndarray` having a specified + shape and filled with `fill_value`. + + Args: + shape (tuple): + Dimensions of the array to be created. + fill_value (int,float,complex,usm_ndarray): + fill value + dtype (optional): data type of the array. Can be typestring, + a :class:`numpy.dtype` object, :mod:`numpy` char string, + or a NumPy scalar type. Default: ``None`` + order ("C", or "F"): + memory layout for the array. Default: ``"C"`` + device (optional): array API concept of device where the output array + is created. ``device`` can be ``None``, a oneAPI filter selector + string, an instance of :class:`dpctl.SyclDevice` corresponding to + a non-partitioned SYCL device, an instance of + :class:`dpctl.SyclQueue`, or a :class:`dpctl.tensor.Device` object + returned by :attr:`dpctl.tensor.usm_ndarray.device`. + Default: ``None`` + usm_type (``"device"``, ``"shared"``, ``"host"``, optional): + The type of SYCL USM allocation for the output array. + Default: ``"device"`` + sycl_queue (:class:`dpctl.SyclQueue`, optional): + The SYCL queue to use + for output array allocation and copying. ``sycl_queue`` and + ``device`` are complementary arguments, i.e. use one or another. + If both are specified, a :exc:`TypeError` is raised unless both + imply the same underlying SYCL queue to be used. If both are + ``None``, a cached queue targeting default-selected device is + used for allocation and population. Default: ``None`` + + Returns: + usm_ndarray: + New array initialized with given value. + """ + if not isinstance(order, str) or len(order) == 0 or order[0] not in "CcFf": + raise ValueError( + "Unrecognized order keyword value, expecting 'F' or 'C'." 
+ ) + order = order[0].upper() + dpctl.utils.validate_usm_type(usm_type, allow_none=True) + + if isinstance(fill_value, (dpt.usm_ndarray, np.ndarray, tuple, list)): + if ( + isinstance(fill_value, dpt.usm_ndarray) + and sycl_queue is None + and device is None + ): + sycl_queue = fill_value.sycl_queue + else: + sycl_queue = normalize_queue_device( + sycl_queue=sycl_queue, device=device + ) + X = dpt.asarray( + fill_value, + dtype=dtype, + order=order, + usm_type=usm_type, + sycl_queue=sycl_queue, + ) + return dpt.copy(dpt.broadcast_to(X, shape), order=order) + else: + _validate_fill_value(fill_value) + + sycl_queue = normalize_queue_device(sycl_queue=sycl_queue, device=device) + usm_type = usm_type if usm_type is not None else "device" + dtype = _get_dtype(dtype, sycl_queue, ref_type=type(fill_value)) + res = dpt.usm_ndarray( + shape, + dtype=dtype, + buffer=usm_type, + order=order, + buffer_ctor_kwargs={"queue": sycl_queue}, + ) + fill_value = _cast_fill_val(fill_value, dtype) + + _manager = dpctl.utils.SequentialOrderManager[sycl_queue] + # populating new allocation, no dependent events + hev, full_ev = ti._full_usm_ndarray(fill_value, res, sycl_queue) + _manager.add_event_pair(hev, full_ev) + return res + + +def tril(x, /, *, k=0): + """ + Returns the lower triangular part of a matrix (or a stack of matrices) + ``x``. + + The lower triangular part of the matrix is defined as the elements on and + below the specified diagonal ``k``. + + Args: + x (usm_ndarray): + Input array + k (int, optional): + Specifies the diagonal above which to set + elements to zero. If ``k = 0``, the diagonal is the main diagonal. + If ``k < 0``, the diagonal is below the main diagonal. + If ``k > 0``, the diagonal is above the main diagonal. + Default: ``0`` + + Returns: + usm_ndarray: + A lower-triangular array or a stack of lower-triangular arrays. + """ + if not isinstance(x, dpt.usm_ndarray): + raise TypeError( + "Expected argument of type dpctl.tensor.usm_ndarray, " + f"got {type(x)}." + ) + + k = operator.index(k) + + order = "F" if (x.flags.f_contiguous) else "C" + + shape = x.shape + nd = x.ndim + if nd < 2: + raise ValueError("Array dimensions less than 2.") + + q = x.sycl_queue + if k >= shape[nd - 1] - 1: + res = dpt.empty( + x.shape, + dtype=x.dtype, + order=order, + usm_type=x.usm_type, + sycl_queue=q, + ) + _manager = dpctl.utils.SequentialOrderManager[q] + dep_evs = _manager.submitted_events + hev, cpy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=x, dst=res, sycl_queue=q, depends=dep_evs + ) + _manager.add_event_pair(hev, cpy_ev) + elif k < -shape[nd - 2]: + res = dpt.zeros( + x.shape, + dtype=x.dtype, + order=order, + usm_type=x.usm_type, + sycl_queue=q, + ) + else: + res = dpt.empty( + x.shape, + dtype=x.dtype, + order=order, + usm_type=x.usm_type, + sycl_queue=q, + ) + _manager = dpctl.utils.SequentialOrderManager[q] + dep_evs = _manager.submitted_events + hev, tril_ev = ti._tril( + src=x, dst=res, k=k, sycl_queue=q, depends=dep_evs + ) + _manager.add_event_pair(hev, tril_ev) + + return res + + +def triu(x, /, *, k=0): + """ + Returns the upper triangular part of a matrix (or a stack of matrices) + ``x``. + + The upper triangular part of the matrix is defined as the elements on and + above the specified diagonal ``k``. + + Args: + x (usm_ndarray): + Input array + k (int, optional): + Specifies the diagonal below which to set + elements to zero. If ``k = 0``, the diagonal is the main diagonal. + If ``k < 0``, the diagonal is below the main diagonal. 
+ If ``k > 0``, the diagonal is above the main diagonal. + Default: ``0`` + + Returns: + usm_ndarray: + An upper-triangular array or a stack of upper-triangular arrays. + """ + if not isinstance(x, dpt.usm_ndarray): + raise TypeError( + "Expected argument of type dpctl.tensor.usm_ndarray, " + f"got {type(x)}." + ) + + k = operator.index(k) + + order = "F" if (x.flags.f_contiguous) else "C" + + shape = x.shape + nd = x.ndim + if nd < 2: + raise ValueError("Array dimensions less than 2.") + + q = x.sycl_queue + if k > shape[nd - 1]: + res = dpt.zeros( + x.shape, + dtype=x.dtype, + order=order, + usm_type=x.usm_type, + sycl_queue=q, + ) + elif k <= -shape[nd - 2] + 1: + res = dpt.empty( + x.shape, + dtype=x.dtype, + order=order, + usm_type=x.usm_type, + sycl_queue=q, + ) + _manager = dpctl.utils.SequentialOrderManager[q] + dep_evs = _manager.submitted_events + hev, cpy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=x, dst=res, sycl_queue=q, depends=dep_evs + ) + _manager.add_event_pair(hev, cpy_ev) + else: + res = dpt.empty( + x.shape, + dtype=x.dtype, + order=order, + usm_type=x.usm_type, + sycl_queue=q, + ) + _manager = dpctl.utils.SequentialOrderManager[q] + dep_evs = _manager.submitted_events + hev, triu_ev = ti._triu( + src=x, dst=res, k=k, sycl_queue=q, depends=dep_evs + ) + _manager.add_event_pair(hev, triu_ev) + + return res diff --git a/dpctl_ext/tensor/_indexing_functions.py b/dpctl_ext/tensor/_indexing_functions.py new file mode 100644 index 000000000000..106df09cf97e --- /dev/null +++ b/dpctl_ext/tensor/_indexing_functions.py @@ -0,0 +1,329 @@ +# ***************************************************************************** +# Copyright (c) 2026, Intel Corporation +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# - Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# - Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# - Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +# THE POSSIBILITY OF SUCH DAMAGE. 
+# *****************************************************************************
+
+import operator
+
+import dpctl
+import dpctl.tensor as dpt
+import dpctl.utils
+
+import dpctl_ext.tensor._tensor_impl as ti
+
+from ._numpy_helper import normalize_axis_index
+
+
+def _get_indexing_mode(name):
+    modes = {"wrap": 0, "clip": 1}
+    try:
+        return modes[name]
+    except KeyError:
+        raise ValueError(
+            "`mode` must be `wrap` or `clip`. Got `{}`.".format(name)
+        )
+
+
+def put(x, indices, vals, /, *, axis=None, mode="wrap"):
+    """put(x, indices, vals, axis=None, mode="wrap")
+
+    Puts values into an array along a given axis at given indices.
+
+    Args:
+        x (usm_ndarray):
+            The array the values will be put into.
+        indices (usm_ndarray):
+            One-dimensional array of indices.
+        vals (usm_ndarray):
+            Array of values to be put into ``x``.
+            Must be broadcastable to the result shape
+            ``x.shape[:axis] + indices.shape + x.shape[axis+1:]``.
+        axis (int, optional):
+            The axis along which the values will be placed.
+            If ``x`` is one-dimensional, this argument is optional.
+            Default: ``None``.
+        mode (str, optional):
+            How out-of-bounds indices will be handled. Possible values
+            are:
+
+            - ``"wrap"``: clamps indices to (``-n <= i < n``), then wraps
+              negative indices.
+            - ``"clip"``: clips indices to (``0 <= i < n``).
+
+            Default: ``"wrap"``.
+
+    .. note::
+
+        If input array ``indices`` contains duplicates, a race condition
+        occurs, and the value written into corresponding positions in ``x``
+        may vary from run to run. Preserving sequential semantics in handling
+        the duplicates to achieve deterministic behavior requires additional
+        work, e.g.
+
+        :Example:
+
+        .. code-block:: python
+
+            from dpctl import tensor as dpt
+
+            def put_vec_duplicates(vec, ind, vals):
+                "Put values into vec, handling possible duplicates in ind"
+                assert vec.ndim == ind.ndim == vals.ndim == 1
+
+                # find positions of last occurrences of each
+                # unique index
+                ind_flipped = dpt.flip(ind)
+                ind_uniq = dpt.unique_all(ind_flipped).indices
+                has_dups = len(ind) != len(ind_uniq)
+
+                if has_dups:
+                    ind_uniq = dpt.subtract(vec.size - 1, ind_uniq)
+                    ind = dpt.take(ind, ind_uniq)
+                    vals = dpt.take(vals, ind_uniq)
+
+                dpt.put(vec, ind, vals)
+
+            n = 512
+            ind = dpt.concat((dpt.arange(n), dpt.arange(n, -1, step=-1)))
+            x = dpt.zeros(ind.size, dtype="int32")
+            vals = dpt.arange(ind.size, dtype=x.dtype)
+
+            # Values corresponding to last positions of
+            # duplicate indices are written into the vector x
+            put_vec_duplicates(x, ind, vals)
+
+            parts = (vals[-1:-n-2:-1], dpt.zeros(n, dtype=x.dtype))
+            expected = dpt.concat(parts)
+            assert dpt.all(x == expected)
+    """
+    if not isinstance(x, dpt.usm_ndarray):
+        raise TypeError(
+            "Expected instance of `dpt.usm_ndarray`, got `{}`.".format(type(x))
+        )
+    if not isinstance(indices, dpt.usm_ndarray):
+        raise TypeError(
+            "`indices` expected `dpt.usm_ndarray`, got `{}`.".format(
+                type(indices)
+            )
+        )
+    if isinstance(vals, dpt.usm_ndarray):
+        queues_ = [x.sycl_queue, indices.sycl_queue, vals.sycl_queue]
+        usm_types_ = [x.usm_type, indices.usm_type, vals.usm_type]
+    else:
+        queues_ = [x.sycl_queue, indices.sycl_queue]
+        usm_types_ = [x.usm_type, indices.usm_type]
+    if indices.ndim != 1:
+        raise ValueError(
+            "`indices` expected a 1D array, got `{}`".format(indices.ndim)
+        )
+    if indices.dtype.kind not in "ui":
+        raise IndexError(
+            "`indices` expected integer data type, got `{}`".format(
+                indices.dtype
+            )
+        )
+    exec_q = dpctl.utils.get_execution_queue(queues_)
+    if exec_q is None:
+        raise 
dpctl.utils.ExecutionPlacementError + vals_usm_type = dpctl.utils.get_coerced_usm_type(usm_types_) + + mode = _get_indexing_mode(mode) + + x_ndim = x.ndim + if axis is None: + if x_ndim > 1: + raise ValueError( + "`axis` cannot be `None` for array of dimension `{}`".format( + x_ndim + ) + ) + axis = 0 + + if x_ndim > 0: + axis = normalize_axis_index(operator.index(axis), x_ndim) + x_sh = x.shape + if x_sh[axis] == 0 and indices.size != 0: + raise IndexError("cannot take non-empty indices from an empty axis") + val_shape = x.shape[:axis] + indices.shape + x.shape[axis + 1 :] + else: + if axis != 0: + raise ValueError("`axis` must be 0 for an array of dimension 0.") + val_shape = indices.shape + + if not isinstance(vals, dpt.usm_ndarray): + vals = dpt.asarray( + vals, dtype=x.dtype, usm_type=vals_usm_type, sycl_queue=exec_q + ) + # choose to throw here for consistency with `place` + if vals.size == 0: + raise ValueError( + "cannot put into non-empty indices along an empty axis" + ) + if vals.dtype == x.dtype: + rhs = vals + else: + rhs = dpt.astype(vals, x.dtype) + rhs = dpt.broadcast_to(rhs, val_shape) + + _manager = dpctl.utils.SequentialOrderManager[exec_q] + deps_ev = _manager.submitted_events + hev, put_ev = ti._put( + x, (indices,), rhs, axis, mode, sycl_queue=exec_q, depends=deps_ev + ) + _manager.add_event_pair(hev, put_ev) + + +def take(x, indices, /, *, axis=None, out=None, mode="wrap"): + """take(x, indices, axis=None, out=None, mode="wrap") + + Takes elements from an array along a given axis at given indices. + + Args: + x (usm_ndarray): + The array that elements will be taken from. + indices (usm_ndarray): + One-dimensional array of indices. + axis (int, optional): + The axis along which the values will be selected. + If ``x`` is one-dimensional, this argument is optional. + Default: ``None``. + out (Optional[usm_ndarray]): + Output array to populate. Array must have the correct + shape and the expected data type. + mode (str, optional): + How out-of-bounds indices will be handled. Possible values + are: + + - ``"wrap"``: clamps indices to (``-n <= i < n``), then wraps + negative indices. + - ``"clip"``: clips indices to (``0 <= i < n``). + + Default: ``"wrap"``. + + Returns: + usm_ndarray: + Array with shape + ``x.shape[:axis] + indices.shape + x.shape[axis + 1:]`` + filled with elements from ``x``. 
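+
+    :Example:
+
+    A minimal sketch; the resulting values assume a default-selected
+    SYCL device is available:
+
+    .. code-block:: python
+
+        import dpctl.tensor as dpt
+
+        import dpctl_ext.tensor as dpt_ext
+
+        x = dpt.arange(10, dtype="int32")
+        ind = dpt.asarray([1, 3, 5], dtype="int64")
+        y = dpt_ext.take(x, ind)
+        # dpt.asnumpy(y) -> array([1, 3, 5], dtype=int32)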
+ """ + if not isinstance(x, dpt.usm_ndarray): + raise TypeError( + "Expected instance of `dpt.usm_ndarray`, got `{}`.".format(type(x)) + ) + + if not isinstance(indices, dpt.usm_ndarray): + raise TypeError( + "`indices` expected `dpt.usm_ndarray`, got `{}`.".format( + type(indices) + ) + ) + if indices.dtype.kind not in "ui": + raise IndexError( + "`indices` expected integer data type, got `{}`".format( + indices.dtype + ) + ) + if indices.ndim != 1: + raise ValueError( + "`indices` expected a 1D array, got `{}`".format(indices.ndim) + ) + exec_q = dpctl.utils.get_execution_queue([x.sycl_queue, indices.sycl_queue]) + if exec_q is None: + raise dpctl.utils.ExecutionPlacementError + res_usm_type = dpctl.utils.get_coerced_usm_type( + [x.usm_type, indices.usm_type] + ) + + mode = _get_indexing_mode(mode) + + x_ndim = x.ndim + if axis is None: + if x_ndim > 1: + raise ValueError( + "`axis` cannot be `None` for array of dimension `{}`".format( + x_ndim + ) + ) + axis = 0 + + if x_ndim > 0: + axis = normalize_axis_index(operator.index(axis), x_ndim) + x_sh = x.shape + if x_sh[axis] == 0 and indices.size != 0: + raise IndexError("cannot take non-empty indices from an empty axis") + res_shape = x.shape[:axis] + indices.shape + x.shape[axis + 1 :] + else: + if axis != 0: + raise ValueError("`axis` must be 0 for an array of dimension 0.") + res_shape = indices.shape + + dt = x.dtype + + orig_out = out + if out is not None: + if not isinstance(out, dpt.usm_ndarray): + raise TypeError( + f"output array must be of usm_ndarray type, got {type(out)}" + ) + if not out.flags.writable: + raise ValueError("provided `out` array is read-only") + + if out.shape != res_shape: + raise ValueError( + "The shape of input and output arrays are inconsistent. " + f"Expected output shape is {res_shape}, got {out.shape}" + ) + if dt != out.dtype: + raise ValueError( + f"Output array of type {dt} is needed, got {out.dtype}" + ) + if dpctl.utils.get_execution_queue((exec_q, out.sycl_queue)) is None: + raise dpctl.utils.ExecutionPlacementError( + "Input and output allocation queues are not compatible" + ) + if ti._array_overlap(x, out): + out = dpt.empty_like(out) + else: + out = dpt.empty( + res_shape, dtype=dt, usm_type=res_usm_type, sycl_queue=exec_q + ) + + _manager = dpctl.utils.SequentialOrderManager[exec_q] + deps_ev = _manager.submitted_events + hev, take_ev = ti._take( + x, (indices,), out, axis, mode, sycl_queue=exec_q, depends=deps_ev + ) + _manager.add_event_pair(hev, take_ev) + + if not (orig_out is None or out is orig_out): + # Copy the out data from temporary buffer to original memory + ht_e_cpy, cpy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=out, dst=orig_out, sycl_queue=exec_q, depends=[take_ev] + ) + _manager.add_event_pair(ht_e_cpy, cpy_ev) + out = orig_out + + return out diff --git a/dpctl_ext/tensor/_numpy_helper.py b/dpctl_ext/tensor/_numpy_helper.py new file mode 100644 index 000000000000..4ad735823cb3 --- /dev/null +++ b/dpctl_ext/tensor/_numpy_helper.py @@ -0,0 +1,45 @@ +# ***************************************************************************** +# Copyright (c) 2026, Intel Corporation +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# - Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. 
+# - Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# - Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +# THE POSSIBILITY OF SUCH DAMAGE. +# ***************************************************************************** + + +import numpy as np + +_npver = np.lib.NumpyVersion(np.__version__) + +if _npver < "1.25.0": # pragma: no cover + from numpy import AxisError +else: + from numpy.exceptions import AxisError + +if _npver >= "2.0.0": + from numpy._core.numeric import normalize_axis_index, normalize_axis_tuple +else: # pragma: no cover + from numpy.core.numeric import normalize_axis_index, normalize_axis_tuple + + +__all__ = ["AxisError", "normalize_axis_index", "normalize_axis_tuple"] diff --git a/dpctl_ext/tensor/libtensor/include/kernels/constructors.hpp b/dpctl_ext/tensor/libtensor/include/kernels/constructors.hpp new file mode 100644 index 000000000000..22189ee3129c --- /dev/null +++ b/dpctl_ext/tensor/libtensor/include/kernels/constructors.hpp @@ -0,0 +1,303 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+// THE POSSIBILITY OF SUCH DAMAGE.
+//*****************************************************************************
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines kernels for tensor constructors.
+//===----------------------------------------------------------------------===//
+
+#pragma once
+#include <complex>
+#include <cstddef>
+#include <utility>
+#include <vector>
+
+#include <sycl/sycl.hpp>
+
+#include "dpctl_tensor_types.hpp"
+#include "utils/offset_utils.hpp"
+#include "utils/strided_iters.hpp"
+#include "utils/type_utils.hpp"
+
+namespace dpctl::tensor::kernels::constructors
+{
+
+using dpctl::tensor::ssize_t;
+
+/*!
+  @defgroup CtorKernels
+ */
+
+template <typename Ty, typename IndexerT>
+class full_strided_kernel;
+// template class eye_kernel;
+
+using namespace dpctl::tensor::offset_utils;
+
+/* ================ Full ================== */
+
+/*!
+ * @brief Function to submit kernel to fill given contiguous memory allocation
+ * with specified value.
+ *
+ * @param exec_q Sycl queue to which kernel is submitted for execution.
+ * @param nelems Length of the sequence
+ * @param fill_v Value to fill the array with
+ * @param dst_p Kernel accessible USM pointer to the start of array to be
+ * populated.
+ * @param depends List of events to wait for before starting computations, if
+ * any.
+ *
+ * @return Event to wait on to ensure that computation completes.
+ * @defgroup CtorKernels
+ */
+template <typename dstTy>
+sycl::event full_contig_impl(sycl::queue &q,
+                             std::size_t nelems,
+                             dstTy fill_v,
+                             char *dst_p,
+                             const std::vector<sycl::event> &depends)
+{
+    dpctl::tensor::type_utils::validate_type_for_device<dstTy>(q);
+    sycl::event fill_ev = q.submit([&](sycl::handler &cgh) {
+        cgh.depends_on(depends);
+        dstTy *p = reinterpret_cast<dstTy *>(dst_p);
+        cgh.fill(p, fill_v, nelems);
+    });
+
+    return fill_ev;
+}
+
+template <typename Ty, typename IndexerT>
+class FullStridedFunctor
+{
+private:
+    Ty *p = nullptr;
+    Ty fill_v;
+    IndexerT indexer;
+
+public:
+    FullStridedFunctor(Ty *p_, const Ty &fill_v_, const IndexerT &indexer_)
+        : p(p_), fill_v(fill_v_), indexer(indexer_)
+    {
+    }
+
+    void operator()(sycl::id<1> id) const
+    {
+        auto offset = indexer(id.get(0));
+        p[offset] = fill_v;
+    }
+};
+
+/*!
+ * @brief Function to submit kernel to fill given strided memory allocation
+ * with specified value.
+ *
+ * @param exec_q Sycl queue to which kernel is submitted for execution.
+ * @param nd Array dimensionality
+ * @param nelems Length of the sequence
+ * @param shape_strides Kernel accessible USM pointer to packed shape and
+ * strides of array.
+ * @param fill_v Value to fill the array with
+ * @param dst_p Kernel accessible USM pointer to the start of array to be
+ * populated.
+ * @param depends List of events to wait for before starting computations, if
+ * any.
+ *
+ * @return Event to wait on to ensure that computation completes.
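+ * @note Packing assumed here, inferred from the StridedIndexer usage in
+ * the implementation below: `shape_strides` holds `nd` shape entries
+ * followed by the `nd` strides of the destination array.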
+ * @defgroup CtorKernels
+ */
+template <typename dstTy>
+sycl::event full_strided_impl(sycl::queue &q,
+                              int nd,
+                              std::size_t nelems,
+                              const ssize_t *shape_strides,
+                              dstTy fill_v,
+                              char *dst_p,
+                              const std::vector<sycl::event> &depends)
+{
+    dpctl::tensor::type_utils::validate_type_for_device<dstTy>(q);
+
+    dstTy *dst_tp = reinterpret_cast<dstTy *>(dst_p);
+
+    using dpctl::tensor::offset_utils::StridedIndexer;
+    const StridedIndexer strided_indexer(nd, 0, shape_strides);
+
+    sycl::event fill_ev = q.submit([&](sycl::handler &cgh) {
+        cgh.depends_on(depends);
+
+        using KernelName = full_strided_kernel<dstTy, StridedIndexer>;
+        using Impl = FullStridedFunctor<dstTy, StridedIndexer>;
+
+        cgh.parallel_for<KernelName>(sycl::range<1>{nelems},
+                                     Impl(dst_tp, fill_v, strided_indexer));
+    });
+
+    return fill_ev;
+}
+
+/* =========================== Tril and triu ============================== */
+
+// define function type
+typedef sycl::event (*tri_fn_ptr_t)(sycl::queue &,
+                                    ssize_t,   // inner_range
+                                    ssize_t,   // outer_range
+                                    char *,    // src_data_ptr
+                                    char *,    // dst_data_ptr
+                                    ssize_t,   // nd
+                                    ssize_t *, // shape_and_strides
+                                    ssize_t,   // k
+                                    const std::vector<sycl::event> &,
+                                    const std::vector<sycl::event> &);
+
+/*!
+ * @brief Function to copy triangular matrices from source stack to destination
+ * stack.
+ *
+ * @param exec_q Sycl queue to which kernel is submitted for execution.
+ * @param inner_range Number of elements in each matrix.
+ * @param outer_range Number of matrices to copy.
+ * @param src_p Kernel accessible USM pointer for the source array.
+ * @param dst_p Kernel accessible USM pointer for the destination array.
+ * @param nd The array dimensionality of source and destination arrays.
+ * @param shape_and_strides Kernel accessible USM pointer to packed shape and
+ * strides of arrays.
+ * @param k Position of the diagonal above/below which to copy filling the rest
+ * with zero elements.
+ * @param depends List of events to wait for before starting computations, if
+ * any.
+ * @param additional_depends List of additional events to wait for before
+ * starting computations, if any.
+ *
+ * @return Event to wait on to ensure that computation completes.
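+ * @note Layout used by the implementation below: `shape_and_strides`
+ * packs `nd` common shape entries, then `nd` source strides, then `nd`
+ * destination strides (hence the `src_s = nd` and `dst_s = 2 * nd`
+ * offsets in `tri_impl`).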
+ * @defgroup CtorKernels + */ +template +class tri_kernel; +template +sycl::event tri_impl(sycl::queue &exec_q, + ssize_t inner_range, + ssize_t outer_range, + char *src_p, + char *dst_p, + ssize_t nd, + ssize_t *shape_and_strides, + ssize_t k, + const std::vector &depends, + const std::vector &additional_depends) +{ + static constexpr int d2 = 2; + ssize_t src_s = nd; + ssize_t dst_s = 2 * nd; + ssize_t nd_1 = nd - 1; + ssize_t nd_2 = nd - 2; + Ty *src = reinterpret_cast(src_p); + Ty *dst = reinterpret_cast(dst_p); + + dpctl::tensor::type_utils::validate_type_for_device(exec_q); + + sycl::event tri_ev = exec_q.submit([&](sycl::handler &cgh) { + cgh.depends_on(depends); + cgh.depends_on(additional_depends); + + cgh.parallel_for>( + sycl::range<1>(inner_range * outer_range), [=](sycl::id<1> idx) { + ssize_t outer_gid = idx[0] / inner_range; + ssize_t inner_gid = idx[0] - inner_range * outer_gid; + + ssize_t src_inner_offset = 0, dst_inner_offset = 0; + bool to_copy{false}; + + { + using dpctl::tensor::strides::CIndexer_array; + CIndexer_array indexer_i( + {shape_and_strides[nd_2], shape_and_strides[nd_1]}); + indexer_i.set(inner_gid); + const std::array &inner = indexer_i.get(); + src_inner_offset = + inner[0] * shape_and_strides[src_s + nd_2] + + inner[1] * shape_and_strides[src_s + nd_1]; + dst_inner_offset = + inner[0] * shape_and_strides[dst_s + nd_2] + + inner[1] * shape_and_strides[dst_s + nd_1]; + + if constexpr (upper) + to_copy = (inner[0] + k >= inner[1]); + else + to_copy = (inner[0] + k <= inner[1]); + } + + ssize_t src_offset = 0; + ssize_t dst_offset = 0; + { + using dpctl::tensor::strides::CIndexer_vector; + CIndexer_vector outer(nd - d2); + outer.get_displacement( + outer_gid, shape_and_strides, shape_and_strides + src_s, + shape_and_strides + dst_s, src_offset, dst_offset); + } + + src_offset += src_inner_offset; + dst_offset += dst_inner_offset; + + dst[dst_offset] = (to_copy) ? src[src_offset] : Ty(0); + }); + }); + return tri_ev; +} + +/*! + * @brief Factory to get function pointer of type `fnT` for data type `Ty`. + * @ingroup CtorKernels + */ +template +struct TrilGenericFactory +{ + fnT get() + { + fnT f = tri_impl; + return f; + } +}; + +/*! + * @brief Factory to get function pointer of type `fnT` for data type `Ty`. + * @ingroup CtorKernels + */ +template +struct TriuGenericFactory +{ + fnT get() + { + fnT f = tri_impl; + return f; + } +}; + +} // namespace dpctl::tensor::kernels::constructors diff --git a/dpctl_ext/tensor/libtensor/include/kernels/integer_advanced_indexing.hpp b/dpctl_ext/tensor/libtensor/include/kernels/integer_advanced_indexing.hpp new file mode 100644 index 000000000000..7be2b3ea8591 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/include/kernels/integer_advanced_indexing.hpp @@ -0,0 +1,418 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file defines kernels for advanced tensor index operations. +//===----------------------------------------------------------------------===// + +#pragma once +#include +#include +#include +#include + +#include + +#include "dpctl_tensor_types.hpp" +#include "utils/indexing_utils.hpp" +#include "utils/offset_utils.hpp" +#include "utils/type_utils.hpp" + +namespace dpctl::tensor::kernels::indexing +{ + +using dpctl::tensor::ssize_t; + +template +class TakeFunctor +{ +private: + const char *src_ = nullptr; + char *dst_ = nullptr; + char **ind_ = nullptr; + int k_ = 0; + std::size_t ind_nelems_ = 0; + const ssize_t *axes_shape_and_strides_ = nullptr; + OrthogIndexer orthog_strider; + IndicesIndexer ind_strider; + AxesIndexer axes_strider; + +public: + TakeFunctor(const char *src_cp, + char *dst_cp, + char **ind_cp, + int k, + std::size_t ind_nelems, + const ssize_t *axes_shape_and_strides, + const OrthogIndexer &orthog_strider_, + const IndicesIndexer &ind_strider_, + const AxesIndexer &axes_strider_) + : src_(src_cp), dst_(dst_cp), ind_(ind_cp), k_(k), + ind_nelems_(ind_nelems), + axes_shape_and_strides_(axes_shape_and_strides), + orthog_strider(orthog_strider_), ind_strider(ind_strider_), + axes_strider(axes_strider_) + { + } + + void operator()(sycl::id<1> id) const + { + const T *src = reinterpret_cast(src_); + T *dst = reinterpret_cast(dst_); + + ssize_t i_orthog = id / ind_nelems_; + ssize_t i_along = id - (i_orthog * ind_nelems_); + + auto orthog_offsets = orthog_strider(i_orthog); + + ssize_t src_offset = orthog_offsets.get_first_offset(); + ssize_t dst_offset = orthog_offsets.get_second_offset(); + + static constexpr ProjectorT proj{}; + for (int axis_idx = 0; axis_idx < k_; ++axis_idx) { + indT *ind_data = reinterpret_cast(ind_[axis_idx]); + + ssize_t ind_offset = ind_strider(i_along, axis_idx); + // proj produces an index in the range of the given axis + ssize_t projected_idx = + proj(axes_shape_and_strides_[axis_idx], ind_data[ind_offset]); + src_offset += + projected_idx * axes_shape_and_strides_[k_ + axis_idx]; + } + + dst_offset += axes_strider(i_along); + + dst[dst_offset] = src[src_offset]; + } +}; + +template +class take_kernel; + +typedef sycl::event (*take_fn_ptr_t)(sycl::queue &, + std::size_t, + std::size_t, + int, + int, + int, + const ssize_t *, + const ssize_t *, + const ssize_t *, + const char 
*, + char *, + char **, + ssize_t, + ssize_t, + const ssize_t *, + const std::vector &); + +template +sycl::event take_impl(sycl::queue &q, + std::size_t orthog_nelems, + std::size_t ind_nelems, + int nd, + int ind_nd, + int k, + const ssize_t *orthog_shape_and_strides, + const ssize_t *axes_shape_and_strides, + const ssize_t *ind_shape_and_strides, + const char *src_p, + char *dst_p, + char **ind_p, + ssize_t src_offset, + ssize_t dst_offset, + const ssize_t *ind_offsets, + const std::vector &depends) +{ + dpctl::tensor::type_utils::validate_type_for_device(q); + + sycl::event take_ev = q.submit([&](sycl::handler &cgh) { + cgh.depends_on(depends); + + using OrthogIndexerT = + dpctl::tensor::offset_utils::TwoOffsets_StridedIndexer; + const OrthogIndexerT orthog_indexer{nd, src_offset, dst_offset, + orthog_shape_and_strides}; + + using NthStrideIndexerT = dpctl::tensor::offset_utils::NthStrideOffset; + const NthStrideIndexerT indices_indexer{ind_nd, ind_offsets, + ind_shape_and_strides}; + + using AxesIndexerT = dpctl::tensor::offset_utils::StridedIndexer; + const AxesIndexerT axes_indexer{ind_nd, 0, + axes_shape_and_strides + (2 * k)}; + + using KernelName = + take_kernel; + + const std::size_t gws = orthog_nelems * ind_nelems; + + cgh.parallel_for( + sycl::range<1>(gws), + TakeFunctor( + src_p, dst_p, ind_p, k, ind_nelems, axes_shape_and_strides, + orthog_indexer, indices_indexer, axes_indexer)); + }); + + return take_ev; +} + +template +class PutFunctor +{ +private: + char *dst_ = nullptr; + const char *val_ = nullptr; + char **ind_ = nullptr; + int k_ = 0; + std::size_t ind_nelems_ = 0; + const ssize_t *axes_shape_and_strides_ = nullptr; + OrthogIndexer orthog_strider; + IndicesIndexer ind_strider; + AxesIndexer axes_strider; + +public: + PutFunctor(char *dst_cp, + const char *val_cp, + char **ind_cp, + int k, + std::size_t ind_nelems, + const ssize_t *axes_shape_and_strides, + const OrthogIndexer &orthog_strider_, + const IndicesIndexer &ind_strider_, + const AxesIndexer &axes_strider_) + : dst_(dst_cp), val_(val_cp), ind_(ind_cp), k_(k), + ind_nelems_(ind_nelems), + axes_shape_and_strides_(axes_shape_and_strides), + orthog_strider(orthog_strider_), ind_strider(ind_strider_), + axes_strider(axes_strider_) + { + } + + void operator()(sycl::id<1> id) const + { + T *dst = reinterpret_cast(dst_); + const T *val = reinterpret_cast(val_); + + ssize_t i_orthog = id / ind_nelems_; + ssize_t i_along = id - (i_orthog * ind_nelems_); + + auto orthog_offsets = orthog_strider(i_orthog); + + ssize_t dst_offset = orthog_offsets.get_first_offset(); + ssize_t val_offset = orthog_offsets.get_second_offset(); + + static constexpr ProjectorT proj{}; + for (int axis_idx = 0; axis_idx < k_; ++axis_idx) { + indT *ind_data = reinterpret_cast(ind_[axis_idx]); + + ssize_t ind_offset = ind_strider(i_along, axis_idx); + + // proj produces an index in the range of the given axis + ssize_t projected_idx = + proj(axes_shape_and_strides_[axis_idx], ind_data[ind_offset]); + dst_offset += + projected_idx * axes_shape_and_strides_[k_ + axis_idx]; + } + + val_offset += axes_strider(i_along); + + dst[dst_offset] = val[val_offset]; + } +}; + +template +class put_kernel; + +typedef sycl::event (*put_fn_ptr_t)(sycl::queue &, + std::size_t, + std::size_t, + int, + int, + int, + const ssize_t *, + const ssize_t *, + const ssize_t *, + char *, + const char *, + char **, + ssize_t, + ssize_t, + const ssize_t *, + const std::vector &); + +template +sycl::event put_impl(sycl::queue &q, + std::size_t orthog_nelems, + std::size_t 
ind_nelems, + int nd, + int ind_nd, + int k, + const ssize_t *orthog_shape_and_strides, + const ssize_t *axes_shape_and_strides, + const ssize_t *ind_shape_and_strides, + char *dst_p, + const char *val_p, + char **ind_p, + ssize_t dst_offset, + ssize_t val_offset, + const ssize_t *ind_offsets, + const std::vector &depends) +{ + dpctl::tensor::type_utils::validate_type_for_device(q); + + sycl::event put_ev = q.submit([&](sycl::handler &cgh) { + cgh.depends_on(depends); + + using OrthogIndexerT = + dpctl::tensor::offset_utils::TwoOffsets_StridedIndexer; + const OrthogIndexerT orthog_indexer{nd, dst_offset, val_offset, + orthog_shape_and_strides}; + + using NthStrideIndexerT = dpctl::tensor::offset_utils::NthStrideOffset; + const NthStrideIndexerT indices_indexer{ind_nd, ind_offsets, + ind_shape_and_strides}; + + using AxesIndexerT = dpctl::tensor::offset_utils::StridedIndexer; + const AxesIndexerT axes_indexer{ind_nd, 0, + axes_shape_and_strides + (2 * k)}; + + using KernelName = + put_kernel; + + const std::size_t gws = orthog_nelems * ind_nelems; + + cgh.parallel_for( + sycl::range<1>(gws), + PutFunctor( + dst_p, val_p, ind_p, k, ind_nelems, axes_shape_and_strides, + orthog_indexer, indices_indexer, axes_indexer)); + }); + + return put_ev; +} + +template +struct TakeWrapFactory +{ + fnT get() + { + if constexpr (std::is_integral::value && + !std::is_same::value) { + using dpctl::tensor::indexing_utils::WrapIndex; + fnT fn = take_impl, T, indT>; + return fn; + } + else { + fnT fn = nullptr; + return fn; + } + } +}; + +template +struct TakeClipFactory +{ + fnT get() + { + if constexpr (std::is_integral::value && + !std::is_same::value) { + using dpctl::tensor::indexing_utils::ClipIndex; + fnT fn = take_impl, T, indT>; + return fn; + } + else { + fnT fn = nullptr; + return fn; + } + } +}; + +template +struct PutWrapFactory +{ + fnT get() + { + if constexpr (std::is_integral::value && + !std::is_same::value) { + using dpctl::tensor::indexing_utils::WrapIndex; + fnT fn = put_impl, T, indT>; + return fn; + } + else { + fnT fn = nullptr; + return fn; + } + } +}; + +template +struct PutClipFactory +{ + fnT get() + { + if constexpr (std::is_integral::value && + !std::is_same::value) { + using dpctl::tensor::indexing_utils::ClipIndex; + fnT fn = put_impl, T, indT>; + return fn; + } + else { + fnT fn = nullptr; + return fn; + } + } +}; + +} // namespace dpctl::tensor::kernels::indexing diff --git a/dpctl_ext/tensor/libtensor/source/full_ctor.cpp b/dpctl_ext/tensor/libtensor/source/full_ctor.cpp new file mode 100644 index 000000000000..aef57836666e --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/full_ctor.cpp @@ -0,0 +1,311 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===--------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_impl extensions +//===--------------------------------------------------------------------===// + +#include +#include +#include +#include +#include +#include + +#include + +#include "dpnp4pybind11.hpp" +#include +#include + +#include "kernels/constructors.hpp" +#include "utils/output_validation.hpp" +#include "utils/type_dispatch.hpp" +#include "utils/type_utils.hpp" + +#include "full_ctor.hpp" + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +namespace dpctl::tensor::py_internal +{ + +using dpctl::utils::keep_args_alive; + +typedef sycl::event (*full_contig_fn_ptr_t)(sycl::queue &, + std::size_t, + const py::object &, + char *, + const std::vector &); + +/*! + * @brief Function to submit kernel to fill given contiguous memory allocation + * with specified value. + * + * @param exec_q Sycl queue to which kernel is submitted for execution. + * @param nelems Length of the sequence + * @param py_value Python object representing the value to fill the array with. + * Must be convertible to `dstTy`. + * @param dst_p Kernel accessible USM pointer to the start of array to be + * populated. + * @param depends List of events to wait for before starting computations, if + * any. + * + * @return Event to wait on to ensure that computation completes. 
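+ * @note The implementation below special-cases a fill value whose bit
+ * pattern is all zeros: such fills are submitted as `cgh.memset` calls
+ * rather than a fill kernel.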
+ * @defgroup CtorKernels + */ +template +sycl::event full_contig_impl(sycl::queue &exec_q, + std::size_t nelems, + const py::object &py_value, + char *dst_p, + const std::vector &depends) +{ + dstTy fill_v = py::cast(py_value); + + sycl::event fill_ev; + + if constexpr (sizeof(dstTy) == sizeof(char)) { + const auto memset_val = sycl::bit_cast(fill_v); + fill_ev = exec_q.submit([&](sycl::handler &cgh) { + cgh.depends_on(depends); + + cgh.memset(reinterpret_cast(dst_p), memset_val, + nelems * sizeof(dstTy)); + }); + } + else { + bool is_zero = false; + if constexpr (sizeof(dstTy) == 1) { + is_zero = (std::uint8_t{0} == sycl::bit_cast(fill_v)); + } + else if constexpr (sizeof(dstTy) == 2) { + is_zero = + (std::uint16_t{0} == sycl::bit_cast(fill_v)); + } + else if constexpr (sizeof(dstTy) == 4) { + is_zero = + (std::uint32_t{0} == sycl::bit_cast(fill_v)); + } + else if constexpr (sizeof(dstTy) == 8) { + is_zero = + (std::uint64_t{0} == sycl::bit_cast(fill_v)); + } + else if constexpr (sizeof(dstTy) == 16) { + struct UInt128 + { + + constexpr UInt128() : v1{}, v2{} {} + UInt128(const UInt128 &) = default; + + operator bool() const + { + return bool(!v1) && bool(!v2); + } + + std::uint64_t v1; + std::uint64_t v2; + }; + is_zero = static_cast(sycl::bit_cast(fill_v)); + } + + if (is_zero) { + static constexpr int memset_val = 0; + fill_ev = exec_q.submit([&](sycl::handler &cgh) { + cgh.depends_on(depends); + + cgh.memset(reinterpret_cast(dst_p), memset_val, + nelems * sizeof(dstTy)); + }); + } + else { + using dpctl::tensor::kernels::constructors::full_contig_impl; + + fill_ev = + full_contig_impl(exec_q, nelems, fill_v, dst_p, depends); + } + } + + return fill_ev; +} + +template +struct FullContigFactory +{ + fnT get() + { + fnT f = full_contig_impl; + return f; + } +}; + +typedef sycl::event (*full_strided_fn_ptr_t)(sycl::queue &, + int, + std::size_t, + py::ssize_t *, + const py::object &, + char *, + const std::vector &); + +/*! + * @brief Function to submit kernel to fill given strided memory allocation + * with specified value. + * + * @param exec_q Sycl queue to which kernel is submitted for execution. + * @param nd Array dimensionality + * @param nelems Length of the sequence + * @param shape_strides Kernel accessible USM pointer to packed shape and + * strides of array. + * @param py_value Python object representing the value to fill the array with. + * Must be convertible to `dstTy`. + * @param dst_p Kernel accessible USM pointer to the start of array to be + * populated. + * @param depends List of events to wait for before starting computations, if + * any. + * + * @return Event to wait on to ensure that computation completes. 
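+ * @note This overload converts `py_value` with `py::cast` and delegates
+ * to the kernel-level `full_strided_impl` defined in
+ * kernels/constructors.hpp.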
+ * @defgroup CtorKernels + */ +template +sycl::event full_strided_impl(sycl::queue &exec_q, + int nd, + std::size_t nelems, + py::ssize_t *shape_strides, + const py::object &py_value, + char *dst_p, + const std::vector &depends) +{ + dstTy fill_v = py::cast(py_value); + + using dpctl::tensor::kernels::constructors::full_strided_impl; + sycl::event fill_ev = full_strided_impl( + exec_q, nd, nelems, shape_strides, fill_v, dst_p, depends); + + return fill_ev; +} + +template +struct FullStridedFactory +{ + fnT get() + { + fnT f = full_strided_impl; + return f; + } +}; + +static full_contig_fn_ptr_t full_contig_dispatch_vector[td_ns::num_types]; +static full_strided_fn_ptr_t full_strided_dispatch_vector[td_ns::num_types]; + +std::pair + usm_ndarray_full(const py::object &py_value, + const dpctl::tensor::usm_ndarray &dst, + sycl::queue &exec_q, + const std::vector &depends) +{ + // py_value should be coercible into data type of dst + + py::ssize_t dst_nelems = dst.get_size(); + + if (dst_nelems == 0) { + // nothing to do + return std::make_pair(sycl::event(), sycl::event()); + } + + if (!dpctl::utils::queues_are_compatible(exec_q, {dst})) { + throw py::value_error( + "Execution queue is not compatible with the allocation queue"); + } + + dpctl::tensor::validation::CheckWritable::throw_if_not_writable(dst); + + auto array_types = td_ns::usm_ndarray_types(); + int dst_typenum = dst.get_typenum(); + int dst_typeid = array_types.typenum_to_lookup_id(dst_typenum); + + char *dst_data = dst.get_data(); + + if (dst_nelems == 1 || dst.is_c_contiguous() || dst.is_f_contiguous()) { + auto fn = full_contig_dispatch_vector[dst_typeid]; + + sycl::event full_contig_event = + fn(exec_q, static_cast(dst_nelems), py_value, dst_data, + depends); + + return std::make_pair( + keep_args_alive(exec_q, {dst}, {full_contig_event}), + full_contig_event); + } + else { + int nd = dst.get_ndim(); + auto const &dst_shape = dst.get_shape_vector(); + auto const &dst_strides = dst.get_strides_vector(); + + auto fn = full_strided_dispatch_vector[dst_typeid]; + + std::vector host_task_events; + host_task_events.reserve(2); + using dpctl::tensor::offset_utils::device_allocate_and_pack; + auto ptr_size_event_tuple = device_allocate_and_pack( + exec_q, host_task_events, dst_shape, dst_strides); + auto shape_strides_owner = std::move(std::get<0>(ptr_size_event_tuple)); + const sycl::event ©_shape_ev = std::get<2>(ptr_size_event_tuple); + py::ssize_t *shape_strides = shape_strides_owner.get(); + + const sycl::event &full_strided_ev = + fn(exec_q, nd, dst_nelems, shape_strides, py_value, dst_data, + {copy_shape_ev}); + + // free shape_strides + const auto &temporaries_cleanup_ev = + dpctl::tensor::alloc_utils::async_smart_free( + exec_q, {full_strided_ev}, shape_strides_owner); + host_task_events.push_back(temporaries_cleanup_ev); + + return std::make_pair(keep_args_alive(exec_q, {dst}, host_task_events), + full_strided_ev); + } +} + +void init_full_ctor_dispatch_vectors(void) +{ + using namespace td_ns; + + DispatchVectorBuilder + dvb1; + dvb1.populate_dispatch_vector(full_contig_dispatch_vector); + + DispatchVectorBuilder + dvb2; + dvb2.populate_dispatch_vector(full_strided_dispatch_vector); +} + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/full_ctor.hpp b/dpctl_ext/tensor/libtensor/source/full_ctor.hpp new file mode 100644 index 000000000000..18c15de87a40 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/full_ctor.hpp @@ -0,0 +1,57 @@ 
diff --git a/dpctl_ext/tensor/libtensor/source/full_ctor.hpp b/dpctl_ext/tensor/libtensor/source/full_ctor.hpp
new file mode 100644
index 000000000000..18c15de87a40
--- /dev/null
+++ b/dpctl_ext/tensor/libtensor/source/full_ctor.hpp
@@ -0,0 +1,57 @@
+//*****************************************************************************
+// Copyright (c) 2026, Intel Corporation
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// - Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// - Neither the name of the copyright holder nor the names of its contributors
+// may be used to endorse or promote products derived from this software
+// without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+// THE POSSIBILITY OF SUCH DAMAGE.
+//*****************************************************************************
+//
+//===--------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines functions of dpctl.tensor._tensor_impl extensions
+//===--------------------------------------------------------------------===//
+
+#pragma once
+#include <utility>
+#include <vector>
+
+#include <sycl/sycl.hpp>
+
+#include "dpnp4pybind11.hpp"
+#include <pybind11/pybind11.h>
+
+namespace py = pybind11;
+
+namespace dpctl::tensor::py_internal
+{
+
+extern std::pair<sycl::event, sycl::event>
+    usm_ndarray_full(const py::object &py_value,
+                     const dpctl::tensor::usm_ndarray &dst,
+                     sycl::queue &exec_q,
+                     const std::vector<sycl::event> &depends = {});
+
+extern void init_full_ctor_dispatch_vectors(void);
+
+} // namespace dpctl::tensor::py_internal
diff --git a/dpctl_ext/tensor/libtensor/source/integer_advanced_indexing.cpp b/dpctl_ext/tensor/libtensor/source/integer_advanced_indexing.cpp
new file mode 100644
index 000000000000..925cc2e895ed
--- /dev/null
+++ b/dpctl_ext/tensor/libtensor/source/integer_advanced_indexing.cpp
@@ -0,0 +1,817 @@
+//*****************************************************************************
+// Copyright (c) 2026, Intel Corporation
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// - Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file defines implementation functions of dpctl.tensor.take and +/// dpctl.tensor.put +//===----------------------------------------------------------------------===// + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "dpnp4pybind11.hpp" +#include +#include +#include + +#include "kernels/integer_advanced_indexing.hpp" +#include "utils/memory_overlap.hpp" +#include "utils/output_validation.hpp" +#include "utils/sycl_alloc_utils.hpp" +#include "utils/type_dispatch.hpp" +#include "utils/type_utils.hpp" + +#include "integer_advanced_indexing.hpp" + +#define INDEXING_MODES 2 +#define WRAP_MODE 0 +#define CLIP_MODE 1 + +namespace dpctl::tensor::py_internal +{ + +namespace td_ns = dpctl::tensor::type_dispatch; + +using dpctl::tensor::kernels::indexing::put_fn_ptr_t; +using dpctl::tensor::kernels::indexing::take_fn_ptr_t; + +static take_fn_ptr_t take_dispatch_table[INDEXING_MODES][td_ns::num_types] + [td_ns::num_types]; + +static put_fn_ptr_t put_dispatch_table[INDEXING_MODES][td_ns::num_types] + [td_ns::num_types]; + +namespace py = pybind11; + +using dpctl::utils::keep_args_alive; + +std::vector + _populate_kernel_params(sycl::queue &exec_q, + std::vector &host_task_events, + char **device_ind_ptrs, + py::ssize_t *device_ind_sh_st, + py::ssize_t *device_ind_offsets, + py::ssize_t *device_orthog_sh_st, + py::ssize_t *device_along_sh_st, + const py::ssize_t *inp_shape, + const py::ssize_t *arr_shape, + std::vector &inp_strides, + std::vector &arr_strides, + std::vector &ind_sh_sts, + std::vector &ind_ptrs, + std::vector &ind_offsets, + int axis_start, + int k, + int ind_nd, + int inp_nd, + int orthog_sh_elems, + int ind_sh_elems) +{ + + using usm_host_allocator_T = + dpctl::tensor::alloc_utils::usm_host_allocator; + using ptrT = std::vector; + + usm_host_allocator_T ptr_allocator(exec_q); + std::shared_ptr host_ind_ptrs_shp = + std::make_shared(k, ptr_allocator); + + using usm_host_allocatorT = + dpctl::tensor::alloc_utils::usm_host_allocator; + using shT = std::vector; + + usm_host_allocatorT sz_allocator(exec_q); + std::shared_ptr host_ind_sh_st_shp = + std::make_shared(ind_sh_elems * (k + 1), sz_allocator); + + std::shared_ptr host_ind_offsets_shp = + std::make_shared(k, sz_allocator); + + std::shared_ptr host_orthog_sh_st_shp = + 
std::make_shared(3 * orthog_sh_elems, sz_allocator); + + std::shared_ptr host_along_sh_st_shp = + std::make_shared(2 * (k + ind_sh_elems), sz_allocator); + + std::copy(ind_sh_sts.begin(), ind_sh_sts.end(), + host_ind_sh_st_shp->begin()); + std::copy(ind_ptrs.begin(), ind_ptrs.end(), host_ind_ptrs_shp->begin()); + std::copy(ind_offsets.begin(), ind_offsets.end(), + host_ind_offsets_shp->begin()); + + const sycl::event &device_ind_ptrs_copy_ev = exec_q.copy( + host_ind_ptrs_shp->data(), device_ind_ptrs, host_ind_ptrs_shp->size()); + + const sycl::event &device_ind_sh_st_copy_ev = + exec_q.copy(host_ind_sh_st_shp->data(), device_ind_sh_st, + host_ind_sh_st_shp->size()); + + const sycl::event &device_ind_offsets_copy_ev = exec_q.copy( + host_ind_offsets_shp->data(), device_ind_offsets, + host_ind_offsets_shp->size()); + + int orthog_nd = inp_nd - k; + + if (orthog_nd > 0) { + if (axis_start > 0) { + std::copy(inp_shape, inp_shape + axis_start, + host_orthog_sh_st_shp->begin()); + std::copy(inp_strides.begin(), inp_strides.begin() + axis_start, + host_orthog_sh_st_shp->begin() + orthog_sh_elems); + std::copy(arr_strides.begin(), arr_strides.begin() + axis_start, + host_orthog_sh_st_shp->begin() + 2 * orthog_sh_elems); + } + if (inp_nd > (axis_start + k)) { + std::copy(inp_shape + axis_start + k, inp_shape + inp_nd, + host_orthog_sh_st_shp->begin() + axis_start); + std::copy(inp_strides.begin() + axis_start + k, inp_strides.end(), + host_orthog_sh_st_shp->begin() + orthog_sh_elems + + axis_start); + + std::copy(arr_strides.begin() + axis_start + ind_nd, + arr_strides.end(), + host_orthog_sh_st_shp->begin() + 2 * orthog_sh_elems + + axis_start); + } + } + + if (inp_nd > 0) { + std::copy(inp_shape + axis_start, inp_shape + axis_start + k, + host_along_sh_st_shp->begin()); + + std::copy(inp_strides.begin() + axis_start, + inp_strides.begin() + axis_start + k, + host_along_sh_st_shp->begin() + k); + } + + if (ind_nd > 0) { + std::copy(arr_shape + axis_start, arr_shape + axis_start + ind_nd, + host_along_sh_st_shp->begin() + 2 * k); + std::copy(arr_strides.begin() + axis_start, + arr_strides.begin() + axis_start + ind_nd, + host_along_sh_st_shp->begin() + 2 * k + ind_nd); + } + + const sycl::event &device_orthog_sh_st_copy_ev = exec_q.copy( + host_orthog_sh_st_shp->data(), device_orthog_sh_st, + host_orthog_sh_st_shp->size()); + + const sycl::event &device_along_sh_st_copy_ev = exec_q.copy( + host_along_sh_st_shp->data(), device_along_sh_st, + host_along_sh_st_shp->size()); + + const sycl::event &shared_ptr_cleanup_ev = + exec_q.submit([&](sycl::handler &cgh) { + cgh.depends_on({device_along_sh_st_copy_ev, + device_orthog_sh_st_copy_ev, + device_ind_offsets_copy_ev, + device_ind_sh_st_copy_ev, device_ind_ptrs_copy_ev}); + cgh.host_task( + [host_ind_offsets_shp = std::move(host_ind_offsets_shp), + host_ind_sh_st_shp = std::move(host_ind_sh_st_shp), + host_ind_ptrs_shp = std::move(host_ind_ptrs_shp), + host_orthog_sh_st_shp = std::move(host_orthog_sh_st_shp), + host_along_sh_st_shp = std::move(host_along_sh_st_shp)] {}); + }); + host_task_events.push_back(shared_ptr_cleanup_ev); + + std::vector sh_st_pack_deps{ + device_ind_ptrs_copy_ev, device_ind_sh_st_copy_ev, + device_ind_offsets_copy_ev, device_orthog_sh_st_copy_ev, + device_along_sh_st_copy_ev}; + return sh_st_pack_deps; +} + +/* Utility to parse python object py_ind into vector of `usm_ndarray`s */ +std::vector parse_py_ind(const sycl::queue &q, + const py::object &py_ind) +{ + std::size_t ind_count = py::len(py_ind); + std::vector res; + 
res.reserve(ind_count); + + bool nd_is_known = false; + int nd = -1; + for (std::size_t i = 0; i < ind_count; ++i) { + py::object el_i = py_ind[py::cast(i)]; + dpctl::tensor::usm_ndarray arr_i = + py::cast(el_i); + if (!dpctl::utils::queues_are_compatible(q, {arr_i})) { + throw py::value_error("Index allocation queue is not compatible " + "with execution queue"); + } + if (nd_is_known) { + if (nd != arr_i.get_ndim()) { + throw py::value_error( + "Indices must have the same number of dimensions."); + } + } + else { + nd_is_known = true; + nd = arr_i.get_ndim(); + } + res.push_back(arr_i); + } + + return res; +} + +std::pair + usm_ndarray_take(const dpctl::tensor::usm_ndarray &src, + const py::object &py_ind, + const dpctl::tensor::usm_ndarray &dst, + int axis_start, + std::uint8_t mode, + sycl::queue &exec_q, + const std::vector &depends) +{ + std::vector ind = parse_py_ind(exec_q, py_ind); + + int k = ind.size(); + + if (k == 0) { + throw py::value_error("List of indices is empty."); + } + + if (axis_start < 0) { + throw py::value_error("Axis cannot be negative."); + } + + if (mode != 0 && mode != 1) { + throw py::value_error("Mode must be 0 or 1."); + } + + dpctl::tensor::validation::CheckWritable::throw_if_not_writable(dst); + + const dpctl::tensor::usm_ndarray ind_rep = ind[0]; + + int src_nd = src.get_ndim(); + int dst_nd = dst.get_ndim(); + int ind_nd = ind_rep.get_ndim(); + + auto sh_elems = std::max(src_nd, 1); + + if (axis_start + k > sh_elems) { + throw py::value_error("Axes are out of range for array of dimension " + + std::to_string(src_nd)); + } + if (src_nd == 0) { + if (dst_nd != ind_nd) { + throw py::value_error( + "Destination is not of appropriate dimension for take kernel."); + } + } + else { + if (dst_nd != (src_nd - k + ind_nd)) { + throw py::value_error( + "Destination is not of appropriate dimension for take kernel."); + } + } + + const py::ssize_t *src_shape = src.get_shape_raw(); + const py::ssize_t *dst_shape = dst.get_shape_raw(); + + bool orthog_shapes_equal(true); + std::size_t orthog_nelems(1); + for (int i = 0; i < (src_nd - k); ++i) { + auto idx1 = (i < axis_start) ? i : i + k; + auto idx2 = (i < axis_start) ? 
i : i + ind_nd; + + orthog_nelems *= static_cast(src_shape[idx1]); + orthog_shapes_equal = + orthog_shapes_equal && (src_shape[idx1] == dst_shape[idx2]); + } + + if (!orthog_shapes_equal) { + throw py::value_error( + "Axes of basic indices are not of matching shapes."); + } + + if (orthog_nelems == 0) { + return std::make_pair(sycl::event{}, sycl::event{}); + } + + char *src_data = src.get_data(); + char *dst_data = dst.get_data(); + + if (!dpctl::utils::queues_are_compatible(exec_q, {src, dst})) { + throw py::value_error( + "Execution queue is not compatible with allocation queues"); + } + + auto const &overlap = dpctl::tensor::overlap::MemoryOverlap(); + if (overlap(src, dst)) { + throw py::value_error("Array memory overlap."); + } + + py::ssize_t src_offset = py::ssize_t(0); + py::ssize_t dst_offset = py::ssize_t(0); + + int src_typenum = src.get_typenum(); + int dst_typenum = dst.get_typenum(); + + auto array_types = td_ns::usm_ndarray_types(); + int src_type_id = array_types.typenum_to_lookup_id(src_typenum); + int dst_type_id = array_types.typenum_to_lookup_id(dst_typenum); + + if (src_type_id != dst_type_id) { + throw py::type_error("Array data types are not the same."); + } + + const py::ssize_t *ind_shape = ind_rep.get_shape_raw(); + + int ind_typenum = ind_rep.get_typenum(); + int ind_type_id = array_types.typenum_to_lookup_id(ind_typenum); + + std::size_t ind_nelems(1); + for (int i = 0; i < ind_nd; ++i) { + ind_nelems *= static_cast(ind_shape[i]); + + if (!(ind_shape[i] == dst_shape[axis_start + i])) { + throw py::value_error( + "Indices shape does not match shape of axis in destination."); + } + } + + dpctl::tensor::validation::AmpleMemory::throw_if_not_ample( + dst, orthog_nelems * ind_nelems); + + int ind_sh_elems = std::max(ind_nd, 1); + + std::vector ind_ptrs; + ind_ptrs.reserve(k); + + std::vector ind_offsets; + ind_offsets.reserve(k); + + std::vector ind_sh_sts((k + 1) * ind_sh_elems, 0); + if (ind_nd > 0) { + std::copy(ind_shape, ind_shape + ind_nd, ind_sh_sts.begin()); + } + for (int i = 0; i < k; ++i) { + dpctl::tensor::usm_ndarray ind_ = ind[i]; + + if (!dpctl::utils::queues_are_compatible(exec_q, {ind_})) { + throw py::value_error( + "Execution queue is not compatible with allocation queues"); + } + + // ndim, type, and shape are checked against the first array + if (i > 0) { + if (!(ind_.get_ndim() == ind_nd)) { + throw py::value_error("Index dimensions are not the same"); + } + + if (!(ind_type_id == + array_types.typenum_to_lookup_id(ind_.get_typenum()))) { + throw py::type_error( + "Indices array data types are not all the same."); + } + + const py::ssize_t *ind_shape_ = ind_.get_shape_raw(); + for (int dim = 0; dim < ind_nd; ++dim) { + if (!(ind_shape[dim] == ind_shape_[dim])) { + throw py::value_error("Indices shapes are not all equal."); + } + } + } + + // check for overlap with destination + if (overlap(dst, ind_)) { + throw py::value_error( + "Arrays index overlapping segments of memory"); + } + + char *ind_data = ind_.get_data(); + + // strides are initialized to 0 for 0D indices, so skip here + if (ind_nd > 0) { + auto ind_strides = ind_.get_strides_vector(); + std::copy(ind_strides.begin(), ind_strides.end(), + ind_sh_sts.begin() + (i + 1) * ind_nd); + } + + ind_ptrs.push_back(ind_data); + ind_offsets.push_back(py::ssize_t(0)); + } + + if (ind_nelems == 0) { + return std::make_pair(sycl::event{}, sycl::event{}); + } + + auto packed_ind_ptrs_owner = + dpctl::tensor::alloc_utils::smart_malloc_device(k, exec_q); + char **packed_ind_ptrs = 
packed_ind_ptrs_owner.get(); + + // rearrange to past where indices shapes are checked + // packed_ind_shapes_strides = [ind_shape, + // ind[0] strides, + // ..., + // ind[k] strides] + auto packed_ind_shapes_strides_owner = + dpctl::tensor::alloc_utils::smart_malloc_device( + (k + 1) * ind_sh_elems, exec_q); + py::ssize_t *packed_ind_shapes_strides = + packed_ind_shapes_strides_owner.get(); + + auto packed_ind_offsets_owner = + dpctl::tensor::alloc_utils::smart_malloc_device(k, exec_q); + py::ssize_t *packed_ind_offsets = packed_ind_offsets_owner.get(); + + int orthog_sh_elems = std::max(src_nd - k, 1); + + // packed_shapes_strides = [src_shape[:axis] + src_shape[axis+k:], + // src_strides[:axis] + src_strides[axis+k:], + // dst_strides[:axis] + + // dst_strides[axis+ind.ndim:]] + auto packed_shapes_strides_owner = + dpctl::tensor::alloc_utils::smart_malloc_device( + 3 * orthog_sh_elems, exec_q); + py::ssize_t *packed_shapes_strides = packed_shapes_strides_owner.get(); + + // packed_axes_shapes_strides = [src_shape[axis:axis+k], + // src_strides[axis:axis+k], + // dst_shape[axis:axis+ind.ndim], + // dst_strides[axis:axis+ind.ndim]] + auto packed_axes_shapes_strides_owner = + dpctl::tensor::alloc_utils::smart_malloc_device( + 2 * (k + ind_sh_elems), exec_q); + py::ssize_t *packed_axes_shapes_strides = + packed_axes_shapes_strides_owner.get(); + + auto src_strides = src.get_strides_vector(); + auto dst_strides = dst.get_strides_vector(); + + std::vector host_task_events; + host_task_events.reserve(2); + + std::vector pack_deps = _populate_kernel_params( + exec_q, host_task_events, packed_ind_ptrs, packed_ind_shapes_strides, + packed_ind_offsets, packed_shapes_strides, packed_axes_shapes_strides, + src_shape, dst_shape, src_strides, dst_strides, ind_sh_sts, ind_ptrs, + ind_offsets, axis_start, k, ind_nd, src_nd, orthog_sh_elems, + ind_sh_elems); + + std::vector all_deps; + all_deps.reserve(depends.size() + pack_deps.size()); + all_deps.insert(std::end(all_deps), std::begin(pack_deps), + std::end(pack_deps)); + all_deps.insert(std::end(all_deps), std::begin(depends), std::end(depends)); + + auto fn = take_dispatch_table[mode][src_type_id][ind_type_id]; + + if (fn == nullptr) { + sycl::event::wait(host_task_events); + throw std::runtime_error("Indices must be integer type, got " + + std::to_string(ind_type_id)); + } + + sycl::event take_generic_ev = + fn(exec_q, orthog_nelems, ind_nelems, orthog_sh_elems, ind_sh_elems, k, + packed_shapes_strides, packed_axes_shapes_strides, + packed_ind_shapes_strides, src_data, dst_data, packed_ind_ptrs, + src_offset, dst_offset, packed_ind_offsets, all_deps); + + // free packed temporaries + sycl::event temporaries_cleanup_ev = + dpctl::tensor::alloc_utils::async_smart_free( + exec_q, {take_generic_ev}, packed_shapes_strides_owner, + packed_axes_shapes_strides_owner, packed_ind_shapes_strides_owner, + packed_ind_ptrs_owner, packed_ind_offsets_owner); + host_task_events.push_back(temporaries_cleanup_ev); + + sycl::event arg_cleanup_ev = + keep_args_alive(exec_q, {src, py_ind, dst}, host_task_events); + + return std::make_pair(arg_cleanup_ev, take_generic_ev); +} + +std::pair + usm_ndarray_put(const dpctl::tensor::usm_ndarray &dst, + const py::object &py_ind, + const dpctl::tensor::usm_ndarray &val, + int axis_start, + std::uint8_t mode, + sycl::queue &exec_q, + const std::vector &depends) +{ + std::vector ind = parse_py_ind(exec_q, py_ind); + int k = ind.size(); + + if (k == 0) { + // no indices to write to + throw py::value_error("List of indices is 
empty."); + } + + if (axis_start < 0) { + throw py::value_error("Axis cannot be negative."); + } + + if (mode != 0 && mode != 1) { + throw py::value_error("Mode must be 0 or 1."); + } + + dpctl::tensor::validation::CheckWritable::throw_if_not_writable(dst); + + const dpctl::tensor::usm_ndarray ind_rep = ind[0]; + + int dst_nd = dst.get_ndim(); + int val_nd = val.get_ndim(); + int ind_nd = ind_rep.get_ndim(); + + auto sh_elems = std::max(dst_nd, 1); + + if (axis_start + k > sh_elems) { + throw py::value_error("Axes are out of range for array of dimension " + + std::to_string(dst_nd)); + } + if (dst_nd == 0) { + if (val_nd != ind_nd) { + throw py::value_error("Destination is not of appropriate dimension " + "for put function."); + } + } + else { + if (val_nd != (dst_nd - k + ind_nd)) { + throw py::value_error("Destination is not of appropriate dimension " + "for put function."); + } + } + + std::size_t dst_nelems = dst.get_size(); + + const py::ssize_t *dst_shape = dst.get_shape_raw(); + const py::ssize_t *val_shape = val.get_shape_raw(); + + bool orthog_shapes_equal(true); + std::size_t orthog_nelems(1); + for (int i = 0; i < (dst_nd - k); ++i) { + auto idx1 = (i < axis_start) ? i : i + k; + auto idx2 = (i < axis_start) ? i : i + ind_nd; + + orthog_nelems *= static_cast(dst_shape[idx1]); + orthog_shapes_equal = + orthog_shapes_equal && (dst_shape[idx1] == val_shape[idx2]); + } + + if (!orthog_shapes_equal) { + throw py::value_error( + "Axes of basic indices are not of matching shapes."); + } + + if (orthog_nelems == 0) { + return std::make_pair(sycl::event(), sycl::event()); + } + + char *dst_data = dst.get_data(); + char *val_data = val.get_data(); + + if (!dpctl::utils::queues_are_compatible(exec_q, {dst, val})) { + throw py::value_error( + "Execution queue is not compatible with allocation queues"); + } + + auto const &overlap = dpctl::tensor::overlap::MemoryOverlap(); + if (overlap(val, dst)) { + throw py::value_error("Arrays index overlapping segments of memory"); + } + + py::ssize_t dst_offset = py::ssize_t(0); + py::ssize_t val_offset = py::ssize_t(0); + + dpctl::tensor::validation::AmpleMemory::throw_if_not_ample(dst, dst_nelems); + + int dst_typenum = dst.get_typenum(); + int val_typenum = val.get_typenum(); + + auto array_types = td_ns::usm_ndarray_types(); + int dst_type_id = array_types.typenum_to_lookup_id(dst_typenum); + int val_type_id = array_types.typenum_to_lookup_id(val_typenum); + + if (dst_type_id != val_type_id) { + throw py::type_error("Array data types are not the same."); + } + + const py::ssize_t *ind_shape = ind_rep.get_shape_raw(); + + int ind_typenum = ind_rep.get_typenum(); + int ind_type_id = array_types.typenum_to_lookup_id(ind_typenum); + + std::size_t ind_nelems(1); + for (int i = 0; i < ind_nd; ++i) { + ind_nelems *= static_cast(ind_shape[i]); + + if (!(ind_shape[i] == val_shape[axis_start + i])) { + throw py::value_error( + "Indices shapes does not match shape of axis in vals."); + } + } + + auto ind_sh_elems = std::max(ind_nd, 1); + + std::vector ind_ptrs; + ind_ptrs.reserve(k); + std::vector ind_offsets; + ind_offsets.reserve(k); + std::vector ind_sh_sts((k + 1) * ind_sh_elems, py::ssize_t(0)); + if (ind_nd > 0) { + std::copy(ind_shape, ind_shape + ind_sh_elems, ind_sh_sts.begin()); + } + for (int i = 0; i < k; ++i) { + dpctl::tensor::usm_ndarray ind_ = ind[i]; + + if (!dpctl::utils::queues_are_compatible(exec_q, {ind_})) { + throw py::value_error( + "Execution queue is not compatible with allocation queues"); + } + + // ndim, type, and shape are 
checked against the first array + if (i > 0) { + if (!(ind_.get_ndim() == ind_nd)) { + throw py::value_error("Index dimensions are not the same"); + } + + if (!(ind_type_id == + array_types.typenum_to_lookup_id(ind_.get_typenum()))) { + throw py::type_error( + "Indices array data types are not all the same."); + } + + const py::ssize_t *ind_shape_ = ind_.get_shape_raw(); + for (int dim = 0; dim < ind_nd; ++dim) { + if (!(ind_shape[dim] == ind_shape_[dim])) { + throw py::value_error("Indices shapes are not all equal."); + } + } + } + + // check for overlap with destination + if (overlap(ind_, dst)) { + throw py::value_error( + "Arrays index overlapping segments of memory"); + } + + char *ind_data = ind_.get_data(); + + // strides are initialized to 0 for 0D indices, so skip here + if (ind_nd > 0) { + auto ind_strides = ind_.get_strides_vector(); + std::copy(ind_strides.begin(), ind_strides.end(), + ind_sh_sts.begin() + (i + 1) * ind_nd); + } + + ind_ptrs.push_back(ind_data); + ind_offsets.push_back(py::ssize_t(0)); + } + + if (ind_nelems == 0) { + return std::make_pair(sycl::event{}, sycl::event{}); + } + + auto packed_ind_ptrs_owner = + dpctl::tensor::alloc_utils::smart_malloc_device(k, exec_q); + char **packed_ind_ptrs = packed_ind_ptrs_owner.get(); + + // packed_ind_shapes_strides = [ind_shape, + // ind[0] strides, + // ..., + // ind[k] strides] + auto packed_ind_shapes_strides_owner = + dpctl::tensor::alloc_utils::smart_malloc_device( + (k + 1) * ind_sh_elems, exec_q); + py::ssize_t *packed_ind_shapes_strides = + packed_ind_shapes_strides_owner.get(); + + auto packed_ind_offsets_owner = + dpctl::tensor::alloc_utils::smart_malloc_device(k, exec_q); + py::ssize_t *packed_ind_offsets = packed_ind_offsets_owner.get(); + + int orthog_sh_elems = std::max(dst_nd - k, 1); + + // packed_shapes_strides = [dst_shape[:axis] + dst_shape[axis+k:], + // dst_strides[:axis] + dst_strides[axis+k:], + // val_strides[:axis] + + // val_strides[axis+ind.ndim:]] + auto packed_shapes_strides_owner = + dpctl::tensor::alloc_utils::smart_malloc_device( + 3 * orthog_sh_elems, exec_q); + py::ssize_t *packed_shapes_strides = packed_shapes_strides_owner.get(); + + // packed_axes_shapes_strides = [dst_shape[axis:axis+k], + // dst_strides[axis:axis+k], + // val_shape[axis:axis+ind.ndim], + // val_strides[axis:axis+ind.ndim]] + auto packed_axes_shapes_strides_owner = + dpctl::tensor::alloc_utils::smart_malloc_device( + 2 * (k + ind_sh_elems), exec_q); + py::ssize_t *packed_axes_shapes_strides = + packed_axes_shapes_strides_owner.get(); + + auto dst_strides = dst.get_strides_vector(); + auto val_strides = val.get_strides_vector(); + + std::vector host_task_events; + host_task_events.reserve(2); + + std::vector pack_deps = _populate_kernel_params( + exec_q, host_task_events, packed_ind_ptrs, packed_ind_shapes_strides, + packed_ind_offsets, packed_shapes_strides, packed_axes_shapes_strides, + dst_shape, val_shape, dst_strides, val_strides, ind_sh_sts, ind_ptrs, + ind_offsets, axis_start, k, ind_nd, dst_nd, orthog_sh_elems, + ind_sh_elems); + + std::vector all_deps; + all_deps.reserve(depends.size() + pack_deps.size()); + all_deps.insert(std::end(all_deps), std::begin(pack_deps), + std::end(pack_deps)); + all_deps.insert(std::end(all_deps), std::begin(depends), std::end(depends)); + + auto fn = put_dispatch_table[mode][dst_type_id][ind_type_id]; + + if (fn == nullptr) { + sycl::event::wait(host_task_events); + throw std::runtime_error("Indices must be integer type, got " + + std::to_string(ind_type_id)); + } + + sycl::event 
put_generic_ev = + fn(exec_q, orthog_nelems, ind_nelems, orthog_sh_elems, ind_sh_elems, k, + packed_shapes_strides, packed_axes_shapes_strides, + packed_ind_shapes_strides, dst_data, val_data, packed_ind_ptrs, + dst_offset, val_offset, packed_ind_offsets, all_deps); + + // free packed temporaries + sycl::event temporaries_cleanup_ev = + dpctl::tensor::alloc_utils::async_smart_free( + exec_q, {put_generic_ev}, packed_shapes_strides_owner, + packed_axes_shapes_strides_owner, packed_ind_shapes_strides_owner, + packed_ind_ptrs_owner, packed_ind_offsets_owner); + host_task_events.push_back(temporaries_cleanup_ev); + + sycl::event arg_cleanup_ev = + keep_args_alive(exec_q, {dst, py_ind, val}, host_task_events); + + return std::make_pair(arg_cleanup_ev, put_generic_ev); +} + +void init_advanced_indexing_dispatch_tables(void) +{ + using namespace td_ns; + + using dpctl::tensor::kernels::indexing::TakeClipFactory; + DispatchTableBuilder + dtb_takeclip; + dtb_takeclip.populate_dispatch_table(take_dispatch_table[CLIP_MODE]); + + using dpctl::tensor::kernels::indexing::TakeWrapFactory; + DispatchTableBuilder + dtb_takewrap; + dtb_takewrap.populate_dispatch_table(take_dispatch_table[WRAP_MODE]); + + using dpctl::tensor::kernels::indexing::PutClipFactory; + DispatchTableBuilder dtb_putclip; + dtb_putclip.populate_dispatch_table(put_dispatch_table[CLIP_MODE]); + + using dpctl::tensor::kernels::indexing::PutWrapFactory; + DispatchTableBuilder dtb_putwrap; + dtb_putwrap.populate_dispatch_table(put_dispatch_table[WRAP_MODE]); +} + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/integer_advanced_indexing.hpp b/dpctl_ext/tensor/libtensor/source/integer_advanced_indexing.hpp new file mode 100644 index 000000000000..bc0136288e1c --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/integer_advanced_indexing.hpp @@ -0,0 +1,71 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. 
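Both functions above take `py_ind` as a Python sequence of index arrays, validate that every index array shares ndim, dtype, shape, and allocation queue, pack shapes, strides and pointers into device USM, and then dispatch on (mode, array type, index type). The `take()`/`put()` wrappers this PR adds to `dpctl_ext.tensor` drive these bindings; a usage sketch, assuming their signatures mirror `dpctl.tensor.take`/`dpctl.tensor.put`:

    import dpctl
    import dpctl.tensor as dpt
    import dpctl_ext.tensor as dpt_ext

    q = dpctl.SyclQueue()
    x = dpt.reshape(dpt.arange(12, dtype="i4", sycl_queue=q), (3, 4))
    ind = dpt.asarray([0, 2], dtype="i8", sycl_queue=q)
    y = dpt_ext.take(x, ind, axis=0)                # rows 0 and 2 of x
    dpt_ext.put(x, ind, dpt.zeros_like(y), axis=0)  # zero those rows in place
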
+//***************************************************************************** +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file declares Python API for implementation functions of +/// dpctl.tensor.take and dpctl.tensor.put +//===----------------------------------------------------------------------===// + +#pragma once +#include +#include +#include + +#include + +#include "dpnp4pybind11.hpp" +#include + +namespace py = pybind11; + +namespace dpctl::tensor::py_internal +{ + +extern std::pair + usm_ndarray_take(const dpctl::tensor::usm_ndarray &, + const py::object &, + const dpctl::tensor::usm_ndarray &, + int, + std::uint8_t, + sycl::queue &, + const std::vector & = {}); + +extern std::pair + usm_ndarray_put(const dpctl::tensor::usm_ndarray &, + const py::object &, + const dpctl::tensor::usm_ndarray &, + int, + std::uint8_t, + sycl::queue &, + const std::vector & = {}); + +extern void init_advanced_indexing_dispatch_tables(void); + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/tensor_ctors.cpp b/dpctl_ext/tensor/libtensor/source/tensor_ctors.cpp index 54d6adbc8f6e..0478fb19678c 100644 --- a/dpctl_ext/tensor/libtensor/source/tensor_ctors.cpp +++ b/dpctl_ext/tensor/libtensor/source/tensor_ctors.cpp @@ -53,17 +53,17 @@ // #include "copy_numpy_ndarray_into_usm_ndarray.hpp" #include "device_support_queries.hpp" // #include "eye_ctor.hpp" -// #include "full_ctor.hpp" -// #include "integer_advanced_indexing.hpp" +#include "full_ctor.hpp" +#include "integer_advanced_indexing.hpp" #include "kernels/dpctl_tensor_types.hpp" // #include "linear_sequences.hpp" // #include "repeat.hpp" #include "simplify_iteration_space.hpp" -// #include "triul_ctor.hpp" +#include "triul_ctor.hpp" #include "utils/memory_overlap.hpp" #include "utils/strided_iters.hpp" // #include "where.hpp" -// #include "zeros_ctor.hpp" +#include "zeros_ctor.hpp" namespace py = pybind11; @@ -102,15 +102,15 @@ using dpctl::tensor::py_internal::py_as_f_contig; /* ================ Full ================== */ -// using dpctl::tensor::py_internal::usm_ndarray_full; +using dpctl::tensor::py_internal::usm_ndarray_full; /* ================ Zeros ================== */ -// using dpctl::tensor::py_internal::usm_ndarray_zeros; +using dpctl::tensor::py_internal::usm_ndarray_zeros; /* ============== Advanced Indexing ============= */ -// using dpctl::tensor::py_internal::usm_ndarray_put; -// using dpctl::tensor::py_internal::usm_ndarray_take; +using dpctl::tensor::py_internal::usm_ndarray_put; +using dpctl::tensor::py_internal::usm_ndarray_take; // using dpctl::tensor::py_internal::py_extract; // using dpctl::tensor::py_internal::py_mask_positions; @@ -128,7 +128,7 @@ using dpctl::tensor::py_internal::py_as_f_contig; /* =========================== Tril and triu ============================== */ -// using dpctl::tensor::py_internal::usm_ndarray_triul; +using dpctl::tensor::py_internal::usm_ndarray_triul; /* =========================== Where ============================== */ @@ -144,7 +144,7 @@ void init_dispatch_tables(void) init_copy_and_cast_usm_to_usm_dispatch_tables(); // init_copy_numpy_ndarray_into_usm_ndarray_dispatch_tables(); - // init_advanced_indexing_dispatch_tables(); + init_advanced_indexing_dispatch_tables(); // init_where_dispatch_tables(); return; } @@ -158,10 +158,10 @@ void init_dispatch_vectors(void) // init_copy_for_reshape_dispatch_vectors(); // init_copy_for_roll_dispatch_vectors(); // 
init_linear_sequences_dispatch_vectors(); - // init_full_ctor_dispatch_vectors(); - // init_zeros_ctor_dispatch_vectors(); + init_full_ctor_dispatch_vectors(); + init_zeros_ctor_dispatch_vectors(); // init_eye_ctor_dispatch_vectors(); - // init_triul_ctor_dispatch_vectors(); + init_triul_ctor_dispatch_vectors(); // populate_masked_extract_dispatch_vectors(); // populate_masked_place_dispatch_vectors(); @@ -299,22 +299,20 @@ PYBIND11_MODULE(_tensor_impl, m) // py::arg("shifts"), py::arg("sycl_queue"), py::arg("depends") = // py::list()); - // m.def("_linspace_step", &usm_ndarray_linear_sequence_step, - // "Fills input 1D contiguous usm_ndarray `dst` with linear sequence " - // "specified by " - // "starting point `start` and step `dt`. " - // "Returns a tuple of events: (ht_event, comp_event)", - // py::arg("start"), py::arg("dt"), py::arg("dst"), - // py::arg("sycl_queue"), py::arg("depends") = py::list()); - - // m.def("_linspace_affine", &usm_ndarray_linear_sequence_affine, - // "Fills input 1D contiguous usm_ndarray `dst` with linear sequence " - // "specified by " - // "starting point `start` and end point `end`. " - // "Returns a tuple of events: (ht_event, comp_event)", - // py::arg("start"), py::arg("end"), py::arg("dst"), - // py::arg("include_endpoint"), py::arg("sycl_queue"), - // py::arg("depends") = py::list()); + // m.def("_linspace_step", &usm_ndarray_linear_sequence_step, + // "Fills input 1D contiguous usm_ndarray `dst` with linear + // sequence " "specified by " "starting point `start` and step + // `dt`. " "Returns a tuple of events: (ht_event, comp_event)", + // py::arg("start"), py::arg("dt"), py::arg("dst"), + // py::arg("sycl_queue"), py::arg("depends") = py::list()); + + // m.def("_linspace_affine", &usm_ndarray_linear_sequence_affine, + // "Fills input 1D contiguous usm_ndarray `dst` with linear + // sequence " "specified by " "starting point `start` and end + // point `end`. " "Returns a tuple of events: (ht_event, + // comp_event)", py::arg("start"), py::arg("end"), py::arg("dst"), + // py::arg("include_endpoint"), py::arg("sycl_queue"), + // py::arg("depends") = py::list()); // m.def("_copy_numpy_ndarray_into_usm_ndarray", // ©_numpy_ndarray_into_usm_ndarray, @@ -322,32 +320,32 @@ PYBIND11_MODULE(_tensor_impl, m) // synchronously.", py::arg("src"), py::arg("dst"), // py::arg("sycl_queue"), py::arg("depends") = py::list()); - // m.def("_zeros_usm_ndarray", &usm_ndarray_zeros, - // "Populate usm_ndarray `dst` with zeros.", py::arg("dst"), - // py::arg("sycl_queue"), py::arg("depends") = py::list()); + m.def("_zeros_usm_ndarray", &usm_ndarray_zeros, + "Populate usm_ndarray `dst` with zeros.", py::arg("dst"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); - // m.def("_full_usm_ndarray", &usm_ndarray_full, - // "Populate usm_ndarray `dst` with given fill_value.", - // py::arg("fill_value"), py::arg("dst"), py::arg("sycl_queue"), - // py::arg("depends") = py::list()); + m.def("_full_usm_ndarray", &usm_ndarray_full, + "Populate usm_ndarray `dst` with given fill_value.", + py::arg("fill_value"), py::arg("dst"), py::arg("sycl_queue"), + py::arg("depends") = py::list()); - // m.def("_take", &usm_ndarray_take, - // "Takes elements at usm_ndarray indices `ind` and axes starting " - // "at axis `axis_start` from array `src` and copies them " - // "into usm_ndarray `dst` synchronously." 
- // "Returns a tuple of events: (hev, ev)", - // py::arg("src"), py::arg("ind"), py::arg("dst"), - // py::arg("axis_start"), py::arg("mode"), py::arg("sycl_queue"), - // py::arg("depends") = py::list()); - - // m.def("_put", &usm_ndarray_put, - // "Puts elements at usm_ndarray indices `ind` and axes starting " - // "at axis `axis_start` into array `dst` from " - // "usm_ndarray `val` synchronously." - // "Returns a tuple of events: (hev, ev)", - // py::arg("dst"), py::arg("ind"), py::arg("val"), - // py::arg("axis_start"), py::arg("mode"), py::arg("sycl_queue"), - // py::arg("depends") = py::list()); + m.def("_take", &usm_ndarray_take, + "Takes elements at usm_ndarray indices `ind` and axes starting " + "at axis `axis_start` from array `src` and copies them " + "into usm_ndarray `dst` synchronously." + "Returns a tuple of events: (hev, ev)", + py::arg("src"), py::arg("ind"), py::arg("dst"), py::arg("axis_start"), + py::arg("mode"), py::arg("sycl_queue"), + py::arg("depends") = py::list()); + + m.def("_put", &usm_ndarray_put, + "Puts elements at usm_ndarray indices `ind` and axes starting " + "at axis `axis_start` into array `dst` from " + "usm_ndarray `val` synchronously." + "Returns a tuple of events: (hev, ev)", + py::arg("dst"), py::arg("ind"), py::arg("val"), py::arg("axis_start"), + py::arg("mode"), py::arg("sycl_queue"), + py::arg("depends") = py::list()); // m.def("_eye", &usm_ndarray_eye, // "Fills input 2D contiguous usm_ndarray `dst` with " @@ -387,27 +385,27 @@ PYBIND11_MODULE(_tensor_impl, m) dpctl::tensor::py_internal::default_device_index_type, "Gives default index type supported by device.", py::arg("dev")); - // auto tril_fn = [](const dpctl::tensor::usm_ndarray &src, - // const dpctl::tensor::usm_ndarray &dst, py::ssize_t k, - // sycl::queue &exec_q, - // const std::vector depends) - // -> std::pair { - // return usm_ndarray_triul(exec_q, src, dst, 'l', k, depends); - // }; - // m.def("_tril", tril_fn, "Tril helper function.", py::arg("src"), - // py::arg("dst"), py::arg("k") = 0, py::arg("sycl_queue"), - // py::arg("depends") = py::list()); + auto tril_fn = [](const dpctl::tensor::usm_ndarray &src, + const dpctl::tensor::usm_ndarray &dst, py::ssize_t k, + sycl::queue &exec_q, + const std::vector depends) + -> std::pair { + return usm_ndarray_triul(exec_q, src, dst, 'l', k, depends); + }; + m.def("_tril", tril_fn, "Tril helper function.", py::arg("src"), + py::arg("dst"), py::arg("k") = 0, py::arg("sycl_queue"), + py::arg("depends") = py::list()); - // auto triu_fn = [](const dpctl::tensor::usm_ndarray &src, - // const dpctl::tensor::usm_ndarray &dst, py::ssize_t k, - // sycl::queue &exec_q, - // const std::vector depends) - // -> std::pair { - // return usm_ndarray_triul(exec_q, src, dst, 'u', k, depends); - // }; - // m.def("_triu", triu_fn, "Triu helper function.", py::arg("src"), - // py::arg("dst"), py::arg("k") = 0, py::arg("sycl_queue"), - // py::arg("depends") = py::list()); + auto triu_fn = [](const dpctl::tensor::usm_ndarray &src, + const dpctl::tensor::usm_ndarray &dst, py::ssize_t k, + sycl::queue &exec_q, + const std::vector depends) + -> std::pair { + return usm_ndarray_triul(exec_q, src, dst, 'u', k, depends); + }; + m.def("_triu", triu_fn, "Triu helper function.", py::arg("src"), + py::arg("dst"), py::arg("k") = 0, py::arg("sycl_queue"), + py::arg("depends") = py::list()); // m.def("mask_positions", &py_mask_positions, "", py::arg("mask"), // py::arg("cumsum"), py::arg("sycl_queue"), diff --git a/dpctl_ext/tensor/libtensor/source/triul_ctor.cpp 
b/dpctl_ext/tensor/libtensor/source/triul_ctor.cpp new file mode 100644 index 000000000000..13e909196460 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/triul_ctor.cpp @@ -0,0 +1,246 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. 
+//***************************************************************************** +// +//===--------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_impl extensions +//===--------------------------------------------------------------------===// + +#include // for std::copy +#include // for std::size_t +#include // for std::begin, std::end +#include // for std::make_shared +#include // for std::pair, std::move +#include // for std::vector, std::begin, std::end + +#include + +#include "dpnp4pybind11.hpp" +#include + +#include "kernels/constructors.hpp" +#include "simplify_iteration_space.hpp" +#include "utils/memory_overlap.hpp" +#include "utils/output_validation.hpp" +#include "utils/sycl_alloc_utils.hpp" +#include "utils/type_dispatch.hpp" + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +namespace dpctl::tensor::py_internal +{ + +using dpctl::utils::keep_args_alive; + +using dpctl::tensor::kernels::constructors::tri_fn_ptr_t; + +static tri_fn_ptr_t tril_generic_dispatch_vector[td_ns::num_types]; +static tri_fn_ptr_t triu_generic_dispatch_vector[td_ns::num_types]; + +std::pair + usm_ndarray_triul(sycl::queue &exec_q, + const dpctl::tensor::usm_ndarray &src, + const dpctl::tensor::usm_ndarray &dst, + char part, + py::ssize_t k = 0, + const std::vector &depends = {}) +{ + // array dimensions must be the same + int src_nd = src.get_ndim(); + int dst_nd = dst.get_ndim(); + if (src_nd != dst_nd) { + throw py::value_error("Array dimensions are not the same."); + } + + if (src_nd < 2) { + throw py::value_error("Array dimensions less than 2."); + } + + // shapes must be the same + const py::ssize_t *src_shape = src.get_shape_raw(); + const py::ssize_t *dst_shape = dst.get_shape_raw(); + + bool shapes_equal(true); + std::size_t src_nelems(1); + + for (int i = 0; shapes_equal && i < src_nd; ++i) { + src_nelems *= static_cast(src_shape[i]); + shapes_equal = shapes_equal && (src_shape[i] == dst_shape[i]); + } + if (!shapes_equal) { + throw py::value_error("Array shapes are not the same."); + } + + if (src_nelems == 0) { + // nothing to do + return std::make_pair(sycl::event(), sycl::event()); + } + + char *src_data = src.get_data(); + char *dst_data = dst.get_data(); + + // check that arrays do not overlap, and concurrent copying is safe. 
+ auto const &overlap = dpctl::tensor::overlap::MemoryOverlap(); + if (overlap(src, dst)) { + // TODO: could use a temporary, but this is done by the caller + throw py::value_error("Arrays index overlapping segments of memory"); + } + + auto array_types = td_ns::usm_ndarray_types(); + + int src_typenum = src.get_typenum(); + int dst_typenum = dst.get_typenum(); + int src_typeid = array_types.typenum_to_lookup_id(src_typenum); + int dst_typeid = array_types.typenum_to_lookup_id(dst_typenum); + + if (dst_typeid != src_typeid) { + throw py::value_error("Array dtype are not the same."); + } + + // check same queues + if (!dpctl::utils::queues_are_compatible(exec_q, {src, dst})) { + throw py::value_error( + "Execution queue context is not the same as allocation contexts"); + } + + dpctl::tensor::validation::CheckWritable::throw_if_not_writable(dst); + + auto src_strides = src.get_strides_vector(); + auto dst_strides = dst.get_strides_vector(); + + using shT = std::vector; + shT simplified_shape; + shT simplified_src_strides; + shT simplified_dst_strides; + py::ssize_t src_offset(0); + py::ssize_t dst_offset(0); + + int nd = src_nd - 2; + const py::ssize_t *shape = src_shape; + + const shT iter_src_strides(std::begin(src_strides), + std::begin(src_strides) + nd); + const shT iter_dst_strides(std::begin(dst_strides), + std::begin(dst_strides) + nd); + + simplify_iteration_space(nd, shape, iter_src_strides, iter_dst_strides, + // output + simplified_shape, simplified_src_strides, + simplified_dst_strides, src_offset, dst_offset); + + if (src_offset != 0 || dst_offset != 0) { + throw py::value_error("Reversed slice for dst is not supported"); + } + + nd += 2; + + using usm_host_allocatorT = + dpctl::tensor::alloc_utils::usm_host_allocator; + using usmshT = std::vector; + + usm_host_allocatorT allocator(exec_q); + auto shp_host_shape_and_strides = + std::make_shared(3 * nd, allocator); + + std::copy(simplified_shape.begin(), simplified_shape.end(), + shp_host_shape_and_strides->begin()); + (*shp_host_shape_and_strides)[nd - 2] = src_shape[src_nd - 2]; + (*shp_host_shape_and_strides)[nd - 1] = src_shape[src_nd - 1]; + + std::copy(simplified_src_strides.begin(), simplified_src_strides.end(), + shp_host_shape_and_strides->begin() + nd); + (*shp_host_shape_and_strides)[2 * nd - 2] = src_strides[src_nd - 2]; + (*shp_host_shape_and_strides)[2 * nd - 1] = src_strides[src_nd - 1]; + + std::copy(simplified_dst_strides.begin(), simplified_dst_strides.end(), + shp_host_shape_and_strides->begin() + 2 * nd); + (*shp_host_shape_and_strides)[3 * nd - 2] = dst_strides[src_nd - 2]; + (*shp_host_shape_and_strides)[3 * nd - 1] = dst_strides[src_nd - 1]; + + auto dev_shape_and_strides_owner = + dpctl::tensor::alloc_utils::smart_malloc_device(3 * nd, + exec_q); + py::ssize_t *dev_shape_and_strides = dev_shape_and_strides_owner.get(); + + const sycl::event ©_shape_and_strides = exec_q.copy( + shp_host_shape_and_strides->data(), dev_shape_and_strides, 3 * nd); + + py::ssize_t inner_range = src_shape[src_nd - 1] * src_shape[src_nd - 2]; + py::ssize_t outer_range = src_nelems / inner_range; + + sycl::event tri_ev; + if (part == 'l') { + auto fn = tril_generic_dispatch_vector[src_typeid]; + tri_ev = + fn(exec_q, inner_range, outer_range, src_data, dst_data, nd, + dev_shape_and_strides, k, depends, {copy_shape_and_strides}); + } + else { + auto fn = triu_generic_dispatch_vector[src_typeid]; + tri_ev = + fn(exec_q, inner_range, outer_range, src_data, dst_data, nd, + dev_shape_and_strides, k, depends, {copy_shape_and_strides}); 
+ } + + const auto &temporaries_cleanup_ev = exec_q.submit([&](sycl::handler &cgh) { + cgh.depends_on(tri_ev); + const auto &ctx = exec_q.get_context(); + using dpctl::tensor::alloc_utils::sycl_free_noexcept; + cgh.host_task( + [shp_host_shape_and_strides = std::move(shp_host_shape_and_strides), + dev_shape_and_strides, ctx]() { + // capture of shp_host_shape_and_strides ensure the underlying + // vector exists for the entire execution of copying kernel + sycl_free_noexcept(dev_shape_and_strides, ctx); + }); + }); + // since host_task now owns USM allocation, release ownership by smart + // pointer + dev_shape_and_strides_owner.release(); + + return std::make_pair( + keep_args_alive(exec_q, {src, dst}, {temporaries_cleanup_ev}), tri_ev); +} + +void init_triul_ctor_dispatch_vectors(void) +{ + + using namespace td_ns; + using dpctl::tensor::kernels::constructors::TrilGenericFactory; + using dpctl::tensor::kernels::constructors::TriuGenericFactory; + + DispatchVectorBuilder dvb1; + dvb1.populate_dispatch_vector(tril_generic_dispatch_vector); + + DispatchVectorBuilder dvb2; + dvb2.populate_dispatch_vector(triu_generic_dispatch_vector); +} + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/triul_ctor.hpp b/dpctl_ext/tensor/libtensor/source/triul_ctor.hpp new file mode 100644 index 000000000000..47cc4ce8892d --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/triul_ctor.hpp @@ -0,0 +1,58 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. 
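`usm_ndarray_triul` serves both triangles through the `part` selector ('l' or 'u'); the leading batch dimensions go through iteration-space simplification, while the trailing two axes are passed to the kernel untouched. The `_tril`/`_triu` lambdas registered in tensor_ctors.cpp above bind `part` accordingly. A usage sketch of the wrappers this PR adds, assuming they mirror `dpctl.tensor.tril`/`dpctl.tensor.triu`:

    import dpctl
    import dpctl.tensor as dpt
    import dpctl_ext.tensor as dpt_ext

    q = dpctl.SyclQueue()
    a = dpt.ones((4, 4), dtype="f4", sycl_queue=q)
    lower = dpt_ext.tril(a)       # zeros strictly above the main diagonal
    upper = dpt_ext.triu(a, k=1)  # zeros on and below the main diagonal
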
+//***************************************************************************** +// +//===--------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_impl extensions +//===--------------------------------------------------------------------===// + +#pragma once +#include +#include +#include + +#include "dpnp4pybind11.hpp" +#include + +namespace py = pybind11; + +namespace dpctl::tensor::py_internal +{ + +extern std::pair + usm_ndarray_triul(sycl::queue &exec_q, + const dpctl::tensor::usm_ndarray &src, + const dpctl::tensor::usm_ndarray &dst, + char part, + py::ssize_t k = 0, + const std::vector &depends = {}); + +extern void init_triul_ctor_dispatch_vectors(void); + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/zeros_ctor.cpp b/dpctl_ext/tensor/libtensor/source/zeros_ctor.cpp new file mode 100644 index 000000000000..2eb05e49f382 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/zeros_ctor.cpp @@ -0,0 +1,161 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. 
+//*****************************************************************************
+//
+//===--------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines functions of dpctl.tensor._tensor_impl extensions
+//===--------------------------------------------------------------------===//
+
+#include <complex>
+#include <cstddef>
+#include <stdexcept>
+#include <utility>
+#include <vector>
+
+#include <sycl/sycl.hpp>
+
+#include "dpnp4pybind11.hpp"
+#include <pybind11/complex.h>
+#include <pybind11/pybind11.h>
+
+#include "utils/output_validation.hpp"
+#include "utils/type_dispatch.hpp"
+
+#include "zeros_ctor.hpp"
+
+namespace py = pybind11;
+namespace td_ns = dpctl::tensor::type_dispatch;
+
+namespace dpctl::tensor::py_internal
+{
+
+using dpctl::utils::keep_args_alive;
+
+typedef sycl::event (*zeros_contig_fn_ptr_t)(sycl::queue &,
+                                             std::size_t,
+                                             char *,
+                                             const std::vector<sycl::event> &);
+
+/*!
+ * @brief Function to submit kernel to fill given contiguous memory allocation
+ * with zeros.
+ *
+ * @param exec_q Sycl queue to which kernel is submitted for execution.
+ * @param nelems Length of the sequence
+ * @param dst_p Kernel accessible USM pointer to the start of array to be
+ * populated.
+ * @param depends List of events to wait for before starting computations, if
+ * any.
+ *
+ * @return Event to wait on to ensure that computation completes.
+ * @defgroup CtorKernels
+ */
+template <typename dstTy>
+sycl::event zeros_contig_impl(sycl::queue &exec_q,
+                              std::size_t nelems,
+                              char *dst_p,
+                              const std::vector<sycl::event> &depends)
+{
+
+    static constexpr int memset_val(0);
+    sycl::event fill_ev = exec_q.submit([&](sycl::handler &cgh) {
+        cgh.depends_on(depends);
+
+        cgh.memset(reinterpret_cast<void *>(dst_p), memset_val,
+                   nelems * sizeof(dstTy));
+    });
+
+    return fill_ev;
+}
+
+template <typename fnT, typename Ty>
+struct ZerosContigFactory
+{
+    fnT get()
+    {
+        fnT f = zeros_contig_impl<Ty>;
+        return f;
+    }
+};
+
+static zeros_contig_fn_ptr_t zeros_contig_dispatch_vector[td_ns::num_types];
+
+std::pair<sycl::event, sycl::event>
+    usm_ndarray_zeros(const dpctl::tensor::usm_ndarray &dst,
+                      sycl::queue &exec_q,
+                      const std::vector<sycl::event> &depends)
+{
+    py::ssize_t dst_nelems = dst.get_size();
+
+    if (dst_nelems == 0) {
+        // nothing to do
+        return std::make_pair(sycl::event(), sycl::event());
+    }
+
+    if (!dpctl::utils::queues_are_compatible(exec_q, {dst})) {
+        throw py::value_error(
+            "Execution queue is not compatible with the allocation queue");
+    }
+
+    dpctl::tensor::validation::CheckWritable::throw_if_not_writable(dst);
+
+    auto array_types = td_ns::usm_ndarray_types();
+    int dst_typenum = dst.get_typenum();
+    int dst_typeid = array_types.typenum_to_lookup_id(dst_typenum);
+
+    char *dst_data = dst.get_data();
+
+    if (dst_nelems == 1 || dst.is_c_contiguous() || dst.is_f_contiguous()) {
+        auto fn = zeros_contig_dispatch_vector[dst_typeid];
+
+        sycl::event zeros_contig_event =
+            fn(exec_q, static_cast<std::size_t>(dst_nelems), dst_data,
+               depends);
+
+        return std::make_pair(
+            keep_args_alive(exec_q, {dst}, {zeros_contig_event}),
+            zeros_contig_event);
+    }
+    else {
+        throw std::runtime_error(
+            "Only population of contiguous usm_ndarray objects is supported.");
+    }
+}
+
+void init_zeros_ctor_dispatch_vectors(void)
+{
+    using namespace td_ns;
+
+    DispatchVectorBuilder<zeros_contig_fn_ptr_t, ZerosContigFactory, num_types>
+        dvb;
+    dvb.populate_dispatch_vector(zeros_contig_dispatch_vector);
+
+    return;
+}
+
+} // namespace dpctl::tensor::py_internal
diff --git a/dpctl_ext/tensor/libtensor/source/zeros_ctor.hpp b/dpctl_ext/tensor/libtensor/source/zeros_ctor.hpp
new file mode 100644
index 000000000000..51a1903a0f36
--- /dev/null
+++ b/dpctl_ext/tensor/libtensor/source/zeros_ctor.hpp
@@ -0,0 +1,54 @@
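Unlike `usm_ndarray_full`, the zeros constructor above only services C- and F-contiguous destinations (a strided destination raises `std::runtime_error`), which is sufficient for the freshly allocated outputs dpnp hands to it. A minimal sketch of the binding (argument names from the `m.def` registration in tensor_ctors.cpp; import path per this PR's module layout):

    import dpctl
    import dpctl.tensor as dpt
    import dpctl_ext.tensor._tensor_impl as ti_ext

    q = dpctl.SyclQueue()
    dst = dpt.empty(10, dtype="c8", sycl_queue=q)  # contiguous, as required
    hev, _ = ti_ext._zeros_usm_ndarray(dst, sycl_queue=q)
    hev.wait()
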
+//*****************************************************************************
+// Copyright (c) 2026, Intel Corporation
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// - Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// - Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// - Neither the name of the copyright holder nor the names of its contributors
+//   may be used to endorse or promote products derived from this software
+//   without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+// THE POSSIBILITY OF SUCH DAMAGE.
+//*****************************************************************************
+//
+//===--------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines functions of dpctl.tensor._tensor_impl extensions
+//===--------------------------------------------------------------------===//
+
+#pragma once
+#include <utility>
+#include <vector>
+
+#include <sycl/sycl.hpp>
+
+#include "dpnp4pybind11.hpp"
+#include <pybind11/pybind11.h>
+
+namespace dpctl::tensor::py_internal
+{
+
+extern std::pair<sycl::event, sycl::event>
+    usm_ndarray_zeros(const dpctl::tensor::usm_ndarray &dst,
+                      sycl::queue &exec_q,
+                      const std::vector<sycl::event> &depends = {});
+
+extern void init_zeros_ctor_dispatch_vectors(void);
+
+} // namespace dpctl::tensor::py_internal
diff --git a/dpnp/dpnp_algo/dpnp_fill.py b/dpnp/dpnp_algo/dpnp_fill.py
index 112ea3af0fdb..4137a2794747 100644
--- a/dpnp/dpnp_algo/dpnp_fill.py
+++ b/dpnp/dpnp_algo/dpnp_fill.py
@@ -31,14 +31,17 @@
 import dpctl.tensor as dpt
 import dpctl.utils as dpu
 from dpctl.tensor._ctors import _cast_fill_val
-from dpctl.tensor._tensor_impl import (
+
+import dpnp
+
+# TODO: revert to `from dpctl.tensor...`
+# when dpnp fully migrates dpctl/tensor
+from dpctl_ext.tensor._tensor_impl import (
     _copy_usm_ndarray_into_usm_ndarray,
     _full_usm_ndarray,
     _zeros_usm_ndarray,
 )
 
-import dpnp
-
 
 def dpnp_fill(arr, val):
     arr = dpnp.get_usm_ndarray(arr)
diff --git a/dpnp/dpnp_container.py b/dpnp/dpnp_container.py
index 4975db17c717..c8e28529cd57 100644
--- a/dpnp/dpnp_container.py
+++ b/dpnp/dpnp_container.py
@@ -38,6 +38,7 @@
 
 import dpctl.tensor as dpt
 import dpctl.utils as dpu
+import dpctl_ext.tensor as dpt_ext
 
 import dpnp
 from dpnp.dpnp_array import dpnp_array
@@ -228,7 +229,7 @@ def full(
         fill_value = fill_value.get_array()
 
     """Creates `dpnp_array` having a specified shape, filled with fill_value."""
-    array_obj = dpt.full(
+    array_obj = dpt_ext.full(
         shape,
         fill_value,
         dtype=dtype,
@@ -269,13
+270,13 @@ def ones( def tril(x1, /, *, k=0): """Creates `dpnp_array` as lower triangular part of an input array.""" - array_obj = dpt.tril(dpnp.get_usm_ndarray(x1), k=k) + array_obj = dpt_ext.tril(dpnp.get_usm_ndarray(x1), k=k) return dpnp_array._create_from_usm_ndarray(array_obj) def triu(x1, /, *, k=0): """Creates `dpnp_array` as upper triangular part of an input array.""" - array_obj = dpt.triu(dpnp.get_usm_ndarray(x1), k=k) + array_obj = dpt_ext.triu(dpnp.get_usm_ndarray(x1), k=k) return dpnp_array._create_from_usm_ndarray(array_obj) diff --git a/dpnp/dpnp_iface.py b/dpnp/dpnp_iface.py index 50b474014666..533bdc36c617 100644 --- a/dpnp/dpnp_iface.py +++ b/dpnp/dpnp_iface.py @@ -50,6 +50,7 @@ import numpy from dpctl.tensor._device import normalize_queue_device +# pylint: disable=no-name-in-module # TODO: revert to `import dpctl.tensor...` # when dpnp fully migrates dpctl/tensor import dpctl_ext.tensor._tensor_impl as ti diff --git a/dpnp/dpnp_iface_indexing.py b/dpnp/dpnp_iface_indexing.py index 7718412701e8..583561573b85 100644 --- a/dpnp/dpnp_iface_indexing.py +++ b/dpnp/dpnp_iface_indexing.py @@ -45,13 +45,18 @@ from collections.abc import Iterable import dpctl.tensor as dpt -import dpctl.tensor._tensor_impl as ti import dpctl.utils as dpu import numpy from dpctl.tensor._copy_utils import _nonzero_impl from dpctl.tensor._indexing_functions import _get_indexing_mode from dpctl.tensor._numpy_helper import normalize_axis_index +import dpctl_ext.tensor as dpt_ext + +# pylint: disable=no-name-in-module +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor._tensor_impl as ti import dpnp # pylint: disable=no-name-in-module @@ -813,7 +818,7 @@ def extract(condition, a): usm_a = dpt.reshape(usm_a, -1) usm_cond = dpt.reshape(usm_cond, -1) - usm_res = dpt.take(usm_a, dpt.nonzero(usm_cond)[0]) + usm_res = dpt_ext.take(usm_a, dpt.nonzero(usm_cond)[0]) else: if usm_cond.shape != usm_a.shape: usm_a = dpt.reshape(usm_a, -1) @@ -1713,7 +1718,7 @@ def put(a, ind, v, /, *, axis=None, mode="wrap"): if axis is None and usm_a.ndim > 1: usm_a = dpt.reshape(usm_a, -1) - dpt.put(usm_a, usm_ind, usm_v, axis=axis, mode=mode) + dpt_ext.put(usm_a, usm_ind, usm_v, axis=axis, mode=mode) if in_usm_a._pointer != usm_a._pointer: # pylint: disable=protected-access in_usm_a[:] = dpt.reshape(usm_a, in_usm_a.shape, copy=False) diff --git a/dpnp/fft/dpnp_utils_fft.py b/dpnp/fft/dpnp_utils_fft.py index 709494e6255e..534b9404254f 100644 --- a/dpnp/fft/dpnp_utils_fft.py +++ b/dpnp/fft/dpnp_utils_fft.py @@ -42,6 +42,10 @@ from collections.abc import Sequence import dpctl + +# pylint: disable=no-name-in-module +# TODO: remove it when ti.__linspace_step +# is migrated to dpctl_ext/tensor import dpctl.tensor._tensor_impl as ti import dpctl.utils as dpu import numpy @@ -51,6 +55,10 @@ ) from dpctl.utils import ExecutionPlacementError +# pylint: disable=no-name-in-module +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor._tensor_impl as ti_ext import dpnp import dpnp.backend.extensions.fft._fft_impl as fi @@ -196,8 +204,8 @@ def _compute_result(dsc, a, out, forward, c2c, out_strides): out_usm = None if out is None else dpnp.get_usm_ndarray(out) if ( out is not None - and out_usm.strides == tuple(out_strides) - and not ti._array_overlap(a_usm, out_usm) + and out.strides == tuple(out_strides) + and not ti_ext._array_overlap(a_usm, dpnp.get_usm_ndarray(out)) ): res_usm = out_usm result = out @@ -530,7 +538,7 @@ def 
_truncate_or_pad(a, shape, axes):
         )
         _manager = dpu.SequentialOrderManager[exec_q]
         dep_evs = _manager.submitted_events
-        ht_copy_ev, copy_ev = ti._copy_usm_ndarray_into_usm_ndarray(
+        ht_copy_ev, copy_ev = ti_ext._copy_usm_ndarray_into_usm_ndarray(
             src=dpnp.get_usm_ndarray(a),
             dst=z.get_array()[tuple(index)],
             sycl_queue=exec_q,
diff --git a/dpnp/linalg/dpnp_utils_linalg.py b/dpnp/linalg/dpnp_utils_linalg.py
index 6881c7787e9f..c6897e7b0614 100644
--- a/dpnp/linalg/dpnp_utils_linalg.py
+++ b/dpnp/linalg/dpnp_utils_linalg.py
@@ -42,12 +42,15 @@
 
 from typing import NamedTuple
 
-import dpctl.tensor._tensor_impl as ti
 import dpctl.utils as dpu
 import numpy
 from dpctl.tensor._numpy_helper import normalize_axis_index
 from numpy import prod
 
+# pylint: disable=no-name-in-module
+# TODO: revert to `import dpctl.tensor...`
+# when dpnp fully migrates dpctl/tensor
+import dpctl_ext.tensor._tensor_impl as ti
 import dpnp
 import dpnp.backend.extensions.lapack._lapack_impl as li
 from dpnp.dpnp_utils import get_usm_allocations
diff --git a/dpnp/scipy/linalg/_utils.py b/dpnp/scipy/linalg/_utils.py
index 9e5ae405ccc5..f7bdd5330d42 100644
--- a/dpnp/scipy/linalg/_utils.py
+++ b/dpnp/scipy/linalg/_utils.py
@@ -45,6 +45,7 @@
 
 import dpctl.utils as dpu
 
+# pylint: disable=no-name-in-module
 # TODO: revert to `import dpctl.tensor...`
 # when dpnp fully migrates dpctl/tensor
 import dpctl_ext.tensor._tensor_impl as ti

From 195b89330a174c3d614db7b6207469413ae6bc55 Mon Sep 17 00:00:00 2001
From: vlad-perevezentsev
Date: Tue, 3 Mar 2026 11:25:26 +0100
Subject: [PATCH 02/11] Extend `._tensor_impl` with copy functions (#2774)

This PR extends `_tensor_impl` in `dpctl_ext.tensor` with the copy
functions (`_copy_usm_ndarray_for_reshape`,
`_copy_numpy_ndarray_into_usm_ndarray`, `_copy_usm_ndarray_for_roll_1d`,
`_copy_usm_ndarray_for_roll_nd`).

It also adds `asnumpy()`, `astype()`, `copy()`, `from_numpy()`,
`to_numpy()`, `roll()`, and `reshape()` to `dpctl_ext.tensor` and updates
the corresponding dpnp functions to use these implementations internally
---
 dpctl_ext/tensor/CMakeLists.txt               |   6 +-
 dpctl_ext/tensor/__init__.py                  |  18 +
 dpctl_ext/tensor/_copy_utils.py               | 755 ++++++++++++++++++
 dpctl_ext/tensor/_ctors.py                    |   5 +-
 dpctl_ext/tensor/_indexing_functions.py       |   5 +-
 dpctl_ext/tensor/_manipulation_functions.py   | 120 +++
 dpctl_ext/tensor/_reshape.py                  | 209 +++++
 .../libtensor/source/copy_for_reshape.cpp     | 184 +++++
 .../libtensor/source/copy_for_reshape.hpp     |  54 ++
 .../tensor/libtensor/source/copy_for_roll.cpp | 400 ++++++++++
 .../tensor/libtensor/source/copy_for_roll.hpp |  65 ++
 .../copy_numpy_ndarray_into_usm_ndarray.cpp   | 368 +++++++++
 .../copy_numpy_ndarray_into_usm_ndarray.hpp   |  57 ++
 .../tensor/libtensor/source/tensor_ctors.cpp  |  71 +-
 dpnp/dpnp_algo/dpnp_arraycreation.py          |   9 +-
 dpnp/dpnp_algo/dpnp_elementwise_common.py     |  13 +-
 dpnp/dpnp_algo/dpnp_fill.py                   |   6 +-
 dpnp/dpnp_array.py                            |   5 +-
 dpnp/dpnp_container.py                        |   4 +-
 dpnp/dpnp_iface.py                            |   3 +-
 dpnp/dpnp_iface_arraycreation.py              |  15 +-
 dpnp/dpnp_iface_indexing.py                   |  39 +-
 dpnp/dpnp_iface_manipulation.py               |  11 +-
 dpnp/dpnp_iface_sorting.py                    |   5 +-
 dpnp/dpnp_iface_statistics.py                 |   5 +-
 dpnp/tests/test_arraycreation.py              |   5 +-
 26 files changed, 2351 insertions(+), 86 deletions(-)
 create mode 100644 dpctl_ext/tensor/_copy_utils.py
 create mode 100644 dpctl_ext/tensor/_manipulation_functions.py
 create mode 100644 dpctl_ext/tensor/_reshape.py
 create mode 100644 dpctl_ext/tensor/libtensor/source/copy_for_reshape.cpp
 create mode 100644 dpctl_ext/tensor/libtensor/source/copy_for_reshape.hpp
 create mode 100644 
dpctl_ext/tensor/libtensor/source/copy_for_roll.cpp create mode 100644 dpctl_ext/tensor/libtensor/source/copy_for_roll.hpp create mode 100644 dpctl_ext/tensor/libtensor/source/copy_numpy_ndarray_into_usm_ndarray.cpp create mode 100644 dpctl_ext/tensor/libtensor/source/copy_numpy_ndarray_into_usm_ndarray.hpp diff --git a/dpctl_ext/tensor/CMakeLists.txt b/dpctl_ext/tensor/CMakeLists.txt index fd781a9f9586..93555981deaa 100644 --- a/dpctl_ext/tensor/CMakeLists.txt +++ b/dpctl_ext/tensor/CMakeLists.txt @@ -48,9 +48,9 @@ set(_tensor_impl_sources # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/accumulators.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/copy_and_cast_usm_to_usm.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/copy_as_contig.cpp - # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/copy_numpy_ndarray_into_usm_ndarray.cpp - # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/copy_for_reshape.cpp - # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/copy_for_roll.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/copy_numpy_ndarray_into_usm_ndarray.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/copy_for_reshape.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/copy_for_roll.cpp # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/linear_sequences.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/integer_advanced_indexing.cpp # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/boolean_advanced_indexing.cpp diff --git a/dpctl_ext/tensor/__init__.py b/dpctl_ext/tensor/__init__.py index 3c6939eff7a0..edb2c096bad1 100644 --- a/dpctl_ext/tensor/__init__.py +++ b/dpctl_ext/tensor/__init__.py @@ -27,6 +27,13 @@ # ***************************************************************************** +from dpctl_ext.tensor._copy_utils import ( + asnumpy, + astype, + copy, + from_numpy, + to_numpy, +) from dpctl_ext.tensor._ctors import ( full, tril, @@ -36,11 +43,22 @@ put, take, ) +from dpctl_ext.tensor._manipulation_functions import ( + roll, +) +from dpctl_ext.tensor._reshape import reshape __all__ = [ + "asnumpy", + "astype", + "copy", + "from_numpy", "full", "put", + "reshape", + "roll", "take", + "to_numpy", "tril", "triu", ] diff --git a/dpctl_ext/tensor/_copy_utils.py b/dpctl_ext/tensor/_copy_utils.py new file mode 100644 index 000000000000..c62218893a2c --- /dev/null +++ b/dpctl_ext/tensor/_copy_utils.py @@ -0,0 +1,755 @@ +# ***************************************************************************** +# Copyright (c) 2026, Intel Corporation +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# - Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# - Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# - Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+# *****************************************************************************
+
+import builtins
+
+import dpctl
+import dpctl.memory as dpm
+import dpctl.tensor as dpt
+import dpctl.utils
+import numpy as np
+from dpctl.tensor._data_types import _get_dtype
+from dpctl.tensor._device import normalize_queue_device
+from dpctl.tensor._type_utils import _dtype_supported_by_device_impl
+
+# TODO: revert to `import dpctl.tensor...`
+# when dpnp fully migrates dpctl/tensor
+import dpctl_ext.tensor._tensor_impl as ti
+
+__doc__ = (
+    "Implementation module for copy- and cast- operations on "
+    ":class:`dpctl.tensor.usm_ndarray`."
+)
+
+int32_t_max = 1 + np.iinfo(np.int32).max
+
+
+def _copy_to_numpy(ary):
+    if not isinstance(ary, dpt.usm_ndarray):
+        raise TypeError(f"Expected dpctl.tensor.usm_ndarray, got {type(ary)}")
+    if ary.size == 0:
+        # no data needs to be copied for zero sized array
+        return np.ndarray(ary.shape, dtype=ary.dtype)
+    nb = ary.usm_data.nbytes
+    q = ary.sycl_queue
+    hh = dpm.MemoryUSMHost(nb, queue=q)
+    h = np.ndarray(nb, dtype="u1", buffer=hh).view(ary.dtype)
+    itsz = ary.itemsize
+    strides_bytes = tuple(si * itsz for si in ary.strides)
+    offset = ary._element_offset * itsz
+    # ensure that content of ary.usm_data is final
+    q.wait()
+    hh.copy_from_device(ary.usm_data)
+    return np.ndarray(
+        ary.shape,
+        dtype=ary.dtype,
+        buffer=h,
+        strides=strides_bytes,
+        offset=offset,
+    )
+
+
+def _copy_from_numpy(np_ary, usm_type="device", sycl_queue=None):
+    """Copies numpy array `np_ary` into a new usm_ndarray"""
+    # This may perform a copy to meet stated requirements
+    Xnp = np.require(np_ary, requirements=["A", "E"])
+    alloc_q = normalize_queue_device(sycl_queue=sycl_queue, device=None)
+    dt = Xnp.dtype
+    if dt.char in "dD" and alloc_q.sycl_device.has_aspect_fp64 is False:
+        Xusm_dtype = (
+            dpt.dtype("float32") if dt.char == "d" else dpt.dtype("complex64")
+        )
+    else:
+        Xusm_dtype = dt
+    Xusm = dpt.empty(
+        Xnp.shape, dtype=Xusm_dtype, usm_type=usm_type, sycl_queue=sycl_queue
+    )
+    _copy_from_numpy_into(Xusm, Xnp)
+    return Xusm
+
+
+def _copy_from_numpy_into(dst, np_ary):
+    """Copies `np_ary` into `dst` of type :class:`dpctl.tensor.usm_ndarray`"""
+    if not isinstance(np_ary, np.ndarray):
+        raise TypeError(f"Expected numpy.ndarray, got {type(np_ary)}")
+    if not isinstance(dst, dpt.usm_ndarray):
+        raise TypeError(f"Expected usm_ndarray, got {type(dst)}")
+    if np_ary.flags["OWNDATA"]:
+        Xnp = np_ary
+    else:
+        # Determine base of input array
+        base = np_ary.base
+        while isinstance(base, np.ndarray):
+            base = base.base
+        if isinstance(base, dpm._memory._Memory):
+            # we must perform a copy, since the subsequent
+            # _copy_numpy_ndarray_into_usm_ndarray is implemented using
+            # sycl::buffer, and using USM-pointers with sycl::buffer
+            # results in undefined behavior
+            Xnp = np_ary.copy()
+        else:
+            Xnp = np_ary
+    src_ary = np.broadcast_to(Xnp, dst.shape)
+    copy_q = dst.sycl_queue
+    if copy_q.sycl_device.has_aspect_fp64 is False:
+        src_ary_dt_c = src_ary.dtype.char
+        if src_ary_dt_c == "d":
+            src_ary = src_ary.astype(np.float32)
+        elif src_ary_dt_c == "D":
+            src_ary = src_ary.astype(np.complex64)
+    _manager = dpctl.utils.SequentialOrderManager[copy_q]
+    dep_ev = _manager.submitted_events
+    # synchronizing call
+    ti._copy_numpy_ndarray_into_usm_ndarray(
+        src=src_ary, dst=dst, sycl_queue=copy_q, depends=dep_ev
+    )
+
+
+def from_numpy(np_ary, /, *, device=None, usm_type="device", sycl_queue=None):
+    """
+    from_numpy(arg, device=None, usm_type="device", sycl_queue=None)
+
+    Creates :class:`dpctl.tensor.usm_ndarray` from instance of
+    :class:`numpy.ndarray`.
+
+    Args:
+        arg:
+            Input convertible to :class:`numpy.ndarray`
+        device (object): array API specification of device where the
+            output array is created. Device can be specified by
+            a filter selector string, an instance of
+            :class:`dpctl.SyclDevice`, an instance of
+            :class:`dpctl.SyclQueue`, or an instance of
+            :class:`dpctl.tensor.Device`. If the value is ``None``,
+            returned array is created on the default-selected device.
+            Default: ``None``
+        usm_type (str): The requested USM allocation type for the
+            output array. Recognized values are ``"device"``,
+            ``"shared"``, or ``"host"``
+        sycl_queue (:class:`dpctl.SyclQueue`, optional):
+            A SYCL queue that determines output array allocation device
+            as well as execution placement of data movement operations.
+            The ``device`` and ``sycl_queue`` arguments
+            are equivalent. Only one of them should be specified. If both
+            are provided, they must be consistent and result in using the
+            same execution queue. Default: ``None``
+
+    The returned array has the same shape and the same data type kind.
+    If the device does not support the data type of the input array, the
+    closest supported data type of the same kind may be returned, e.g. an
+    input array of type ``float16`` may be upcast to ``float32`` if the
+    target device does not support 16-bit floating point type.
+    """
+    q = normalize_queue_device(sycl_queue=sycl_queue, device=device)
+    return _copy_from_numpy(np_ary, usm_type=usm_type, sycl_queue=q)
+
+
+def to_numpy(usm_ary, /):
+    """
+    to_numpy(usm_ary)
+
+    Copies content of :class:`dpctl.tensor.usm_ndarray` instance ``usm_ary``
+    into :class:`numpy.ndarray` instance of the same shape and same data type.
+
+    Args:
+        usm_ary (usm_ndarray):
+            Input array
+    Returns:
+        :class:`numpy.ndarray`:
+            An instance of :class:`numpy.ndarray` populated with content of
+            ``usm_ary``
+    """
+    return _copy_to_numpy(usm_ary)
+
+
+def asnumpy(usm_ary):
+    """
+    asnumpy(usm_ary)
+
+    Copies content of :class:`dpctl.tensor.usm_ndarray` instance ``usm_ary``
+    into :class:`numpy.ndarray` instance of the same shape and same data
+    type.
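+
+    This function is equivalent to :func:`to_numpy` above; both spellings
+    are provided by this module.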
+ + Args: + usm_ary (usm_ndarray): + Input array + Returns: + :class:`numpy.ndarray`: + An instance of :class:`numpy.ndarray` populated with content + of ``usm_ary`` + """ + return _copy_to_numpy(usm_ary) + + +class Dummy: + """Helper class with specified ``__sycl_usm_array_interface__`` attribute""" + + def __init__(self, iface): + self.__sycl_usm_array_interface__ = iface + + +def _copy_overlapping(dst, src): + """Assumes src and dst have the same shape.""" + q = normalize_queue_device(sycl_queue=dst.sycl_queue) + tmp = dpt.usm_ndarray( + src.shape, + dtype=src.dtype, + buffer="device", + order="C", + buffer_ctor_kwargs={"queue": q}, + ) + _manager = dpctl.utils.SequentialOrderManager[q] + dep_evs = _manager.submitted_events + hcp1, cp1 = ti._copy_usm_ndarray_into_usm_ndarray( + src=src, dst=tmp, sycl_queue=q, depends=dep_evs + ) + _manager.add_event_pair(hcp1, cp1) + hcp2, cp2 = ti._copy_usm_ndarray_into_usm_ndarray( + src=tmp, dst=dst, sycl_queue=q, depends=[cp1] + ) + _manager.add_event_pair(hcp2, cp2) + + +def _copy_same_shape(dst, src): + """Assumes src and dst have the same shape.""" + # check that memory regions do not overlap + if ti._array_overlap(dst, src): + if src._pointer == dst._pointer and ( + src is dst + or (src.strides == dst.strides and src.dtype == dst.dtype) + ): + return + _copy_overlapping(src=src, dst=dst) + return + + copy_q = dst.sycl_queue + _manager = dpctl.utils.SequentialOrderManager[copy_q] + dep_evs = _manager.submitted_events + hev, cpy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=src, dst=dst, sycl_queue=copy_q, depends=dep_evs + ) + _manager.add_event_pair(hev, cpy_ev) + + +if hasattr(np, "broadcast_shapes"): + + def _broadcast_shapes(sh1, sh2): + return np.broadcast_shapes(sh1, sh2) + +else: + + def _broadcast_shapes(sh1, sh2): + # use arrays with zero strides, whose memory footprint + # is independent of the number of array elements + return np.broadcast( + np.empty(sh1, dtype=[]), + np.empty(sh2, dtype=[]), + ).shape + + +def _broadcast_strides(X_shape, X_strides, res_ndim): + """ + Broadcasts strides to match the given dimensions; + returns tuple type strides. + """ + out_strides = [0] * res_ndim + X_shape_len = len(X_shape) + str_dim = -X_shape_len + for i in range(X_shape_len): + shape_value = X_shape[i] + if not shape_value == 1: + out_strides[str_dim] = X_strides[i] + str_dim += 1 + + return tuple(out_strides) + + +def _copy_from_usm_ndarray_to_usm_ndarray(dst, src): + if any( + not isinstance(arg, dpt.usm_ndarray) + for arg in ( + dst, + src, + ) + ): + raise TypeError( + "Both types are expected to be dpctl.tensor.usm_ndarray, " + f"got {type(dst)} and {type(src)}." 
+        )
+
+    if dst.ndim == src.ndim and dst.shape == src.shape:
+        _copy_same_shape(dst, src)
+        return
+
+    try:
+        common_shape = _broadcast_shapes(dst.shape, src.shape)
+    except ValueError as exc:
+        raise ValueError("Shapes of two arrays are not compatible") from exc
+
+    if dst.size < src.size and dst.size < np.prod(common_shape):
+        raise ValueError("Destination array is smaller than the source array")
+
+    if len(common_shape) > dst.ndim:
+        ones_count = len(common_shape) - dst.ndim
+        for k in range(ones_count):
+            if common_shape[k] != 1:
+                raise ValueError("Shapes of two arrays are not compatible")
+        common_shape = common_shape[ones_count:]
+
+    if src.ndim <= len(common_shape):
+        new_src_strides = _broadcast_strides(
+            src.shape, src.strides, len(common_shape)
+        )
+        src_same_shape = dpt.usm_ndarray(
+            common_shape,
+            dtype=src.dtype,
+            buffer=src,
+            strides=new_src_strides,
+            offset=src._element_offset,
+        )
+    else:
+        # since broadcasting succeeded, src.ndim is greater because of
+        # leading sequence of ones, so we trim it
+        n = len(common_shape)
+        new_src_strides = _broadcast_strides(
+            src.shape[-n:], src.strides[-n:], n
+        )
+        src_same_shape = dpt.usm_ndarray(
+            common_shape,
+            dtype=src.dtype,
+            buffer=src.usm_data,
+            strides=new_src_strides,
+            offset=src._element_offset,
+        )
+
+    _copy_same_shape(dst, src_same_shape)
+
+
+def _make_empty_like_orderK(x, dt, usm_type, dev):
+    """
+    Returns empty array with shape and strides like `x`, with dtype `dt`,
+    USM type `usm_type`, on device `dev`.
+    """
+    st = list(x.strides)
+    perm = sorted(
+        range(x.ndim),
+        key=lambda d: builtins.abs(st[d]) if x.shape[d] > 1 else 0,
+        reverse=True,
+    )
+    inv_perm = sorted(range(x.ndim), key=lambda i: perm[i])
+    sh = x.shape
+    sh_sorted = tuple(sh[i] for i in perm)
+    R = dpt.empty(sh_sorted, dtype=dt, usm_type=usm_type, device=dev, order="C")
+    if min(st) < 0:
+        st_sorted = [st[i] for i in perm]
+        sl = tuple(
+            (
+                slice(None, None, -1)
+                if st_sorted[i] < 0
+                else slice(None, None, None)
+            )
+            for i in range(x.ndim)
+        )
+        R = R[sl]
+    return dpt.permute_dims(R, inv_perm)
+
+
+def _empty_like_orderK(x, dt, usm_type=None, dev=None):
+    """
+    Returns empty array like `x`, using order='K'
+
+    For an array `x` that was obtained by permutation of a contiguous
+    array the returned array will have the same shape and the same
+    strides as `x`.
+    """
+    if not isinstance(x, dpt.usm_ndarray):
+        raise TypeError(f"Expected usm_ndarray, got {type(x)}")
+    if usm_type is None:
+        usm_type = x.usm_type
+    if dev is None:
+        dev = x.device
+    fl = x.flags
+    if fl["C"] or x.size <= 1:
+        return dpt.empty_like(
+            x, dtype=dt, usm_type=usm_type, device=dev, order="C"
+        )
+    elif fl["F"]:
+        return dpt.empty_like(
+            x, dtype=dt, usm_type=usm_type, device=dev, order="F"
+        )
+    return _make_empty_like_orderK(x, dt, usm_type, dev)
+
+
+def _from_numpy_empty_like_orderK(x, dt, usm_type, dev):
+    """
+    Returns empty usm_ndarray like NumPy array `x`, using order='K'
+
+    For an array `x` that was obtained by permutation of a contiguous
+    array the returned array will have the same shape and the same
+    strides as `x`.
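+
+    A minimal illustration (not from the original sources; assumes a
+    default-selected device is available):
+
+        >>> import numpy as np
+        >>> x = np.zeros((2, 3), order="F")
+        >>> r = _from_numpy_empty_like_orderK(x, "f4", "device", None)
+        >>> r.flags.f_contiguous
+        True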
+ """ + if not isinstance(x, np.ndarray): + raise TypeError(f"Expected numpy.ndarray, got {type(x)}") + fl = x.flags + if fl["C"] or x.size <= 1: + return dpt.empty( + x.shape, dtype=dt, usm_type=usm_type, device=dev, order="C" + ) + elif fl["F"]: + return dpt.empty( + x.shape, dtype=dt, usm_type=usm_type, device=dev, order="F" + ) + return _make_empty_like_orderK(x, dt, usm_type, dev) + + +def _empty_like_pair_orderK(X1, X2, dt, res_shape, usm_type, dev): + if not isinstance(X1, dpt.usm_ndarray): + raise TypeError(f"Expected usm_ndarray, got {type(X1)}") + if not isinstance(X2, dpt.usm_ndarray): + raise TypeError(f"Expected usm_ndarray, got {type(X2)}") + nd1 = X1.ndim + nd2 = X2.ndim + if nd1 > nd2 and X1.shape == res_shape: + return _empty_like_orderK(X1, dt, usm_type, dev) + elif nd1 < nd2 and X2.shape == res_shape: + return _empty_like_orderK(X2, dt, usm_type, dev) + fl1 = X1.flags + fl2 = X2.flags + if fl1["C"] or fl2["C"]: + return dpt.empty( + res_shape, dtype=dt, usm_type=usm_type, device=dev, order="C" + ) + if fl1["F"] and fl2["F"]: + return dpt.empty( + res_shape, dtype=dt, usm_type=usm_type, device=dev, order="F" + ) + st1 = list(X1.strides) + st2 = list(X2.strides) + max_ndim = max(nd1, nd2) + st1 += [0] * (max_ndim - len(st1)) + st2 += [0] * (max_ndim - len(st2)) + sh1 = list(X1.shape) + [0] * (max_ndim - nd1) + sh2 = list(X2.shape) + [0] * (max_ndim - nd2) + perm = sorted( + range(max_ndim), + key=lambda d: ( + builtins.abs(st1[d]) if sh1[d] > 1 else 0, + builtins.abs(st2[d]) if sh2[d] > 1 else 0, + ), + reverse=True, + ) + inv_perm = sorted(range(max_ndim), key=lambda i: perm[i]) + st1_sorted = [st1[i] for i in perm] + st2_sorted = [st2[i] for i in perm] + sh = res_shape + sh_sorted = tuple(sh[i] for i in perm) + R = dpt.empty(sh_sorted, dtype=dt, usm_type=usm_type, device=dev, order="C") + if max(min(st1_sorted), min(st2_sorted)) < 0: + sl = tuple( + ( + slice(None, None, -1) + if (st1_sorted[i] < 0 and st2_sorted[i] < 0) + else slice(None, None, None) + ) + for i in range(nd1) + ) + R = R[sl] + return dpt.permute_dims(R, inv_perm) + + +def _empty_like_triple_orderK(X1, X2, X3, dt, res_shape, usm_type, dev): + if not isinstance(X1, dpt.usm_ndarray): + raise TypeError(f"Expected usm_ndarray, got {type(X1)}") + if not isinstance(X2, dpt.usm_ndarray): + raise TypeError(f"Expected usm_ndarray, got {type(X2)}") + if not isinstance(X3, dpt.usm_ndarray): + raise TypeError(f"Expected usm_ndarray, got {type(X3)}") + nd1 = X1.ndim + nd2 = X2.ndim + nd3 = X3.ndim + if X1.shape == res_shape and X2.shape == res_shape and len(res_shape) > nd3: + return _empty_like_pair_orderK(X1, X2, dt, res_shape, usm_type, dev) + elif ( + X2.shape == res_shape and X3.shape == res_shape and len(res_shape) > nd1 + ): + return _empty_like_pair_orderK(X2, X3, dt, res_shape, usm_type, dev) + elif ( + X1.shape == res_shape and X3.shape == res_shape and len(res_shape) > nd2 + ): + return _empty_like_pair_orderK(X1, X3, dt, res_shape, usm_type, dev) + fl1 = X1.flags + fl2 = X2.flags + fl3 = X3.flags + if fl1["C"] or fl2["C"] or fl3["C"]: + return dpt.empty( + res_shape, dtype=dt, usm_type=usm_type, device=dev, order="C" + ) + if fl1["F"] and fl2["F"] and fl3["F"]: + return dpt.empty( + res_shape, dtype=dt, usm_type=usm_type, device=dev, order="F" + ) + st1 = list(X1.strides) + st2 = list(X2.strides) + st3 = list(X3.strides) + max_ndim = max(nd1, nd2, nd3) + st1 += [0] * (max_ndim - len(st1)) + st2 += [0] * (max_ndim - len(st2)) + st3 += [0] * (max_ndim - len(st3)) + sh1 = list(X1.shape) + [0] * (max_ndim - 
nd1) + sh2 = list(X2.shape) + [0] * (max_ndim - nd2) + sh3 = list(X3.shape) + [0] * (max_ndim - nd3) + perm = sorted( + range(max_ndim), + key=lambda d: ( + builtins.abs(st1[d]) if sh1[d] > 1 else 0, + builtins.abs(st2[d]) if sh2[d] > 1 else 0, + builtins.abs(st3[d]) if sh3[d] > 1 else 0, + ), + reverse=True, + ) + inv_perm = sorted(range(max_ndim), key=lambda i: perm[i]) + st1_sorted = [st1[i] for i in perm] + st2_sorted = [st2[i] for i in perm] + st3_sorted = [st3[i] for i in perm] + sh = res_shape + sh_sorted = tuple(sh[i] for i in perm) + R = dpt.empty(sh_sorted, dtype=dt, usm_type=usm_type, device=dev, order="C") + if max(min(st1_sorted), min(st2_sorted), min(st3_sorted)) < 0: + sl = tuple( + ( + slice(None, None, -1) + if ( + st1_sorted[i] < 0 + and st2_sorted[i] < 0 + and st3_sorted[i] < 0 + ) + else slice(None, None, None) + ) + for i in range(nd1) + ) + R = R[sl] + return dpt.permute_dims(R, inv_perm) + + +def copy(usm_ary, /, *, order="K"): + """copy(ary, order="K") + + Creates a copy of given instance of :class:`dpctl.tensor.usm_ndarray`. + + Args: + ary (usm_ndarray): + Input array + order (``"C"``, ``"F"``, ``"A"``, ``"K"``, optional): + Controls the memory layout of the output array + Returns: + usm_ndarray: + A copy of the input array. + + Memory layout of the copy is controlled by ``order`` keyword, + following NumPy's conventions. The ``order`` keywords can be + one of the following: + + .. list-table:: + + * - ``"C"`` + - C-contiguous memory layout + * - ``"F"`` + - Fortran-contiguous memory layout + * - ``"A"`` + - Fortran-contiguous if the input array is also Fortran-contiguous, + otherwise C-contiguous + * - ``"K"`` + - match the layout of ``usm_ary`` as closely as possible. + + """ + if len(order) == 0 or order[0] not in "KkAaCcFf": + raise ValueError( + "Unrecognized order keyword value, expecting 'K', 'A', 'F', or 'C'." + ) + order = order[0].upper() + if not isinstance(usm_ary, dpt.usm_ndarray): + raise TypeError( + f"Expected object of type dpt.usm_ndarray, got {type(usm_ary)}" + ) + copy_order = "C" + if order == "C": + pass + elif order == "F": + copy_order = order + elif order == "A": + if usm_ary.flags.f_contiguous: + copy_order = "F" + elif order == "K": + if usm_ary.flags.f_contiguous: + copy_order = "F" + else: + raise ValueError( + "Unrecognized value of the order keyword. " + "Recognized values are 'A', 'C', 'F', or 'K'" + ) + if order == "K": + R = _empty_like_orderK(usm_ary, usm_ary.dtype) + else: + R = dpt.usm_ndarray( + usm_ary.shape, + dtype=usm_ary.dtype, + buffer=usm_ary.usm_type, + order=copy_order, + buffer_ctor_kwargs={"queue": usm_ary.sycl_queue}, + ) + _copy_same_shape(R, usm_ary) + return R + + +def astype( + usm_ary, newdtype, /, *, order="K", casting="unsafe", copy=True, device=None +): + """astype(array, new_dtype, order="K", casting="unsafe", \ + copy=True, device=None) + + Returns a copy of the :class:`dpctl.tensor.usm_ndarray`, cast to a + specified type. + + Args: + array (usm_ndarray): + An input array. + new_dtype (dtype): + The data type of the resulting array. If `None`, gives default + floating point type supported by device where the resulting array + will be located. + order ({"C", "F", "A", "K"}, optional): + Controls memory layout of the resulting array if a copy + is returned. + casting ({'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional): + Controls what kind of data casting may occur. Please see + :meth:`numpy.ndarray.astype` for description of casting modes. 
+        copy (bool, optional):
+            By default, `astype` always returns a newly allocated array.
+            If this keyword is set to `False`, a view of the input array
+            may be returned when possible.
+        device (object): array API specification of device where the
+            output array is created. Device can be specified by
+            a filter selector string, an instance of
+            :class:`dpctl.SyclDevice`, an instance of
+            :class:`dpctl.SyclQueue`, or an instance of
+            :class:`dpctl.tensor.Device`. If the value is `None`,
+            returned array is created on the same device as `array`.
+            Default: `None`.
+
+    Returns:
+        usm_ndarray:
+            An array with requested data type.
+
+    A view can be returned, if possible, when `copy=False` is used.
+    """
+    if not isinstance(usm_ary, dpt.usm_ndarray):
+        raise TypeError(
+            f"Expected object of type dpt.usm_ndarray, got {type(usm_ary)}"
+        )
+    if len(order) == 0 or order[0] not in "KkAaCcFf":
+        raise ValueError(
+            "Unrecognized order keyword value, expecting 'K', 'A', 'F', or 'C'."
+        )
+    order = order[0].upper()
+    ary_dtype = usm_ary.dtype
+    if device is not None:
+        if not isinstance(device, dpctl.SyclQueue):
+            if isinstance(device, dpt.Device):
+                device = device.sycl_queue
+            else:
+                device = dpt.Device.create_device(device).sycl_queue
+        d = device.sycl_device
+        target_dtype = _get_dtype(newdtype, device)
+        if not _dtype_supported_by_device_impl(
+            target_dtype, d.has_aspect_fp16, d.has_aspect_fp64
+        ):
+            raise ValueError(
+                f"Requested dtype '{target_dtype}' is not supported by the "
+                "target device"
+            )
+        usm_ary = usm_ary.to_device(device)
+    else:
+        target_dtype = _get_dtype(newdtype, usm_ary.sycl_queue)
+
+    if not dpt.can_cast(ary_dtype, target_dtype, casting=casting):
+        raise TypeError(
+            f"Can not cast from {ary_dtype} to {newdtype} "
+            f"according to rule {casting}."
+        )
+    c_contig = usm_ary.flags.c_contiguous
+    f_contig = usm_ary.flags.f_contiguous
+    needs_copy = copy or not ary_dtype == target_dtype
+    if not needs_copy and (order != "K"):
+        # ensure that order="F" for C-contig input triggers copy,
+        # and order="C" for F-contig input triggers copy too.
+        # 1D arrays which are both C- and F- contig should not
+        # force copying for either order="F" or order="C", see gh-1926
+        needs_copy = (
+            c_contig and not f_contig and order not in ["A", "C"]
+        ) or (not c_contig and f_contig and order not in ["A", "F"])
+    if not needs_copy:
+        return usm_ary
+    copy_order = "C"
+    if order == "C":
+        pass
+    elif order == "F":
+        copy_order = order
+    elif order == "A":
+        if usm_ary.flags.f_contiguous:
+            copy_order = "F"
+    elif order == "K":
+        if usm_ary.flags.f_contiguous:
+            copy_order = "F"
+    else:
+        raise ValueError(
+            "Unrecognized value of the order keyword. 
" + "Recognized values are 'A', 'C', 'F', or 'K'" + ) + if order == "K": + R = _empty_like_orderK(usm_ary, target_dtype) + else: + R = dpt.usm_ndarray( + usm_ary.shape, + dtype=target_dtype, + buffer=usm_ary.usm_type, + order=copy_order, + buffer_ctor_kwargs={"queue": usm_ary.sycl_queue}, + ) + _copy_from_usm_ndarray_to_usm_ndarray(R, usm_ary) + return R diff --git a/dpctl_ext/tensor/_ctors.py b/dpctl_ext/tensor/_ctors.py index a0e7b28e66ff..5a39e9367e9c 100644 --- a/dpctl_ext/tensor/_ctors.py +++ b/dpctl_ext/tensor/_ctors.py @@ -36,6 +36,9 @@ from dpctl.tensor._data_types import _get_dtype from dpctl.tensor._device import normalize_queue_device +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt_ext import dpctl_ext.tensor._tensor_impl as ti @@ -147,7 +150,7 @@ def full( usm_type=usm_type, sycl_queue=sycl_queue, ) - return dpt.copy(dpt.broadcast_to(X, shape), order=order) + return dpt_ext.copy(dpt.broadcast_to(X, shape), order=order) else: _validate_fill_value(fill_value) diff --git a/dpctl_ext/tensor/_indexing_functions.py b/dpctl_ext/tensor/_indexing_functions.py index 106df09cf97e..df4f3e953042 100644 --- a/dpctl_ext/tensor/_indexing_functions.py +++ b/dpctl_ext/tensor/_indexing_functions.py @@ -32,6 +32,9 @@ import dpctl.tensor as dpt import dpctl.utils +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt_ext import dpctl_ext.tensor._tensor_impl as ti from ._numpy_helper import normalize_axis_index @@ -185,7 +188,7 @@ def put_vec_duplicates(vec, ind, vals): if vals.dtype == x.dtype: rhs = vals else: - rhs = dpt.astype(vals, x.dtype) + rhs = dpt_ext.astype(vals, x.dtype) rhs = dpt.broadcast_to(rhs, val_shape) _manager = dpctl.utils.SequentialOrderManager[exec_q] diff --git a/dpctl_ext/tensor/_manipulation_functions.py b/dpctl_ext/tensor/_manipulation_functions.py new file mode 100644 index 000000000000..fa8fc27876b3 --- /dev/null +++ b/dpctl_ext/tensor/_manipulation_functions.py @@ -0,0 +1,120 @@ +# ***************************************************************************** +# Copyright (c) 2026, Intel Corporation +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# - Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# - Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# - Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+# *****************************************************************************
+
+import operator
+
+import dpctl.tensor as dpt
+import dpctl.utils as dputils
+import numpy as np
+
+# TODO: revert to `import dpctl.tensor...`
+# when dpnp fully migrates dpctl/tensor
+import dpctl_ext.tensor._tensor_impl as ti
+
+from ._numpy_helper import normalize_axis_tuple
+
+__doc__ = (
+    "Implementation module for array manipulation "
+    "functions in :module:`dpctl.tensor`"
+)
+
+
+def roll(x, /, shift, *, axis=None):
+    """
+    roll(x, shift, axis)
+
+    Rolls array elements along a specified axis.
+    Array elements that roll beyond the last position are re-introduced
+    at the first position. Array elements that roll beyond the first position
+    are re-introduced at the last position.
+
+    Args:
+        x (usm_ndarray): input array
+        shift (Union[int, Tuple[int,...]]): number of places by which the
+            elements are shifted. If `shift` is a tuple, then `axis` must be a
+            tuple of the same size, and each of the given axes is shifted
+            by the corresponding element in `shift`. If `shift` is an `int`
+            and `axis` a tuple, then the same `shift` is used for all
+            specified axes. If a `shift` is positive, then array elements are
+            shifted positively (toward larger indices) along the dimension of
+            `axis`.
+            If a `shift` is negative, then array elements are shifted
+            negatively (toward smaller indices) along the dimension of `axis`.
+        axis (Optional[Union[int, Tuple[int,...]]]): axis (or axes) along
+            which to shift elements. If `axis` is `None`, the array is
+            flattened, shifted, and then restored to its original shape.
+            Default: `None`.
+
+    Returns:
+        usm_ndarray:
+            An array having the same `dtype`, `usm_type` and
+            `device` attributes as `x` and whose elements are shifted relative
+            to `x`.
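+
+    Example (illustrative only; assumes a default-selected device):
+
+        >>> import dpctl.tensor as dpt
+        >>> import dpctl_ext.tensor as dpt_ext
+        >>> x = dpt.arange(5)
+        >>> dpt.asnumpy(dpt_ext.roll(x, 2))
+        array([3, 4, 0, 1, 2])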
+ """ + if not isinstance(x, dpt.usm_ndarray): + raise TypeError(f"Expected usm_ndarray type, got {type(x)}.") + exec_q = x.sycl_queue + _manager = dputils.SequentialOrderManager[exec_q] + if axis is None: + shift = operator.index(shift) + res = dpt.empty( + x.shape, dtype=x.dtype, usm_type=x.usm_type, sycl_queue=exec_q + ) + sz = operator.index(x.size) + shift = (shift % sz) if sz > 0 else 0 + dep_evs = _manager.submitted_events + hev, roll_ev = ti._copy_usm_ndarray_for_roll_1d( + src=x, + dst=res, + shift=shift, + sycl_queue=exec_q, + depends=dep_evs, + ) + _manager.add_event_pair(hev, roll_ev) + return res + axis = normalize_axis_tuple(axis, x.ndim, allow_duplicate=True) + broadcasted = np.broadcast(shift, axis) + if broadcasted.ndim > 1: + raise ValueError("'shift' and 'axis' should be scalars or 1D sequences") + shifts = [ + 0, + ] * x.ndim + shape = x.shape + for sh, ax in broadcasted: + n_i = operator.index(shape[ax]) + shifted = shifts[ax] + operator.index(sh) + shifts[ax] = (shifted % n_i) if n_i > 0 else 0 + res = dpt.empty( + x.shape, dtype=x.dtype, usm_type=x.usm_type, sycl_queue=exec_q + ) + dep_evs = _manager.submitted_events + ht_e, roll_ev = ti._copy_usm_ndarray_for_roll_nd( + src=x, dst=res, shifts=shifts, sycl_queue=exec_q, depends=dep_evs + ) + _manager.add_event_pair(ht_e, roll_ev) + return res diff --git a/dpctl_ext/tensor/_reshape.py b/dpctl_ext/tensor/_reshape.py new file mode 100644 index 000000000000..61aa6c9c754f --- /dev/null +++ b/dpctl_ext/tensor/_reshape.py @@ -0,0 +1,209 @@ +# ***************************************************************************** +# Copyright (c) 2026, Intel Corporation +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# - Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# - Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# - Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +# THE POSSIBILITY OF SUCH DAMAGE. 
+# *****************************************************************************
+
+import operator
+
+import dpctl.tensor as dpt
+import dpctl.utils
+import numpy as np
+
+# TODO: revert to `from dpctl.tensor._tensor_impl...`
+# when dpnp fully migrates dpctl/tensor
+from dpctl_ext.tensor._tensor_impl import (
+    _copy_usm_ndarray_for_reshape,
+    _ravel_multi_index,
+    _unravel_index,
+)
+
+__doc__ = "Implementation module for :func:`dpctl.tensor.reshape`."
+
+
+def _make_unit_indexes(shape):
+    """
+    Construct a diagonal matrix with one on the diagonal, except where
+    the corresponding element of shape is 1.
+    """
+    nd = len(shape)
+    mi = np.zeros((nd, nd), dtype="u4")
+    for i, dim in enumerate(shape):
+        mi[i, i] = 1 if dim > 1 else 0
+    return mi
+
+
+def ti_unravel_index(flat_index, shape, order="C"):
+    return _unravel_index(flat_index, shape, order)
+
+
+def ti_ravel_multi_index(multi_index, shape, order="C"):
+    return _ravel_multi_index(multi_index, shape, order)
+
+
+def reshaped_strides(old_sh, old_sts, new_sh, order="C"):
+    """
+    When reshaping array with `old_sh` shape and `old_sts` strides
+    into the new shape `new_sh`, returns the new strides if the reshape
+    can be a view, otherwise returns `None`.
+    """
+    eye_new_mi = _make_unit_indexes(new_sh)
+    new_sts = [
+        sum(
+            st_i * ind_i
+            for st_i, ind_i in zip(
+                old_sts, ti_unravel_index(flat_index, old_sh, order=order)
+            )
+        )
+        for flat_index in [
+            ti_ravel_multi_index(unitvec, new_sh, order=order)
+            for unitvec in eye_new_mi
+        ]
+    ]
+    eye_old_mi = _make_unit_indexes(old_sh)
+    check_sts = [
+        sum(
+            st_i * ind_i
+            for st_i, ind_i in zip(
+                new_sts, ti_unravel_index(flat_index, new_sh, order=order)
+            )
+        )
+        for flat_index in [
+            ti_ravel_multi_index(unitvec, old_sh, order=order)
+            for unitvec in eye_old_mi
+        ]
+    ]
+    valid = all(
+        check_st == old_st or old_dim == 1
+        for check_st, old_st, old_dim in zip(check_sts, old_sts, old_sh)
+    )
+    return new_sts if valid else None
+
+
+def reshape(X, /, shape, *, order="C", copy=None):
+    """reshape(x, shape, order="C")
+
+    Reshapes array ``x`` into new shape.
+
+    Args:
+        x (usm_ndarray):
+            input array
+        shape (Tuple[int]):
+            the desired shape of the resulting array.
+        order ("C", "F", optional):
+            memory layout of the resulting array
+            if a copy is found to be necessary. Supported
+            choices are ``"C"`` for C-contiguous, or row-major layout;
+            and ``"F"`` for F-contiguous, or column-major layout.
+
+    Returns:
+        usm_ndarray:
+            Reshaped array is a view, if possible,
+            and a copy otherwise with memory layout as indicated
+            by ``order`` keyword.
+    """
+    if not isinstance(X, dpt.usm_ndarray):
+        raise TypeError(f"Expected usm_ndarray, got {type(X)}")
+    if not isinstance(shape, (list, tuple)):
+        shape = (shape,)
+    if order in "cfCF":
+        order = order.upper()
+    else:
+        raise ValueError(
+            f"Keyword 'order' not recognized. Expecting 'C' or 'F', got {order}"
+        )
+    if copy not in (True, False, None):
+        raise ValueError(
+            f"Keyword 'copy' not recognized. 
Expecting True, False, " + f"or None, got {copy}" + ) + shape = [operator.index(d) for d in shape] + negative_ones_count = 0 + for nshi in shape: + if nshi == -1: + negative_ones_count = negative_ones_count + 1 + if (nshi < -1) or negative_ones_count > 1: + raise ValueError( + "Target shape should have at most 1 negative " + "value which can only be -1" + ) + if negative_ones_count: + sz = -np.prod(shape) + if sz == 0: + raise ValueError( + f"Can not reshape array of size {X.size} into " + f"shape {tuple(i for i in shape if i >= 0)}" + ) + v = X.size // sz + shape = [v if d == -1 else d for d in shape] + if X.size != np.prod(shape): + raise ValueError(f"Can not reshape into {shape}") + if X.size: + newsts = reshaped_strides(X.shape, X.strides, shape, order=order) + else: + newsts = (1,) * len(shape) + copy_required = newsts is None + if copy_required and (copy is False): + raise ValueError( + "Reshaping the array requires a copy, but no copying was " + "requested by using copy=False" + ) + copy_q = X.sycl_queue + if copy_required or (copy is True): + # must perform a copy + copy_q = X.sycl_queue + flat_res = dpt.usm_ndarray( + (X.size,), + dtype=X.dtype, + buffer=X.usm_type, + buffer_ctor_kwargs={"queue": copy_q}, + ) + _manager = dpctl.utils.SequentialOrderManager[copy_q] + dep_evs = _manager.submitted_events + if order == "C": + hev, r_e = _copy_usm_ndarray_for_reshape( + src=X, dst=flat_res, sycl_queue=copy_q, depends=dep_evs + ) + else: + X_t = dpt.permute_dims(X, range(X.ndim - 1, -1, -1)) + hev, r_e = _copy_usm_ndarray_for_reshape( + src=X_t, dst=flat_res, sycl_queue=copy_q, depends=dep_evs + ) + _manager.add_event_pair(hev, r_e) + return dpt.usm_ndarray( + tuple(shape), dtype=X.dtype, buffer=flat_res, order=order + ) + # can form a view + if (len(shape) == X.ndim) and all( + s1 == s2 for s1, s2 in zip(shape, X.shape) + ): + return X + return dpt.usm_ndarray( + shape, + dtype=X.dtype, + buffer=X, + strides=tuple(newsts), + offset=X._element_offset, + ) diff --git a/dpctl_ext/tensor/libtensor/source/copy_for_reshape.cpp b/dpctl_ext/tensor/libtensor/source/copy_for_reshape.cpp new file mode 100644 index 000000000000..524bfcfdb98b --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/copy_for_reshape.cpp @@ -0,0 +1,184 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+// THE POSSIBILITY OF SUCH DAMAGE.
+//*****************************************************************************
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines functions of dpctl.tensor._tensor_impl extensions
+//===----------------------------------------------------------------------===//
+
+#include <cstddef>
+#include <stdexcept>
+#include <utility>
+#include <vector>
+
+#include <sycl/sycl.hpp>
+
+#include "dpnp4pybind11.hpp"
+#include <pybind11/pybind11.h>
+
+#include "copy_for_reshape.hpp"
+#include "kernels/copy_and_cast.hpp"
+#include "utils/offset_utils.hpp"
+#include "utils/output_validation.hpp"
+#include "utils/sycl_alloc_utils.hpp"
+#include "utils/type_dispatch.hpp"
+
+namespace dpctl::tensor::py_internal
+{
+
+namespace py = pybind11;
+namespace td_ns = dpctl::tensor::type_dispatch;
+
+using dpctl::tensor::kernels::copy_and_cast::copy_for_reshape_fn_ptr_t;
+using dpctl::utils::keep_args_alive;
+
+// define static vector
+static copy_for_reshape_fn_ptr_t
+    copy_for_reshape_generic_dispatch_vector[td_ns::num_types];
+
+/*
+ * Copies src into dst (same data type) of different shapes by using flat
+ * iterations.
+ *
+ * Equivalent to the following loop:
+ *
+ * for i in range(src.size):
+ *     dst[np.multi_index(i, dst.shape)] = src[np.multi_index(i, src.shape)]
+ */
+std::pair<sycl::event, sycl::event>
+    copy_usm_ndarray_for_reshape(const dpctl::tensor::usm_ndarray &src,
+                                 const dpctl::tensor::usm_ndarray &dst,
+                                 sycl::queue &exec_q,
+                                 const std::vector<sycl::event> &depends)
+{
+    py::ssize_t src_nelems = src.get_size();
+    py::ssize_t dst_nelems = dst.get_size();
+
+    // Must have the same number of elements
+    if (src_nelems != dst_nelems) {
+        throw py::value_error(
+            "copy_usm_ndarray_for_reshape requires src and dst to "
+            "have the same number of elements.");
+    }
+
+    int src_typenum = src.get_typenum();
+    int dst_typenum = dst.get_typenum();
+
+    // typenames must be the same
+    if (src_typenum != dst_typenum) {
+        throw py::value_error(
+            "copy_usm_ndarray_for_reshape requires src and dst to "
+            "have the same type.");
+    }
+
+    if (src_nelems == 0) {
+        return std::make_pair(sycl::event(), sycl::event());
+    }
+
+    dpctl::tensor::validation::AmpleMemory::throw_if_not_ample(dst, src_nelems);
+
+    // check same contexts
+    if (!dpctl::utils::queues_are_compatible(exec_q, {src, dst})) {
+        throw py::value_error(
+            "Execution queue is not compatible with allocation queues");
+    }
+
+    dpctl::tensor::validation::CheckWritable::throw_if_not_writable(dst);
+
+    if (src_nelems == 1) {
+        // handle special case of 1-element array
+        int src_elemsize = src.get_elemsize();
+        const char *src_data = src.get_data();
+        char *dst_data = dst.get_data();
+        sycl::event copy_ev =
+            exec_q.copy<char>(src_data, dst_data, src_elemsize, depends);
+        return std::make_pair(keep_args_alive(exec_q, {src, dst}, {copy_ev}),
+                              copy_ev);
+    }
+
+    // dimensions may be different
+    int src_nd = src.get_ndim();
+    int dst_nd = dst.get_ndim();
+
+    auto array_types = td_ns::usm_ndarray_types();
+    int type_id = array_types.typenum_to_lookup_id(src_typenum);
+
+    auto fn = 
copy_for_reshape_generic_dispatch_vector[type_id];
+
+    auto src_shape = src.get_shape_vector();
+    auto src_strides = src.get_strides_vector();
+
+    auto dst_shape = dst.get_shape_vector();
+    auto dst_strides = dst.get_strides_vector();
+
+    std::vector<sycl::event> host_task_events;
+    host_task_events.reserve(2);
+
+    // shape_strides = [src_shape, src_strides, dst_shape, dst_strides]
+    using dpctl::tensor::offset_utils::device_allocate_and_pack;
+    auto ptr_size_event_tuple = device_allocate_and_pack<py::ssize_t>(
+        exec_q, host_task_events, src_shape, src_strides, dst_shape,
+        dst_strides);
+    auto copy_shape_ev = std::get<2>(ptr_size_event_tuple);
+    auto shape_strides_owner = std::move(std::get<0>(ptr_size_event_tuple));
+    const py::ssize_t *shape_strides = shape_strides_owner.get();
+
+    const char *src_data = src.get_data();
+    char *dst_data = dst.get_data();
+
+    std::vector<sycl::event> all_deps;
+    all_deps.reserve(depends.size() + 1);
+    all_deps.push_back(copy_shape_ev);
+    all_deps.insert(std::end(all_deps), std::begin(depends), std::end(depends));
+
+    sycl::event copy_for_reshape_event =
+        fn(exec_q, src_nelems, src_nd, dst_nd, shape_strides, src_data,
+           dst_data, all_deps);
+
+    sycl::event temporaries_cleanup_ev =
+        dpctl::tensor::alloc_utils::async_smart_free(
+            exec_q, {copy_for_reshape_event}, shape_strides_owner);
+
+    host_task_events.push_back(temporaries_cleanup_ev);
+
+    return std::make_pair(keep_args_alive(exec_q, {src, dst}, host_task_events),
+                          copy_for_reshape_event);
+}
+
+void init_copy_for_reshape_dispatch_vectors(void)
+{
+    using namespace td_ns;
+    using dpctl::tensor::kernels::copy_and_cast::CopyForReshapeGenericFactory;
+
+    DispatchVectorBuilder<copy_for_reshape_fn_ptr_t,
+                          CopyForReshapeGenericFactory, num_types>
+        dvb;
+    dvb.populate_dispatch_vector(copy_for_reshape_generic_dispatch_vector);
+}
+
+} // namespace dpctl::tensor::py_internal
diff --git a/dpctl_ext/tensor/libtensor/source/copy_for_reshape.hpp b/dpctl_ext/tensor/libtensor/source/copy_for_reshape.hpp
new file mode 100644
index 000000000000..c5af885ad6cd
--- /dev/null
+++ b/dpctl_ext/tensor/libtensor/source/copy_for_reshape.hpp
@@ -0,0 +1,54 @@
+//*****************************************************************************
+// Copyright (c) 2026, Intel Corporation
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// - Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// - Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// - Neither the name of the copyright holder nor the names of its contributors
+//   may be used to endorse or promote products derived from this software
+//   without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_impl extensions +//===----------------------------------------------------------------------===// + +#pragma once +#include +#include + +#include + +#include "dpnp4pybind11.hpp" + +namespace dpctl::tensor::py_internal +{ + +extern std::pair + copy_usm_ndarray_for_reshape(const dpctl::tensor::usm_ndarray &src, + const dpctl::tensor::usm_ndarray &dst, + sycl::queue &exec_q, + const std::vector &depends = {}); + +extern void init_copy_for_reshape_dispatch_vectors(); + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/copy_for_roll.cpp b/dpctl_ext/tensor/libtensor/source/copy_for_roll.cpp new file mode 100644 index 000000000000..a187b2247677 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/copy_for_roll.cpp @@ -0,0 +1,400 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. 
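For reference, the flat-traversal semantics implemented by copy_usm_ndarray_for_reshape
above can be written out as a plain NumPy sketch. This is a conceptual model only, not
the SYCL kernel; np.unravel_index is the actual NumPy spelling of the np.multi_index
pseudo-call used in the source comment.

    import numpy as np

    def copy_for_reshape_reference(src, dst):
        # Same dtype and element count, possibly different shapes:
        # element i of the C-order flattening of src lands at
        # element i of the C-order flattening of dst.
        assert src.size == dst.size and src.dtype == dst.dtype
        for i in range(src.size):
            dst[np.unravel_index(i, dst.shape)] = \
                src[np.unravel_index(i, src.shape)]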
+//***************************************************************************** +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_impl extensions +//===----------------------------------------------------------------------===// + +#include +#include +#include +#include +#include +#include + +#include + +#include "dpnp4pybind11.hpp" +#include + +#include "copy_for_roll.hpp" +#include "kernels/copy_and_cast.hpp" +#include "utils/offset_utils.hpp" +#include "utils/output_validation.hpp" +#include "utils/sycl_alloc_utils.hpp" +#include "utils/type_dispatch.hpp" + +#include "simplify_iteration_space.hpp" + +namespace dpctl::tensor::py_internal +{ + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +using dpctl::tensor::kernels::copy_and_cast::copy_for_roll_contig_fn_ptr_t; +using dpctl::tensor::kernels::copy_and_cast:: + copy_for_roll_ndshift_strided_fn_ptr_t; +using dpctl::tensor::kernels::copy_and_cast::copy_for_roll_strided_fn_ptr_t; +using dpctl::utils::keep_args_alive; + +// define static vector +static copy_for_roll_strided_fn_ptr_t + copy_for_roll_strided_dispatch_vector[td_ns::num_types]; + +static copy_for_roll_contig_fn_ptr_t + copy_for_roll_contig_dispatch_vector[td_ns::num_types]; + +static copy_for_roll_ndshift_strided_fn_ptr_t + copy_for_roll_ndshift_dispatch_vector[td_ns::num_types]; + +/* + * Copies src into dst (same data type) of different shapes by using flat + * iterations. + * + * Equivalent to the following loop: + * + * for i for range(src.size): + * dst[np.multi_index(i, dst.shape)] = src[np.multi_index(i, src.shape)] + */ +std::pair + copy_usm_ndarray_for_roll_1d(const dpctl::tensor::usm_ndarray &src, + const dpctl::tensor::usm_ndarray &dst, + py::ssize_t shift, + sycl::queue &exec_q, + const std::vector &depends) +{ + int src_nd = src.get_ndim(); + int dst_nd = dst.get_ndim(); + + // Must have the same number of dimensions + if (src_nd != dst_nd) { + throw py::value_error( + "copy_usm_ndarray_for_roll_1d requires src and dst to " + "have the same number of dimensions."); + } + + const py::ssize_t *src_shape_ptr = src.get_shape_raw(); + const py::ssize_t *dst_shape_ptr = dst.get_shape_raw(); + + if (!std::equal(src_shape_ptr, src_shape_ptr + src_nd, dst_shape_ptr)) { + throw py::value_error( + "copy_usm_ndarray_for_roll_1d requires src and dst to " + "have the same shape."); + } + + py::ssize_t src_nelems = src.get_size(); + + int src_typenum = src.get_typenum(); + int dst_typenum = dst.get_typenum(); + + // typenames must be the same + if (src_typenum != dst_typenum) { + throw py::value_error( + "copy_usm_ndarray_for_roll_1d requires src and dst to " + "have the same type."); + } + + if (src_nelems == 0) { + return std::make_pair(sycl::event(), sycl::event()); + } + + dpctl::tensor::validation::AmpleMemory::throw_if_not_ample(dst, src_nelems); + + // check same contexts + if (!dpctl::utils::queues_are_compatible(exec_q, {src, dst})) { + throw py::value_error( + "Execution queue is not compatible with allocation queues"); + } + + dpctl::tensor::validation::CheckWritable::throw_if_not_writable(dst); + + if (src_nelems == 1) { + // handle special case of 1-element array + int src_elemsize = src.get_elemsize(); + const char *src_data = src.get_data(); + char *dst_data = dst.get_data(); + sycl::event copy_ev = + exec_q.copy(src_data, dst_data, src_elemsize, depends); + return std::make_pair(keep_args_alive(exec_q, {src, dst}, {copy_ev}), + 
copy_ev); + } + + auto array_types = td_ns::usm_ndarray_types(); + int type_id = array_types.typenum_to_lookup_id(src_typenum); + + const bool is_src_c_contig = src.is_c_contiguous(); + const bool is_src_f_contig = src.is_f_contiguous(); + + const bool is_dst_c_contig = dst.is_c_contiguous(); + const bool is_dst_f_contig = dst.is_f_contiguous(); + + const bool both_c_contig = is_src_c_contig && is_dst_c_contig; + const bool both_f_contig = is_src_f_contig && is_dst_f_contig; + + // normalize shift parameter to be 0 <= offset < src_nelems + std::size_t offset = + (shift > 0) ? (shift % src_nelems) : src_nelems + (shift % src_nelems); + + const char *src_data = src.get_data(); + char *dst_data = dst.get_data(); + + if (both_c_contig || both_f_contig) { + auto fn = copy_for_roll_contig_dispatch_vector[type_id]; + + if (fn != nullptr) { + static constexpr py::ssize_t zero_offset = 0; + + sycl::event copy_for_roll_ev = + fn(exec_q, offset, src_nelems, src_data, zero_offset, dst_data, + zero_offset, depends); + + sycl::event ht_ev = + keep_args_alive(exec_q, {src, dst}, {copy_for_roll_ev}); + + return std::make_pair(ht_ev, copy_for_roll_ev); + } + } + + auto const &src_strides = src.get_strides_vector(); + auto const &dst_strides = dst.get_strides_vector(); + + using shT = std::vector; + shT simplified_shape; + shT simplified_src_strides; + shT simplified_dst_strides; + py::ssize_t src_offset(0); + py::ssize_t dst_offset(0); + + int nd = src_nd; + const py::ssize_t *shape = src_shape_ptr; + + // nd, simplified_* and *_offset are modified by reference + dpctl::tensor::py_internal::simplify_iteration_space( + nd, shape, src_strides, dst_strides, + // output + simplified_shape, simplified_src_strides, simplified_dst_strides, + src_offset, dst_offset); + + if (nd == 1 && simplified_src_strides[0] == 1 && + simplified_dst_strides[0] == 1) { + auto fn = copy_for_roll_contig_dispatch_vector[type_id]; + + if (fn != nullptr) { + + sycl::event copy_for_roll_ev = + fn(exec_q, offset, src_nelems, src_data, src_offset, dst_data, + dst_offset, depends); + + sycl::event ht_ev = + keep_args_alive(exec_q, {src, dst}, {copy_for_roll_ev}); + + return std::make_pair(ht_ev, copy_for_roll_ev); + } + } + + auto fn = copy_for_roll_strided_dispatch_vector[type_id]; + + std::vector host_task_events; + host_task_events.reserve(2); + + // shape_strides = [src_shape, src_strides, dst_strides] + using dpctl::tensor::offset_utils::device_allocate_and_pack; + auto ptr_size_event_tuple = device_allocate_and_pack( + exec_q, host_task_events, simplified_shape, simplified_src_strides, + simplified_dst_strides); + auto shape_strides_owner = std::move(std::get<0>(ptr_size_event_tuple)); + sycl::event copy_shape_ev = std::get<2>(ptr_size_event_tuple); + const py::ssize_t *shape_strides = shape_strides_owner.get(); + + std::vector all_deps(depends.size() + 1); + all_deps.push_back(copy_shape_ev); + all_deps.insert(std::end(all_deps), std::begin(depends), std::end(depends)); + + sycl::event copy_for_roll_event = + fn(exec_q, offset, src_nelems, src_nd, shape_strides, src_data, + src_offset, dst_data, dst_offset, all_deps); + + sycl::event temporaries_cleanup_ev = + dpctl::tensor::alloc_utils::async_smart_free( + exec_q, {copy_for_roll_event}, shape_strides_owner); + host_task_events.push_back(temporaries_cleanup_ev); + + return std::make_pair(keep_args_alive(exec_q, {src, dst}, host_task_events), + copy_for_roll_event); +} + +std::pair + copy_usm_ndarray_for_roll_nd(const dpctl::tensor::usm_ndarray &src, + const 
dpctl::tensor::usm_ndarray &dst, + const std::vector &shifts, + sycl::queue &exec_q, + const std::vector &depends) +{ + int src_nd = src.get_ndim(); + int dst_nd = dst.get_ndim(); + + // Must have the same number of dimensions + if (src_nd != dst_nd) { + throw py::value_error( + "copy_usm_ndarray_for_roll_nd requires src and dst to " + "have the same number of dimensions."); + } + + if (static_cast(src_nd) != shifts.size()) { + throw py::value_error( + "copy_usm_ndarray_for_roll_nd requires shifts to " + "contain an integral shift for each array dimension."); + } + + const py::ssize_t *src_shape_ptr = src.get_shape_raw(); + const py::ssize_t *dst_shape_ptr = dst.get_shape_raw(); + + if (!std::equal(src_shape_ptr, src_shape_ptr + src_nd, dst_shape_ptr)) { + throw py::value_error( + "copy_usm_ndarray_for_roll_nd requires src and dst to " + "have the same shape."); + } + + py::ssize_t src_nelems = src.get_size(); + + int src_typenum = src.get_typenum(); + int dst_typenum = dst.get_typenum(); + + // typenames must be the same + if (src_typenum != dst_typenum) { + throw py::value_error( + "copy_usm_ndarray_for_roll_nd requires src and dst to " + "have the same type."); + } + + if (src_nelems == 0) { + return std::make_pair(sycl::event(), sycl::event()); + } + + dpctl::tensor::validation::AmpleMemory::throw_if_not_ample(dst, src_nelems); + + // check for compatible queues + if (!dpctl::utils::queues_are_compatible(exec_q, {src, dst})) { + throw py::value_error( + "Execution queue is not compatible with allocation queues"); + } + + if (src_nelems == 1) { + // handle special case of 1-element array + int src_elemsize = src.get_elemsize(); + const char *src_data = src.get_data(); + char *dst_data = dst.get_data(); + sycl::event copy_ev = + exec_q.copy(src_data, dst_data, src_elemsize, depends); + return std::make_pair(keep_args_alive(exec_q, {src, dst}, {copy_ev}), + copy_ev); + } + + auto array_types = td_ns::usm_ndarray_types(); + int type_id = array_types.typenum_to_lookup_id(src_typenum); + + std::vector normalized_shifts{}; + normalized_shifts.reserve(src_nd); + + for (int i = 0; i < src_nd; ++i) { + // normalize shift parameter to be 0 <= offset < dim + py::ssize_t dim = src_shape_ptr[i]; + std::size_t offset = + (shifts[i] >= 0) ? 
(shifts[i] % dim) : dim + (shifts[i] % dim); + + normalized_shifts.push_back(offset); + } + + const char *src_data = src.get_data(); + char *dst_data = dst.get_data(); + + auto const &src_strides = src.get_strides_vector(); + auto const &dst_strides = dst.get_strides_vector(); + auto const &common_shape = src.get_shape_vector(); + + static constexpr py::ssize_t src_offset = 0; + static constexpr py::ssize_t dst_offset = 0; + + auto fn = copy_for_roll_ndshift_dispatch_vector[type_id]; + + std::vector host_task_events; + host_task_events.reserve(2); + + // shape_strides = [src_shape, src_strides, dst_strides] + using dpctl::tensor::offset_utils::device_allocate_and_pack; + auto ptr_size_event_tuple = device_allocate_and_pack( + exec_q, host_task_events, common_shape, src_strides, dst_strides, + normalized_shifts); + auto shape_strides_shifts_owner = + std::move(std::get<0>(ptr_size_event_tuple)); + sycl::event copy_shape_ev = std::get<2>(ptr_size_event_tuple); + const py::ssize_t *shape_strides_shifts = shape_strides_shifts_owner.get(); + + std::vector all_deps(depends.size() + 1); + all_deps.push_back(copy_shape_ev); + all_deps.insert(std::end(all_deps), std::begin(depends), std::end(depends)); + + sycl::event copy_for_roll_event = + fn(exec_q, src_nelems, src_nd, shape_strides_shifts, src_data, + src_offset, dst_data, dst_offset, all_deps); + + auto temporaries_cleanup_ev = dpctl::tensor::alloc_utils::async_smart_free( + exec_q, {copy_for_roll_event}, shape_strides_shifts_owner); + host_task_events.push_back(temporaries_cleanup_ev); + + return std::make_pair(keep_args_alive(exec_q, {src, dst}, host_task_events), + copy_for_roll_event); +} + +void init_copy_for_roll_dispatch_vectors(void) +{ + using namespace td_ns; + using dpctl::tensor::kernels::copy_and_cast::CopyForRollStridedFactory; + + DispatchVectorBuilder + dvb1; + dvb1.populate_dispatch_vector(copy_for_roll_strided_dispatch_vector); + + using dpctl::tensor::kernels::copy_and_cast::CopyForRollContigFactory; + DispatchVectorBuilder + dvb2; + dvb2.populate_dispatch_vector(copy_for_roll_contig_dispatch_vector); + + using dpctl::tensor::kernels::copy_and_cast::CopyForRollNDShiftFactory; + DispatchVectorBuilder + dvb3; + dvb3.populate_dispatch_vector(copy_for_roll_ndshift_dispatch_vector); +} + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/copy_for_roll.hpp b/dpctl_ext/tensor/libtensor/source/copy_for_roll.hpp new file mode 100644 index 000000000000..cffbf9f6f0d6 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/copy_for_roll.hpp @@ -0,0 +1,65 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. 
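The per-axis shift normalization used by both roll entry points above maps an
arbitrary (possibly negative) shift into the half-open range [0, dim). In Python this
collapses to a single modulo, since % is already non-negative for a positive modulus;
the sketch below models the intended mapping rather than the sign-explicit C++ branches.

    def normalize_shift(shift, dim):
        # roll by `shift` == copy with an offset of `shift % dim`
        return shift % dim

    assert normalize_shift(7, 5) == 2
    assert normalize_shift(-1, 5) == 4
    assert normalize_shift(0, 5) == 0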
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_impl extensions +//===----------------------------------------------------------------------===// + +#pragma once +#include +#include + +#include + +#include "dpnp4pybind11.hpp" +#include + +namespace py = pybind11; + +namespace dpctl::tensor::py_internal +{ + +extern std::pair + copy_usm_ndarray_for_roll_1d(const dpctl::tensor::usm_ndarray &src, + const dpctl::tensor::usm_ndarray &dst, + py::ssize_t shift, + sycl::queue &exec_q, + const std::vector &depends = {}); + +extern std::pair + copy_usm_ndarray_for_roll_nd(const dpctl::tensor::usm_ndarray &src, + const dpctl::tensor::usm_ndarray &dst, + const std::vector &shifts, + sycl::queue &exec_q, + const std::vector &depends = {}); + +extern void init_copy_for_roll_dispatch_vectors(); + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/copy_numpy_ndarray_into_usm_ndarray.cpp b/dpctl_ext/tensor/libtensor/source/copy_numpy_ndarray_into_usm_ndarray.cpp new file mode 100644 index 000000000000..e97e8aeb1ca1 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/copy_numpy_ndarray_into_usm_ndarray.cpp @@ -0,0 +1,368 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_impl extensions +//===----------------------------------------------------------------------===// + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "dpnp4pybind11.hpp" +#include +#include + +#include "kernels/copy_and_cast.hpp" +#include "utils/offset_utils.hpp" +#include "utils/output_validation.hpp" +#include "utils/sycl_alloc_utils.hpp" +#include "utils/type_dispatch.hpp" + +#include "copy_numpy_ndarray_into_usm_ndarray.hpp" +#include "simplify_iteration_space.hpp" + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +namespace dpctl::tensor::py_internal +{ + +using dpctl::tensor::kernels::copy_and_cast:: + copy_and_cast_from_host_blocking_fn_ptr_t; + +static copy_and_cast_from_host_blocking_fn_ptr_t + copy_and_cast_from_host_blocking_dispatch_table[td_ns::num_types] + [td_ns::num_types]; + +using dpctl::tensor::kernels::copy_and_cast:: + copy_and_cast_from_host_contig_blocking_fn_ptr_t; + +static copy_and_cast_from_host_contig_blocking_fn_ptr_t + copy_and_cast_from_host_contig_blocking_dispatch_table[td_ns::num_types] + [td_ns::num_types]; + +void copy_numpy_ndarray_into_usm_ndarray( + const py::array &npy_src, + const dpctl::tensor::usm_ndarray &dst, + sycl::queue &exec_q, + const std::vector &depends) +{ + int src_ndim = npy_src.ndim(); + int dst_ndim = dst.get_ndim(); + + if (src_ndim != dst_ndim) { + throw py::value_error("Source ndarray and destination usm_ndarray have " + "different array ranks, " + "i.e. 
different number of indices needed to " + "address array elements."); + } + + const py::ssize_t *src_shape = npy_src.shape(); + const py::ssize_t *dst_shape = dst.get_shape_raw(); + bool shapes_equal(true); + std::size_t src_nelems(1); + for (int i = 0; shapes_equal && (i < src_ndim); ++i) { + shapes_equal = shapes_equal && (src_shape[i] == dst_shape[i]); + src_nelems *= static_cast(src_shape[i]); + } + + if (!shapes_equal) { + throw py::value_error("Source ndarray and destination usm_ndarray have " + "difference shapes."); + } + + if (src_nelems == 0) { + // nothing to do + return; + } + + dpctl::tensor::validation::AmpleMemory::throw_if_not_ample(dst, src_nelems); + + if (!dpctl::utils::queues_are_compatible(exec_q, {dst})) { + throw py::value_error("Execution queue is not compatible with the " + "allocation queue"); + } + + dpctl::tensor::validation::CheckWritable::throw_if_not_writable(dst); + + // here we assume that NumPy's type numbers agree with ours for types + // supported in both + int src_typenum = + py::detail::array_descriptor_proxy(npy_src.dtype().ptr())->type_num; + int dst_typenum = dst.get_typenum(); + + const auto &array_types = td_ns::usm_ndarray_types(); + int src_type_id = array_types.typenum_to_lookup_id(src_typenum); + int dst_type_id = array_types.typenum_to_lookup_id(dst_typenum); + + py::buffer_info src_pybuf = npy_src.request(); + const char *const src_data = static_cast(src_pybuf.ptr); + char *dst_data = dst.get_data(); + + int src_flags = npy_src.flags(); + + // check for applicability of special cases: + // (same type && (both C-contiguous || both F-contiguous) + const bool both_c_contig = + ((src_flags & py::array::c_style) && dst.is_c_contiguous()); + const bool both_f_contig = + ((src_flags & py::array::f_style) && dst.is_f_contiguous()); + + const bool same_data_types = (src_type_id == dst_type_id); + + if (both_c_contig || both_f_contig) { + if (same_data_types) { + int src_elem_size = npy_src.itemsize(); + + sycl::event copy_ev = + exec_q.memcpy(static_cast(dst_data), + static_cast(src_data), + src_nelems * src_elem_size, depends); + + { + // wait for copy_ev to complete + // release GIL to allow other threads (host_tasks) + // a chance to acquire GIL + py::gil_scoped_release lock{}; + copy_ev.wait(); + } + + return; + } + else { + py::gil_scoped_release lock{}; + + auto copy_and_cast_from_host_contig_blocking_fn = + copy_and_cast_from_host_contig_blocking_dispatch_table + [dst_type_id][src_type_id]; + + static constexpr py::ssize_t zero_offset(0); + + copy_and_cast_from_host_contig_blocking_fn( + exec_q, src_nelems, src_data, zero_offset, dst_data, + zero_offset, depends); + + return; + } + } + + auto const &dst_strides = + dst.get_strides_vector(); // N.B.: strides in elements + + using shT = std::vector; + shT simplified_shape; + shT simplified_src_strides; + shT simplified_dst_strides; + py::ssize_t src_offset(0); + py::ssize_t dst_offset(0); + + int nd = src_ndim; + const py::ssize_t *shape = src_shape; + + const py::ssize_t *src_strides_p = + npy_src.strides(); // N.B.: strides in bytes + py::ssize_t src_itemsize = npy_src.itemsize(); // item size in bytes + + bool is_src_c_contig = ((src_flags & py::array::c_style) != 0); + bool is_src_f_contig = ((src_flags & py::array::f_style) != 0); + + shT src_strides_in_elems; + if (src_strides_p) { + src_strides_in_elems.resize(nd); + // copy and convert strides from bytes to elements + std::transform( + src_strides_p, src_strides_p + nd, std::begin(src_strides_in_elems), + [src_itemsize](py::ssize_t el) { 
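+                    // The division below must be exact: NumPy strides
+                    // are expressed in bytes, and a non-zero remainder
+                    // would mean an offset that cannot be represented
+                    // in whole elements.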
+ py::ssize_t q = el / src_itemsize; + if (q * src_itemsize != el) { + throw std::runtime_error( + "NumPy array strides are not multiple of itemsize"); + } + return q; + }); + } + else { + if (is_src_c_contig) { + src_strides_in_elems = + dpctl::tensor::c_contiguous_strides(nd, src_shape); + } + else if (is_src_f_contig) { + src_strides_in_elems = + dpctl::tensor::f_contiguous_strides(nd, src_shape); + } + else { + throw py::value_error("NumPy source array has null strides but is " + "neither C- nor F-contiguous."); + } + } + + // nd, simplified_* vectors and offsets are modified by reference + simplify_iteration_space(nd, shape, src_strides_in_elems, dst_strides, + // outputs + simplified_shape, simplified_src_strides, + simplified_dst_strides, src_offset, dst_offset); + + assert(simplified_shape.size() == static_cast(nd)); + assert(simplified_src_strides.size() == static_cast(nd)); + assert(simplified_dst_strides.size() == static_cast(nd)); + + // handle nd == 0 + if (nd == 0) { + nd = 1; + simplified_shape.reserve(nd); + simplified_shape.push_back(1); + + simplified_src_strides.reserve(nd); + simplified_src_strides.push_back(1); + + simplified_dst_strides.reserve(nd); + simplified_dst_strides.push_back(1); + } + + const bool is_contig_vector = + ((nd == 1) && (simplified_src_strides.front() == 1) && + (simplified_dst_strides.front() == 1)); + + const bool can_use_memcpy = (same_data_types && is_contig_vector && + (src_offset == 0) && (dst_offset == 0)); + + if (can_use_memcpy) { + int src_elem_size = npy_src.itemsize(); + + sycl::event copy_ev = exec_q.memcpy( + static_cast(dst_data), static_cast(src_data), + src_nelems * src_elem_size, depends); + + { + // wait for copy_ev to complete + // release GIL to allow other threads (host_tasks) + // a chance to acquire GIL + py::gil_scoped_release lock{}; + + copy_ev.wait(); + } + + return; + } + + // Minimum and maximum element offsets for source np.ndarray + py::ssize_t npy_src_min_nelem_offset(src_offset); + py::ssize_t npy_src_max_nelem_offset(src_offset); + for (int i = 0; i < nd; ++i) { + if (simplified_src_strides[i] < 0) { + npy_src_min_nelem_offset += + simplified_src_strides[i] * (simplified_shape[i] - 1); + } + else { + npy_src_max_nelem_offset += + simplified_src_strides[i] * (simplified_shape[i] - 1); + } + } + + if (is_contig_vector) { + // release GIL for the blocking call + py::gil_scoped_release lock{}; + + auto copy_and_cast_from_host_contig_blocking_fn = + copy_and_cast_from_host_contig_blocking_dispatch_table[dst_type_id] + [src_type_id]; + + copy_and_cast_from_host_contig_blocking_fn(exec_q, src_nelems, src_data, + src_offset, dst_data, + dst_offset, depends); + + return; + } + + std::vector host_task_events; + host_task_events.reserve(1); + + // Copy shape strides into device memory + using dpctl::tensor::offset_utils::device_allocate_and_pack; + auto ptr_size_event_tuple = device_allocate_and_pack( + exec_q, host_task_events, simplified_shape, simplified_src_strides, + simplified_dst_strides); + auto shape_strides_owner = std::move(std::get<0>(ptr_size_event_tuple)); + const sycl::event ©_shape_ev = std::get<2>(ptr_size_event_tuple); + const py::ssize_t *shape_strides = shape_strides_owner.get(); + + { + // release GIL for the blocking call + py::gil_scoped_release lock{}; + + // Get implementation function pointer + auto copy_and_cast_from_host_blocking_fn = + copy_and_cast_from_host_blocking_dispatch_table[dst_type_id] + [src_type_id]; + + copy_and_cast_from_host_blocking_fn( + exec_q, src_nelems, nd, shape_strides, 
src_data, src_offset, + npy_src_min_nelem_offset, npy_src_max_nelem_offset, dst_data, + dst_offset, depends, {copy_shape_ev}); + + // invoke USM deleter in smart pointer while GIL is held + shape_strides_owner.reset(nullptr); + } + + return; +} + +void init_copy_numpy_ndarray_into_usm_ndarray_dispatch_tables(void) +{ + using namespace td_ns; + using dpctl::tensor::kernels::copy_and_cast::CopyAndCastFromHostFactory; + + DispatchTableBuilder + dtb_copy_from_numpy; + + dtb_copy_from_numpy.populate_dispatch_table( + copy_and_cast_from_host_blocking_dispatch_table); + + using dpctl::tensor::kernels::copy_and_cast:: + CopyAndCastFromHostContigFactory; + + DispatchTableBuilder + dtb_copy_from_numpy_contig; + + dtb_copy_from_numpy_contig.populate_dispatch_table( + copy_and_cast_from_host_contig_blocking_dispatch_table); +} + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/copy_numpy_ndarray_into_usm_ndarray.hpp b/dpctl_ext/tensor/libtensor/source/copy_numpy_ndarray_into_usm_ndarray.hpp new file mode 100644 index 000000000000..f2de95f97cca --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/copy_numpy_ndarray_into_usm_ndarray.hpp @@ -0,0 +1,57 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. 
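A minimal usage sketch of the blocking host-to-device copy bound above, assuming the
_tensor_impl module built by this PR is importable; the argument names (src, dst,
sycl_queue) follow the m.def registration further below.

    import numpy as np
    import dpctl.tensor as dpt
    import dpctl_ext.tensor._tensor_impl as ti

    a_np = np.arange(12, dtype=np.float32).reshape(3, 4)
    dst = dpt.empty((3, 4), dtype="f4")
    # Blocking copy-and-cast; ranks and shapes must match exactly.
    ti._copy_numpy_ndarray_into_usm_ndarray(
        src=a_np, dst=dst, sycl_queue=dst.sycl_queue
    )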
+//***************************************************************************** +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_impl extensions +//===----------------------------------------------------------------------===// + +#pragma once +#include + +#include + +#include "dpnp4pybind11.hpp" +#include +#include + +namespace py = pybind11; + +namespace dpctl::tensor::py_internal +{ + +extern void copy_numpy_ndarray_into_usm_ndarray( + const py::array &npy_src, + const dpctl::tensor::usm_ndarray &dst, + sycl::queue &exec_q, + const std::vector &depends = {}); + +extern void init_copy_numpy_ndarray_into_usm_ndarray_dispatch_tables(void); + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/tensor_ctors.cpp b/dpctl_ext/tensor/libtensor/source/tensor_ctors.cpp index 0478fb19678c..3e5be4d9e8fe 100644 --- a/dpctl_ext/tensor/libtensor/source/tensor_ctors.cpp +++ b/dpctl_ext/tensor/libtensor/source/tensor_ctors.cpp @@ -48,9 +48,9 @@ // #include "clip.hpp" #include "copy_and_cast_usm_to_usm.hpp" #include "copy_as_contig.hpp" -// #include "copy_for_reshape.hpp" -// #include "copy_for_roll.hpp" -// #include "copy_numpy_ndarray_into_usm_ndarray.hpp" +#include "copy_for_reshape.hpp" +#include "copy_for_roll.hpp" +#include "copy_numpy_ndarray_into_usm_ndarray.hpp" #include "device_support_queries.hpp" // #include "eye_ctor.hpp" #include "full_ctor.hpp" @@ -84,16 +84,16 @@ using dpctl::tensor::py_internal::py_as_f_contig; /* =========================== Copy for reshape ============================= */ -// using dpctl::tensor::py_internal::copy_usm_ndarray_for_reshape; +using dpctl::tensor::py_internal::copy_usm_ndarray_for_reshape; /* =========================== Copy for roll ============================= */ -// using dpctl::tensor::py_internal::copy_usm_ndarray_for_roll_1d; -// using dpctl::tensor::py_internal::copy_usm_ndarray_for_roll_nd; +using dpctl::tensor::py_internal::copy_usm_ndarray_for_roll_1d; +using dpctl::tensor::py_internal::copy_usm_ndarray_for_roll_nd; /* ============= Copy from numpy.ndarray to usm_ndarray ==================== */ -// using dpctl::tensor::py_internal::copy_numpy_ndarray_into_usm_ndarray; +using dpctl::tensor::py_internal::copy_numpy_ndarray_into_usm_ndarray; /* ============= linear-sequence ==================== */ @@ -143,7 +143,7 @@ void init_dispatch_tables(void) using namespace dpctl::tensor::py_internal; init_copy_and_cast_usm_to_usm_dispatch_tables(); - // init_copy_numpy_ndarray_into_usm_ndarray_dispatch_tables(); + init_copy_numpy_ndarray_into_usm_ndarray_dispatch_tables(); init_advanced_indexing_dispatch_tables(); // init_where_dispatch_tables(); return; @@ -155,8 +155,8 @@ void init_dispatch_vectors(void) using namespace dpctl::tensor::py_internal; init_copy_as_contig_dispatch_vectors(); - // init_copy_for_reshape_dispatch_vectors(); - // init_copy_for_roll_dispatch_vectors(); + init_copy_for_reshape_dispatch_vectors(); + init_copy_for_roll_dispatch_vectors(); // init_linear_sequences_dispatch_vectors(); init_full_ctor_dispatch_vectors(); init_zeros_ctor_dispatch_vectors(); @@ -276,28 +276,29 @@ PYBIND11_MODULE(_tensor_impl, m) }, ""); - // m.def("_copy_usm_ndarray_for_reshape", ©_usm_ndarray_for_reshape, - // "Copies from usm_ndarray `src` into usm_ndarray `dst` with the same - // " "number of elements using underlying 'C'-contiguous order for - // flat " "traversal. 
" "Returns a tuple of events: (ht_event, - // comp_event)", py::arg("src"), py::arg("dst"), - // py::arg("sycl_queue"), py::arg("depends") = py::list()); + m.def("_copy_usm_ndarray_for_reshape", ©_usm_ndarray_for_reshape, + "Copies from usm_ndarray `src` into usm_ndarray `dst` with the same " + "number of elements using underlying 'C'-contiguous order for flat " + "traversal. " + "Returns a tuple of events: (ht_event, comp_event)", + py::arg("src"), py::arg("dst"), py::arg("sycl_queue"), + py::arg("depends") = py::list()); - // m.def("_copy_usm_ndarray_for_roll_1d", ©_usm_ndarray_for_roll_1d, - // "Copies from usm_ndarray `src` into usm_ndarray `dst` with the same - // " "shapes using underlying 'C'-contiguous order for flat " - // "traversal with shift. " - // "Returns a tuple of events: (ht_event, comp_event)", - // py::arg("src"), py::arg("dst"), py::arg("shift"), - // py::arg("sycl_queue"), py::arg("depends") = py::list()); + m.def("_copy_usm_ndarray_for_roll_1d", ©_usm_ndarray_for_roll_1d, + "Copies from usm_ndarray `src` into usm_ndarray `dst` with the same " + "shapes using underlying 'C'-contiguous order for flat " + "traversal with shift. " + "Returns a tuple of events: (ht_event, comp_event)", + py::arg("src"), py::arg("dst"), py::arg("shift"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); - // m.def("_copy_usm_ndarray_for_roll_nd", ©_usm_ndarray_for_roll_nd, - // "Copies from usm_ndarray `src` into usm_ndarray `dst` with the same - // " "shapes using underlying 'C'-contiguous order for " "traversal - // with shifts along each axis. " "Returns a tuple of events: - // (ht_event, comp_event)", py::arg("src"), py::arg("dst"), - // py::arg("shifts"), py::arg("sycl_queue"), py::arg("depends") = - // py::list()); + m.def("_copy_usm_ndarray_for_roll_nd", ©_usm_ndarray_for_roll_nd, + "Copies from usm_ndarray `src` into usm_ndarray `dst` with the same " + "shapes using underlying 'C'-contiguous order for " + "traversal with shifts along each axis. 
" + "Returns a tuple of events: (ht_event, comp_event)", + py::arg("src"), py::arg("dst"), py::arg("shifts"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); // m.def("_linspace_step", &usm_ndarray_linear_sequence_step, // "Fills input 1D contiguous usm_ndarray `dst` with linear @@ -314,11 +315,11 @@ PYBIND11_MODULE(_tensor_impl, m) // py::arg("include_endpoint"), py::arg("sycl_queue"), // py::arg("depends") = py::list()); - // m.def("_copy_numpy_ndarray_into_usm_ndarray", - // ©_numpy_ndarray_into_usm_ndarray, - // "Copy from numpy array `src` into usm_ndarray `dst` - // synchronously.", py::arg("src"), py::arg("dst"), - // py::arg("sycl_queue"), py::arg("depends") = py::list()); + m.def("_copy_numpy_ndarray_into_usm_ndarray", + ©_numpy_ndarray_into_usm_ndarray, + "Copy from numpy array `src` into usm_ndarray `dst` synchronously.", + py::arg("src"), py::arg("dst"), py::arg("sycl_queue"), + py::arg("depends") = py::list()); m.def("_zeros_usm_ndarray", &usm_ndarray_zeros, "Populate usm_ndarray `dst` with zeros.", py::arg("dst"), diff --git a/dpnp/dpnp_algo/dpnp_arraycreation.py b/dpnp/dpnp_algo/dpnp_arraycreation.py index d94a031801f3..47edf63a68b4 100644 --- a/dpnp/dpnp_algo/dpnp_arraycreation.py +++ b/dpnp/dpnp_algo/dpnp_arraycreation.py @@ -33,6 +33,9 @@ import dpctl.utils as dpu import numpy +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt_ext import dpnp from dpnp.dpnp_array import dpnp_array from dpnp.dpnp_utils import get_usm_allocations, map_dtype_to_device @@ -230,7 +233,9 @@ def dpnp_linspace( usm_type=_usm_type, sycl_queue=sycl_queue_normalized, ) - usm_res = dpt.reshape(usm_res, (-1,) + (1,) * delta.ndim, copy=False) + usm_res = dpt_ext.reshape( + usm_res, (-1,) + (1,) * delta.ndim, copy=False + ) if step_num > 0: step = delta / step_num @@ -256,7 +261,7 @@ def dpnp_linspace( if dpnp.issubdtype(dtype, dpnp.integer): dpt.floor(usm_res, out=usm_res) - res = dpt.astype(usm_res, dtype, copy=False) + res = dpt_ext.astype(usm_res, dtype, copy=False) res = dpnp_array._create_from_usm_ndarray(res) if retstep is True: diff --git a/dpnp/dpnp_algo/dpnp_elementwise_common.py b/dpnp/dpnp_algo/dpnp_elementwise_common.py index 88abcee5035c..55d74e8c1803 100644 --- a/dpnp/dpnp_algo/dpnp_elementwise_common.py +++ b/dpnp/dpnp_algo/dpnp_elementwise_common.py @@ -47,6 +47,7 @@ # pylint: disable=no-name-in-module # TODO: revert to `import dpctl.tensor...` # when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt_ext import dpctl_ext.tensor._tensor_impl as dti import dpnp import dpnp.backend.extensions.vm._vm_impl as vmi @@ -212,7 +213,7 @@ def __call__( x_usm = dpnp.get_usm_ndarray(x) if dtype is not None: - x_usm = dpt.astype(x_usm, dtype, copy=False) + x_usm = dpt_ext.astype(x_usm, dtype, copy=False) out = self._unpack_out_kw(out) out_usm = None if out is None else dpnp.get_usm_ndarray(out) @@ -718,9 +719,9 @@ def __call__( sycl_queue=x2.sycl_queue, usm_type=x2.usm_type, ) - x2_usm = dpt.astype(x2_usm, dtype, copy=False) + x2_usm = dpt_ext.astype(x2_usm, dtype, copy=False) elif dpnp.isscalar(x2): - x1_usm = dpt.astype(x1_usm, dtype, copy=False) + x1_usm = dpt_ext.astype(x1_usm, dtype, copy=False) x2_usm = dpt.asarray( x2, dtype=dtype, @@ -728,8 +729,8 @@ def __call__( usm_type=x1.usm_type, ) else: - x1_usm = dpt.astype(x1_usm, dtype, copy=False) - x2_usm = dpt.astype(x2_usm, dtype, copy=False) + x1_usm = dpt_ext.astype(x1_usm, dtype, copy=False) + x2_usm = dpt_ext.astype(x2_usm, dtype, copy=False) res_usm 
= super().__call__(x1_usm, x2_usm, out=out_usm, order=order) @@ -1325,7 +1326,7 @@ def __call__(self, x, /, decimals=0, out=None, *, dtype=None): res_usm = dpt.divide(x_usm, 10**decimals, out=out_usm) if dtype is not None: - res_usm = dpt.astype(res_usm, dtype, copy=False) + res_usm = dpt_ext.astype(res_usm, dtype, copy=False) if out is not None and isinstance(out, dpnp_array): return out diff --git a/dpnp/dpnp_algo/dpnp_fill.py b/dpnp/dpnp_algo/dpnp_fill.py index 4137a2794747..ddba9f634cb1 100644 --- a/dpnp/dpnp_algo/dpnp_fill.py +++ b/dpnp/dpnp_algo/dpnp_fill.py @@ -32,10 +32,10 @@ import dpctl.utils as dpu from dpctl.tensor._ctors import _cast_fill_val -import dpnp - # TODO: revert to `from dpctl.tensor...` # when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt_ext +import dpnp from dpctl_ext.tensor._tensor_impl import ( _copy_usm_ndarray_into_usm_ndarray, _full_usm_ndarray, @@ -56,7 +56,7 @@ def dpnp_fill(arr, val): raise dpu.ExecutionPlacementError( "Input arrays have incompatible queues." ) - a_val = dpt.astype(val, arr.dtype) + a_val = dpt_ext.astype(val, arr.dtype) a_val = dpt.broadcast_to(a_val, arr.shape) _manager = dpu.SequentialOrderManager[exec_q] dep_evs = _manager.submitted_events diff --git a/dpnp/dpnp_array.py b/dpnp/dpnp_array.py index bb864d4444a9..0b6d882c53db 100644 --- a/dpnp/dpnp_array.py +++ b/dpnp/dpnp_array.py @@ -41,6 +41,9 @@ import dpctl.tensor._type_utils as dtu from dpctl.tensor._numpy_helper import AxisError +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt_ext import dpnp from . import memory as dpm @@ -774,7 +777,7 @@ def asnumpy(self): """ - return dpt.asnumpy(self._array_obj) + return dpt_ext.asnumpy(self._array_obj) def astype( self, diff --git a/dpnp/dpnp_container.py b/dpnp/dpnp_container.py index c8e28529cd57..acda579a5f5e 100644 --- a/dpnp/dpnp_container.py +++ b/dpnp/dpnp_container.py @@ -38,6 +38,8 @@ import dpctl.tensor as dpt import dpctl.utils as dpu +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor import dpctl_ext.tensor as dpt_ext import dpnp from dpnp.dpnp_array import dpnp_array @@ -141,7 +143,7 @@ def copy(x1, /, *, order="K"): if order is None: order = "K" - array_obj = dpt.copy(dpnp.get_usm_ndarray(x1), order=order) + array_obj = dpt_ext.copy(dpnp.get_usm_ndarray(x1), order=order) return dpnp_array._create_from_usm_ndarray(array_obj) diff --git a/dpnp/dpnp_iface.py b/dpnp/dpnp_iface.py index 533bdc36c617..6c050a208981 100644 --- a/dpnp/dpnp_iface.py +++ b/dpnp/dpnp_iface.py @@ -53,6 +53,7 @@ # pylint: disable=no-name-in-module # TODO: revert to `import dpctl.tensor...` # when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt_ext import dpctl_ext.tensor._tensor_impl as ti import dpnp @@ -136,7 +137,7 @@ def asnumpy(a, order="C"): return a.asnumpy() if isinstance(a, dpt.usm_ndarray): - return dpt.asnumpy(a) + return dpt_ext.asnumpy(a) return numpy.asarray(a, order=order) diff --git a/dpnp/dpnp_iface_arraycreation.py b/dpnp/dpnp_iface_arraycreation.py index e7b902647186..52fc4b7f6448 100644 --- a/dpnp/dpnp_iface_arraycreation.py +++ b/dpnp/dpnp_iface_arraycreation.py @@ -46,6 +46,9 @@ import dpctl.tensor as dpt import numpy +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt_ext import dpnp from dpnp import dpnp_container @@ -934,7 +937,7 @@ def astype(x, dtype, /, *, order="K", casting="unsafe", copy=True, device=None): order = "K" usm_x = 
dpnp.get_usm_ndarray(x) - usm_res = dpt.astype( + usm_res = dpt_ext.astype( usm_x, dtype, order=order, casting=casting, copy=copy, device=device ) @@ -3116,7 +3119,7 @@ def meshgrid(*xi, copy=True, sparse=False, indexing="xy"): s0 = (1,) * ndim output = [ - dpt.reshape(dpnp.get_usm_ndarray(x), s0[:i] + (-1,) + s0[i + 1 :]) + dpt_ext.reshape(dpnp.get_usm_ndarray(x), s0[:i] + (-1,) + s0[i + 1 :]) for i, x in enumerate(xi) ] @@ -3124,14 +3127,14 @@ def meshgrid(*xi, copy=True, sparse=False, indexing="xy"): _, _ = get_usm_allocations(output) if indexing == "xy" and ndim > 1: - output[0] = dpt.reshape(output[0], (1, -1) + s0[2:]) - output[1] = dpt.reshape(output[1], (-1, 1) + s0[2:]) + output[0] = dpt_ext.reshape(output[0], (1, -1) + s0[2:]) + output[1] = dpt_ext.reshape(output[1], (-1, 1) + s0[2:]) if not sparse: output = dpt.broadcast_arrays(*output) if copy: - output = [dpt.copy(x) for x in output] + output = [dpt_ext.copy(x) for x in output] return [dpnp_array._create_from_usm_ndarray(x) for x in output] @@ -3931,7 +3934,7 @@ def vander( tmp = m[:, ::-1] if not increasing else m dpnp.power( - dpt.reshape(usm_x, (-1, 1)), + dpt_ext.reshape(usm_x, (-1, 1)), dpt.arange( N, dtype=_dtype, usm_type=x_usm_type, sycl_queue=x_sycl_queue ), diff --git a/dpnp/dpnp_iface_indexing.py b/dpnp/dpnp_iface_indexing.py index 583561573b85..b0769337c38b 100644 --- a/dpnp/dpnp_iface_indexing.py +++ b/dpnp/dpnp_iface_indexing.py @@ -51,11 +51,10 @@ from dpctl.tensor._indexing_functions import _get_indexing_mode from dpctl.tensor._numpy_helper import normalize_axis_index -import dpctl_ext.tensor as dpt_ext - # pylint: disable=no-name-in-module # TODO: revert to `import dpctl.tensor...` # when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt_ext import dpctl_ext.tensor._tensor_impl as ti import dpnp @@ -243,7 +242,7 @@ def choose(a, choices, out=None, mode="wrap"): # NumPy will cast up to int64 in general but # int32 is more than safe for bool if ind_dt == dpnp.bool: - inds = dpt.astype(inds, dpt.int32) + inds = dpt_ext.astype(inds, dpt.int32) else: raise TypeError("input index array must be of integer data type") @@ -256,7 +255,7 @@ def choose(a, choices, out=None, mode="wrap"): choices = tuple( map( lambda chc: ( - chc if chc.dtype == res_dt else dpt.astype(chc, res_dt) + chc if chc.dtype == res_dt else dpt_ext.astype(chc, res_dt) ), choices, ) @@ -815,14 +814,14 @@ def extract(condition, a): ) if usm_cond.size != usm_a.size: - usm_a = dpt.reshape(usm_a, -1) - usm_cond = dpt.reshape(usm_cond, -1) + usm_a = dpt_ext.reshape(usm_a, -1) + usm_cond = dpt_ext.reshape(usm_cond, -1) usm_res = dpt_ext.take(usm_a, dpt.nonzero(usm_cond)[0]) else: if usm_cond.shape != usm_a.shape: - usm_a = dpt.reshape(usm_a, -1) - usm_cond = dpt.reshape(usm_cond, -1) + usm_a = dpt_ext.reshape(usm_a, -1) + usm_cond = dpt_ext.reshape(usm_cond, -1) usm_res = dpt.extract(usm_cond, usm_a) @@ -959,18 +958,18 @@ def fill_diagonal(a, val, wrap=False): # a.flat[:end:step] = val # but need to consider use case when `a` is usm_ndarray also a_sh = a.shape - tmp_a = dpt.reshape(usm_a, -1) + tmp_a = dpt_ext.reshape(usm_a, -1) if dpnp.isscalar(usm_val): tmp_a[:end:step] = usm_val else: - usm_val = dpt.reshape(usm_val, -1) + usm_val = dpt_ext.reshape(usm_val, -1) # Setitem can work only if index size equal val size. # Using loop for general case without dependencies of val size. 
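             # e.g. for usm_val.size == 2 this performs two strided
             # assignments: tmp_a[0:end:step] = usm_val[0], then
             # tmp_a[step:end:2*step] = usm_val[1].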
for i in range(0, usm_val.size): tmp_a[step * i : end : step * (i + 1)] = usm_val[i] - tmp_a = dpt.reshape(tmp_a, a_sh) + tmp_a = dpt_ext.reshape(tmp_a, a_sh) usm_a[:] = tmp_a @@ -1611,12 +1610,14 @@ def place(a, mask, vals): if usm_vals.ndim != 1: # dpt.place supports only 1-D array of values - usm_vals = dpt.reshape(usm_vals, -1) + usm_vals = dpt_ext.reshape(usm_vals, -1) if usm_vals.dtype != usm_a.dtype: # dpt.place casts values to a.dtype with "unsafe" rule, # while numpy.place does that with "safe" casting rule - usm_vals = dpt.astype(usm_vals, usm_a.dtype, casting="safe", copy=False) + usm_vals = dpt_ext.astype( + usm_vals, usm_a.dtype, casting="safe", copy=False + ) dpt.place(usm_a, usm_mask, usm_vals) @@ -1708,19 +1709,19 @@ def put(a, ind, v, /, *, axis=None, mode="wrap"): if usm_ind.ndim != 1: # dpt.put supports only 1-D array of indices - usm_ind = dpt.reshape(usm_ind, -1, copy=False) + usm_ind = dpt_ext.reshape(usm_ind, -1, copy=False) if not dpnp.issubdtype(usm_ind.dtype, dpnp.integer): # dpt.put supports only integer dtype for array of indices - usm_ind = dpt.astype(usm_ind, dpnp.intp, casting="safe") + usm_ind = dpt_ext.astype(usm_ind, dpnp.intp, casting="safe") in_usm_a = usm_a if axis is None and usm_a.ndim > 1: - usm_a = dpt.reshape(usm_a, -1) + usm_a = dpt_ext.reshape(usm_a, -1) dpt_ext.put(usm_a, usm_ind, usm_v, axis=axis, mode=mode) if in_usm_a._pointer != usm_a._pointer: # pylint: disable=protected-access - in_usm_a[:] = dpt.reshape(usm_a, in_usm_a.shape, copy=False) + in_usm_a[:] = dpt_ext.reshape(usm_a, in_usm_a.shape, copy=False) def put_along_axis(a, ind, values, axis, mode="wrap"): @@ -2162,7 +2163,7 @@ def take(a, indices, /, *, axis=None, out=None, mode="wrap"): if axis is None: if a_ndim > 1: # flatten input array - usm_a = dpt.reshape(usm_a, -1) + usm_a = dpt_ext.reshape(usm_a, -1) axis = 0 elif a_ndim == 0: axis = normalize_axis_index(operator.index(axis), 1) @@ -2171,7 +2172,7 @@ def take(a, indices, /, *, axis=None, out=None, mode="wrap"): if not dpnp.issubdtype(usm_ind.dtype, dpnp.integer): # dpt.take supports only integer dtype for array of indices - usm_ind = dpt.astype(usm_ind, dpnp.intp, copy=False, casting="safe") + usm_ind = dpt_ext.astype(usm_ind, dpnp.intp, copy=False, casting="safe") usm_res = _take_index( usm_a, usm_ind, axis, exec_q, res_usm_type, out=out, mode=mode diff --git a/dpnp/dpnp_iface_manipulation.py b/dpnp/dpnp_iface_manipulation.py index dd872485a602..4866c912ab6a 100644 --- a/dpnp/dpnp_iface_manipulation.py +++ b/dpnp/dpnp_iface_manipulation.py @@ -53,6 +53,9 @@ normalize_axis_tuple, ) +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt_ext import dpnp from .dpnp_array import dpnp_array @@ -415,7 +418,7 @@ def _get_first_nan_index(usm_a): dpt.place( usm_res.inverse_indices, usm_res.inverse_indices > first_nan, - dpt.reshape(first_nan, 1), + dpt_ext.reshape(first_nan, 1), ) result += (usm_res.inverse_indices,) @@ -3057,7 +3060,7 @@ def reshape(a, /, shape, order="C", *, copy=None): ) usm_a = dpnp.get_usm_ndarray(a) - usm_res = dpt.reshape(usm_a, shape=shape, order=order, copy=copy) + usm_res = dpt_ext.reshape(usm_a, shape=shape, order=order, copy=copy) return dpnp_array._create_from_usm_ndarray(usm_res) @@ -3259,9 +3262,9 @@ def roll(x, shift, axis=None): shift = dpnp.asnumpy(shift) if axis is None: - return roll(dpt.reshape(usm_x, -1), shift, 0).reshape(x.shape) + return roll(dpt_ext.reshape(usm_x, -1), shift, 0).reshape(x.shape) - usm_res = dpt.roll(usm_x, 
shift=shift, axis=axis) + usm_res = dpt_ext.roll(usm_x, shift=shift, axis=axis) return dpnp_array._create_from_usm_ndarray(usm_res) diff --git a/dpnp/dpnp_iface_sorting.py b/dpnp/dpnp_iface_sorting.py index 9c5097a5f3e3..be6c52ae9d80 100644 --- a/dpnp/dpnp_iface_sorting.py +++ b/dpnp/dpnp_iface_sorting.py @@ -44,6 +44,9 @@ import dpctl.tensor as dpt from dpctl.tensor._numpy_helper import normalize_axis_index +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt_ext import dpnp # pylint: disable=no-name-in-module @@ -84,7 +87,7 @@ def _wrap_sort_argsort( usm_a = dpnp.get_usm_ndarray(a) if axis is None: - usm_a = dpt.reshape(usm_a, -1) + usm_a = dpt_ext.reshape(usm_a, -1) axis = -1 axis = normalize_axis_index(axis, ndim=usm_a.ndim) diff --git a/dpnp/dpnp_iface_statistics.py b/dpnp/dpnp_iface_statistics.py index 7e092184366c..daff981d5cc4 100644 --- a/dpnp/dpnp_iface_statistics.py +++ b/dpnp/dpnp_iface_statistics.py @@ -47,6 +47,9 @@ import numpy from dpctl.tensor._numpy_helper import normalize_axis_index +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt_ext import dpnp # pylint: disable=no-name-in-module @@ -1204,7 +1207,7 @@ def mean(a, /, axis=None, dtype=None, out=None, keepdims=False, *, where=True): usm_a = dpnp.get_usm_ndarray(a) usm_res = dpt.mean(usm_a, axis=axis, keepdims=keepdims) if dtype is not None: - usm_res = dpt.astype(usm_res, dtype) + usm_res = dpt_ext.astype(usm_res, dtype) return dpnp.get_result_array(usm_res, out, casting="unsafe") diff --git a/dpnp/tests/test_arraycreation.py b/dpnp/tests/test_arraycreation.py index d8a80ddbff78..88e6aacb997d 100644 --- a/dpnp/tests/test_arraycreation.py +++ b/dpnp/tests/test_arraycreation.py @@ -13,6 +13,9 @@ assert_raises_regex, ) +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt_ext import dpnp from .helper import ( @@ -969,7 +972,7 @@ def test_ones_like(array, dtype, order): ], ) def test_dpctl_tensor_input(func, args): - x0 = dpt.reshape(dpt.arange(9), (3, 3)) + x0 = dpt_ext.reshape(dpt.arange(9), (3, 3)) new_args = [eval(val, {"x0": x0}) for val in args] X = getattr(dpt, func)(*new_args) Y = getattr(dpnp, func)(*new_args) From 192bd937230e671bac1ebcf4e3addccccc83c890 Mon Sep 17 00:00:00 2001 From: vlad-perevezentsev Date: Tue, 3 Mar 2026 14:39:46 +0100 Subject: [PATCH 03/11] Extend `._tensor_impl` with advanced indexing functions (#2777) This PR extends `_tensor_impl` in `dpctl_ext.tensor` with the advanced indexing (`_extract, _place, _nonzero, mask_positions, `), repeat (`_cumsum_1d`) and `_eye` functions It also adds `eye(), extract(), nonzero(), place(), put_along_axis(), take_along_axis()` to `dpctl_ext.tensor` and updates the corresponding dpnp functions to use these implementations internally --- dpctl_ext/tensor/CMakeLists.txt | 6 +- dpctl_ext/tensor/__init__.py | 12 + dpctl_ext/tensor/_copy_utils.py | 306 ++++ dpctl_ext/tensor/_ctors.py | 128 ++ dpctl_ext/tensor/_indexing_functions.py | 309 ++++ .../include/kernels/accumulators.hpp | 1448 +++++++++++++++++ .../kernels/boolean_advanced_indexing.hpp | 853 ++++++++++ .../include/kernels/constructors.hpp | 96 +- .../tensor/libtensor/source/accumulators.cpp | 406 +++++ .../tensor/libtensor/source/accumulators.hpp | 62 + .../source/boolean_advanced_indexing.cpp | 859 ++++++++++ .../source/boolean_advanced_indexing.hpp | 81 + .../tensor/libtensor/source/eye_ctor.cpp | 142 ++ 
.../tensor/libtensor/source/eye_ctor.hpp | 57 + .../tensor/libtensor/source/tensor_ctors.cpp | 74 +- dpnp/dpnp_container.py | 2 +- dpnp/dpnp_iface_indexing.py | 18 +- dpnp/dpnp_iface_manipulation.py | 2 +- 18 files changed, 4809 insertions(+), 52 deletions(-) create mode 100644 dpctl_ext/tensor/libtensor/include/kernels/accumulators.hpp create mode 100644 dpctl_ext/tensor/libtensor/include/kernels/boolean_advanced_indexing.hpp create mode 100644 dpctl_ext/tensor/libtensor/source/accumulators.cpp create mode 100644 dpctl_ext/tensor/libtensor/source/accumulators.hpp create mode 100644 dpctl_ext/tensor/libtensor/source/boolean_advanced_indexing.cpp create mode 100644 dpctl_ext/tensor/libtensor/source/boolean_advanced_indexing.hpp create mode 100644 dpctl_ext/tensor/libtensor/source/eye_ctor.cpp create mode 100644 dpctl_ext/tensor/libtensor/source/eye_ctor.hpp diff --git a/dpctl_ext/tensor/CMakeLists.txt b/dpctl_ext/tensor/CMakeLists.txt index 93555981deaa..0b166a202735 100644 --- a/dpctl_ext/tensor/CMakeLists.txt +++ b/dpctl_ext/tensor/CMakeLists.txt @@ -45,7 +45,7 @@ set(_static_lib_sources ) set(_tensor_impl_sources ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/tensor_ctors.cpp - # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/accumulators.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/accumulators.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/copy_and_cast_usm_to_usm.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/copy_as_contig.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/copy_numpy_ndarray_into_usm_ndarray.cpp @@ -53,8 +53,8 @@ set(_tensor_impl_sources ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/copy_for_roll.cpp # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/linear_sequences.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/integer_advanced_indexing.cpp - # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/boolean_advanced_indexing.cpp - # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/eye_ctor.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/boolean_advanced_indexing.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/eye_ctor.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/full_ctor.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/zeros_ctor.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/triul_ctor.cpp diff --git a/dpctl_ext/tensor/__init__.py b/dpctl_ext/tensor/__init__.py index edb2c096bad1..fa76faccc632 100644 --- a/dpctl_ext/tensor/__init__.py +++ b/dpctl_ext/tensor/__init__.py @@ -35,13 +35,19 @@ to_numpy, ) from dpctl_ext.tensor._ctors import ( + eye, full, tril, triu, ) from dpctl_ext.tensor._indexing_functions import ( + extract, + nonzero, + place, put, + put_along_axis, take, + take_along_axis, ) from dpctl_ext.tensor._manipulation_functions import ( roll, @@ -52,12 +58,18 @@ "asnumpy", "astype", "copy", + "extract", + "eye", "from_numpy", "full", + "nonzero", + "place", "put", + "put_along_axis", "reshape", "roll", "take", + "take_along_axis", "to_numpy", "tril", "triu", diff --git a/dpctl_ext/tensor/_copy_utils.py b/dpctl_ext/tensor/_copy_utils.py index c62218893a2c..5d1ac209c86b 100644 --- a/dpctl_ext/tensor/_copy_utils.py +++ b/dpctl_ext/tensor/_copy_utils.py @@ -27,6 +27,8 @@ # ***************************************************************************** import builtins +import operator +from numbers import Integral import dpctl import dpctl.memory as dpm @@ -39,8 +41,11 @@ # TODO: revert to `import dpctl.tensor...` # when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt_ext import dpctl_ext.tensor._tensor_impl as ti +from ._numpy_helper import 
normalize_axis_index
+
 __doc__ = (
     "Implementation module for copy- and cast- operations on "
     ":class:`dpctl.tensor.usm_ndarray`."
 )
@@ -130,6 +135,307 @@ def _copy_from_numpy_into(dst, np_ary):
     )
 
 
+def _extract_impl(ary, ary_mask, axis=0):
+    """
+    Extract elements of `ary` by applying `ary_mask`, with masking
+    starting at the dimension given by `axis`.
+    """
+    if not isinstance(ary, dpt.usm_ndarray):
+        raise TypeError(
+            f"Expecting type dpctl.tensor.usm_ndarray, got {type(ary)}"
+        )
+    if isinstance(ary_mask, dpt.usm_ndarray):
+        dst_usm_type = dpctl.utils.get_coerced_usm_type(
+            (ary.usm_type, ary_mask.usm_type)
+        )
+        exec_q = dpctl.utils.get_execution_queue(
+            (ary.sycl_queue, ary_mask.sycl_queue)
+        )
+        if exec_q is None:
+            raise dpctl.utils.ExecutionPlacementError(
+                "arrays have different associated queues. "
+                "Use `y.to_device(x.device)` to migrate."
+            )
+    elif isinstance(ary_mask, np.ndarray):
+        dst_usm_type = ary.usm_type
+        exec_q = ary.sycl_queue
+        ary_mask = dpt.asarray(
+            ary_mask, usm_type=dst_usm_type, sycl_queue=exec_q
+        )
+    else:
+        raise TypeError(
+            "Expecting type dpctl.tensor.usm_ndarray or numpy.ndarray, got "
+            f"{type(ary_mask)}"
+        )
+    ary_nd = ary.ndim
+    pp = normalize_axis_index(operator.index(axis), ary_nd)
+    mask_nd = ary_mask.ndim
+    if pp < 0 or pp + mask_nd > ary_nd:
+        raise ValueError(
+            "Parameter `axis` is inconsistent with input array dimensions"
+        )
+    mask_nelems = ary_mask.size
+    cumsum_dt = dpt.int32 if mask_nelems < int32_t_max else dpt.int64
+    cumsum = dpt.empty(mask_nelems, dtype=cumsum_dt, device=ary_mask.device)
+    exec_q = cumsum.sycl_queue
+    _manager = dpctl.utils.SequentialOrderManager[exec_q]
+    dep_evs = _manager.submitted_events
+    mask_count = ti.mask_positions(
+        ary_mask, cumsum, sycl_queue=exec_q, depends=dep_evs
+    )
+    dst_shape = ary.shape[:pp] + (mask_count,) + ary.shape[pp + mask_nd :]
+    dst = dpt.empty(
+        dst_shape, dtype=ary.dtype, usm_type=dst_usm_type, device=ary.device
+    )
+    if dst.size == 0:
+        return dst
+    hev, ev = ti._extract(
+        src=ary,
+        cumsum=cumsum,
+        axis_start=pp,
+        axis_end=pp + mask_nd,
+        dst=dst,
+        sycl_queue=exec_q,
+        depends=dep_evs,
+    )
+    _manager.add_event_pair(hev, ev)
+    return dst
+
+
+def _get_indices_queue_usm_type(inds, queue, usm_type):
+    """
+    Utility for validating that indices are NumPy ndarrays or usm_ndarrays
+    of integral dtype, or Python integers. At least one must be an array.
+
+    For each array, its queue and usm type are appended to the internal
+    `queues` and `usm_types` lists, from which the common execution queue
+    and coerced usm type are computed and returned.
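+
+    An illustrative sketch (``x`` and ``ind`` are hypothetical
+    usm_ndarrays)::
+
+        q, ut = _get_indices_queue_usm_type(
+            (ind, 0), x.sycl_queue, x.usm_type
+        )
+        # q is None when the arrays' queues are incompatible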
+ """ + queues = [queue] + usm_types = [usm_type] + any_array = False + for ind in inds: + if isinstance(ind, (np.ndarray, dpt.usm_ndarray)): + any_array = True + if ind.dtype.kind not in "ui": + raise IndexError( + "arrays used as indices must be of integer (or boolean) " + "type" + ) + if isinstance(ind, dpt.usm_ndarray): + queues.append(ind.sycl_queue) + usm_types.append(ind.usm_type) + elif not isinstance(ind, Integral): + raise TypeError( + "all elements of `ind` expected to be usm_ndarrays, " + f"NumPy arrays, or integers, found {type(ind)}" + ) + if not any_array: + raise TypeError( + "at least one element of `inds` expected to be an array" + ) + usm_type = dpctl.utils.get_coerced_usm_type(usm_types) + q = dpctl.utils.get_execution_queue(queues) + return q, usm_type + + +def _nonzero_impl(ary): + if not isinstance(ary, dpt.usm_ndarray): + raise TypeError( + f"Expecting type dpctl.tensor.usm_ndarray, got {type(ary)}" + ) + exec_q = ary.sycl_queue + usm_type = ary.usm_type + mask_nelems = ary.size + cumsum_dt = dpt.int32 if mask_nelems < int32_t_max else dpt.int64 + cumsum = dpt.empty( + mask_nelems, dtype=cumsum_dt, sycl_queue=exec_q, order="C" + ) + _manager = dpctl.utils.SequentialOrderManager[exec_q] + dep_evs = _manager.submitted_events + mask_count = ti.mask_positions( + ary, cumsum, sycl_queue=exec_q, depends=dep_evs + ) + indexes_dt = ti.default_device_index_type(exec_q.sycl_device) + indexes = dpt.empty( + (ary.ndim, mask_count), + dtype=indexes_dt, + usm_type=usm_type, + sycl_queue=exec_q, + order="C", + ) + hev, nz_ev = ti._nonzero(cumsum, indexes, ary.shape, exec_q) + res = tuple(indexes[i, :] for i in range(ary.ndim)) + _manager.add_event_pair(hev, nz_ev) + return res + + +def _prepare_indices_arrays(inds, q, usm_type): + """ + Utility taking a mix of usm_ndarray and possibly Python int scalar indices, + a queue (assumed to be common to arrays in inds), and a usm type. + + Python scalar integers are promoted to arrays on the provided queue and + with the provided usm type. All arrays are then promoted to a common + integral type (if possible) before being broadcast to a common shape. 
+ """ + # scalar integers -> arrays + inds = tuple( + map( + lambda ind: ( + ind + if isinstance(ind, dpt.usm_ndarray) + else dpt.asarray(ind, usm_type=usm_type, sycl_queue=q) + ), + inds, + ) + ) + + # promote to a common integral type if possible + ind_dt = dpt.result_type(*inds) + if ind_dt.kind not in "ui": + raise ValueError( + "cannot safely promote indices to an integer data type" + ) + inds = tuple( + map( + lambda ind: ( + ind if ind.dtype == ind_dt else dpt.astype(ind, ind_dt) + ), + inds, + ) + ) + + # broadcast + inds = dpt.broadcast_arrays(*inds) + + return inds + + +def _put_multi_index(ary, inds, p, vals, mode=0): + if not isinstance(ary, dpt.usm_ndarray): + raise TypeError( + f"Expecting type dpctl.tensor.usm_ndarray, got {type(ary)}" + ) + ary_nd = ary.ndim + p = normalize_axis_index(operator.index(p), ary_nd) + mode = operator.index(mode) + if mode not in [0, 1]: + raise ValueError( + "Invalid value for mode keyword, only 0 or 1 is supported" + ) + if not isinstance(inds, (list, tuple)): + inds = (inds,) + + exec_q, coerced_usm_type = _get_indices_queue_usm_type( + inds, ary.sycl_queue, ary.usm_type + ) + + if exec_q is not None: + if not isinstance(vals, dpt.usm_ndarray): + vals = dpt.asarray( + vals, + dtype=ary.dtype, + usm_type=coerced_usm_type, + sycl_queue=exec_q, + ) + else: + exec_q = dpctl.utils.get_execution_queue((exec_q, vals.sycl_queue)) + coerced_usm_type = dpctl.utils.get_coerced_usm_type( + ( + coerced_usm_type, + vals.usm_type, + ) + ) + if exec_q is None: + raise dpctl.utils.ExecutionPlacementError( + "Can not automatically determine where to allocate the " + "result or performance execution. " + "Use `usm_ndarray.to_device` method to migrate data to " + "be associated with the same queue." + ) + + inds = _prepare_indices_arrays(inds, exec_q, coerced_usm_type) + + ind0 = inds[0] + ary_sh = ary.shape + p_end = p + len(inds) + if 0 in ary_sh[p:p_end] and ind0.size != 0: + raise IndexError( + "cannot put into non-empty indices along an empty axis" + ) + expected_vals_shape = ary_sh[:p] + ind0.shape + ary_sh[p_end:] + if vals.dtype == ary.dtype: + rhs = vals + else: + rhs = dpt_ext.astype(vals, ary.dtype) + rhs = dpt.broadcast_to(rhs, expected_vals_shape) + _manager = dpctl.utils.SequentialOrderManager[exec_q] + dep_ev = _manager.submitted_events + hev, put_ev = ti._put( + dst=ary, + ind=inds, + val=rhs, + axis_start=p, + mode=mode, + sycl_queue=exec_q, + depends=dep_ev, + ) + _manager.add_event_pair(hev, put_ev) + return + + +def _take_multi_index(ary, inds, p, mode=0): + if not isinstance(ary, dpt.usm_ndarray): + raise TypeError( + f"Expecting type dpctl.tensor.usm_ndarray, got {type(ary)}" + ) + ary_nd = ary.ndim + p = normalize_axis_index(operator.index(p), ary_nd) + mode = operator.index(mode) + if mode not in [0, 1]: + raise ValueError( + "Invalid value for mode keyword, only 0 or 1 is supported" + ) + if not isinstance(inds, (list, tuple)): + inds = (inds,) + + exec_q, res_usm_type = _get_indices_queue_usm_type( + inds, ary.sycl_queue, ary.usm_type + ) + if exec_q is None: + raise dpctl.utils.ExecutionPlacementError( + "Can not automatically determine where to allocate the " + "result or performance execution. " + "Use `usm_ndarray.to_device` method to migrate data to " + "be associated with the same queue." 
+        )
+
+    inds = _prepare_indices_arrays(inds, exec_q, res_usm_type)
+
+    ind0 = inds[0]
+    ary_sh = ary.shape
+    p_end = p + len(inds)
+    if 0 in ary_sh[p:p_end] and ind0.size != 0:
+        raise IndexError("cannot take non-empty indices from an empty axis")
+    res_shape = ary_sh[:p] + ind0.shape + ary_sh[p_end:]
+    res = dpt.empty(
+        res_shape, dtype=ary.dtype, usm_type=res_usm_type, sycl_queue=exec_q
+    )
+    _manager = dpctl.utils.SequentialOrderManager[exec_q]
+    dep_ev = _manager.submitted_events
+    hev, take_ev = ti._take(
+        src=ary,
+        ind=inds,
+        dst=res,
+        axis_start=p,
+        mode=mode,
+        sycl_queue=exec_q,
+        depends=dep_ev,
+    )
+    _manager.add_event_pair(hev, take_ev)
+    return res
+
+
 def from_numpy(np_ary, /, *, device=None, usm_type="device", sycl_queue=None):
     """
     from_numpy(arg, device=None, usm_type="device", sycl_queue=None)
diff --git a/dpctl_ext/tensor/_ctors.py b/dpctl_ext/tensor/_ctors.py
index 5a39e9367e9c..5a9e07c73346 100644
--- a/dpctl_ext/tensor/_ctors.py
+++ b/dpctl_ext/tensor/_ctors.py
@@ -58,6 +58,38 @@ def _cast_fill_val(fill_val, dt):
     return fill_val
 
 
+def _ensure_native_dtype_device_support(dtype, dev) -> None:
+    """Check that dtype is natively supported by device.
+
+    Args:
+        dtype:
+            Elemental data-type
+        dev (:class:`dpctl.SyclDevice`):
+            The device about which the query is being made.
+    Returns:
+        None
+    Raises:
+        ValueError:
+            if device does not natively support this `dtype`.
+    """
+    if dtype in [dpt.float64, dpt.complex128] and not dev.has_aspect_fp64:
+        raise ValueError(
+            f"Device {dev.name} does not provide native support "
+            "for double-precision floating point type."
+        )
+    if (
+        dtype
+        in [
+            dpt.float16,
+        ]
+        and not dev.has_aspect_fp16
+    ):
+        raise ValueError(
+            f"Device {dev.name} does not provide native support "
+            "for half-precision floating point type."
+        )
+
+
 def _to_scalar(obj, sc_ty):
     """A way to convert object to NumPy scalar type.
     Raises OverflowError if obj can not be represented
@@ -67,6 +99,102 @@ def _to_scalar(obj, sc_ty):
     return zd_arr[()]
 
 
+def eye(
+    n_rows,
+    n_cols=None,
+    /,
+    *,
+    k=0,
+    dtype=None,
+    order="C",
+    device=None,
+    usm_type="device",
+    sycl_queue=None,
+):
+    """
+    eye(n_rows, n_cols=None, /, *, k=0, dtype=None, order="C", \
+            device=None, usm_type="device", sycl_queue=None)
+
+    Creates :class:`dpctl.tensor.usm_ndarray` with ones on the `k`-th
+    diagonal.
+
+    Args:
+        n_rows (int):
+            number of rows in the output array.
+        n_cols (int, optional):
+            number of columns in the output array. If ``None``,
+            ``n_cols = n_rows``. Default: ``None``
+        k (int):
+            index of the diagonal, with ``0`` as the main diagonal.
+            A positive value of ``k`` is a superdiagonal, a negative value
+            is a subdiagonal.
+            Raises :exc:`TypeError` if ``k`` is not an integer.
+            Default: ``0``
+        dtype (optional):
+            data type of the array. Can be typestring,
+            a :class:`numpy.dtype` object, :mod:`numpy` char string, or
+            a NumPy scalar type. Default: ``None``
+        order ("C" or "F"):
+            memory layout for the array. Default: ``"C"``
+        device (optional):
+            array API concept of device where the output array
+            is created. ``device`` can be ``None``, a oneAPI filter selector
+            string, an instance of :class:`dpctl.SyclDevice` corresponding to
+            a non-partitioned SYCL device, an instance of
+            :class:`dpctl.SyclQueue`, or a :class:`dpctl.tensor.Device` object
+            returned by :attr:`dpctl.tensor.usm_ndarray.device`.
+            Default: ``None``
+        usm_type (``"device"``, ``"shared"``, ``"host"``, optional):
+            The type of SYCL USM allocation for the output array.
+            Default: ``"device"``
+        sycl_queue (:class:`dpctl.SyclQueue`, optional):
+            The SYCL queue to use
+            for output array allocation and copying. ``sycl_queue`` and
+            ``device`` are complementary arguments, i.e. use one or another.
+            If both are specified, a :exc:`TypeError` is raised unless both
+            imply the same underlying SYCL queue to be used. If both are
+            ``None``, a cached queue targeting default-selected device is
+            used for allocation and population. Default: ``None``
+
+    Returns:
+        usm_ndarray:
+            A diagonal matrix.
+    """
+    if not isinstance(order, str) or len(order) == 0 or order[0] not in "CcFf":
+        raise ValueError(
+            "Unrecognized order keyword value, expecting 'F' or 'C'."
+        )
+    order = order[0].upper()
+    n_rows = operator.index(n_rows)
+    n_cols = n_rows if n_cols is None else operator.index(n_cols)
+    k = operator.index(k)
+    if k >= n_cols or -k >= n_rows:
+        return dpt.zeros(
+            (n_rows, n_cols),
+            dtype=dtype,
+            order=order,
+            device=device,
+            usm_type=usm_type,
+            sycl_queue=sycl_queue,
+        )
+    dpctl.utils.validate_usm_type(usm_type, allow_none=False)
+    sycl_queue = normalize_queue_device(sycl_queue=sycl_queue, device=device)
+    dtype = _get_dtype(dtype, sycl_queue)
+    _ensure_native_dtype_device_support(dtype, sycl_queue.sycl_device)
+    res = dpt.usm_ndarray(
+        (n_rows, n_cols),
+        dtype=dtype,
+        buffer=usm_type,
+        order=order,
+        buffer_ctor_kwargs={"queue": sycl_queue},
+    )
+    if n_rows != 0 and n_cols != 0:
+        _manager = dpctl.utils.SequentialOrderManager[sycl_queue]
+        hev, eye_ev = ti._eye(k, dst=res, sycl_queue=sycl_queue)
+        _manager.add_event_pair(hev, eye_ev)
+    return res
+
+
 def _validate_fill_value(fill_val):
     """Validates that `fill_val` is a numeric or boolean scalar."""
     # TODO: verify if `np.True_` and `np.False_` should be instances of
diff --git a/dpctl_ext/tensor/_indexing_functions.py b/dpctl_ext/tensor/_indexing_functions.py
index df4f3e953042..6ca327192f73 100644
--- a/dpctl_ext/tensor/_indexing_functions.py
+++ b/dpctl_ext/tensor/_indexing_functions.py
@@ -37,6 +37,12 @@
 import dpctl_ext.tensor as dpt_ext
 import dpctl_ext.tensor._tensor_impl as ti
 
+from ._copy_utils import (
+    _extract_impl,
+    _nonzero_impl,
+    _put_multi_index,
+    _take_multi_index,
+)
 from ._numpy_helper import normalize_axis_index
 
 
@@ -50,6 +56,152 @@ def _get_indexing_mode(name):
     )
 
 
+def _range(sh_i, i, nd, q, usm_t, dt):
+    ind = dpt.arange(sh_i, dtype=dt, usm_type=usm_t, sycl_queue=q)
+    ind.shape = tuple(sh_i if i == j else 1 for j in range(nd))
+    return ind
+
+
+def extract(condition, arr):
+    """extract(condition, arr)
+
+    Returns the elements of an array that satisfy the condition.
+
+    If ``condition`` is boolean, ``dpctl.tensor.extract`` is
+    equivalent to ``arr[condition]``.
+
+    Note that ``dpctl.tensor.place`` does the opposite of
+    ``dpctl.tensor.extract``.
+
+    Args:
+        condition (usm_ndarray):
+            An array whose non-zero or ``True`` entries indicate the elements
+            of ``arr`` to extract.
+
+        arr (usm_ndarray):
+            Input array of the same size as ``condition``.
+
+    Returns:
+        usm_ndarray:
+            Rank 1 array of values from ``arr`` where ``condition`` is
+            ``True``.
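+
+    An illustrative sketch (assumes a default-selected SYCL device)::
+
+        x = dpt.arange(6)
+        y = extract(x > 3, x)
+        # y is [4, 5]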
+ """ + if not isinstance(condition, dpt.usm_ndarray): + raise TypeError( + "Expecting dpctl.tensor.usm_ndarray type, " f"got {type(condition)}" + ) + if not isinstance(arr, dpt.usm_ndarray): + raise TypeError( + "Expecting dpctl.tensor.usm_ndarray type, " f"got {type(arr)}" + ) + exec_q = dpctl.utils.get_execution_queue( + ( + condition.sycl_queue, + arr.sycl_queue, + ) + ) + if exec_q is None: + raise dpctl.utils.ExecutionPlacementError + if condition.shape != arr.shape: + raise ValueError("Arrays are not of the same size") + return _extract_impl(arr, condition) + + +def nonzero(arr): + """nonzero(arr) + + Return the indices of non-zero elements. + + Returns a tuple of usm_ndarrays, one for each dimension + of ``arr``, containing the indices of the non-zero elements + in that dimension. The values of ``arr`` are always tested in + row-major, C-style order. + + Args: + arr (usm_ndarray): + Input array, which has non-zero array rank. + + Returns: + Tuple[usm_ndarray, ...]: + Indices of non-zero array elements. + """ + if not isinstance(arr, dpt.usm_ndarray): + raise TypeError( + "Expecting dpctl.tensor.usm_ndarray type, " f"got {type(arr)}" + ) + if arr.ndim == 0: + raise ValueError("Array of positive rank is expected") + return _nonzero_impl(arr) + + +def place(arr, mask, vals): + """place(arr, mask, vals) + + Change elements of an array based on conditional and input values. + + If ``mask`` is boolean ``dpctl.tensor.place`` is + equivalent to ``arr[condition] = vals``. + + Args: + arr (usm_ndarray): + Array to put data into. + mask (usm_ndarray): + Boolean mask array. Must have the same size as ``arr``. + vals (usm_ndarray, sequence): + Values to put into ``arr``. Only the first N elements are + used, where N is the number of True values in ``mask``. If + ``vals`` is smaller than N, it will be repeated, and if + elements of ``arr`` are to be masked, this sequence must be + non-empty. Array ``vals`` must be one dimensional. 
+ """ + if not isinstance(arr, dpt.usm_ndarray): + raise TypeError( + "Expecting dpctl.tensor.usm_ndarray type, " f"got {type(arr)}" + ) + if not isinstance(mask, dpt.usm_ndarray): + raise TypeError( + "Expecting dpctl.tensor.usm_ndarray type, " f"got {type(mask)}" + ) + if not isinstance(vals, dpt.usm_ndarray): + raise TypeError( + "Expecting dpctl.tensor.usm_ndarray type, " f"got {type(vals)}" + ) + exec_q = dpctl.utils.get_execution_queue( + ( + arr.sycl_queue, + mask.sycl_queue, + vals.sycl_queue, + ) + ) + if exec_q is None: + raise dpctl.utils.ExecutionPlacementError + if arr.shape != mask.shape or vals.ndim != 1: + raise ValueError("Array sizes are not as required") + cumsum = dpt.empty(mask.size, dtype="i8", sycl_queue=exec_q) + _manager = dpctl.utils.SequentialOrderManager[exec_q] + deps_ev = _manager.submitted_events + nz_count = ti.mask_positions( + mask, cumsum, sycl_queue=exec_q, depends=deps_ev + ) + if nz_count == 0: + return + if vals.size == 0: + raise ValueError("Cannot insert from an empty array!") + if vals.dtype == arr.dtype: + rhs = vals + else: + rhs = dpt.astype(vals, arr.dtype) + hev, pl_ev = ti._place( + dst=arr, + cumsum=cumsum, + axis_start=0, + axis_end=mask.ndim, + rhs=rhs, + sycl_queue=exec_q, + ) + _manager.add_event_pair(hev, pl_ev) + + def put(x, indices, vals, /, *, axis=None, mode="wrap"): """put(x, indices, vals, axis=None, mode="wrap") @@ -199,6 +351,86 @@ def put_vec_duplicates(vec, ind, vals): _manager.add_event_pair(hev, put_ev) +def put_along_axis(x, indices, vals, /, *, axis=-1, mode="wrap"): + """ + Puts elements into an array at the one-dimensional indices specified by + ``indices`` along a provided ``axis``. + + Args: + x (usm_ndarray): + input array. Must be compatible with ``indices``, except for the + axis (dimension) specified by ``axis``. + indices (usm_ndarray): + array indices. Must have the same rank (i.e., number of dimensions) + as ``x``. + vals (usm_ndarray): + Array of values to be put into ``x``. + Must be broadcastable to the shape of ``indices``. + axis: int + axis along which to select values. If ``axis`` is negative, the + function determines the axis along which to select values by + counting from the last dimension. Default: ``-1``. + mode (str, optional): + How out-of-bounds indices will be handled. Possible values + are: + + - ``"wrap"``: clamps indices to (``-n <= i < n``), then wraps + negative indices. + - ``"clip"``: clips indices to (``0 <= i < n``). + + Default: ``"wrap"``. + + .. note:: + + If input array ``indices`` contains duplicates, a race condition + occurs, and the value written into corresponding positions in ``x`` + may vary from run to run. Preserving sequential semantics in handing + the duplicates to achieve deterministic behavior requires additional + work. 
+ """ + if not isinstance(x, dpt.usm_ndarray): + raise TypeError(f"Expected dpctl.tensor.usm_ndarray, got {type(x)}") + if not isinstance(indices, dpt.usm_ndarray): + raise TypeError( + f"Expected dpctl.tensor.usm_ndarray, got {type(indices)}" + ) + x_nd = x.ndim + if x_nd != indices.ndim: + raise ValueError( + "Number of dimensions in the first and the second " + "argument arrays must be equal" + ) + pp = normalize_axis_index(operator.index(axis), x_nd) + if isinstance(vals, dpt.usm_ndarray): + queues_ = [x.sycl_queue, indices.sycl_queue, vals.sycl_queue] + usm_types_ = [x.usm_type, indices.usm_type, vals.usm_type] + else: + queues_ = [x.sycl_queue, indices.sycl_queue] + usm_types_ = [x.usm_type, indices.usm_type] + exec_q = dpctl.utils.get_execution_queue(queues_) + if exec_q is None: + raise dpctl.utils.ExecutionPlacementError( + "Execution placement can not be unambiguously inferred " + "from input arguments. " + ) + out_usm_type = dpctl.utils.get_coerced_usm_type(usm_types_) + mode_i = _get_indexing_mode(mode) + indexes_dt = ( + dpt.uint64 + if indices.dtype == dpt.uint64 + else ti.default_device_index_type(exec_q.sycl_device) + ) + _ind = tuple( + ( + indices + if i == pp + else _range(x.shape[i], i, x_nd, exec_q, out_usm_type, indexes_dt) + ) + for i in range(x_nd) + ) + return _put_multi_index(x, _ind, 0, vals, mode=mode_i) + + def take(x, indices, /, *, axis=None, out=None, mode="wrap"): """take(x, indices, axis=None, out=None, mode="wrap") @@ -330,3 +562,80 @@ def take(x, indices, /, *, axis=None, out=None, mode="wrap"): out = orig_out return out + + +def take_along_axis(x, indices, /, *, axis=-1, mode="wrap"): + """ + Returns elements from an array at the one-dimensional indices specified + by ``indices`` along a provided ``axis``. + + Args: + x (usm_ndarray): + input array. Must be compatible with ``indices``, except for the + axis (dimension) specified by ``axis``. + indices (usm_ndarray): + array indices. Must have the same rank (i.e., number of dimensions) + as ``x``. + axis: int + axis along which to select values. If ``axis`` is negative, the + function determines the axis along which to select values by + counting from the last dimension. Default: ``-1``. + mode (str, optional): + How out-of-bounds indices will be handled. Possible values + are: + + - ``"wrap"``: clamps indices to (``-n <= i < n``), then wraps + negative indices. + - ``"clip"``: clips indices to (``0 <= i < n``). + + Default: ``"wrap"``. + + Returns: + usm_ndarray: + an array having the same data type as ``x``. The returned array has + the same rank (i.e., number of dimensions) as ``x`` and a shape + determined according to broadcasting rules, except for the axis + (dimension) specified by ``axis`` whose size must equal the size + of the corresponding axis (dimension) in ``indices``. + + Note: + Treatment of the out-of-bound indices in ``indices`` array is controlled + by the value of ``mode`` keyword. 
+ """ + if not isinstance(x, dpt.usm_ndarray): + raise TypeError(f"Expected dpctl.tensor.usm_ndarray, got {type(x)}") + if not isinstance(indices, dpt.usm_ndarray): + raise TypeError( + f"Expected dpctl.tensor.usm_ndarray, got {type(indices)}" + ) + x_nd = x.ndim + if x_nd != indices.ndim: + raise ValueError( + "Number of dimensions in the first and the second " + "argument arrays must be equal" + ) + pp = normalize_axis_index(operator.index(axis), x_nd) + out_usm_type = dpctl.utils.get_coerced_usm_type( + (x.usm_type, indices.usm_type) + ) + exec_q = dpctl.utils.get_execution_queue((x.sycl_queue, indices.sycl_queue)) + if exec_q is None: + raise dpctl.utils.ExecutionPlacementError( + "Execution placement can not be unambiguously inferred " + "from input arguments. " + ) + mode_i = _get_indexing_mode(mode) + indexes_dt = ( + dpt.uint64 + if indices.dtype == dpt.uint64 + else ti.default_device_index_type(exec_q.sycl_device) + ) + _ind = tuple( + ( + indices + if i == pp + else _range(x.shape[i], i, x_nd, exec_q, out_usm_type, indexes_dt) + ) + for i in range(x_nd) + ) + return _take_multi_index(x, _ind, 0, mode=mode_i) diff --git a/dpctl_ext/tensor/libtensor/include/kernels/accumulators.hpp b/dpctl_ext/tensor/libtensor/include/kernels/accumulators.hpp new file mode 100644 index 000000000000..6451bc950006 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/include/kernels/accumulators.hpp @@ -0,0 +1,1448 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines kernels for accumulators (cumulative sum, prod, etc.). 
+//===---------------------------------------------------------------------===//
+
+#pragma once
+#include <algorithm>
+#include <array>
+#include <cstddef>
+#include <cstdint>
+#include <utility>
+#include <vector>
+
+#include <sycl/sycl.hpp>
+
+#include "dpctl_tensor_types.hpp"
+#include "utils/offset_utils.hpp"
+#include "utils/sycl_alloc_utils.hpp"
+#include "utils/sycl_utils.hpp"
+#include "utils/type_dispatch_building.hpp"
+#include "utils/type_utils.hpp"
+
+namespace dpctl::tensor::kernels::accumulators
+{
+
+namespace su_ns = dpctl::tensor::sycl_utils;
+
+using dpctl::tensor::ssize_t;
+using namespace dpctl::tensor::offset_utils;
+
+template <typename T> T ceiling_quotient(T n, T m)
+{
+    return (n + m - 1) / m;
+}
+
+template <typename inputT, typename outputT> struct NonZeroIndicator
+{
+    constexpr NonZeroIndicator() {}
+
+    outputT operator()(const inputT &val) const
+    {
+        static constexpr outputT out_one(1);
+        static constexpr outputT out_zero(0);
+        static constexpr inputT val_zero(0);
+
+        return (val == val_zero) ? out_zero : out_one;
+    }
+};
+
+template <typename T> struct NoOpTransformer
+{
+    constexpr NoOpTransformer() {}
+
+    T operator()(const T &val) const
+    {
+        return val;
+    }
+};
+
+template <typename srcTy, typename dstTy> struct CastTransformer
+{
+    constexpr CastTransformer() {}
+
+    dstTy operator()(const srcTy &val) const
+    {
+        using dpctl::tensor::type_utils::convert_impl;
+        return convert_impl<dstTy, srcTy>(val);
+    }
+};
+
+template <typename T, typename ScanOpT> struct needs_workaround
+{
+    // workaround needed due to crash in JITing on CPU
+    // remove when CMPLRLLVM-65813 is resolved
+    static constexpr bool value = su_ns::IsSyclLogicalAnd<T, ScanOpT>::value ||
+                                  su_ns::IsSyclLogicalOr<T, ScanOpT>::value;
+};
+
+template <typename T, typename ScanOpT> struct can_use_inclusive_scan_over_group
+{
+    static constexpr bool value =
+        sycl::has_known_identity<ScanOpT, T>::value &&
+        !needs_workaround<T, ScanOpT>::value;
+};
+
+namespace detail
+{
+template <typename T> class stack_t
+{
+    T *src_;
+    std::size_t size_;
+    T *local_scans_;
+
+public:
+    stack_t() : src_{}, size_{}, local_scans_{} {}
+    stack_t(T *src, std::size_t sz, T *local_scans)
+        : src_(src), size_(sz), local_scans_(local_scans)
+    {
+    }
+    ~stack_t(){};
+
+    T *get_src_ptr() const
+    {
+        return src_;
+    }
+
+    std::size_t get_size() const
+    {
+        return size_;
+    }
+
+    T *get_local_scans_ptr() const
+    {
+        return local_scans_;
+    }
+};
+
+template <typename T> class stack_strided_t
+{
+    T *src_;
+    std::size_t size_;
+    T *local_scans_;
+    std::size_t local_stride_;
+
+public:
+    stack_strided_t() : src_{}, size_{}, local_scans_{}, local_stride_{} {}
+    stack_strided_t(T *src,
+                    std::size_t sz,
+                    T *local_scans,
+                    std::size_t local_stride)
+        : src_(src), size_(sz), local_scans_(local_scans),
+          local_stride_(local_stride)
+    {
+    }
+    ~stack_strided_t(){};
+
+    T *get_src_ptr() const
+    {
+        return src_;
+    }
+
+    std::size_t get_size() const
+    {
+        return size_;
+    }
+
+    T *get_local_scans_ptr() const
+    {
+        return local_scans_;
+    }
+
+    std::size_t get_local_stride() const
+    {
+        return local_stride_;
+    }
+};
+
+} // end of namespace detail
+
+// Iterative cumulative summation
+
+using nwiT = std::uint32_t;
+
+template <typename inputT,
+          typename outputT,
+          nwiT n_wi,
+          typename IterIndexerT,
+          typename InpIndexerT,
+          typename OutIndexerT,
+          typename TransformerT,
+          typename ScanOpT,
+          bool include_initial>
+class inclusive_scan_iter_local_scan_blocked_krn;
+
+template <typename inputT,
+          typename outputT,
+          nwiT n_wi,
+          typename IterIndexerT,
+          typename InpIndexerT,
+          typename OutIndexerT,
+          typename TransformerT,
+          typename ScanOpT,
+          bool include_initial>
+class inclusive_scan_iter_local_scan_striped_krn;
+
+template <typename inputT,
+          typename outputT,
+          nwiT n_wi,
+          typename IterIndexerT,
+          typename InpIndexerT,
+          typename OutIndexerT,
+          typename TransformerT,
+          typename ScanOpT,
+          bool include_initial>
+sycl::event inclusive_scan_base_step_blocked(
+    sycl::queue &exec_q,
+    const std::uint32_t wg_size,
+    const std::size_t iter_nelems,
+    const std::size_t acc_nelems,
+    const inputT *input,
+    outputT *output,
+    const std::size_t s0,
+    const std::size_t s1,
+    const IterIndexerT &iter_indexer,
+    const InpIndexerT &inp_indexer,
+    const OutIndexerT &out_indexer,
+    TransformerT transformer,
+    const ScanOpT &scan_op,
+    outputT identity,
+    std::size_t &acc_groups,
+    const std::vector<sycl::event> &depends = {})
+{
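+    // Algorithm sketch: each work-group scans a chunk of n_wi * wg_size
+    // elements. Every work-item computes an inclusive scan of the n_wi
+    // values it loads, the per-item totals are combined with a group-wide
+    // inclusive scan, and the resulting per-item offsets are added back.
+    // Carries across work-groups are resolved by the caller in a follow-up
+    // update pass over the `acc_groups` partial results.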
acc_groups = ceiling_quotient(acc_nelems, n_wi * wg_size); + + sycl::event inc_scan_phase1_ev = exec_q.submit([&](sycl::handler &cgh) { + cgh.depends_on(depends); + + using slmT = sycl::local_accessor; + + auto gws = sycl::range<1>(iter_nelems * acc_groups * wg_size); + auto lws = sycl::range<1>(wg_size); + + auto ndRange = sycl::nd_range<1>(gws, lws); + + slmT slm_iscan_tmp(lws, cgh); + + using KernelName = inclusive_scan_iter_local_scan_blocked_krn< + inputT, outputT, n_wi, IterIndexerT, InpIndexerT, OutIndexerT, + TransformerT, ScanOpT, include_initial>; + + cgh.parallel_for(ndRange, [=, slm_iscan_tmp = + std::move(slm_iscan_tmp)]( + sycl::nd_item<1> it) { + const std::size_t gid = it.get_global_id(0); + const std::size_t lid = it.get_local_id(0); + + const std::uint32_t wg_size = it.get_local_range(0); + const std::size_t reduce_chunks = acc_groups * wg_size; + const std::size_t iter_gid = gid / reduce_chunks; + const std::size_t chunk_gid = gid - (iter_gid * reduce_chunks); + + const std::size_t i = chunk_gid * n_wi; + const auto &iter_offsets = iter_indexer(iter_gid); + const auto &inp_iter_offset = iter_offsets.get_first_offset(); + const auto &out_iter_offset = iter_offsets.get_second_offset(); + + std::array local_iscan; + +#pragma unroll + for (nwiT m_wi = 0; m_wi < n_wi; ++m_wi) { + const std::size_t i_m_wi = i + m_wi; + if constexpr (!include_initial) { + local_iscan[m_wi] = + (i_m_wi < acc_nelems) + ? transformer(input[inp_iter_offset + + inp_indexer(s0 + s1 * i_m_wi)]) + : identity; + } + else { + // shift input to the left by a single element relative to + // output + local_iscan[m_wi] = + (i_m_wi < acc_nelems && i_m_wi > 0) + ? transformer( + input[inp_iter_offset + + inp_indexer((s0 + s1 * i_m_wi) - 1)]) + : identity; + } + } + +#pragma unroll + for (nwiT m_wi = 1; m_wi < n_wi; ++m_wi) { + local_iscan[m_wi] = + scan_op(local_iscan[m_wi], local_iscan[m_wi - 1]); + } + // local_iscan is now result of + // inclusive scan of locally stored inputs + + outputT wg_iscan_val; + if constexpr (can_use_inclusive_scan_over_group::value) { + wg_iscan_val = sycl::inclusive_scan_over_group( + it.get_group(), local_iscan.back(), scan_op, identity); + } + else { + wg_iscan_val = su_ns::custom_inclusive_scan_over_group( + it.get_group(), it.get_sub_group(), slm_iscan_tmp, + local_iscan.back(), identity, scan_op); + // ensure all finished reading from SLM, to avoid race condition + // with subsequent writes into SLM + it.barrier(sycl::access::fence_space::local_space); + } + + slm_iscan_tmp[(lid + 1) % wg_size] = wg_iscan_val; + it.barrier(sycl::access::fence_space::local_space); + const outputT modifier = (lid == 0) ? 
identity : slm_iscan_tmp[lid]; + +#pragma unroll + for (nwiT m_wi = 0; m_wi < n_wi; ++m_wi) { + local_iscan[m_wi] = scan_op(local_iscan[m_wi], modifier); + } + + const std::size_t start = std::min(i, acc_nelems); + const std::size_t end = std::min(i + n_wi, acc_nelems); + const nwiT m_max = static_cast(end - start); + for (nwiT m_wi = 0; m_wi < m_max; ++m_wi) { + output[out_iter_offset + out_indexer(i + m_wi)] = + local_iscan[m_wi]; + } + }); + }); + + return inc_scan_phase1_ev; +} + +template +sycl::event inclusive_scan_base_step_striped( + sycl::queue &exec_q, + const std::uint32_t wg_size, + const std::size_t iter_nelems, + const std::size_t acc_nelems, + const inputT *input, + outputT *output, + const std::size_t s0, + const std::size_t s1, + const IterIndexerT &iter_indexer, + const InpIndexerT &inp_indexer, + const OutIndexerT &out_indexer, + TransformerT transformer, + const ScanOpT &scan_op, + outputT identity, + std::size_t &acc_groups, + const std::vector &depends = {}) +{ + const std::uint32_t reduce_nelems_per_wg = n_wi * wg_size; + acc_groups = + ceiling_quotient(acc_nelems, reduce_nelems_per_wg); + + sycl::event inc_scan_phase1_ev = exec_q.submit([&](sycl::handler &cgh) { + cgh.depends_on(depends); + + using slmT = sycl::local_accessor; + + const auto &gRange = sycl::range<1>{iter_nelems * acc_groups * wg_size}; + const auto &lRange = sycl::range<1>{wg_size}; + + const auto &ndRange = sycl::nd_range<1>{gRange, lRange}; + + slmT slm_iscan_tmp(reduce_nelems_per_wg, cgh); + + using KernelName = inclusive_scan_iter_local_scan_striped_krn< + inputT, outputT, n_wi, IterIndexerT, InpIndexerT, OutIndexerT, + TransformerT, ScanOpT, include_initial>; + + cgh.parallel_for(ndRange, [=, slm_iscan_tmp = + std::move(slm_iscan_tmp)]( + sycl::nd_item<1> it) { + const std::uint32_t lid = it.get_local_linear_id(); + const std::uint32_t wg_size = it.get_local_range(0); + + const auto &sg = it.get_sub_group(); + const std::uint32_t sgSize = sg.get_max_local_range()[0]; + const std::size_t sgroup_id = sg.get_group_id()[0]; + const std::uint32_t lane_id = sg.get_local_id()[0]; + + const std::size_t flat_group_id = it.get_group(0); + const std::size_t iter_gid = flat_group_id / acc_groups; + const std::size_t acc_group_id = + flat_group_id - (iter_gid * acc_groups); + + const auto &iter_offsets = iter_indexer(iter_gid); + const auto &inp_iter_offset = iter_offsets.get_first_offset(); + const auto &out_iter_offset = iter_offsets.get_second_offset(); + + std::array local_iscan{}; + + const std::size_t inp_id0 = acc_group_id * n_wi * wg_size + + sgroup_id * n_wi * sgSize + lane_id; + +#pragma unroll + for (nwiT m_wi = 0; m_wi < n_wi; ++m_wi) { + const std::size_t inp_id = inp_id0 + m_wi * sgSize; + if constexpr (!include_initial) { + local_iscan[m_wi] = + (inp_id < acc_nelems) + ? transformer(input[inp_iter_offset + + inp_indexer(s0 + s1 * inp_id)]) + : identity; + } + else { + // shift input to the left by a single element relative to + // output + local_iscan[m_wi] = + (inp_id < acc_nelems && inp_id > 0) + ? 
transformer( + input[inp_iter_offset + + inp_indexer((s0 + s1 * inp_id) - 1)]) + : identity; + } + } + + // change layout from striped to blocked + { + { + const std::uint32_t local_offset0 = lid * n_wi; +#pragma unroll + for (std::uint32_t i = 0; i < n_wi; ++i) { + slm_iscan_tmp[local_offset0 + i] = local_iscan[i]; + } + + it.barrier(sycl::access::fence_space::local_space); + } + + { + const std::uint32_t block_offset = + sgroup_id * sgSize * n_wi; + const std::uint32_t disp0 = lane_id * n_wi; +#pragma unroll + for (nwiT i = 0; i < n_wi; ++i) { + const std::uint32_t disp = disp0 + i; + + // disp == lane_id1 + i1 * sgSize; + const std::uint32_t i1 = disp / sgSize; + const std::uint32_t lane_id1 = disp - i1 * sgSize; + + const std::uint32_t disp_exchanged = + (lane_id1 * n_wi + i1); + + local_iscan[i] = + slm_iscan_tmp[block_offset + disp_exchanged]; + } + + it.barrier(sycl::access::fence_space::local_space); + } + } + +#pragma unroll + for (nwiT m_wi = 1; m_wi < n_wi; ++m_wi) { + local_iscan[m_wi] = + scan_op(local_iscan[m_wi], local_iscan[m_wi - 1]); + } + // local_iscan is now result of + // inclusive scan of locally stored inputs + + outputT wg_iscan_val; + if constexpr (can_use_inclusive_scan_over_group::value) { + wg_iscan_val = sycl::inclusive_scan_over_group( + it.get_group(), local_iscan.back(), scan_op, identity); + } + else { + wg_iscan_val = su_ns::custom_inclusive_scan_over_group( + it.get_group(), sg, slm_iscan_tmp, local_iscan.back(), + identity, scan_op); + // ensure all finished reading from SLM, to avoid race condition + // with subsequent writes into SLM + it.barrier(sycl::access::fence_space::local_space); + } + + slm_iscan_tmp[(lid + 1) % wg_size] = wg_iscan_val; + it.barrier(sycl::access::fence_space::local_space); + const outputT modifier = (lid == 0) ? identity : slm_iscan_tmp[lid]; + +#pragma unroll + for (nwiT m_wi = 0; m_wi < n_wi; ++m_wi) { + local_iscan[m_wi] = scan_op(local_iscan[m_wi], modifier); + } + + it.barrier(sycl::access::fence_space::local_space); + + // convert back to blocked layout + {{const std::uint32_t local_offset0 = lid * n_wi; +#pragma unroll + for (nwiT m_wi = 0; m_wi < n_wi; ++m_wi) { + slm_iscan_tmp[local_offset0 + m_wi] = local_iscan[m_wi]; + } + + it.barrier(sycl::access::fence_space::local_space); + } + } + + { + const std::uint32_t block_offset = sgroup_id * sgSize * n_wi + lane_id; +#pragma unroll + for (nwiT m_wi = 0; m_wi < n_wi; ++m_wi) { + const std::uint32_t m_wi_scaled = m_wi * sgSize; + const std::size_t out_id = inp_id0 + m_wi_scaled; + if (out_id < acc_nelems) { + output[out_iter_offset + out_indexer(out_id)] = + slm_iscan_tmp[block_offset + m_wi_scaled]; + } + } + } +}); +}); + +return inc_scan_phase1_ev; +} + +template +sycl::event + inclusive_scan_base_step(sycl::queue &exec_q, + const std::uint32_t wg_size, + const std::size_t iter_nelems, + const std::size_t acc_nelems, + const inputT *input, + outputT *output, + const std::size_t s0, + const std::size_t s1, + const IterIndexerT &iter_indexer, + const InpIndexerT &inp_indexer, + const OutIndexerT &out_indexer, + TransformerT transformer, + const ScanOpT &scan_op, + outputT identity, + std::size_t &acc_groups, + const std::vector &depends = {}) +{ + // For small stride use striped load/store. + // Threshold value chosen experimentally. 
+ if (s1 <= 16) { + return inclusive_scan_base_step_striped< + inputT, outputT, n_wi, IterIndexerT, InpIndexerT, OutIndexerT, + TransformerT, ScanOpT, include_initial>( + exec_q, wg_size, iter_nelems, acc_nelems, input, output, s0, s1, + iter_indexer, inp_indexer, out_indexer, transformer, scan_op, + identity, acc_groups, depends); + } + else { + return inclusive_scan_base_step_blocked< + inputT, outputT, n_wi, IterIndexerT, InpIndexerT, OutIndexerT, + TransformerT, ScanOpT, include_initial>( + exec_q, wg_size, iter_nelems, acc_nelems, input, output, s0, s1, + iter_indexer, inp_indexer, out_indexer, transformer, scan_op, + identity, acc_groups, depends); + } +} + +template +class inclusive_scan_1d_iter_chunk_update_krn; + +template +sycl::event update_local_chunks_1d(sycl::queue &exec_q, + outputT *src, + std::size_t src_size, + const outputT *local_scans, + std::size_t chunk_size, + const sycl::event &dependent_event) +{ + const auto &ctx = exec_q.get_context(); + const auto &dev = exec_q.get_device(); + + const auto &kernel_id = sycl::get_kernel_id(); + auto kb = sycl::get_kernel_bundle( + ctx, {dev}, {kernel_id}); + auto krn = kb.get_kernel(kernel_id); + + const std::uint32_t sg_size = krn.template get_info< + sycl::info::kernel_device_specific::max_sub_group_size>(dev); + + // output[ chunk_size * (i + 1) + j] += temp[i] + sycl::event update_event = exec_q.submit([&](sycl::handler &cgh) { + cgh.depends_on(dependent_event); + cgh.use_kernel_bundle(kb); + + static constexpr nwiT updates_per_wi = n_wi; + const std::size_t n_items = + ceiling_quotient(src_size, sg_size * n_wi) * sg_size; + + sycl::range<1> gRange{n_items}; + sycl::range<1> lRange{sg_size}; + sycl::nd_range<1> ndRange{gRange, lRange}; + + cgh.parallel_for( + ndRange, + [chunk_size, src, src_size, local_scans](sycl::nd_item<1> ndit) { + static constexpr ScanOpT scan_op{}; + static constexpr outputT identity = + su_ns::Identity::value; + + const std::uint32_t lws = ndit.get_local_range(0); + const std::size_t block_offset = ndit.get_group(0) * n_wi * lws; +#pragma unroll + for (std::size_t i = 0; i < updates_per_wi; ++i) { + const std::size_t src_id = + block_offset + ndit.get_local_id(0) + i * lws; + if (src_id < src_size) { + const std::size_t scan_id = (src_id / chunk_size); + const outputT modifier = + (scan_id > 0) ? 
local_scans[scan_id - 1] : identity; + src[src_id] = scan_op(src[src_id], modifier); + } + } + }); + }); + + return update_event; +} + +/* + * output[j] = sum( input[s0 + i * s1], 0 <= i <= j) + * for 0 <= j < n_elems + */ +template +sycl::event inclusive_scan_iter_1d(sycl::queue &exec_q, + const std::uint32_t wg_size, + const std::size_t n_elems, + const inputT *input, + outputT *output, + const std::size_t s0, + const std::size_t s1, + const IndexerT &indexer, + const TransformerT &transformer, + std::vector &host_tasks, + const std::vector &depends = {}) +{ + static constexpr ScanOpT scan_op{}; + static constexpr outputT identity = + su_ns::Identity::value; + + static constexpr std::size_t _iter_nelems = 1; + + using IterIndexerT = dpctl::tensor::offset_utils::TwoZeroOffsets_Indexer; + static constexpr IterIndexerT _no_op_iter_indexer{}; + + using NoOpIndexerT = dpctl::tensor::offset_utils::NoOpIndexer; + static constexpr NoOpIndexerT _no_op_indexer{}; + + std::size_t n_groups; + sycl::event inc_scan_phase1_ev = + inclusive_scan_base_step( + exec_q, wg_size, _iter_nelems, n_elems, input, output, s0, s1, + _no_op_iter_indexer, indexer, _no_op_indexer, transformer, scan_op, + identity, n_groups, depends); + + sycl::event dependent_event = inc_scan_phase1_ev; + if (n_groups > 1) { + const std::size_t chunk_size = wg_size * n_wi; + + // how much of temporary allocation do we need + std::size_t n_groups_ = n_groups; + std::size_t temp_size = 0; + while (n_groups_ > 1) { + const std::size_t this_size = (n_groups_ - 1); + temp_size += this_size; + n_groups_ = ceiling_quotient(this_size, chunk_size); + } + + // allocate + auto temp_owner = + dpctl::tensor::alloc_utils::smart_malloc_device(temp_size, + exec_q); + outputT *temp = temp_owner.get(); + + std::vector> stack{}; + + // inclusive scans over blocks + n_groups_ = n_groups; + outputT *src = output; + outputT *local_scans = temp; + + using NoOpTransformerT = NoOpTransformer; + static constexpr NoOpTransformerT _no_op_transformer{}; + std::size_t size_to_update = n_elems; + while (n_groups_ > 1) { + + const std::size_t src_size = n_groups_ - 1; + dependent_event = + inclusive_scan_base_step( + exec_q, wg_size, _iter_nelems, src_size, src, local_scans, + chunk_size - 1, chunk_size, _no_op_iter_indexer, + _no_op_indexer, _no_op_indexer, _no_op_transformer, scan_op, + identity, n_groups_, // n_groups_ is modified in place + {dependent_event}); + stack.push_back({src, size_to_update, local_scans}); + src = local_scans; + local_scans += src_size; + size_to_update = src_size; + } + + for (std::size_t reverse_stack_id = 0; reverse_stack_id < stack.size(); + ++reverse_stack_id) + { + const std::size_t stack_id = stack.size() - 1 - reverse_stack_id; + + const auto &stack_elem = stack[stack_id]; + outputT *src = stack_elem.get_src_ptr(); + const std::size_t src_size = stack_elem.get_size(); + const outputT *local_scans = stack_elem.get_local_scans_ptr(); + + using UpdateKernelName = + class inclusive_scan_1d_iter_chunk_update_krn; + + dependent_event = update_local_chunks_1d( + exec_q, src, src_size, local_scans, chunk_size, + dependent_event); + } + + sycl::event free_ev = dpctl::tensor::alloc_utils::async_smart_free( + exec_q, {dependent_event}, temp_owner); + + host_tasks.push_back(free_ev); + } + + return dependent_event; +} + +typedef sycl::event (*accumulate_1d_contig_impl_fn_ptr_t)( + sycl::queue &, + std::size_t, + const char *, + char *, + std::vector &, + const std::vector &); + +template +sycl::event + accumulate_1d_contig_impl(sycl::queue 
&q, + std::size_t n_elems, + const char *src, + char *dst, + std::vector &host_tasks, + const std::vector &depends = {}) +{ + const srcT *src_data_ptr = reinterpret_cast(src); + dstT *dst_data_ptr = reinterpret_cast(dst); + + using NoOpIndexerT = dpctl::tensor::offset_utils::NoOpIndexer; + static constexpr NoOpIndexerT flat_indexer{}; + static constexpr transformerT transformer{}; + + static constexpr std::size_t s0 = 0; + static constexpr std::size_t s1 = 1; + + sycl::event comp_ev; + const sycl::device &dev = q.get_device(); + if (dev.has(sycl::aspect::cpu)) { + static constexpr nwiT n_wi_for_cpu = 8; + const std::uint32_t wg_size = 256; + comp_ev = inclusive_scan_iter_1d( + q, wg_size, n_elems, src_data_ptr, dst_data_ptr, s0, s1, + flat_indexer, transformer, host_tasks, depends); + } + else { + static constexpr nwiT n_wi_for_gpu = 4; + // base_scan_striped algorithm does not execute correctly + // on HIP device with wg_size > 64 + const std::uint32_t wg_size = + (q.get_backend() == sycl::backend::ext_oneapi_hip) ? 64 : 256; + comp_ev = inclusive_scan_iter_1d( + q, wg_size, n_elems, src_data_ptr, dst_data_ptr, s0, s1, + flat_indexer, transformer, host_tasks, depends); + } + return comp_ev; +} + +template +class inclusive_scan_final_chunk_update_krn; + +template +sycl::event final_update_local_chunks(sycl::queue &exec_q, + std::size_t iter_nelems, + outputT *src, + std::size_t src_size, + const outputT *local_scans, + std::size_t chunk_size, + std::size_t local_stride, + const OutIterIndexerT &out_iter_indexer, + const OutIndexerT &out_indexer, + sycl::event dependent_event) +{ + const auto &kernel_id = sycl::get_kernel_id(); + + auto const &ctx = exec_q.get_context(); + auto const &dev = exec_q.get_device(); + auto kb = sycl::get_kernel_bundle( + ctx, {dev}, {kernel_id}); + + auto krn = kb.get_kernel(kernel_id); + + const std::uint32_t sg_size = krn.template get_info< + sycl::info::kernel_device_specific::max_sub_group_size>(dev); + + static constexpr nwiT updates_per_wi = n_wi; + const std::size_t updates_per_sg = sg_size * updates_per_wi; + const std::size_t update_nelems = + ceiling_quotient(src_size, updates_per_sg) * sg_size; + + sycl::range<2> gRange{iter_nelems, update_nelems}; + sycl::range<2> lRange{1, sg_size}; + + sycl::nd_range<2> ndRange{gRange, lRange}; + + sycl::event update_event = exec_q.submit([&](sycl::handler &cgh) { + cgh.depends_on(dependent_event); + + cgh.parallel_for( + ndRange, [chunk_size, src_size, local_stride, src, local_scans, + out_iter_indexer, out_indexer](sycl::nd_item<2> ndit) { + static constexpr ScanOpT scan_op{}; + static constexpr outputT identity = + su_ns::Identity::value; + + const std::uint32_t lws = ndit.get_local_range(1); + + const std::size_t iter_gid = ndit.get_group(0); + + const std::size_t src_axis_id0 = + ndit.get_group(1) * updates_per_wi * lws + + ndit.get_local_id(1); + const std::size_t src_iter_id = out_iter_indexer(iter_gid); +#pragma unroll + for (nwiT i = 0; i < updates_per_wi; ++i) { + const std::size_t src_axis_id = src_axis_id0 + i * lws; + const std::size_t src_id = + out_indexer(src_axis_id) + src_iter_id; + + if (src_axis_id < src_size) { + const std::size_t scan_axis_id = + src_axis_id / chunk_size; + const std::size_t scan_id = + scan_axis_id + iter_gid * local_stride; + + const outputT modifier = (scan_axis_id > 0) + ? 
local_scans[scan_id - 1] + : identity; + + src[src_id] = scan_op(src[src_id], modifier); + } + } + }); + }); + + return update_event; +} + +template +class inclusive_scan_iter_chunk_update_krn; + +template +sycl::event update_local_chunks(sycl::queue &exec_q, + std::size_t iter_nelems, + outputT *src, + std::size_t src_size, + const outputT *local_scans, + std::size_t chunk_size, + std::size_t local_stride, + sycl::event dependent_event) +{ + static constexpr NoOpIndexer out_indexer{}; + static constexpr NoOpIndexer iter_out_indexer{}; + + return final_update_local_chunks( + exec_q, iter_nelems, src, src_size, local_scans, chunk_size, + local_stride, iter_out_indexer, out_indexer, dependent_event); +} + +template +sycl::event inclusive_scan_iter(sycl::queue &exec_q, + const std::uint32_t wg_size, + const std::size_t iter_nelems, + const std::size_t acc_nelems, + const inputT *input, + outputT *output, + const std::size_t s0, + const std::size_t s1, + const InpIterIndexerT &inp_iter_indexer, + const OutIterIndexerT &out_iter_indexer, + const InpIndexerT &inp_indexer, + const OutIndexerT &out_indexer, + const TransformerT &transformer, + std::vector &host_tasks, + const std::vector &depends = {}) +{ + static constexpr ScanOpT scan_op{}; + static constexpr outputT identity = + su_ns::Identity::value; + + using IterIndexerT = + dpctl::tensor::offset_utils::TwoOffsets_CombinedIndexer< + InpIterIndexerT, OutIterIndexerT>; + const IterIndexerT iter_indexer{inp_iter_indexer, out_iter_indexer}; + + std::size_t acc_groups; + sycl::event inc_scan_phase1_ev = + inclusive_scan_base_step( + exec_q, wg_size, iter_nelems, acc_nelems, input, output, s0, s1, + iter_indexer, inp_indexer, out_indexer, transformer, scan_op, + identity, acc_groups, depends); + + sycl::event dependent_event = inc_scan_phase1_ev; + if (acc_groups > 1) { + const std::size_t chunk_size = wg_size * n_wi; + + // how much of temporary allocation do we need + std::size_t acc_groups_ = acc_groups; + std::size_t temp_size = 0; + while (acc_groups_ > 1) { + const std::size_t this_size = (acc_groups_ - 1); + temp_size += this_size; + acc_groups_ = ceiling_quotient(this_size, chunk_size); + } + + // allocate + auto temp_owner = + dpctl::tensor::alloc_utils::smart_malloc_device( + iter_nelems * temp_size, exec_q); + outputT *temp = temp_owner.get(); + + std::vector> stack{}; + + // inclusive scans over blocks + acc_groups_ = acc_groups; + outputT *src = output; + outputT *local_scans = temp; + + using NoOpIndexerT = dpctl::tensor::offset_utils::NoOpIndexer; + static constexpr NoOpIndexerT _no_op_indexer{}; + using NoOpTransformerT = NoOpTransformer; + static constexpr NoOpTransformerT _no_op_transformer{}; + std::size_t size_to_update = acc_nelems; + + { + std::size_t src_size = acc_groups - 1; + using LocalScanIndexerT = + dpctl::tensor::offset_utils::Strided1DIndexer; + const LocalScanIndexerT scan_iter_indexer{/* size */ iter_nelems, + /* step */ src_size}; + + using IterIndexerT = + dpctl::tensor::offset_utils::TwoOffsets_CombinedIndexer< + OutIterIndexerT, LocalScanIndexerT>; + const IterIndexerT iter_indexer_{out_iter_indexer, + scan_iter_indexer}; + + dependent_event = + inclusive_scan_base_step( + exec_q, wg_size, iter_nelems, src_size, src, local_scans, + chunk_size - 1, chunk_size, iter_indexer_, out_indexer, + _no_op_indexer, _no_op_transformer, scan_op, identity, + acc_groups_, // acc_groups_ is modified in place + {dependent_event}); + stack.push_back({src, size_to_update, local_scans, src_size}); + src = local_scans; + 
local_scans += src_size * iter_nelems; + size_to_update = src_size; + } + + while (acc_groups_ > 1) { + std::size_t src_size = acc_groups_ - 1; + + using LocalScanIndexerT = + dpctl::tensor::offset_utils::Strided1DIndexer; + const LocalScanIndexerT scan1_iter_indexer{ + /* size */ iter_nelems, + /* step */ size_to_update}; + const LocalScanIndexerT scan2_iter_indexer{/* size */ iter_nelems, + /* step */ src_size}; + + using IterIndexerT = + dpctl::tensor::offset_utils::TwoOffsets_CombinedIndexer< + LocalScanIndexerT, LocalScanIndexerT>; + const IterIndexerT iter_indexer_{scan1_iter_indexer, + scan2_iter_indexer}; + + dependent_event = + inclusive_scan_base_step( + exec_q, wg_size, iter_nelems, src_size, src, local_scans, + chunk_size - 1, chunk_size, iter_indexer_, _no_op_indexer, + _no_op_indexer, _no_op_transformer, scan_op, identity, + acc_groups_, // acc_groups_ is modified in place + {dependent_event}); + stack.push_back({src, size_to_update, local_scans, src_size}); + src = local_scans; + local_scans += src_size * iter_nelems; + size_to_update = src_size; + } + + for (std::size_t reverse_stack_id = 0; + reverse_stack_id < stack.size() - 1; ++reverse_stack_id) + { + const std::size_t stack_id = stack.size() - 1 - reverse_stack_id; + + const auto &stack_elem = stack[stack_id]; + outputT *src = stack_elem.get_src_ptr(); + std::size_t src_size = stack_elem.get_size(); + outputT *local_scans = stack_elem.get_local_scans_ptr(); + std::size_t local_stride = stack_elem.get_local_stride(); + + using UpdateKernelName = + class inclusive_scan_iter_chunk_update_krn; + + dependent_event = + update_local_chunks( + exec_q, iter_nelems, src, src_size, local_scans, chunk_size, + local_stride, dependent_event); + } + + // last stack element is always directly to output + { + const auto &stack_elem = stack[0]; + outputT *src = stack_elem.get_src_ptr(); + const std::size_t src_size = stack_elem.get_size(); + outputT *local_scans = stack_elem.get_local_scans_ptr(); + const std::size_t local_stride = stack_elem.get_local_stride(); + + using UpdateKernelName = + class inclusive_scan_final_chunk_update_krn< + outputT, n_wi, OutIterIndexerT, OutIndexerT, ScanOpT>; + + dependent_event = + final_update_local_chunks( + exec_q, iter_nelems, src, src_size, local_scans, chunk_size, + local_stride, out_iter_indexer, out_indexer, + dependent_event); + } + + sycl::event free_ev = dpctl::tensor::alloc_utils::async_smart_free( + exec_q, {dependent_event}, temp_owner); + host_tasks.push_back(free_ev); + } + + return dependent_event; +} + +typedef sycl::event (*accumulate_strided_impl_fn_ptr_t)( + sycl::queue &, + std::size_t, + std::size_t, + const char *, + int, + const ssize_t *, + ssize_t, + ssize_t, + int, + const ssize_t *, + char *, + std::vector &, + const std::vector &); + +template +sycl::event + accumulate_strided_impl(sycl::queue &q, + std::size_t iter_nelems, + std::size_t acc_nelems, + const char *src, + int iter_nd, + const ssize_t *iter_shape_strides, + ssize_t inp_iter_offset, + ssize_t out_iter_offset, + int acc_nd, + const ssize_t *acc_shape_strides, + char *dst, + std::vector &host_tasks, + const std::vector &depends = {}) +{ + const srcT *src_data_ptr = reinterpret_cast(src); + dstT *dst_data_ptr = reinterpret_cast(dst); + + using InpIndexerT = dpctl::tensor::offset_utils::StridedIndexer; + const InpIndexerT inp_axis_indexer{acc_nd, 0, acc_shape_strides}; + const InpIndexerT inp_iter_indexer{iter_nd, inp_iter_offset, + iter_shape_strides}; + + using OutIndexerT = 
dpctl::tensor::offset_utils::UnpackedStridedIndexer; + const OutIndexerT out_axis_indexer{acc_nd, 0, acc_shape_strides, + acc_shape_strides + 2 * acc_nd}; + const OutIndexerT out_iter_indexer{iter_nd, out_iter_offset, + iter_shape_strides, + iter_shape_strides + 2 * iter_nd}; + + static constexpr transformerT transformer{}; + + static constexpr std::size_t s0 = 0; + static constexpr std::size_t s1 = 1; + + const sycl::device &dev = q.get_device(); + sycl::event comp_ev; + if (dev.has(sycl::aspect::cpu)) { + static constexpr nwiT n_wi_for_cpu = 8; + const std::uint32_t wg_size = 256; + comp_ev = + inclusive_scan_iter( + q, wg_size, iter_nelems, acc_nelems, src_data_ptr, dst_data_ptr, + s0, s1, inp_iter_indexer, out_iter_indexer, inp_axis_indexer, + out_axis_indexer, transformer, host_tasks, depends); + } + else { + static constexpr nwiT n_wi_for_gpu = 4; + // base_scan_striped algorithm does not execute correctly + // on HIP device with wg_size > 64 + const std::uint32_t wg_size = + (q.get_backend() == sycl::backend::ext_oneapi_hip) ? 64 : 256; + comp_ev = + inclusive_scan_iter( + q, wg_size, iter_nelems, acc_nelems, src_data_ptr, dst_data_ptr, + s0, s1, inp_iter_indexer, out_iter_indexer, inp_axis_indexer, + out_axis_indexer, transformer, host_tasks, depends); + } + + return comp_ev; +} + +typedef std::size_t (*cumsum_val_contig_impl_fn_ptr_t)( + sycl::queue &, + std::size_t, + const char *, + char *, + std::vector &, + const std::vector &); + +template +std::size_t cumsum_val_contig_impl(sycl::queue &q, + std::size_t n_elems, + const char *mask, + char *cumsum, + std::vector &host_tasks, + const std::vector &depends = {}) +{ + const maskT *mask_data_ptr = reinterpret_cast(mask); + cumsumT *cumsum_data_ptr = reinterpret_cast(cumsum); + + using NoOpIndexerT = dpctl::tensor::offset_utils::NoOpIndexer; + static constexpr NoOpIndexerT flat_indexer{}; + static constexpr transformerT transformer{}; + + static constexpr std::size_t s0 = 0; + static constexpr std::size_t s1 = 1; + static constexpr bool include_initial = false; + using AccumulateOpT = sycl::plus; + + sycl::event comp_ev; + const sycl::device &dev = q.get_device(); + if (dev.has(sycl::aspect::cpu)) { + static constexpr nwiT n_wi_for_cpu = 8; + const std::uint32_t wg_size = 256; + comp_ev = inclusive_scan_iter_1d( + q, wg_size, n_elems, mask_data_ptr, cumsum_data_ptr, s0, s1, + flat_indexer, transformer, host_tasks, depends); + } + else { + static constexpr nwiT n_wi_for_gpu = 4; + // base_scan_striped algorithm does not execute correctly + // on HIP device with wg_size > 64 + const std::uint32_t wg_size = + (q.get_backend() == sycl::backend::ext_oneapi_hip) ? 
64 : 256; + comp_ev = inclusive_scan_iter_1d( + q, wg_size, n_elems, mask_data_ptr, cumsum_data_ptr, s0, s1, + flat_indexer, transformer, host_tasks, depends); + } + cumsumT *last_elem = cumsum_data_ptr + (n_elems - 1); + + auto host_usm_owner = + dpctl::tensor::alloc_utils::smart_malloc_host(1, q); + cumsumT *last_elem_host_usm = host_usm_owner.get(); + + sycl::event copy_e = q.submit([&](sycl::handler &cgh) { + cgh.depends_on(comp_ev); + cgh.copy(last_elem, last_elem_host_usm, 1); + }); + copy_e.wait(); + std::size_t return_val = static_cast(*last_elem_host_usm); + + // explicitly free USM host allocation, by invoking deleter of + // the unique_ptr + host_usm_owner.reset(nullptr); + + return return_val; +} + +template +struct MaskPositionsContigFactoryForInt32 +{ + fnT get() + { + using cumsumT = std::int32_t; + fnT fn = + cumsum_val_contig_impl>; + return fn; + } +}; + +template +struct MaskPositionsContigFactoryForInt64 +{ + fnT get() + { + using cumsumT = std::int64_t; + fnT fn = + cumsum_val_contig_impl>; + return fn; + } +}; + +template +struct Cumsum1DContigFactory +{ + fnT get() + { + if constexpr (std::is_integral_v) { + using cumsumT = std::int64_t; + fnT fn = + cumsum_val_contig_impl>; + return fn; + } + else { + return nullptr; + } + } +}; + +typedef std::size_t (*cumsum_val_strided_impl_fn_ptr_t)( + sycl::queue &, + std::size_t, + const char *, + int, + const ssize_t *, + char *, + std::vector &, + const std::vector &); + +template +std::size_t + cumsum_val_strided_impl(sycl::queue &q, + std::size_t n_elems, + const char *mask, + int nd, + const ssize_t *shape_strides, + char *cumsum, + std::vector &host_tasks, + const std::vector &depends = {}) +{ + const maskT *mask_data_ptr = reinterpret_cast(mask); + cumsumT *cumsum_data_ptr = reinterpret_cast(cumsum); + + using StridedIndexerT = dpctl::tensor::offset_utils::StridedIndexer; + const StridedIndexerT strided_indexer{nd, 0, shape_strides}; + static constexpr transformerT transformer{}; + + static constexpr std::size_t s0 = 0; + static constexpr std::size_t s1 = 1; + static constexpr bool include_initial = false; + using AccumulateOpT = sycl::plus; + + const sycl::device &dev = q.get_device(); + sycl::event comp_ev; + if (dev.has(sycl::aspect::cpu)) { + static constexpr nwiT n_wi_for_cpu = 8; + const std::uint32_t wg_size = 256; + comp_ev = inclusive_scan_iter_1d( + q, wg_size, n_elems, mask_data_ptr, cumsum_data_ptr, s0, s1, + strided_indexer, transformer, host_tasks, depends); + } + else { + static constexpr nwiT n_wi_for_gpu = 4; + // base_scan_striped algorithm does not execute correctly + // on HIP device with wg_size > 64 + const std::uint32_t wg_size = + (q.get_backend() == sycl::backend::ext_oneapi_hip) ? 
64 : 256; + comp_ev = inclusive_scan_iter_1d( + q, wg_size, n_elems, mask_data_ptr, cumsum_data_ptr, s0, s1, + strided_indexer, transformer, host_tasks, depends); + } + + cumsumT *last_elem = cumsum_data_ptr + (n_elems - 1); + + auto host_usm_owner = + dpctl::tensor::alloc_utils::smart_malloc_host(1, q); + cumsumT *last_elem_host_usm = host_usm_owner.get(); + + sycl::event copy_e = q.submit([&](sycl::handler &cgh) { + cgh.depends_on(comp_ev); + cgh.copy(last_elem, last_elem_host_usm, 1); + }); + copy_e.wait(); + std::size_t return_val = static_cast(*last_elem_host_usm); + + // explicitly free USM-host temporary, by invoking deleter of + // the unique_ptr + host_usm_owner.reset(nullptr); + + return return_val; +} + +template +struct MaskPositionsStridedFactoryForInt32 +{ + fnT get() + { + using cumsumT = std::int32_t; + fnT fn = + cumsum_val_strided_impl>; + return fn; + } +}; + +template +struct MaskPositionsStridedFactoryForInt64 +{ + fnT get() + { + using cumsumT = std::int64_t; + fnT fn = + cumsum_val_strided_impl>; + return fn; + } +}; + +template +struct Cumsum1DStridedFactory +{ + fnT get() + { + if constexpr (std::is_integral_v) { + using cumsumT = std::int64_t; + fnT fn = + cumsum_val_strided_impl>; + return fn; + } + else { + return nullptr; + } + } +}; + +} // namespace dpctl::tensor::kernels::accumulators diff --git a/dpctl_ext/tensor/libtensor/include/kernels/boolean_advanced_indexing.hpp b/dpctl_ext/tensor/libtensor/include/kernels/boolean_advanced_indexing.hpp new file mode 100644 index 000000000000..046ad87d7d78 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/include/kernels/boolean_advanced_indexing.hpp @@ -0,0 +1,853 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. 
+//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines kernels for advanced tensor index operations. +//===---------------------------------------------------------------------===// + +#pragma once +#include +#include +#include +#include +#include + +#include + +#include "dpctl_tensor_types.hpp" +#include "utils/offset_utils.hpp" +#include "utils/type_dispatch_building.hpp" + +namespace dpctl::tensor::kernels::indexing +{ + +using dpctl::tensor::ssize_t; +using namespace dpctl::tensor::offset_utils; + +template +struct MaskedExtractStridedFunctor +{ + MaskedExtractStridedFunctor(const dataT *src_data_p, + const indT *cumsum_data_p, + dataT *dst_data_p, + std::size_t masked_iter_size, + const OrthogIndexerT &orthog_src_dst_indexer_, + const MaskedSrcIndexerT &masked_src_indexer_, + const MaskedDstIndexerT &masked_dst_indexer_, + const LocalAccessorT &lacc_) + : src(src_data_p), cumsum(cumsum_data_p), dst(dst_data_p), + masked_nelems(masked_iter_size), + orthog_src_dst_indexer(orthog_src_dst_indexer_), + masked_src_indexer(masked_src_indexer_), + masked_dst_indexer(masked_dst_indexer_), lacc(lacc_) + { + static_assert( + std::is_same_v); + } + + void operator()(sycl::nd_item<2> ndit) const + { + const std::size_t orthog_i = ndit.get_global_id(0); + const std::uint32_t l_i = ndit.get_local_id(1); + const std::uint32_t lws = ndit.get_local_range(1); + + const std::size_t masked_i = ndit.get_global_id(1); + const std::size_t masked_block_start = masked_i - l_i; + + const std::size_t max_offset = masked_nelems + 1; + for (std::uint32_t i = l_i; i < lacc.size(); i += lws) { + const std::size_t offset = masked_block_start + i; + lacc[i] = (offset == 0) ? indT(0) + : (offset < max_offset) ? cumsum[offset - 1] + : cumsum[masked_nelems - 1] + 1; + } + + sycl::group_barrier(ndit.get_group()); + + const indT current_running_count = lacc[l_i + 1]; + const bool mask_set = (masked_i == 0) + ? (current_running_count == 1) + : (current_running_count == lacc[l_i] + 1); + + // dst[cumsum[i] - 1, j] = src[i, j] + // if cumsum[i] == ((i > 0) ? 
cumsum[i-1] + 1 : 1) + if (mask_set && (masked_i < masked_nelems)) { + const auto &orthog_offsets = orthog_src_dst_indexer(orthog_i); + + const std::size_t total_src_offset = + masked_src_indexer(masked_i) + + orthog_offsets.get_first_offset(); + const std::size_t total_dst_offset = + masked_dst_indexer(current_running_count - 1) + + orthog_offsets.get_second_offset(); + + dst[total_dst_offset] = src[total_src_offset]; + } + } + +private: + const dataT *src = nullptr; + const indT *cumsum = nullptr; + dataT *dst = nullptr; + std::size_t masked_nelems = 0; + // has nd, shape, src_strides, dst_strides for + // dimensions that ARE NOT masked + OrthogIndexerT orthog_src_dst_indexer; + // has nd, shape, src_strides for + // dimensions that ARE masked + MaskedSrcIndexerT masked_src_indexer; + // has 1, dst_strides for dimensions that ARE masked + MaskedDstIndexerT masked_dst_indexer; + LocalAccessorT lacc; +}; + +template +struct MaskedPlaceStridedFunctor +{ + MaskedPlaceStridedFunctor(dataT *dst_data_p, + const indT *cumsum_data_p, + const dataT *rhs_data_p, + std::size_t masked_iter_size, + const OrthogIndexerT &orthog_dst_rhs_indexer_, + const MaskedDstIndexerT &masked_dst_indexer_, + const MaskedRhsIndexerT &masked_rhs_indexer_, + const LocalAccessorT &lacc_) + : dst(dst_data_p), cumsum(cumsum_data_p), rhs(rhs_data_p), + masked_nelems(masked_iter_size), + orthog_dst_rhs_indexer(orthog_dst_rhs_indexer_), + masked_dst_indexer(masked_dst_indexer_), + masked_rhs_indexer(masked_rhs_indexer_), lacc(lacc_) + { + static_assert( + std::is_same_v); + } + + void operator()(sycl::nd_item<2> ndit) const + { + const std::size_t orthog_i = ndit.get_global_id(0); + const std::uint32_t l_i = ndit.get_local_id(1); + const std::uint32_t lws = ndit.get_local_range(1); + + const std::size_t masked_i = ndit.get_global_id(1); + const std::size_t masked_block_start = masked_i - l_i; + + const std::size_t max_offset = masked_nelems + 1; + for (std::uint32_t i = l_i; i < lacc.size(); i += lws) { + const std::size_t offset = masked_block_start + i; + lacc[i] = (offset == 0) ? indT(0) + : (offset < max_offset) ? cumsum[offset - 1] + : cumsum[masked_nelems - 1] + 1; + } + + sycl::group_barrier(ndit.get_group()); + + const indT current_running_count = lacc[l_i + 1]; + const bool mask_set = (masked_i == 0) + ? (current_running_count == 1) + : (current_running_count == lacc[l_i] + 1); + + // src[i, j] = rhs[cumsum[i] - 1, j] + // if cumsum[i] == ((i > 0) ? 
cumsum[i-1] + 1 : 1) + if (mask_set && (masked_i < masked_nelems)) { + const auto &orthog_offsets = orthog_dst_rhs_indexer(orthog_i); + + const std::size_t total_dst_offset = + masked_dst_indexer(masked_i) + + orthog_offsets.get_first_offset(); + const std::size_t total_rhs_offset = + masked_rhs_indexer(current_running_count - 1) + + orthog_offsets.get_second_offset(); + + dst[total_dst_offset] = rhs[total_rhs_offset]; + } + } + +private: + dataT *dst = nullptr; + const indT *cumsum = nullptr; + const dataT *rhs = nullptr; + std::size_t masked_nelems = 0; + // has nd, shape, dst_strides, rhs_strides for + // dimensions that ARE NOT masked + OrthogIndexerT orthog_dst_rhs_indexer; + // has nd, shape, dst_strides for + // dimensions that ARE masked + MaskedDstIndexerT masked_dst_indexer; + // has 1, rhs_strides for dimensions that ARE masked + MaskedRhsIndexerT masked_rhs_indexer; + LocalAccessorT lacc; +}; + +// ======= Masked extraction ================================ + +namespace detail +{ + +template +std::size_t _get_lws_impl(std::size_t n) +{ + if constexpr (sizeof...(IR) == 0) { + return I; + } + else { + return (n < I) ? _get_lws_impl(n) : I; + } +} + +inline std::size_t get_lws(std::size_t n) +{ + static constexpr std::size_t lws0 = 256u; + static constexpr std::size_t lws1 = 128u; + static constexpr std::size_t lws2 = 64u; + return _get_lws_impl(n); +} + +} // end of namespace detail + +template +class masked_extract_all_slices_contig_impl_krn; + +typedef sycl::event (*masked_extract_all_slices_contig_impl_fn_ptr_t)( + sycl::queue &, + ssize_t, + const char *, + const char *, + char *, + ssize_t, + ssize_t, + const std::vector &); + +template +sycl::event masked_extract_all_slices_contig_impl( + sycl::queue &exec_q, + ssize_t iteration_size, + const char *src_p, + const char *cumsum_p, + char *dst_p, + ssize_t dst_size, // dst is 1D + ssize_t dst_stride, + const std::vector &depends = {}) +{ + static constexpr TwoZeroOffsets_Indexer orthog_src_dst_indexer{}; + + static constexpr NoOpIndexer masked_src_indexer{}; + const Strided1DIndexer masked_dst_indexer(/* size */ dst_size, + /* step */ dst_stride); + + using KernelName = + class masked_extract_all_slices_contig_impl_krn; + + using LocalAccessorT = sycl::local_accessor; + using Impl = + struct MaskedExtractStridedFunctor; + + const std::size_t masked_extent = iteration_size; + + const std::size_t lws = detail::get_lws(masked_extent); + + const std::size_t n_groups = (iteration_size + lws - 1) / lws; + + sycl::range<2> gRange{1, n_groups * lws}; + sycl::range<2> lRange{1, lws}; + + sycl::nd_range<2> ndRange(gRange, lRange); + + const dataT *src_tp = reinterpret_cast(src_p); + const indT *cumsum_tp = reinterpret_cast(cumsum_p); + dataT *dst_tp = reinterpret_cast(dst_p); + + sycl::event comp_ev = exec_q.submit([&](sycl::handler &cgh) { + cgh.depends_on(depends); + + const std::size_t lacc_size = std::min(lws, masked_extent) + 1; + LocalAccessorT lacc(lacc_size, cgh); + + cgh.parallel_for( + ndRange, Impl(src_tp, cumsum_tp, dst_tp, masked_extent, + orthog_src_dst_indexer, masked_src_indexer, + masked_dst_indexer, lacc)); + }); + + return comp_ev; +} + +template +class masked_extract_all_slices_strided_impl_krn; + +typedef sycl::event (*masked_extract_all_slices_strided_impl_fn_ptr_t)( + sycl::queue &, + ssize_t, + const char *, + const char *, + char *, + int, + ssize_t const *, + ssize_t, + ssize_t, + const std::vector &); + +template +sycl::event masked_extract_all_slices_strided_impl( + sycl::queue &exec_q, + ssize_t 
iteration_size, + const char *src_p, + const char *cumsum_p, + char *dst_p, + int nd, + const ssize_t + *packed_src_shape_strides, // [src_shape, src_strides], length 2*nd + ssize_t dst_size, // dst is 1D + ssize_t dst_stride, + const std::vector &depends = {}) +{ + static constexpr TwoZeroOffsets_Indexer orthog_src_dst_indexer{}; + + /* StridedIndexer(int _nd, ssize_t _offset, ssize_t const + * *_packed_shape_strides) */ + const StridedIndexer masked_src_indexer(nd, 0, packed_src_shape_strides); + const Strided1DIndexer masked_dst_indexer(/* size */ dst_size, + /* step */ dst_stride); + + using KernelName = class masked_extract_all_slices_strided_impl_krn< + StridedIndexer, Strided1DIndexer, dataT, indT>; + + using LocalAccessorT = sycl::local_accessor; + using Impl = + struct MaskedExtractStridedFunctor; + + const std::size_t masked_nelems = iteration_size; + + const std::size_t lws = detail::get_lws(masked_nelems); + + const std::size_t n_groups = (masked_nelems + lws - 1) / lws; + + sycl::range<2> gRange{1, n_groups * lws}; + sycl::range<2> lRange{1, lws}; + + sycl::nd_range<2> ndRange(gRange, lRange); + + const dataT *src_tp = reinterpret_cast(src_p); + const indT *cumsum_tp = reinterpret_cast(cumsum_p); + dataT *dst_tp = reinterpret_cast(dst_p); + + sycl::event comp_ev = exec_q.submit([&](sycl::handler &cgh) { + cgh.depends_on(depends); + + const std::size_t lacc_size = std::min(lws, masked_nelems) + 1; + LocalAccessorT lacc(lacc_size, cgh); + + cgh.parallel_for( + ndRange, Impl(src_tp, cumsum_tp, dst_tp, iteration_size, + orthog_src_dst_indexer, masked_src_indexer, + masked_dst_indexer, lacc)); + }); + + return comp_ev; +} + +typedef sycl::event (*masked_extract_some_slices_strided_impl_fn_ptr_t)( + sycl::queue &, + ssize_t, + ssize_t, + const char *, + const char *, + char *, + int, + ssize_t const *, + ssize_t, + ssize_t, + int, + ssize_t const *, + ssize_t, + ssize_t, + const std::vector &); + +template +class masked_extract_some_slices_strided_impl_krn; + +template +sycl::event masked_extract_some_slices_strided_impl( + sycl::queue &exec_q, + ssize_t orthog_nelems, + ssize_t masked_nelems, + const char *src_p, + const char *cumsum_p, + char *dst_p, + int orthog_nd, + // [ortho_shape, ortho_src_strides, // ortho_dst_strides], + // length 3*ortho_nd + const ssize_t *packed_ortho_src_dst_shape_strides, + ssize_t ortho_src_offset, + ssize_t ortho_dst_offset, + int masked_nd, + // [masked_src_shape, masked_src_strides], + // length 2*masked_nd, mask_dst is 1D + const ssize_t *packed_masked_src_shape_strides, + ssize_t masked_dst_size, + ssize_t masked_dst_stride, + const std::vector &depends = {}) +{ + const TwoOffsets_StridedIndexer orthog_src_dst_indexer{ + orthog_nd, ortho_src_offset, ortho_dst_offset, + packed_ortho_src_dst_shape_strides}; + + const StridedIndexer masked_src_indexer{masked_nd, 0, + packed_masked_src_shape_strides}; + const Strided1DIndexer masked_dst_indexer{/* size */ masked_dst_size, + /* step */ masked_dst_stride}; + + using KernelName = class masked_extract_some_slices_strided_impl_krn< + TwoOffsets_StridedIndexer, StridedIndexer, Strided1DIndexer, dataT, + indT>; + + using LocalAccessorT = sycl::local_accessor; + using Impl = + struct MaskedExtractStridedFunctor; + + const std::size_t masked_extent = masked_nelems; + + const std::size_t lws = detail::get_lws(masked_extent); + + const std::size_t n_groups = ((masked_extent + lws - 1) / lws); + const std::size_t orthog_extent = static_cast(orthog_nelems); + + sycl::range<2> gRange{orthog_extent, n_groups * 
lws}; + sycl::range<2> lRange{1, lws}; + + sycl::nd_range<2> ndRange(gRange, lRange); + + const dataT *src_tp = reinterpret_cast(src_p); + const indT *cumsum_tp = reinterpret_cast(cumsum_p); + dataT *dst_tp = reinterpret_cast(dst_p); + + sycl::event comp_ev = exec_q.submit([&](sycl::handler &cgh) { + cgh.depends_on(depends); + + const std::size_t lacc_size = + std::min(lws, masked_extent) + 1; + LocalAccessorT lacc(lacc_size, cgh); + + cgh.parallel_for( + ndRange, Impl(src_tp, cumsum_tp, dst_tp, masked_nelems, + orthog_src_dst_indexer, masked_src_indexer, + masked_dst_indexer, lacc)); + }); + + return comp_ev; +} + +template +struct MaskExtractAllSlicesContigFactoryForInt32 +{ + fnT get() + { + fnT fn = masked_extract_all_slices_contig_impl; + return fn; + } +}; + +template +struct MaskExtractAllSlicesContigFactoryForInt64 +{ + fnT get() + { + fnT fn = masked_extract_all_slices_contig_impl; + return fn; + } +}; + +template +struct MaskExtractAllSlicesStridedFactoryForInt32 +{ + fnT get() + { + fnT fn = masked_extract_all_slices_strided_impl; + return fn; + } +}; + +template +struct MaskExtractAllSlicesStridedFactoryForInt64 +{ + fnT get() + { + fnT fn = masked_extract_all_slices_strided_impl; + return fn; + } +}; + +template +struct MaskExtractSomeSlicesStridedFactoryForInt32 +{ + fnT get() + { + fnT fn = masked_extract_some_slices_strided_impl; + return fn; + } +}; + +template +struct MaskExtractSomeSlicesStridedFactoryForInt64 +{ + fnT get() + { + fnT fn = masked_extract_some_slices_strided_impl; + return fn; + } +}; + +// Masked placement + +template +class masked_place_all_slices_strided_impl_krn; + +typedef sycl::event (*masked_place_all_slices_strided_impl_fn_ptr_t)( + sycl::queue &, + ssize_t, + char *, + const char *, + const char *, + int, + ssize_t const *, + ssize_t, + ssize_t, + const std::vector &); + +template +sycl::event masked_place_all_slices_strided_impl( + sycl::queue &exec_q, + ssize_t iteration_size, + char *dst_p, + const char *cumsum_p, + const char *rhs_p, + int nd, + const ssize_t + *packed_dst_shape_strides, // [dst_shape, dst_strides], length 2*nd + ssize_t rhs_size, // rhs is 1D + ssize_t rhs_stride, + const std::vector &depends = {}) +{ + static constexpr TwoZeroOffsets_Indexer orthog_dst_rhs_indexer{}; + + /* StridedIndexer(int _nd, ssize_t _offset, ssize_t const + * *_packed_shape_strides) */ + const StridedIndexer masked_dst_indexer(nd, 0, packed_dst_shape_strides); + const Strided1DCyclicIndexer masked_rhs_indexer(0, rhs_size, rhs_stride); + + using KernelName = class masked_place_all_slices_strided_impl_krn< + TwoZeroOffsets_Indexer, StridedIndexer, Strided1DCyclicIndexer, dataT, + indT>; + + static constexpr std::size_t nominal_lws = 256; + const std::size_t masked_extent = iteration_size; + const std::size_t lws = std::min(masked_extent, nominal_lws); + + const std::size_t n_groups = (masked_extent + lws - 1) / lws; + + sycl::range<2> gRange{1, n_groups * lws}; + sycl::range<2> lRange{1, lws}; + sycl::nd_range<2> ndRange{gRange, lRange}; + + using LocalAccessorT = sycl::local_accessor; + using Impl = + MaskedPlaceStridedFunctor; + + dataT *dst_tp = reinterpret_cast(dst_p); + const dataT *rhs_tp = reinterpret_cast(rhs_p); + const indT *cumsum_tp = reinterpret_cast(cumsum_p); + + sycl::event comp_ev = exec_q.submit([&](sycl::handler &cgh) { + cgh.depends_on(depends); + + const std::size_t lacc_size = std::min(masked_extent, lws) + 1; + LocalAccessorT lacc(lacc_size, cgh); + + cgh.parallel_for( + ndRange, Impl(dst_tp, cumsum_tp, rhs_tp, iteration_size, + 
orthog_dst_rhs_indexer, masked_dst_indexer, + masked_rhs_indexer, lacc)); + }); + + return comp_ev; +} + +typedef sycl::event (*masked_place_some_slices_strided_impl_fn_ptr_t)( + sycl::queue &, + ssize_t, + ssize_t, + char *, + const char *, + const char *, + int, + ssize_t const *, + ssize_t, + ssize_t, + int, + ssize_t const *, + ssize_t, + ssize_t, + const std::vector &); + +template +class masked_place_some_slices_strided_impl_krn; + +template +sycl::event masked_place_some_slices_strided_impl( + sycl::queue &exec_q, + ssize_t orthog_nelems, + ssize_t masked_nelems, + char *dst_p, + const char *cumsum_p, + const char *rhs_p, + int orthog_nd, + // [ortho_shape, ortho_dst_strides, ortho_rhs_strides], + // length 3*ortho_nd + const ssize_t *packed_ortho_dst_rhs_shape_strides, + ssize_t ortho_dst_offset, + ssize_t ortho_rhs_offset, + int masked_nd, + // [masked_dst_shape, masked_dst_strides], + // length 2*masked_nd, mask_dst is 1D + const ssize_t *packed_masked_dst_shape_strides, + ssize_t masked_rhs_size, + ssize_t masked_rhs_stride, + const std::vector &depends = {}) +{ + const TwoOffsets_StridedIndexer orthog_dst_rhs_indexer{ + orthog_nd, ortho_dst_offset, ortho_rhs_offset, + packed_ortho_dst_rhs_shape_strides}; + + /* StridedIndexer(int _nd, ssize_t _offset, ssize_t const + * *_packed_shape_strides) */ + const StridedIndexer masked_dst_indexer{masked_nd, 0, + packed_masked_dst_shape_strides}; + const Strided1DCyclicIndexer masked_rhs_indexer{0, masked_rhs_size, + masked_rhs_stride}; + + using KernelName = class masked_place_some_slices_strided_impl_krn< + TwoOffsets_StridedIndexer, StridedIndexer, Strided1DCyclicIndexer, + dataT, indT>; + + static constexpr std::size_t nominal_lws = 256; + const std::size_t orthog_extent = orthog_nelems; + const std::size_t masked_extent = masked_nelems; + const std::size_t lws = std::min(masked_extent, nominal_lws); + + const std::size_t n_groups = (masked_extent + lws - 1) / lws; + + sycl::range<2> gRange{orthog_extent, n_groups * lws}; + sycl::range<2> lRange{1, lws}; + sycl::nd_range<2> ndRange{gRange, lRange}; + + using LocalAccessorT = sycl::local_accessor; + using Impl = + MaskedPlaceStridedFunctor; + + dataT *dst_tp = reinterpret_cast(dst_p); + const dataT *rhs_tp = reinterpret_cast(rhs_p); + const indT *cumsum_tp = reinterpret_cast(cumsum_p); + + sycl::event comp_ev = exec_q.submit([&](sycl::handler &cgh) { + cgh.depends_on(depends); + + const std::size_t lacc_size = std::min(masked_extent, lws) + 1; + LocalAccessorT lacc(lacc_size, cgh); + + cgh.parallel_for( + ndRange, Impl(dst_tp, cumsum_tp, rhs_tp, masked_nelems, + orthog_dst_rhs_indexer, masked_dst_indexer, + masked_rhs_indexer, lacc)); + }); + + return comp_ev; +} + +template +struct MaskPlaceAllSlicesStridedFactoryForInt32 +{ + fnT get() + { + fnT fn = masked_place_all_slices_strided_impl; + return fn; + } +}; + +template +struct MaskPlaceAllSlicesStridedFactoryForInt64 +{ + fnT get() + { + fnT fn = masked_place_all_slices_strided_impl; + return fn; + } +}; + +template +struct MaskPlaceSomeSlicesStridedFactoryForInt32 +{ + fnT get() + { + fnT fn = masked_place_some_slices_strided_impl; + return fn; + } +}; + +template +struct MaskPlaceSomeSlicesStridedFactoryForInt64 +{ + fnT get() + { + fnT fn = masked_place_some_slices_strided_impl; + return fn; + } +}; + +// Non-zero + +template +class non_zero_indexes_krn; + +typedef sycl::event (*non_zero_indexes_fn_ptr_t)( + sycl::queue &, + ssize_t, + ssize_t, + int, + const char *, + char *, + const ssize_t *, + std::vector const &); + 
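+// Example of the unraveling performed by non_zero_indexes_impl below: for a
+// mask of shape {3, 4} (nd == 2), a set element at flat position i == 7 is
+// decomposed innermost dimension first,
+//   dim 1: q = 7 / 4 = 1, r = 7 - 1 * 4 = 3  -> indexes_data[cs_val + nz_elems] = 3
+//   dim 0: remaining quotient q = 1          -> indexes_data[cs_val] = 1
+// i.e. coordinates (1, 3) in row-major order, written one dimension per
+// stripe of nz_elems entries.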
+template +sycl::event non_zero_indexes_impl(sycl::queue &exec_q, + ssize_t iter_size, + ssize_t nz_elems, + int nd, + const char *cumsum_cp, + char *indexes_cp, + const ssize_t *mask_shape, + std::vector const &depends) +{ + const indT1 *cumsum_data = reinterpret_cast(cumsum_cp); + indT2 *indexes_data = reinterpret_cast(indexes_cp); + + static constexpr std::size_t nominal_lws = 256u; + const std::size_t masked_extent = iter_size; + const std::size_t lws = std::min(masked_extent, nominal_lws); + + const std::size_t n_groups = (masked_extent + lws - 1) / lws; + sycl::range<1> gRange{n_groups * lws}; + sycl::range<1> lRange{lws}; + + sycl::nd_range<1> ndRange{gRange, lRange}; + + sycl::event comp_ev = exec_q.submit([&](sycl::handler &cgh) { + cgh.depends_on(depends); + + const std::size_t lacc_size = std::min(lws, masked_extent) + 1; + sycl::local_accessor lacc(lacc_size, cgh); + + using KernelName = class non_zero_indexes_krn; + + cgh.parallel_for(ndRange, [=](sycl::nd_item<1> ndit) { + const std::size_t group_i = ndit.get_group(0); + const std::uint32_t l_i = ndit.get_local_id(0); + const std::uint32_t lws = ndit.get_local_range(0); + + const std::size_t masked_block_start = group_i * lws; + + for (std::uint32_t i = l_i; i < lacc.size(); i += lws) { + const std::size_t offset = masked_block_start + i; + lacc[i] = (offset == 0) ? indT1(0) + : (offset - 1 < masked_extent) + ? cumsum_data[offset - 1] + : cumsum_data[masked_extent - 1] + 1; + } + + sycl::group_barrier(ndit.get_group()); + + const std::size_t i = masked_block_start + l_i; + const auto cs_val = lacc[l_i]; + const bool cond = (lacc[l_i + 1] == cs_val + 1); + + if (cond && (i < masked_extent)) { + ssize_t i_ = static_cast(i); + for (int dim = nd; --dim > 0;) { + const auto sd = mask_shape[dim]; + const ssize_t q = i_ / sd; + const ssize_t r = (i_ - q * sd); + indexes_data[cs_val + dim * nz_elems] = + static_cast(r); + i_ = q; + } + indexes_data[cs_val] = static_cast(i_); + } + }); + }); + + return comp_ev; +} + +} // namespace dpctl::tensor::kernels::indexing diff --git a/dpctl_ext/tensor/libtensor/include/kernels/constructors.hpp b/dpctl_ext/tensor/libtensor/include/kernels/constructors.hpp index 22189ee3129c..26ae46707a6b 100644 --- a/dpctl_ext/tensor/libtensor/include/kernels/constructors.hpp +++ b/dpctl_ext/tensor/libtensor/include/kernels/constructors.hpp @@ -56,7 +56,8 @@ using dpctl::tensor::ssize_t; template class full_strided_kernel; -// template class eye_kernel; +template +class eye_kernel; using namespace dpctl::tensor::offset_utils; @@ -162,6 +163,99 @@ sycl::event full_strided_impl(sycl::queue &q, return fill_ev; } +/* ================ Eye ================== */ + +typedef sycl::event (*eye_fn_ptr_t)(sycl::queue &, + std::size_t nelems, // num_elements + ssize_t start, + ssize_t end, + ssize_t step, + char *, // dst_data_ptr + const std::vector &); + +template +class EyeFunctor +{ +private: + Ty *p = nullptr; + ssize_t start_v; + ssize_t end_v; + ssize_t step_v; + +public: + EyeFunctor(char *dst_p, + const ssize_t v0, + const ssize_t v1, + const ssize_t dv) + : p(reinterpret_cast(dst_p)), start_v(v0), end_v(v1), step_v(dv) + { + } + + void operator()(sycl::id<1> wiid) const + { + Ty set_v = 0; + ssize_t i = static_cast(wiid.get(0)); + if (i >= start_v and i <= end_v) { + if ((i - start_v) % step_v == 0) { + set_v = 1; + } + } + p[i] = set_v; + } +}; + +/*! + * @brief Function to populate 2D array with eye matrix. + * + * @param exec_q Sycl queue to which kernel is submitted for execution. 
+ * @param nelems Number of elements to assign. + * @param start Position of the first non-zero value. + * @param end Position of the last non-zero value. + * @param step Number of array elements between non-zeros. + * @param array_data Kernel accessible USM pointer for the destination array. + * @param depends List of events to wait for before starting computations, if + * any. + * + * @return Event to wait on to ensure that computation completes. + * @defgroup CtorKernels + */ +template +sycl::event eye_impl(sycl::queue &exec_q, + std::size_t nelems, + const ssize_t start, + const ssize_t end, + const ssize_t step, + char *array_data, + const std::vector &depends) +{ + dpctl::tensor::type_utils::validate_type_for_device(exec_q); + sycl::event eye_event = exec_q.submit([&](sycl::handler &cgh) { + cgh.depends_on(depends); + + using KernelName = eye_kernel; + using Impl = EyeFunctor; + + cgh.parallel_for(sycl::range<1>{nelems}, + Impl(array_data, start, end, step)); + }); + + return eye_event; +} + +/*! + * @brief Factory to get function pointer of type `fnT` for data type `Ty`. + * @ingroup CtorKernels + */ +template +struct EyeFactory +{ + fnT get() + { + fnT f = eye_impl; + return f; + } +}; + /* =========================== Tril and triu ============================== */ // define function type diff --git a/dpctl_ext/tensor/libtensor/source/accumulators.cpp b/dpctl_ext/tensor/libtensor/source/accumulators.cpp new file mode 100644 index 000000000000..82913010755a --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/accumulators.cpp @@ -0,0 +1,406 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. 
+//*****************************************************************************
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines functions of dpctl.tensor._tensor_impl extensions
+//===----------------------------------------------------------------------===//
+
+#include <cstddef>
+#include <cstdint>
+#include <stdexcept>
+#include <utility>
+#include <vector>
+
+#include <sycl/sycl.hpp>
+
+#include "dpnp4pybind11.hpp"
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+#include "kernels/accumulators.hpp"
+#include "simplify_iteration_space.hpp"
+#include "utils/memory_overlap.hpp"
+#include "utils/offset_utils.hpp"
+#include "utils/output_validation.hpp"
+#include "utils/sycl_alloc_utils.hpp"
+#include "utils/type_dispatch.hpp"
+
+namespace dpctl::tensor::py_internal
+{
+
+// Computation of positions of masked elements
+
+namespace py = pybind11;
+namespace td_ns = dpctl::tensor::type_dispatch;
+
+using dpctl::tensor::kernels::accumulators::cumsum_val_contig_impl_fn_ptr_t;
+static cumsum_val_contig_impl_fn_ptr_t
+    mask_positions_contig_i64_dispatch_vector[td_ns::num_types];
+static cumsum_val_contig_impl_fn_ptr_t
+    mask_positions_contig_i32_dispatch_vector[td_ns::num_types];
+
+using dpctl::tensor::kernels::accumulators::cumsum_val_strided_impl_fn_ptr_t;
+static cumsum_val_strided_impl_fn_ptr_t
+    mask_positions_strided_i64_dispatch_vector[td_ns::num_types];
+static cumsum_val_strided_impl_fn_ptr_t
+    mask_positions_strided_i32_dispatch_vector[td_ns::num_types];
+
+void populate_mask_positions_dispatch_vectors(void)
+{
+    using dpctl::tensor::kernels::accumulators::
+        MaskPositionsContigFactoryForInt64;
+    td_ns::DispatchVectorBuilder<cumsum_val_contig_impl_fn_ptr_t,
+                                 MaskPositionsContigFactoryForInt64,
+                                 td_ns::num_types>
+        dvb1;
+    dvb1.populate_dispatch_vector(mask_positions_contig_i64_dispatch_vector);
+
+    using dpctl::tensor::kernels::accumulators::
+        MaskPositionsContigFactoryForInt32;
+    td_ns::DispatchVectorBuilder<cumsum_val_contig_impl_fn_ptr_t,
+                                 MaskPositionsContigFactoryForInt32,
+                                 td_ns::num_types>
+        dvb2;
+    dvb2.populate_dispatch_vector(mask_positions_contig_i32_dispatch_vector);
+
+    using dpctl::tensor::kernels::accumulators::
+        MaskPositionsStridedFactoryForInt64;
+    td_ns::DispatchVectorBuilder<cumsum_val_strided_impl_fn_ptr_t,
+                                 MaskPositionsStridedFactoryForInt64,
+                                 td_ns::num_types>
+        dvb3;
+    dvb3.populate_dispatch_vector(mask_positions_strided_i64_dispatch_vector);
+
+    using dpctl::tensor::kernels::accumulators::
+        MaskPositionsStridedFactoryForInt32;
+    td_ns::DispatchVectorBuilder<cumsum_val_strided_impl_fn_ptr_t,
+                                 MaskPositionsStridedFactoryForInt32,
+                                 td_ns::num_types>
+        dvb4;
+    dvb4.populate_dispatch_vector(mask_positions_strided_i32_dispatch_vector);
+
+    return;
+}
+
+std::size_t py_mask_positions(const dpctl::tensor::usm_ndarray &mask,
+                              const dpctl::tensor::usm_ndarray &cumsum,
+                              sycl::queue &exec_q,
+                              const std::vector<sycl::event> &depends)
+{
+    dpctl::tensor::validation::CheckWritable::throw_if_not_writable(cumsum);
+
+    // cumsum is 1D
+    if (cumsum.get_ndim() != 1) {
+        throw py::value_error("Result array must be one-dimensional.");
+    }
+
+    if (!cumsum.is_c_contiguous()) {
+        throw py::value_error("Expecting `cumsum` array to be C-contiguous.");
+    }
+
+    // cumsum.shape == (mask.size,)
+    auto mask_size = mask.get_size();
+    auto cumsum_size = cumsum.get_shape(0);
+    if (cumsum_size != mask_size) {
+        throw py::value_error("Inconsistent dimensions");
+    }
+
+    if (!dpctl::utils::queues_are_compatible(exec_q, {mask, cumsum})) {
+        // FIXME: use ExecutionPlacementError
+        throw py::value_error(
+            "Execution queue is not compatible with allocation queues");
+    }
+
+    if (mask_size == 0) {
+        return 0;
+    }
+
+    int mask_typenum = mask.get_typenum();
+    int cumsum_typenum = cumsum.get_typenum();
+
+    // mask can be any type
+    const char *mask_data = mask.get_data();
+    char *cumsum_data = cumsum.get_data();
+
+    auto const &array_types = td_ns::usm_ndarray_types();
+
+    int
mask_typeid = array_types.typenum_to_lookup_id(mask_typenum); + int cumsum_typeid = array_types.typenum_to_lookup_id(cumsum_typenum); + + // cumsum must be int32_t/int64_t only + static constexpr int int32_typeid = + static_cast(td_ns::typenum_t::INT32); + static constexpr int int64_typeid = + static_cast(td_ns::typenum_t::INT64); + if (cumsum_typeid != int32_typeid && cumsum_typeid != int64_typeid) { + throw py::value_error( + "Cumulative sum array must have int32 or int64 data-type."); + } + + const bool use_i32 = (cumsum_typeid == int32_typeid); + + std::vector host_task_events; + + if (mask.is_c_contiguous()) { + auto fn = (use_i32) + ? mask_positions_contig_i32_dispatch_vector[mask_typeid] + : mask_positions_contig_i64_dispatch_vector[mask_typeid]; + + std::size_t total_set; + + { + py::gil_scoped_release release; + + total_set = fn(exec_q, mask_size, mask_data, cumsum_data, + host_task_events, depends); + + sycl::event::wait(host_task_events); + } + return total_set; + } + + const py::ssize_t *shape = mask.get_shape_raw(); + auto const &strides_vector = mask.get_strides_vector(); + + using shT = std::vector; + shT compact_shape; + shT compact_strides; + + int mask_nd = mask.get_ndim(); + int nd = mask_nd; + + dpctl::tensor::py_internal::compact_iteration_space( + nd, shape, strides_vector, compact_shape, compact_strides); + + // Strided implementation + auto strided_fn = + (use_i32) ? mask_positions_strided_i32_dispatch_vector[mask_typeid] + : mask_positions_strided_i64_dispatch_vector[mask_typeid]; + + using dpctl::tensor::offset_utils::device_allocate_and_pack; + auto ptr_size_event_tuple = device_allocate_and_pack( + exec_q, host_task_events, compact_shape, compact_strides); + auto shape_strides_owner = std::move(std::get<0>(ptr_size_event_tuple)); + sycl::event copy_shape_ev = std::get<2>(ptr_size_event_tuple); + const py::ssize_t *shape_strides = shape_strides_owner.get(); + + if (2 * static_cast(nd) != std::get<1>(ptr_size_event_tuple)) { + { + py::gil_scoped_release release; + + copy_shape_ev.wait(); + sycl::event::wait(host_task_events); + + // ensure deleter of smart pointer is invoked with GIL released + shape_strides_owner.reset(nullptr); + } + throw std::runtime_error("Unexpected error"); + } + + std::vector dependent_events; + dependent_events.reserve(depends.size() + 1); + dependent_events.insert(dependent_events.end(), copy_shape_ev); + dependent_events.insert(dependent_events.end(), depends.begin(), + depends.end()); + + std::size_t total_set; + + { + py::gil_scoped_release release; + + total_set = strided_fn(exec_q, mask_size, mask_data, nd, shape_strides, + cumsum_data, host_task_events, dependent_events); + + sycl::event::wait(host_task_events); + // ensure deleter of smart pointer is invoked with GIL released + shape_strides_owner.reset(nullptr); + } + + return total_set; +} + +using dpctl::tensor::kernels::accumulators::cumsum_val_strided_impl_fn_ptr_t; +static cumsum_val_strided_impl_fn_ptr_t + cumsum_1d_strided_dispatch_vector[td_ns::num_types]; +using dpctl::tensor::kernels::accumulators::cumsum_val_contig_impl_fn_ptr_t; +static cumsum_val_contig_impl_fn_ptr_t + cumsum_1d_contig_dispatch_vector[td_ns::num_types]; + +void populate_cumsum_1d_dispatch_vectors(void) +{ + using dpctl::tensor::kernels::accumulators::Cumsum1DContigFactory; + td_ns::DispatchVectorBuilder + dvb1; + dvb1.populate_dispatch_vector(cumsum_1d_contig_dispatch_vector); + + using dpctl::tensor::kernels::accumulators::Cumsum1DStridedFactory; + td_ns::DispatchVectorBuilder + dvb2; + 
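+    // Cumsum1DContigFactory/Cumsum1DStridedFactory yield nullptr slots for
+    // non-integral source types, so py_cumsum_1d checks each dispatched
+    // function pointer before use.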
dvb2.populate_dispatch_vector(cumsum_1d_strided_dispatch_vector); + + return; +} + +std::size_t py_cumsum_1d(const dpctl::tensor::usm_ndarray &src, + const dpctl::tensor::usm_ndarray &cumsum, + sycl::queue &exec_q, + std::vector const &depends) +{ + // cumsum is 1D + if (cumsum.get_ndim() != 1) { + throw py::value_error("cumsum array must be one-dimensional."); + } + + if (!cumsum.is_c_contiguous()) { + throw py::value_error("Expecting `cumsum` array to be C-contiguous."); + } + + // cumsum.shape == (src.size,) + auto src_size = src.get_size(); + auto cumsum_size = cumsum.get_shape(0); + if (cumsum_size != src_size) { + throw py::value_error("Inconsistent dimensions"); + } + + if (!dpctl::utils::queues_are_compatible(exec_q, {src, cumsum})) { + // FIXME: use ExecutionPlacementError + throw py::value_error( + "Execution queue is not compatible with allocation queues"); + } + + dpctl::tensor::validation::CheckWritable::throw_if_not_writable(cumsum); + + if (src_size == 0) { + return 0; + } + + int src_typenum = src.get_typenum(); + int cumsum_typenum = cumsum.get_typenum(); + + // src can be any type + const char *src_data = src.get_data(); + char *cumsum_data = cumsum.get_data(); + + auto const &array_types = td_ns::usm_ndarray_types(); + + int src_typeid = array_types.typenum_to_lookup_id(src_typenum); + int cumsum_typeid = array_types.typenum_to_lookup_id(cumsum_typenum); + + // this cumsum must be int64_t only + static constexpr int int64_typeid = + static_cast(td_ns::typenum_t::INT64); + if (cumsum_typeid != int64_typeid) { + throw py::value_error( + "Cumulative sum array must have int64 data-type."); + } + + std::vector host_task_events; + + if (src.is_c_contiguous()) { + auto fn = cumsum_1d_contig_dispatch_vector[src_typeid]; + if (fn == nullptr) { + throw std::runtime_error( + "this cumsum requires integer type, got src_typeid=" + + std::to_string(src_typeid)); + } + std::size_t total = fn(exec_q, src_size, src_data, cumsum_data, + host_task_events, depends); + { + py::gil_scoped_release release; + sycl::event::wait(host_task_events); + } + return total; + } + + const py::ssize_t *shape = src.get_shape_raw(); + auto const &strides_vector = src.get_strides_vector(); + + using shT = std::vector; + shT compact_shape; + shT compact_strides; + + int src_nd = src.get_ndim(); + int nd = src_nd; + + dpctl::tensor::py_internal::compact_iteration_space( + nd, shape, strides_vector, compact_shape, compact_strides); + + // Strided implementation + auto strided_fn = cumsum_1d_strided_dispatch_vector[src_typeid]; + if (strided_fn == nullptr) { + throw std::runtime_error( + "this cumsum requires integer type, got src_typeid=" + + std::to_string(src_typeid)); + } + + using dpctl::tensor::offset_utils::device_allocate_and_pack; + auto ptr_size_event_tuple = device_allocate_and_pack( + exec_q, host_task_events, compact_shape, compact_strides); + auto shape_strides_owner = std::move(std::get<0>(ptr_size_event_tuple)); + sycl::event copy_shape_ev = std::get<2>(ptr_size_event_tuple); + const py::ssize_t *shape_strides = shape_strides_owner.get(); + + if (2 * static_cast(nd) != std::get<1>(ptr_size_event_tuple)) { + { + py::gil_scoped_release release; + + copy_shape_ev.wait(); + sycl::event::wait(host_task_events); + + // ensure USM deleter is called with GIL released + shape_strides_owner.reset(nullptr); + } + throw std::runtime_error("Unexpected error"); + } + + std::vector dependent_events; + dependent_events.reserve(depends.size() + 1); + dependent_events.insert(dependent_events.end(), copy_shape_ev); 
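+    // append the user-provided dependencies after the shape/strides
+    // packing-copy event, so the strided kernel waits on both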
+ dependent_events.insert(dependent_events.end(), depends.begin(), + depends.end()); + + std::size_t total = + strided_fn(exec_q, src_size, src_data, nd, shape_strides, cumsum_data, + host_task_events, dependent_events); + + { + py::gil_scoped_release release; + sycl::event::wait(host_task_events); + + // ensure USM deleter is called with GIL released + shape_strides_owner.reset(nullptr); + } + + return total; +} + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/accumulators.hpp b/dpctl_ext/tensor/libtensor/source/accumulators.hpp new file mode 100644 index 000000000000..42503093789b --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/accumulators.hpp @@ -0,0 +1,62 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. 
+//***************************************************************************** +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_impl extensions +//===----------------------------------------------------------------------===// + +#pragma once +#include +#include + +#include + +#include "dpnp4pybind11.hpp" +#include + +namespace dpctl::tensor::py_internal +{ + +extern void populate_mask_positions_dispatch_vectors(void); + +extern std::size_t + py_mask_positions(const dpctl::tensor::usm_ndarray &mask, + const dpctl::tensor::usm_ndarray &cumsum, + sycl::queue &exec_q, + const std::vector &depends = {}); + +extern void populate_cumsum_1d_dispatch_vectors(void); + +extern std::size_t py_cumsum_1d(const dpctl::tensor::usm_ndarray &src, + const dpctl::tensor::usm_ndarray &cumsum, + sycl::queue &exec_q, + std::vector const &depends = {}); + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/boolean_advanced_indexing.cpp b/dpctl_ext/tensor/libtensor/source/boolean_advanced_indexing.cpp new file mode 100644 index 000000000000..a78cb1750b81 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/boolean_advanced_indexing.cpp @@ -0,0 +1,859 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. 
+//***************************************************************************** +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file defines implementation functions of dpctl.tensor.place and +/// dpctl.tensor.extract, dpctl.tensor.nonzero +//===----------------------------------------------------------------------===// + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "dpnp4pybind11.hpp" +#include +#include + +#include "simplify_iteration_space.hpp" +#include "utils/memory_overlap.hpp" +#include "utils/offset_utils.hpp" +#include "utils/output_validation.hpp" +#include "utils/sycl_alloc_utils.hpp" +#include "utils/type_dispatch.hpp" + +#include "boolean_advanced_indexing.hpp" +#include "kernels/boolean_advanced_indexing.hpp" + +namespace dpctl::tensor::py_internal +{ + +// Masked extraction + +namespace td_ns = dpctl::tensor::type_dispatch; + +using dpctl::tensor::kernels::indexing:: + masked_extract_all_slices_strided_impl_fn_ptr_t; + +static masked_extract_all_slices_strided_impl_fn_ptr_t + masked_extract_all_slices_strided_i32_impl_dispatch_vector + [td_ns::num_types]; +static masked_extract_all_slices_strided_impl_fn_ptr_t + masked_extract_all_slices_strided_i64_impl_dispatch_vector + [td_ns::num_types]; + +using dpctl::tensor::kernels::indexing:: + masked_extract_all_slices_contig_impl_fn_ptr_t; + +static masked_extract_all_slices_contig_impl_fn_ptr_t + masked_extract_all_slices_contig_i32_impl_dispatch_vector[td_ns::num_types]; +static masked_extract_all_slices_contig_impl_fn_ptr_t + masked_extract_all_slices_contig_i64_impl_dispatch_vector[td_ns::num_types]; + +using dpctl::tensor::kernels::indexing:: + masked_extract_some_slices_strided_impl_fn_ptr_t; + +static masked_extract_some_slices_strided_impl_fn_ptr_t + masked_extract_some_slices_strided_i32_impl_dispatch_vector + [td_ns::num_types]; +static masked_extract_some_slices_strided_impl_fn_ptr_t + masked_extract_some_slices_strided_i64_impl_dispatch_vector + [td_ns::num_types]; + +void populate_masked_extract_dispatch_vectors(void) +{ + using dpctl::tensor::kernels::indexing:: + MaskExtractAllSlicesStridedFactoryForInt32; + td_ns::DispatchVectorBuilder< + masked_extract_all_slices_strided_impl_fn_ptr_t, + MaskExtractAllSlicesStridedFactoryForInt32, td_ns::num_types> + dvb1; + dvb1.populate_dispatch_vector( + masked_extract_all_slices_strided_i32_impl_dispatch_vector); + + using dpctl::tensor::kernels::indexing:: + MaskExtractAllSlicesStridedFactoryForInt64; + td_ns::DispatchVectorBuilder< + masked_extract_all_slices_strided_impl_fn_ptr_t, + MaskExtractAllSlicesStridedFactoryForInt64, td_ns::num_types> + dvb2; + dvb2.populate_dispatch_vector( + masked_extract_all_slices_strided_i64_impl_dispatch_vector); + + using dpctl::tensor::kernels::indexing:: + MaskExtractSomeSlicesStridedFactoryForInt32; + td_ns::DispatchVectorBuilder< + masked_extract_some_slices_strided_impl_fn_ptr_t, + MaskExtractSomeSlicesStridedFactoryForInt32, td_ns::num_types> + dvb3; + dvb3.populate_dispatch_vector( + masked_extract_some_slices_strided_i32_impl_dispatch_vector); + + using dpctl::tensor::kernels::indexing:: + MaskExtractSomeSlicesStridedFactoryForInt64; + td_ns::DispatchVectorBuilder< + masked_extract_some_slices_strided_impl_fn_ptr_t, + MaskExtractSomeSlicesStridedFactoryForInt64, td_ns::num_types> + dvb4; + dvb4.populate_dispatch_vector( + masked_extract_some_slices_strided_i64_impl_dispatch_vector); + + 
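+    // the contiguous variants below are selected by py_extract when all axes
+    // are masked and src is C-contiguous, skipping shape/strides packing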
+    using dpctl::tensor::kernels::indexing::
+        MaskExtractAllSlicesContigFactoryForInt32;
+    td_ns::DispatchVectorBuilder<
+        masked_extract_all_slices_contig_impl_fn_ptr_t,
+        MaskExtractAllSlicesContigFactoryForInt32, td_ns::num_types>
+        dvb5;
+    dvb5.populate_dispatch_vector(
+        masked_extract_all_slices_contig_i32_impl_dispatch_vector);
+
+    using dpctl::tensor::kernels::indexing::
+        MaskExtractAllSlicesContigFactoryForInt64;
+    td_ns::DispatchVectorBuilder<
+        masked_extract_all_slices_contig_impl_fn_ptr_t,
+        MaskExtractAllSlicesContigFactoryForInt64, td_ns::num_types>
+        dvb6;
+    dvb6.populate_dispatch_vector(
+        masked_extract_all_slices_contig_i64_impl_dispatch_vector);
+}
+
+std::pair<sycl::event, sycl::event>
+    py_extract(const dpctl::tensor::usm_ndarray &src,
+               const dpctl::tensor::usm_ndarray &cumsum,
+               int axis_start, // axis_start <= mask_i < axis_end
+               int axis_end,
+               const dpctl::tensor::usm_ndarray &dst,
+               sycl::queue &exec_q,
+               const std::vector<sycl::event> &depends)
+{
+    dpctl::tensor::validation::CheckWritable::throw_if_not_writable(dst);
+
+    int src_nd = src.get_ndim();
+    if ((axis_start < 0 || axis_end > src_nd || axis_start >= axis_end)) {
+        throw py::value_error("Specified axes_start and axes_end are invalid.");
+    }
+    int mask_span_sz = axis_end - axis_start;
+
+    int dst_nd = dst.get_ndim();
+    if (src_nd != dst_nd + (mask_span_sz - 1)) {
+        throw py::value_error("Number of dimensions of source and destination "
+                              "arrays is not consistent");
+    }
+
+    if (!cumsum.is_c_contiguous() || cumsum.get_ndim() != 1) {
+        throw py::value_error("cumsum array must be a C-contiguous vector");
+    }
+
+    if (!dpctl::utils::queues_are_compatible(exec_q, {src, cumsum, dst})) {
+        throw py::value_error(
+            "Execution queue is not compatible with allocation queues");
+    }
+
+    py::ssize_t cumsum_sz = cumsum.get_size();
+
+    const py::ssize_t *src_shape = src.get_shape_raw();
+    const py::ssize_t *dst_shape = dst.get_shape_raw();
+    bool same_ortho_dims(true);
+    std::size_t ortho_nelems(1); // number of orthogonal iterations
+
+    for (auto i = 0; i < axis_start; ++i) {
+        auto src_sh_i = src_shape[i];
+        ortho_nelems *= src_sh_i;
+        same_ortho_dims = same_ortho_dims && (src_sh_i == dst_shape[i]);
+    }
+    for (auto i = axis_end; i < src_nd; ++i) {
+        auto src_sh_i = src_shape[i];
+        ortho_nelems *= src_sh_i;
+        same_ortho_dims =
+            same_ortho_dims && (src_sh_i == dst_shape[i - (mask_span_sz - 1)]);
+    }
+
+    std::size_t masked_src_nelems(1);
+    std::size_t masked_dst_nelems(dst_shape[axis_start]);
+    for (auto i = axis_start; i < axis_end; ++i) {
+        masked_src_nelems *= src_shape[i];
+    }
+
+    // masked_dst_nelems is number of set elements in the mask, or last element
+    // in cumsum
+    if (!same_ortho_dims ||
+        (masked_src_nelems != static_cast<std::size_t>(cumsum_sz)))
+    {
+        throw py::value_error("Inconsistent array dimensions");
+    }
+
+    dpctl::tensor::validation::AmpleMemory::throw_if_not_ample(
+        dst, ortho_nelems * masked_dst_nelems);
+
+    auto const &overlap = dpctl::tensor::overlap::MemoryOverlap();
+    // check that dst intersects neither src nor cumsum
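+    // (an overlap would let the masked writes race with reads of src/cumsum)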
+ if (overlap(dst, cumsum) || overlap(dst, src)) { + throw py::value_error("Destination array overlaps with inputs"); + } + + int src_typenum = src.get_typenum(); + int dst_typenum = dst.get_typenum(); + int cumsum_typenum = cumsum.get_typenum(); + + auto const &array_types = td_ns::usm_ndarray_types(); + int src_typeid = array_types.typenum_to_lookup_id(src_typenum); + int dst_typeid = array_types.typenum_to_lookup_id(dst_typenum); + int cumsum_typeid = array_types.typenum_to_lookup_id(cumsum_typenum); + + static constexpr int int32_typeid = + static_cast(td_ns::typenum_t::INT32); + static constexpr int int64_typeid = + static_cast(td_ns::typenum_t::INT64); + if (cumsum_typeid != int32_typeid && cumsum_typeid != int64_typeid) { + throw py::value_error("Unexpected data type of cumsum array, expecting " + "'int32' or 'int64'"); + } + + const bool use_i32 = (cumsum_typeid == int32_typeid); + + if (src_typeid != dst_typeid) { + throw py::value_error( + "Destination array must have the same elemental data types"); + } + + char *src_data_p = src.get_data(); + char *dst_data_p = dst.get_data(); + char *cumsum_data_p = cumsum.get_data(); + + auto src_shape_vec = src.get_shape_vector(); + auto src_strides_vec = src.get_strides_vector(); + + auto dst_shape_vec = dst.get_shape_vector(); + auto dst_strides_vec = dst.get_strides_vector(); + + sycl::event extract_ev; + std::vector host_task_events{}; + if (axis_start == 0 && axis_end == src_nd) { + assert(dst_shape_vec.size() == 1); + assert(dst_strides_vec.size() == 1); + + if (src.is_c_contiguous()) { + auto fn = + (use_i32) + ? masked_extract_all_slices_contig_i32_impl_dispatch_vector + [src_typeid] + : masked_extract_all_slices_contig_i64_impl_dispatch_vector + [src_typeid]; + + extract_ev = + fn(exec_q, cumsum_sz, src_data_p, cumsum_data_p, dst_data_p, + dst_shape_vec[0], dst_strides_vec[0], depends); + + // + host_task_events.push_back(extract_ev); + } + else { + // empty orthogonal directions + auto fn = + (use_i32) + ? masked_extract_all_slices_strided_i32_impl_dispatch_vector + [src_typeid] + : masked_extract_all_slices_strided_i64_impl_dispatch_vector + [src_typeid]; + + using dpctl::tensor::offset_utils::device_allocate_and_pack; + auto ptr_size_event_tuple1 = device_allocate_and_pack( + exec_q, host_task_events, src_shape_vec, src_strides_vec); + auto packed_src_shape_strides_owner = + std::move(std::get<0>(ptr_size_event_tuple1)); + sycl::event copy_src_shape_strides_ev = + std::get<2>(ptr_size_event_tuple1); + const py::ssize_t *packed_src_shape_strides = + packed_src_shape_strides_owner.get(); + + std::vector all_deps; + all_deps.reserve(depends.size() + 1); + all_deps.insert(all_deps.end(), depends.begin(), depends.end()); + all_deps.push_back(copy_src_shape_strides_ev); + + assert(all_deps.size() == depends.size() + 1); + + extract_ev = fn(exec_q, cumsum_sz, src_data_p, cumsum_data_p, + dst_data_p, src_nd, packed_src_shape_strides, + dst_shape_vec[0], dst_strides_vec[0], all_deps); + + sycl::event cleanup_tmp_allocations_ev = + dpctl::tensor::alloc_utils::async_smart_free( + exec_q, {extract_ev}, packed_src_shape_strides_owner); + host_task_events.push_back(cleanup_tmp_allocations_ev); + } + } + else { + // non-empty orthogonal directions + auto fn = + (use_i32) + ? 
masked_extract_some_slices_strided_i32_impl_dispatch_vector + [src_typeid] + : masked_extract_some_slices_strided_i64_impl_dispatch_vector + [src_typeid]; + + int masked_src_nd = mask_span_sz; + int ortho_nd = src_nd - masked_src_nd; + + using shT = std::vector; + + shT ortho_src_shape; + shT masked_src_shape; + shT ortho_src_strides; + shT masked_src_strides; + dpctl::tensor::py_internal::split_iteration_space( + src_shape_vec, src_strides_vec, axis_start, axis_end, + ortho_src_shape, + masked_src_shape, // 4 vectors modified + ortho_src_strides, masked_src_strides); + + shT ortho_dst_shape; + shT masked_dst_shape; + shT ortho_dst_strides; + shT masked_dst_strides; + dpctl::tensor::py_internal::split_iteration_space( + dst_shape_vec, dst_strides_vec, axis_start, axis_start + 1, + ortho_dst_shape, + masked_dst_shape, // 4 vectors modified + ortho_dst_strides, masked_dst_strides); + + assert(ortho_src_shape.size() == static_cast(ortho_nd)); + assert(ortho_dst_shape.size() == static_cast(ortho_nd)); + assert(std::equal(ortho_src_shape.begin(), ortho_src_shape.end(), + ortho_dst_shape.begin())); + + std::vector simplified_ortho_shape; + std::vector simplified_ortho_src_strides; + std::vector simplified_ortho_dst_strides; + + const py::ssize_t *_shape = ortho_src_shape.data(); + + py::ssize_t ortho_src_offset(0); + py::ssize_t ortho_dst_offset(0); + + dpctl::tensor::py_internal::simplify_iteration_space( + ortho_nd, _shape, ortho_src_strides, ortho_dst_strides, + // output + simplified_ortho_shape, simplified_ortho_src_strides, + simplified_ortho_dst_strides, ortho_src_offset, ortho_dst_offset); + + assert(masked_dst_shape.size() == 1); + assert(masked_dst_strides.size() == 1); + + using dpctl::tensor::offset_utils::device_allocate_and_pack; + auto ptr_size_event_tuple1 = device_allocate_and_pack( + exec_q, host_task_events, simplified_ortho_shape, + simplified_ortho_src_strides, simplified_ortho_dst_strides, + masked_src_shape, masked_src_strides); + auto packed_shapes_strides_owner = + std::move(std::get<0>(ptr_size_event_tuple1)); + sycl::event copy_shapes_strides_ev = std::get<2>(ptr_size_event_tuple1); + const py::ssize_t *packed_shapes_strides = + packed_shapes_strides_owner.get(); + + const py::ssize_t *packed_ortho_src_dst_shape_strides = + packed_shapes_strides; + const py::ssize_t *packed_masked_src_shape_strides = + packed_shapes_strides + (3 * ortho_nd); + + std::vector all_deps; + all_deps.reserve(depends.size() + 1); + all_deps.insert(all_deps.end(), depends.begin(), depends.end()); + all_deps.push_back(copy_shapes_strides_ev); + + assert(all_deps.size() == depends.size() + 1); + + // OrthogIndexerT orthog_src_dst_indexer_, MaskedIndexerT + // masked_src_indexer_, MaskedIndexerT masked_dst_indexer_ + extract_ev = fn(exec_q, ortho_nelems, masked_src_nelems, src_data_p, + cumsum_data_p, dst_data_p, + // data to build orthog_src_dst_indexer + ortho_nd, packed_ortho_src_dst_shape_strides, + ortho_src_offset, ortho_dst_offset, + // data to build masked_src_indexer + masked_src_nd, packed_masked_src_shape_strides, + // data to build masked_dst_indexer, + masked_dst_shape[0], masked_dst_strides[0], all_deps); + + sycl::event cleanup_tmp_allocations_ev = + dpctl::tensor::alloc_utils::async_smart_free( + exec_q, {extract_ev}, packed_shapes_strides_owner); + host_task_events.push_back(cleanup_tmp_allocations_ev); + } + + sycl::event py_obj_management_host_task_ev = dpctl::utils::keep_args_alive( + exec_q, {src, cumsum, dst}, host_task_events); + + return 
std::make_pair(py_obj_management_host_task_ev, extract_ev); +} + +// Masked placement + +using dpctl::tensor::kernels::indexing:: + masked_place_all_slices_strided_impl_fn_ptr_t; + +static masked_place_all_slices_strided_impl_fn_ptr_t + masked_place_all_slices_strided_i32_impl_dispatch_vector[td_ns::num_types]; +static masked_place_all_slices_strided_impl_fn_ptr_t + masked_place_all_slices_strided_i64_impl_dispatch_vector[td_ns::num_types]; + +using dpctl::tensor::kernels::indexing:: + masked_place_some_slices_strided_impl_fn_ptr_t; + +static masked_place_some_slices_strided_impl_fn_ptr_t + masked_place_some_slices_strided_i32_impl_dispatch_vector[td_ns::num_types]; +static masked_place_some_slices_strided_impl_fn_ptr_t + masked_place_some_slices_strided_i64_impl_dispatch_vector[td_ns::num_types]; + +void populate_masked_place_dispatch_vectors(void) +{ + using dpctl::tensor::kernels::indexing:: + MaskPlaceAllSlicesStridedFactoryForInt32; + td_ns::DispatchVectorBuilder + dvb1; + dvb1.populate_dispatch_vector( + masked_place_all_slices_strided_i32_impl_dispatch_vector); + + using dpctl::tensor::kernels::indexing:: + MaskPlaceAllSlicesStridedFactoryForInt64; + td_ns::DispatchVectorBuilder + dvb2; + dvb2.populate_dispatch_vector( + masked_place_all_slices_strided_i64_impl_dispatch_vector); + + using dpctl::tensor::kernels::indexing:: + MaskPlaceSomeSlicesStridedFactoryForInt32; + td_ns::DispatchVectorBuilder + dvb3; + dvb3.populate_dispatch_vector( + masked_place_some_slices_strided_i32_impl_dispatch_vector); + + using dpctl::tensor::kernels::indexing:: + MaskPlaceSomeSlicesStridedFactoryForInt64; + td_ns::DispatchVectorBuilder + dvb4; + dvb4.populate_dispatch_vector( + masked_place_some_slices_strided_i64_impl_dispatch_vector); +} + +/* + * @brief Copy dst[i, ortho_id] = rhs[cumsum[i] - 1, ortho_id] if cumsum[i] == + * ((i > 0) ? 
cumsum[i-1] + 1 : 1)
+ */
+std::pair<sycl::event, sycl::event>
+    py_place(const dpctl::tensor::usm_ndarray &dst,
+             const dpctl::tensor::usm_ndarray &cumsum,
+             int axis_start, // axis_start <= mask_i < axis_end
+             int axis_end,
+             const dpctl::tensor::usm_ndarray &rhs,
+             sycl::queue &exec_q,
+             const std::vector<sycl::event> &depends)
+{
+    dpctl::tensor::validation::CheckWritable::throw_if_not_writable(dst);
+
+    int dst_nd = dst.get_ndim();
+    if ((axis_start < 0 || axis_end > dst_nd || axis_start >= axis_end)) {
+        throw py::value_error(
+            "Specified axis_start and axis_end are invalid.");
+    }
+    int mask_span_sz = axis_end - axis_start;
+
+    int rhs_nd = rhs.get_ndim();
+    if (dst_nd != rhs_nd + (mask_span_sz - 1)) {
+        throw py::value_error("Number of dimensions of source and destination "
+                              "arrays is not consistent");
+    }
+
+    if (!cumsum.is_c_contiguous() || cumsum.get_ndim() != 1) {
+        throw py::value_error("cumsum array must be a C-contiguous vector");
+    }
+
+    if (!dpctl::utils::queues_are_compatible(exec_q, {dst, cumsum, rhs})) {
+        throw py::value_error(
+            "Execution queue is not compatible with allocation queues");
+    }
+
+    py::ssize_t cumsum_sz = cumsum.get_size();
+
+    const py::ssize_t *dst_shape = dst.get_shape_raw();
+    const py::ssize_t *rhs_shape = rhs.get_shape_raw();
+    bool same_ortho_dims(true);
+    std::size_t ortho_nelems(1); // number of orthogonal iterations
+
+    for (auto i = 0; i < axis_start; ++i) {
+        auto dst_sh_i = dst_shape[i];
+        ortho_nelems *= dst_sh_i;
+        same_ortho_dims = same_ortho_dims && (dst_sh_i == rhs_shape[i]);
+    }
+    for (auto i = axis_end; i < dst_nd; ++i) {
+        auto dst_sh_i = dst_shape[i];
+        ortho_nelems *= dst_sh_i;
+        same_ortho_dims =
+            same_ortho_dims && (dst_sh_i == rhs_shape[i - (mask_span_sz - 1)]);
+    }
+
+    std::size_t masked_dst_nelems(1);
+    for (auto i = axis_start; i < axis_end; ++i) {
+        masked_dst_nelems *= dst_shape[i];
+    }
+
+    if (!same_ortho_dims ||
+        (masked_dst_nelems != static_cast<std::size_t>(cumsum_sz)))
+    {
+        throw py::value_error("Inconsistent array dimensions");
+    }
+
+    dpctl::tensor::validation::AmpleMemory::throw_if_not_ample(
+        dst, ortho_nelems * masked_dst_nelems);
+
+    auto const &overlap = dpctl::tensor::overlap::MemoryOverlap();
+    // check that dst does not intersect with rhs, nor with cumsum.
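+    // Illustration of the rule in the @brief above (1D, all axes masked):
+    // for mask = [0, 1, 1, 0] the cumulative sum is cumsum = [0, 1, 2, 2],
+    // so dst[1] = rhs[0] and dst[2] = rhs[1], while dst[0] and dst[3] are
+    // left unchanged.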
+    if (overlap(dst, rhs) || overlap(dst, cumsum)) {
+        throw py::value_error("Destination array overlaps with inputs");
+    }
+
+    int dst_typenum = dst.get_typenum();
+    int rhs_typenum = rhs.get_typenum();
+    int cumsum_typenum = cumsum.get_typenum();
+
+    auto const &array_types = td_ns::usm_ndarray_types();
+    int dst_typeid = array_types.typenum_to_lookup_id(dst_typenum);
+    int rhs_typeid = array_types.typenum_to_lookup_id(rhs_typenum);
+    int cumsum_typeid = array_types.typenum_to_lookup_id(cumsum_typenum);
+
+    static constexpr int int32_typeid =
+        static_cast<int>(td_ns::typenum_t::INT32);
+    static constexpr int int64_typeid =
+        static_cast<int>(td_ns::typenum_t::INT64);
+    if (cumsum_typeid != int32_typeid && cumsum_typeid != int64_typeid) {
+        throw py::value_error("Unexpected data type of cumsum array, expecting "
+                              "'int32' or 'int64'");
+    }
+
+    const bool use_i32 = (cumsum_typeid == int32_typeid);
+
+    if (dst_typeid != rhs_typeid) {
+        throw py::value_error(
+            "Destination and rhs arrays must have the same elemental "
+            "data type");
+    }
+
+    char *dst_data_p = dst.get_data();
+    char *rhs_data_p = rhs.get_data();
+    char *cumsum_data_p = cumsum.get_data();
+
+    auto dst_shape_vec = dst.get_shape_vector();
+    auto dst_strides_vec = dst.get_strides_vector();
+
+    auto rhs_shape_vec = rhs.get_shape_vector();
+    auto rhs_strides_vec = rhs.get_strides_vector();
+
+    sycl::event place_ev;
+    std::vector<sycl::event> host_task_events{};
+    if (axis_start == 0 && axis_end == dst_nd) {
+        // empty orthogonal directions
+        auto fn = (use_i32)
+                      ? masked_place_all_slices_strided_i32_impl_dispatch_vector
+                            [dst_typeid]
+                      : masked_place_all_slices_strided_i64_impl_dispatch_vector
+                            [dst_typeid];
+
+        assert(rhs_shape_vec.size() == 1);
+        assert(rhs_strides_vec.size() == 1);
+
+        using dpctl::tensor::offset_utils::device_allocate_and_pack;
+        auto ptr_size_event_tuple1 = device_allocate_and_pack<py::ssize_t>(
+            exec_q, host_task_events, dst_shape_vec, dst_strides_vec);
+        auto packed_dst_shape_strides_owner =
+            std::move(std::get<0>(ptr_size_event_tuple1));
+        sycl::event copy_dst_shape_strides_ev =
+            std::get<2>(ptr_size_event_tuple1);
+        const py::ssize_t *packed_dst_shape_strides =
+            packed_dst_shape_strides_owner.get();
+
+        std::vector<sycl::event> all_deps;
+        all_deps.reserve(depends.size() + 1);
+        all_deps.insert(all_deps.end(), depends.begin(), depends.end());
+        all_deps.push_back(copy_dst_shape_strides_ev);
+
+        assert(all_deps.size() == depends.size() + 1);
+
+        place_ev = fn(exec_q, cumsum_sz, dst_data_p, cumsum_data_p, rhs_data_p,
+                      dst_nd, packed_dst_shape_strides, rhs_shape_vec[0],
+                      rhs_strides_vec[0], all_deps);
+
+        sycl::event cleanup_tmp_allocations_ev =
+            dpctl::tensor::alloc_utils::async_smart_free(
+                exec_q, {place_ev}, packed_dst_shape_strides_owner);
+        host_task_events.push_back(cleanup_tmp_allocations_ev);
+    }
+    else {
+        // non-empty orthogonal directions
+        auto fn =
+            (use_i32)
+                ?
masked_place_some_slices_strided_i32_impl_dispatch_vector + [dst_typeid] + : masked_place_some_slices_strided_i64_impl_dispatch_vector + [dst_typeid]; + + int masked_dst_nd = mask_span_sz; + int ortho_nd = dst_nd - masked_dst_nd; + + using shT = std::vector; + + shT ortho_dst_shape; + shT masked_dst_shape; + shT ortho_dst_strides; + shT masked_dst_strides; + dpctl::tensor::py_internal::split_iteration_space( + dst_shape_vec, dst_strides_vec, axis_start, axis_end, + ortho_dst_shape, + masked_dst_shape, // 4 vectors modified + ortho_dst_strides, masked_dst_strides); + + shT ortho_rhs_shape; + shT masked_rhs_shape; + shT ortho_rhs_strides; + shT masked_rhs_strides; + dpctl::tensor::py_internal::split_iteration_space( + rhs_shape_vec, rhs_strides_vec, axis_start, axis_start + 1, + ortho_rhs_shape, + masked_rhs_shape, // 4 vectors modified + ortho_rhs_strides, masked_rhs_strides); + + assert(ortho_dst_shape.size() == static_cast(ortho_nd)); + assert(ortho_rhs_shape.size() == static_cast(ortho_nd)); + assert(std::equal(ortho_dst_shape.begin(), ortho_dst_shape.end(), + ortho_rhs_shape.begin())); + + std::vector simplified_ortho_shape; + std::vector simplified_ortho_dst_strides; + std::vector simplified_ortho_rhs_strides; + + const py::ssize_t *_shape = ortho_dst_shape.data(); + + py::ssize_t ortho_dst_offset(0); + py::ssize_t ortho_rhs_offset(0); + + dpctl::tensor::py_internal::simplify_iteration_space( + ortho_nd, _shape, ortho_dst_strides, ortho_rhs_strides, + simplified_ortho_shape, simplified_ortho_dst_strides, + simplified_ortho_rhs_strides, ortho_dst_offset, ortho_rhs_offset); + + assert(masked_rhs_shape.size() == 1); + assert(masked_rhs_strides.size() == 1); + + using dpctl::tensor::offset_utils::device_allocate_and_pack; + auto ptr_size_event_tuple1 = device_allocate_and_pack( + exec_q, host_task_events, simplified_ortho_shape, + simplified_ortho_dst_strides, simplified_ortho_rhs_strides, + masked_dst_shape, masked_dst_strides); + auto packed_shapes_strides_owner = + std::move(std::get<0>(ptr_size_event_tuple1)); + sycl::event copy_shapes_strides_ev = std::get<2>(ptr_size_event_tuple1); + const py::ssize_t *packed_shapes_strides = + packed_shapes_strides_owner.get(); + + const py::ssize_t *packed_ortho_dst_rhs_shape_strides = + packed_shapes_strides; + const py::ssize_t *packed_masked_dst_shape_strides = + packed_shapes_strides + (3 * ortho_nd); + + std::vector all_deps; + all_deps.reserve(depends.size() + 1); + all_deps.insert(all_deps.end(), depends.begin(), depends.end()); + all_deps.push_back(copy_shapes_strides_ev); + + assert(all_deps.size() == depends.size() + 1); + + place_ev = fn(exec_q, ortho_nelems, masked_dst_nelems, dst_data_p, + cumsum_data_p, rhs_data_p, + // data to build orthog_dst_rhs_indexer + ortho_nd, packed_ortho_dst_rhs_shape_strides, + ortho_dst_offset, ortho_rhs_offset, + // data to build masked_dst_indexer + masked_dst_nd, packed_masked_dst_shape_strides, + // data to build masked_dst_indexer, + masked_rhs_shape[0], masked_rhs_strides[0], all_deps); + + sycl::event cleanup_tmp_allocations_ev = + dpctl::tensor::alloc_utils::async_smart_free( + exec_q, {place_ev}, packed_shapes_strides_owner); + host_task_events.push_back(cleanup_tmp_allocations_ev); + } + + sycl::event py_obj_management_host_task_ev = dpctl::utils::keep_args_alive( + exec_q, {dst, cumsum, rhs}, host_task_events); + + return std::make_pair(py_obj_management_host_task_ev, place_ev); +} + +// Non-zero + +std::pair + py_nonzero(const dpctl::tensor::usm_ndarray + &cumsum, // int32/int64 input array, 
1D, C-contiguous
+               const dpctl::tensor::usm_ndarray
+                   &indexes, // int32/int64 2D output array, C-contiguous
+               const std::vector<py::ssize_t>
+                   &mask_shape, // shape of array from which cumsum was computed
+               sycl::queue &exec_q,
+               const std::vector<sycl::event> &depends)
+{
+    if (!dpctl::utils::queues_are_compatible(exec_q, {cumsum, indexes})) {
+        throw py::value_error(
+            "Execution queue is not compatible with allocation queues");
+    }
+
+    dpctl::tensor::validation::CheckWritable::throw_if_not_writable(indexes);
+
+    int cumsum_nd = cumsum.get_ndim();
+    if (cumsum_nd != 1 || !cumsum.is_c_contiguous()) {
+        throw py::value_error("Cumsum array must be a C-contiguous vector");
+    }
+
+    int indexes_nd = indexes.get_ndim();
+    if (indexes_nd != 2 || !indexes.is_c_contiguous()) {
+        throw py::value_error("Index array must be a C-contiguous matrix");
+    }
+
+    std::size_t _ndim = mask_shape.size();
+    if (_ndim > std::numeric_limits<int>::max()) {
+        throw py::value_error("Shape is too large");
+    }
+    int ndim = static_cast<int>(_ndim);
+
+    const py::ssize_t *indexes_shape = indexes.get_shape_raw();
+
+    if (ndim != indexes_shape[0]) {
+        throw py::value_error(
+            "Length of shape must equal width of index matrix");
+    }
+
+    auto cumsum_sz = cumsum.get_size();
+    py::ssize_t shape_nelems =
+        std::accumulate(mask_shape.begin(), mask_shape.end(), py::ssize_t(1),
+                        std::multiplies<py::ssize_t>());
+
+    if (cumsum_sz != shape_nelems) {
+        throw py::value_error("Shape and cumsum size are not consistent");
+    }
+
+    py::ssize_t nz_elems = indexes_shape[1];
+
+    int indexes_typenum = indexes.get_typenum();
+    auto const &array_types = td_ns::usm_ndarray_types();
+    int indexes_typeid = array_types.typenum_to_lookup_id(indexes_typenum);
+
+    int cumsum_typenum = cumsum.get_typenum();
+    int cumsum_typeid = array_types.typenum_to_lookup_id(cumsum_typenum);
+
+    constexpr int int32_typeid = static_cast<int>(td_ns::typenum_t::INT32);
+    constexpr int int64_typeid = static_cast<int>(td_ns::typenum_t::INT64);
+
+    // cumsum must be int32_t or int64_t only
+    if ((cumsum_typeid != int32_typeid && cumsum_typeid != int64_typeid) ||
+        (indexes_typeid != int32_typeid && indexes_typeid != int64_typeid))
+    {
+        throw py::value_error("Cumulative sum array and index array must have "
+                              "int32 or int64 data-type");
+    }
+
+    if (cumsum_sz == 0) {
+        return std::make_pair(sycl::event(), sycl::event());
+    }
+
+    auto const &overlap = dpctl::tensor::overlap::MemoryOverlap();
+    if (overlap(cumsum, indexes)) {
+        throw py::value_error("Arrays are expected to have no memory overlap");
+    }
+
+    dpctl::tensor::validation::AmpleMemory::throw_if_not_ample(
+        indexes, nz_elems * _ndim);
+
+    std::vector<sycl::event> host_task_events;
+    host_task_events.reserve(2);
+
+    using dpctl::tensor::offset_utils::device_allocate_and_pack;
+    auto mask_shape_copying_tuple = device_allocate_and_pack<py::ssize_t>(
+        exec_q, host_task_events, mask_shape);
+    auto src_shape_device_owner =
+        std::move(std::get<0>(mask_shape_copying_tuple));
+    sycl::event copy_ev = std::get<2>(mask_shape_copying_tuple);
+    const py::ssize_t *src_shape_device_ptr = src_shape_device_owner.get();
+
+    std::vector<sycl::event> all_deps;
+    all_deps.reserve(depends.size() + 1);
+
+    all_deps.insert(all_deps.end(), depends.begin(), depends.end());
+    all_deps.push_back(copy_ev);
+
+    using dpctl::tensor::kernels::indexing::non_zero_indexes_fn_ptr_t;
+    using dpctl::tensor::kernels::indexing::non_zero_indexes_impl;
+
+    int fn_index = ((cumsum_typeid == int64_typeid) ? 1 : 0) +
+                   ((indexes_typeid == int64_typeid) ?
2 : 0); + std::array fn_impls = { + non_zero_indexes_impl, + non_zero_indexes_impl, + non_zero_indexes_impl, + non_zero_indexes_impl}; + auto fn = fn_impls[fn_index]; + + sycl::event non_zero_indexes_ev = + fn(exec_q, cumsum_sz, nz_elems, ndim, cumsum.get_data(), + indexes.get_data(), src_shape_device_ptr, all_deps); + + sycl::event temporaries_cleanup_ev = + dpctl::tensor::alloc_utils::async_smart_free( + exec_q, {non_zero_indexes_ev}, src_shape_device_owner); + host_task_events.push_back(temporaries_cleanup_ev); + + sycl::event py_obj_management_host_task_ev = dpctl::utils::keep_args_alive( + exec_q, {cumsum, indexes}, host_task_events); + + return std::make_pair(py_obj_management_host_task_ev, non_zero_indexes_ev); +} + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/boolean_advanced_indexing.hpp b/dpctl_ext/tensor/libtensor/source/boolean_advanced_indexing.hpp new file mode 100644 index 000000000000..71eafc77b00c --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/boolean_advanced_indexing.hpp @@ -0,0 +1,81 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. 
+//***************************************************************************** +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_impl extensions +//===----------------------------------------------------------------------===// + +#pragma once +#include +#include + +#include + +#include "dpnp4pybind11.hpp" +#include + +namespace py = pybind11; + +namespace dpctl::tensor::py_internal +{ + +extern std::pair + py_extract(const dpctl::tensor::usm_ndarray &src, + const dpctl::tensor::usm_ndarray &cumsum, + int axis_start, // axis_start <= mask_i < axis_end + int axis_end, + const dpctl::tensor::usm_ndarray &dst, + sycl::queue &exec_q, + const std::vector &depends = {}); + +extern void populate_masked_extract_dispatch_vectors(void); + +extern std::pair + py_place(const dpctl::tensor::usm_ndarray &dst, + const dpctl::tensor::usm_ndarray &cumsum, + int axis_start, // axis_start <= mask_i < axis_end + int axis_end, + const dpctl::tensor::usm_ndarray &rhs, + sycl::queue &exec_q, + const std::vector &depends = {}); + +extern void populate_masked_place_dispatch_vectors(void); + +extern std::pair + py_nonzero(const dpctl::tensor::usm_ndarray + &cumsum, // int32 input array, 1D, C-contiguous + const dpctl::tensor::usm_ndarray + &indexes, // int32 2D output array, C-contiguous + const std::vector + &mask_shape, // shape of array from which cumsum was computed + sycl::queue &exec_q, + const std::vector &depends = {}); + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/eye_ctor.cpp b/dpctl_ext/tensor/libtensor/source/eye_ctor.cpp new file mode 100644 index 000000000000..025a7d58d06e --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/eye_ctor.cpp @@ -0,0 +1,142 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. 
+//***************************************************************************** +// +//===--------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_impl extensions +//===--------------------------------------------------------------------===// + +#include +#include +#include +#include + +#include + +#include "dpnp4pybind11.hpp" +#include + +#include "eye_ctor.hpp" +#include "kernels/constructors.hpp" +#include "utils/output_validation.hpp" +#include "utils/type_dispatch.hpp" + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +namespace dpctl::tensor::py_internal +{ + +using dpctl::utils::keep_args_alive; + +using dpctl::tensor::kernels::constructors::eye_fn_ptr_t; +static eye_fn_ptr_t eye_dispatch_vector[td_ns::num_types]; + +std::pair + usm_ndarray_eye(py::ssize_t k, + const dpctl::tensor::usm_ndarray &dst, + sycl::queue &exec_q, + const std::vector &depends) +{ + // dst must be 2D + + if (dst.get_ndim() != 2) { + throw py::value_error( + "usm_ndarray_eye: Expecting 2D array to populate"); + } + + if (!dpctl::utils::queues_are_compatible(exec_q, {dst})) { + throw py::value_error("Execution queue is not compatible with the " + "allocation queue"); + } + + dpctl::tensor::validation::CheckWritable::throw_if_not_writable(dst); + + auto array_types = td_ns::usm_ndarray_types(); + int dst_typenum = dst.get_typenum(); + int dst_typeid = array_types.typenum_to_lookup_id(dst_typenum); + + const py::ssize_t nelem = dst.get_size(); + const py::ssize_t rows = dst.get_shape(0); + const py::ssize_t cols = dst.get_shape(1); + if (rows == 0 || cols == 0) { + // nothing to do + return std::make_pair(sycl::event{}, sycl::event{}); + } + + bool is_dst_c_contig = dst.is_c_contiguous(); + bool is_dst_f_contig = dst.is_f_contiguous(); + if (!is_dst_c_contig && !is_dst_f_contig) { + throw py::value_error("USM array is not contiguous"); + } + + py::ssize_t start; + if (is_dst_c_contig) { + start = (k < 0) ? -k * cols : k; + } + else { + start = (k < 0) ? -k : k * rows; + } + + const py::ssize_t *strides = dst.get_strides_raw(); + py::ssize_t step; + if (strides == nullptr) { + step = (is_dst_c_contig) ? cols + 1 : rows + 1; + } + else { + step = strides[0] + strides[1]; + } + + const py::ssize_t length = std::min({rows, cols, rows + k, cols - k}); + const py::ssize_t end = start + step * (length - 1); + + char *dst_data = dst.get_data(); + sycl::event eye_event; + + auto fn = eye_dispatch_vector[dst_typeid]; + + eye_event = fn(exec_q, static_cast(nelem), start, end, step, + dst_data, depends); + + return std::make_pair(keep_args_alive(exec_q, {dst}, {eye_event}), + eye_event); +} + +void init_eye_ctor_dispatch_vectors(void) +{ + using namespace td_ns; + using dpctl::tensor::kernels::constructors::EyeFactory; + + DispatchVectorBuilder dvb; + dvb.populate_dispatch_vector(eye_dispatch_vector); + + return; +} + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/eye_ctor.hpp b/dpctl_ext/tensor/libtensor/source/eye_ctor.hpp new file mode 100644 index 000000000000..dda7f2c4813a --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/eye_ctor.hpp @@ -0,0 +1,57 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===--------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_impl extensions +//===--------------------------------------------------------------------===// + +#pragma once +#include +#include + +#include + +#include "dpnp4pybind11.hpp" +#include + +namespace py = pybind11; + +namespace dpctl::tensor::py_internal +{ + +extern std::pair + usm_ndarray_eye(py::ssize_t k, + const dpctl::tensor::usm_ndarray &dst, + sycl::queue &exec_q, + const std::vector &depends = {}); + +extern void init_eye_ctor_dispatch_vectors(void); + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/tensor_ctors.cpp b/dpctl_ext/tensor/libtensor/source/tensor_ctors.cpp index 3e5be4d9e8fe..98ab488e5879 100644 --- a/dpctl_ext/tensor/libtensor/source/tensor_ctors.cpp +++ b/dpctl_ext/tensor/libtensor/source/tensor_ctors.cpp @@ -43,8 +43,8 @@ #include "dpnp4pybind11.hpp" -// #include "accumulators.hpp" -// #include "boolean_advanced_indexing.hpp" +#include "accumulators.hpp" +#include "boolean_advanced_indexing.hpp" // #include "clip.hpp" #include "copy_and_cast_usm_to_usm.hpp" #include "copy_as_contig.hpp" @@ -52,7 +52,7 @@ #include "copy_for_roll.hpp" #include "copy_numpy_ndarray_into_usm_ndarray.hpp" #include "device_support_queries.hpp" -// #include "eye_ctor.hpp" +#include "eye_ctor.hpp" #include "full_ctor.hpp" #include "integer_advanced_indexing.hpp" #include "kernels/dpctl_tensor_types.hpp" @@ -112,19 +112,19 @@ using dpctl::tensor::py_internal::usm_ndarray_zeros; using dpctl::tensor::py_internal::usm_ndarray_put; using dpctl::tensor::py_internal::usm_ndarray_take; -// using dpctl::tensor::py_internal::py_extract; -// using dpctl::tensor::py_internal::py_mask_positions; -// using dpctl::tensor::py_internal::py_nonzero; -// using dpctl::tensor::py_internal::py_place; +using dpctl::tensor::py_internal::py_extract; +using 
dpctl::tensor::py_internal::py_mask_positions; +using dpctl::tensor::py_internal::py_nonzero; +using dpctl::tensor::py_internal::py_place; /* ================= Repeat ====================*/ -// using dpctl::tensor::py_internal::py_cumsum_1d; +using dpctl::tensor::py_internal::py_cumsum_1d; // using dpctl::tensor::py_internal::py_repeat_by_scalar; // using dpctl::tensor::py_internal::py_repeat_by_sequence; /* ================ Eye ================== */ -// using dpctl::tensor::py_internal::usm_ndarray_eye; +using dpctl::tensor::py_internal::usm_ndarray_eye; /* =========================== Tril and triu ============================== */ @@ -160,15 +160,15 @@ void init_dispatch_vectors(void) // init_linear_sequences_dispatch_vectors(); init_full_ctor_dispatch_vectors(); init_zeros_ctor_dispatch_vectors(); - // init_eye_ctor_dispatch_vectors(); + init_eye_ctor_dispatch_vectors(); init_triul_ctor_dispatch_vectors(); - // populate_masked_extract_dispatch_vectors(); - // populate_masked_place_dispatch_vectors(); + populate_masked_extract_dispatch_vectors(); + populate_masked_place_dispatch_vectors(); - // populate_mask_positions_dispatch_vectors(); + populate_mask_positions_dispatch_vectors(); - // populate_cumsum_1d_dispatch_vectors(); + populate_cumsum_1d_dispatch_vectors(); // init_repeat_dispatch_vectors(); // init_clip_dispatch_vectors(); @@ -348,15 +348,15 @@ PYBIND11_MODULE(_tensor_impl, m) py::arg("mode"), py::arg("sycl_queue"), py::arg("depends") = py::list()); - // m.def("_eye", &usm_ndarray_eye, - // "Fills input 2D contiguous usm_ndarray `dst` with " - // "zeros outside of the diagonal " - // "specified by " - // "the diagonal index `k` " - // "which is filled with ones." - // "Returns a tuple of events: (ht_event, comp_event)", - // py::arg("k"), py::arg("dst"), py::arg("sycl_queue"), - // py::arg("depends") = py::list()); + m.def("_eye", &usm_ndarray_eye, + "Fills input 2D contiguous usm_ndarray `dst` with " + "zeros outside of the diagonal " + "specified by " + "the diagonal index `k` " + "which is filled with ones." 
+ "Returns a tuple of events: (ht_event, comp_event)", + py::arg("k"), py::arg("dst"), py::arg("sycl_queue"), + py::arg("depends") = py::list()); m.def("default_device_fp_type", dpctl::tensor::py_internal::default_device_fp_type, @@ -408,16 +408,16 @@ PYBIND11_MODULE(_tensor_impl, m) py::arg("dst"), py::arg("k") = 0, py::arg("sycl_queue"), py::arg("depends") = py::list()); - // m.def("mask_positions", &py_mask_positions, "", py::arg("mask"), - // py::arg("cumsum"), py::arg("sycl_queue"), - // py::arg("depends") = py::list()); + m.def("mask_positions", &py_mask_positions, "", py::arg("mask"), + py::arg("cumsum"), py::arg("sycl_queue"), + py::arg("depends") = py::list()); - // m.def("_cumsum_1d", &py_cumsum_1d, "", py::arg("src"), py::arg("cumsum"), - // py::arg("sycl_queue"), py::arg("depends") = py::list()); + m.def("_cumsum_1d", &py_cumsum_1d, "", py::arg("src"), py::arg("cumsum"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); - // m.def("_extract", &py_extract, "", py::arg("src"), py::arg("cumsum"), - // py::arg("axis_start"), py::arg("axis_end"), py::arg("dst"), - // py::arg("sycl_queue"), py::arg("depends") = py::list()); + m.def("_extract", &py_extract, "", py::arg("src"), py::arg("cumsum"), + py::arg("axis_start"), py::arg("axis_end"), py::arg("dst"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); auto overlap = [](const dpctl::tensor::usm_ndarray &x1, const dpctl::tensor::usm_ndarray &x2) -> bool { @@ -438,13 +438,13 @@ PYBIND11_MODULE(_tensor_impl, m) "Determines if the memory regions indexed by each array are the same", py::arg("array1"), py::arg("array2")); - // m.def("_place", &py_place, "", py::arg("dst"), py::arg("cumsum"), - // py::arg("axis_start"), py::arg("axis_end"), py::arg("rhs"), - // py::arg("sycl_queue"), py::arg("depends") = py::list()); + m.def("_place", &py_place, "", py::arg("dst"), py::arg("cumsum"), + py::arg("axis_start"), py::arg("axis_end"), py::arg("rhs"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); - // m.def("_nonzero", &py_nonzero, "", py::arg("cumsum"), py::arg("indexes"), - // py::arg("mask_shape"), py::arg("sycl_queue"), - // py::arg("depends") = py::list()); + m.def("_nonzero", &py_nonzero, "", py::arg("cumsum"), py::arg("indexes"), + py::arg("mask_shape"), py::arg("sycl_queue"), + py::arg("depends") = py::list()); // m.def("_where", &py_where, "", py::arg("condition"), py::arg("x1"), // py::arg("x2"), py::arg("dst"), py::arg("sycl_queue"), diff --git a/dpnp/dpnp_container.py b/dpnp/dpnp_container.py index acda579a5f5e..0727b9bfd775 100644 --- a/dpnp/dpnp_container.py +++ b/dpnp/dpnp_container.py @@ -196,7 +196,7 @@ def eye( order = "C" """Creates `dpnp_array` with ones on the `k`th diagonal.""" - array_obj = dpt.eye( + array_obj = dpt_ext.eye( N, M, k=k, diff --git a/dpnp/dpnp_iface_indexing.py b/dpnp/dpnp_iface_indexing.py index b0769337c38b..f305b106221f 100644 --- a/dpnp/dpnp_iface_indexing.py +++ b/dpnp/dpnp_iface_indexing.py @@ -47,9 +47,6 @@ import dpctl.tensor as dpt import dpctl.utils as dpu import numpy -from dpctl.tensor._copy_utils import _nonzero_impl -from dpctl.tensor._indexing_functions import _get_indexing_mode -from dpctl.tensor._numpy_helper import normalize_axis_index # pylint: disable=no-name-in-module # TODO: revert to `import dpctl.tensor...` @@ -60,6 +57,9 @@ # pylint: disable=no-name-in-module import dpnp.backend.extensions.indexing._indexing_impl as indexing_ext +from dpctl_ext.tensor._copy_utils import _nonzero_impl +from dpctl_ext.tensor._indexing_functions import _get_indexing_mode 
+from dpctl_ext.tensor._numpy_helper import normalize_axis_index # pylint: disable=no-name-in-module from .dpnp_algo import ( @@ -817,13 +817,13 @@ def extract(condition, a): usm_a = dpt_ext.reshape(usm_a, -1) usm_cond = dpt_ext.reshape(usm_cond, -1) - usm_res = dpt_ext.take(usm_a, dpt.nonzero(usm_cond)[0]) + usm_res = dpt_ext.take(usm_a, dpt_ext.nonzero(usm_cond)[0]) else: if usm_cond.shape != usm_a.shape: usm_a = dpt_ext.reshape(usm_a, -1) usm_cond = dpt_ext.reshape(usm_cond, -1) - usm_res = dpt.extract(usm_cond, usm_a) + usm_res = dpt_ext.extract(usm_cond, usm_a) return dpnp_array._create_from_usm_ndarray(usm_res) @@ -1546,7 +1546,7 @@ def nonzero(a): usm_a = dpnp.get_usm_ndarray(a) return tuple( - dpnp_array._create_from_usm_ndarray(y) for y in dpt.nonzero(usm_a) + dpnp_array._create_from_usm_ndarray(y) for y in dpt_ext.nonzero(usm_a) ) @@ -1619,7 +1619,7 @@ def place(a, mask, vals): usm_vals, usm_a.dtype, casting="safe", copy=False ) - dpt.place(usm_a, usm_mask, usm_vals) + dpt_ext.place(usm_a, usm_mask, usm_vals) def put(a, ind, v, /, *, axis=None, mode="wrap"): @@ -1807,7 +1807,7 @@ def put_along_axis(a, ind, values, axis, mode="wrap"): values, usm_type=a.usm_type, sycl_queue=a.sycl_queue ) - dpt.put_along_axis(usm_a, usm_ind, usm_vals, axis=axis, mode=mode) + dpt_ext.put_along_axis(usm_a, usm_ind, usm_vals, axis=axis, mode=mode) def putmask(x1, mask, values): @@ -2295,7 +2295,7 @@ def take_along_axis(a, indices, axis=-1, mode="wrap"): usm_a = dpnp.get_usm_ndarray(a) usm_ind = dpnp.get_usm_ndarray(indices) - usm_res = dpt.take_along_axis(usm_a, usm_ind, axis=axis, mode=mode) + usm_res = dpt_ext.take_along_axis(usm_a, usm_ind, axis=axis, mode=mode) return dpnp_array._create_from_usm_ndarray(usm_res) diff --git a/dpnp/dpnp_iface_manipulation.py b/dpnp/dpnp_iface_manipulation.py index 4866c912ab6a..e988bbaa237b 100644 --- a/dpnp/dpnp_iface_manipulation.py +++ b/dpnp/dpnp_iface_manipulation.py @@ -415,7 +415,7 @@ def _get_first_nan_index(usm_a): if first_nan is not None: # all NaNs are collapsed, so need to replace the indices with # the index of the first NaN value in result array of unique values - dpt.place( + dpt_ext.place( usm_res.inverse_indices, usm_res.inverse_indices > first_nan, dpt_ext.reshape(first_nan, 1), From dbf021a62f29fca5cccb21decdea08177b38fc43 Mon Sep 17 00:00:00 2001 From: Vladislav Perevezentsev Date: Wed, 4 Mar 2026 07:13:17 -0800 Subject: [PATCH 04/11] Move ti.proj()/real()/round() and reuse them --- dpctl_ext/tensor/CMakeLists.txt | 6 +- dpctl_ext/tensor/__init__.py | 6 + dpctl_ext/tensor/_elementwise_funcs.py | 89 +++++++ .../kernels/elementwise_functions/proj.hpp | 239 +++++++++++++++++ .../kernels/elementwise_functions/real.hpp | 231 +++++++++++++++++ .../kernels/elementwise_functions/round.hpp | 241 ++++++++++++++++++ .../elementwise_common.cpp | 12 +- .../source/elementwise_functions/proj.cpp | 124 +++++++++ .../source/elementwise_functions/proj.hpp | 46 ++++ .../source/elementwise_functions/real.cpp | 124 +++++++++ .../source/elementwise_functions/real.hpp | 46 ++++ .../source/elementwise_functions/round.cpp | 125 +++++++++ .../source/elementwise_functions/round.hpp | 46 ++++ dpnp/dpnp_iface_mathematical.py | 12 +- 14 files changed, 1332 insertions(+), 15 deletions(-) create mode 100644 dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/proj.hpp create mode 100644 dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/real.hpp create mode 100644 dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/round.hpp create mode 
100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/proj.cpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/proj.hpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/real.cpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/real.hpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/round.cpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/round.hpp diff --git a/dpctl_ext/tensor/CMakeLists.txt b/dpctl_ext/tensor/CMakeLists.txt index 261204223ddd..cd38f97a28ed 100644 --- a/dpctl_ext/tensor/CMakeLists.txt +++ b/dpctl_ext/tensor/CMakeLists.txt @@ -126,11 +126,11 @@ set(_elementwise_sources #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/not_equal.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/positive.cpp #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/pow.cpp - #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/proj.cpp - #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/real.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/proj.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/real.cpp #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/reciprocal.cpp #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/remainder.cpp - #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/round.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/round.cpp #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/rsqrt.cpp #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/sign.cpp #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/signbit.cpp diff --git a/dpctl_ext/tensor/__init__.py b/dpctl_ext/tensor/__init__.py index a0dfe97c1eca..1949cfe99cbd 100644 --- a/dpctl_ext/tensor/__init__.py +++ b/dpctl_ext/tensor/__init__.py @@ -111,6 +111,9 @@ logical_not, negative, positive, + proj, + real, + round, ) from ._reduction import ( argmax, @@ -207,13 +210,16 @@ "place", "positive", "prod", + "proj", "put", "put_along_axis", + "real", "reduce_hypot", "repeat", "reshape", "result_type", "roll", + "round", "searchsorted", "sort", "squeeze", diff --git a/dpctl_ext/tensor/_elementwise_funcs.py b/dpctl_ext/tensor/_elementwise_funcs.py index b57074ae9784..2b90640d0faa 100644 --- a/dpctl_ext/tensor/_elementwise_funcs.py +++ b/dpctl_ext/tensor/_elementwise_funcs.py @@ -782,6 +782,95 @@ ) del _positive_docstring_ +# U27: ==== REAL (x) +_real_docstring = r""" +real(x, /, \*, out=None, order='K') + +Computes real part of each element `x_i` for input array `x`. + +Args: + x (usm_ndarray): + Input array. May have any data type. + out (Union[usm_ndarray, None], optional): + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C","F","A","K", optional): + Memory layout of the new output array, if parameter + `out` is ``None``. + Default: "K". + +Returns: + usm_ndarray: + An array containing the element-wise real component of input. + If the input is a real-valued data type, the returned array has + the same data type. If the input is a complex floating-point + data type, the returned array has a floating-point data type + with the same floating-point precision as complex input. 
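+
+Example:
+    An illustrative sketch (assuming a default SYCL device is available;
+    the input is constructed with ``dpctl.tensor.asarray``):
+
+    >>> import dpctl.tensor as dpt
+    >>> import dpctl_ext.tensor as dpt_ext
+    >>> x = dpt.asarray([1 + 2j, 3 - 4j])
+    >>> y = dpt_ext.real(x)  # usm_ndarray with values [1., 3.]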
+""" + +real = UnaryElementwiseFunc( + "real", ti._real_result_type, ti._real, _real_docstring +) +del _real_docstring + +# U28: ==== ROUND (x) +_round_docstring = r""" +round(x, /, \*, out=None, order='K') + +Rounds each element `x_i` of the input array `x` to +the nearest integer-valued number. + +When two integers are equally close to `x_i`, the result is the nearest even +integer to `x_i`. + +Args: + x (usm_ndarray): + Input array, expected to have a numeric data type. + out (Union[usm_ndarray, None], optional): + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C","F","A","K", optional): + Memory layout of the new output array, if parameter + `out` is ``None``. + Default: "K". + +Returns: + usm_ndarray: + An array containing the element-wise rounded values. +""" + +round = UnaryElementwiseFunc( + "round", ti._round_result_type, ti._round, _round_docstring +) +del _round_docstring + +# U40: ==== PROJ (x) +_proj_docstring = r""" +proj(x, /, \*, out=None, order='K') + +Computes projection of each element `x_i` for input array `x`. + +Args: + x (usm_ndarray): + Input array, expected to have a complex data type. + out (Union[usm_ndarray, None], optional): + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C","F","A","K", optional): + Memory layout of the new output array, if parameter + `out` is ``None``. + Default: "K". + +Returns: + usm_ndarray: + An array containing the element-wise projection. +""" + +proj = UnaryElementwiseFunc( + "proj", ti._proj_result_type, ti._proj, _proj_docstring +) +del _proj_docstring + # U43: ==== ANGLE (x) _angle_docstring = r""" angle(x, /, \*, out=None, order='K') diff --git a/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/proj.hpp b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/proj.hpp new file mode 100644 index 000000000000..039da657cfd2 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/proj.hpp @@ -0,0 +1,239 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+// THE POSSIBILITY OF SUCH DAMAGE.
+//*****************************************************************************
+//
+//===---------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines kernels for elementwise evaluation of PROJ(x) function.
+//===---------------------------------------------------------------------===//
+
+#pragma once
+#include <cmath>
+#include <complex>
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+#include <type_traits>
+#include <utility>
+
+#include <sycl/sycl.hpp>
+
+#include "vec_size_util.hpp"
+
+#include "kernels/dpctl_tensor_types.hpp"
+#include "kernels/elementwise_functions/common.hpp"
+
+#include "utils/type_dispatch_building.hpp"
+#include "utils/type_utils.hpp"
+
+namespace dpctl::tensor::kernels::proj
+{
+
+using dpctl::tensor::ssize_t;
+namespace td_ns = dpctl::tensor::type_dispatch;
+
+using dpctl::tensor::type_utils::is_complex;
+
+template <typename argT, typename resT> struct ProjFunctor
+{
+
+    // is function constant for given argT
+    using is_constant = typename std::false_type;
+    // constant value, if constant
+    // constexpr resT constant_value = resT{};
+    // is function defined for sycl::vec
+    using supports_vec = typename std::false_type;
+    // do both argTy and resTy support subgroup store/load operation
+    using supports_sg_loadstore = typename std::false_type;
+
+    resT operator()(const argT &in) const
+    {
+        using realT = typename argT::value_type;
+        const realT x = std::real(in);
+        const realT y = std::imag(in);
+
+        if (std::isinf(x)) {
+            return value_at_infinity(y);
+        }
+        else if (std::isinf(y)) {
+            return value_at_infinity(y);
+        }
+        else {
+            return in;
+        }
+    }
+
+private:
+    template <typename T>
+    std::complex<T> value_at_infinity(const T &y) const
+    {
+        const T res_im = sycl::copysign(T(0), y);
+        return std::complex<T>{std::numeric_limits<T>::infinity(), res_im};
+    }
+};
+
+template <typename argTy,
+          typename resTy = argTy,
+          std::uint8_t vec_sz = 4u,
+          std::uint8_t n_vecs = 2u,
+          bool enable_sg_loadstore = true>
+using ProjContigFunctor =
+    elementwise_common::UnaryContigFunctor<argTy,
+                                           resTy,
+                                           ProjFunctor<argTy, resTy>,
+                                           vec_sz,
+                                           n_vecs,
+                                           enable_sg_loadstore>;
+
+template <typename argTy, typename resTy, typename IndexerT>
+using ProjStridedFunctor = elementwise_common::
+    UnaryStridedFunctor<argTy, resTy, IndexerT, ProjFunctor<argTy, resTy>>;
+
+template <typename T> struct ProjOutputType
+{
+    using value_type = typename std::disjunction<
+        td_ns::TypeMapResultEntry<T, std::complex<float>>,
+        td_ns::TypeMapResultEntry<T, std::complex<double>>,
+        td_ns::DefaultResultEntry<void>>::result_type;
+
+    static constexpr bool is_defined = !std::is_same_v<value_type, void>;
+};
+
+namespace hyperparam_detail
+{
+
+namespace vsu_ns = dpctl::tensor::kernels::vec_size_utils;
+
+using vsu_ns::ContigHyperparameterSetDefault;
+using vsu_ns::UnaryContigHyperparameterSetEntry;
+
+template <typename argTy> struct ProjContigHyperparameterSet
+{
+    using value_type =
+        typename std::disjunction<ContigHyperparameterSetDefault<4u, 2u>>;
+
+    constexpr static auto vec_sz = value_type::vec_sz;
+    constexpr static auto n_vecs = value_type::n_vecs;
+};
+
+} // end of namespace hyperparam_detail
+
+template <typename T1, typename T2, std::uint8_t vec_sz, std::uint8_t n_vecs>
+class proj_contig_kernel;
+
+template <typename argTy>
+sycl::event proj_contig_impl(sycl::queue &exec_q,
+                             std::size_t nelems,
+                             const char *arg_p,
+                             char *res_p,
+                             const std::vector<sycl::event> &depends = {})
+{
+    using ProjHS = hyperparam_detail::ProjContigHyperparameterSet<argTy>;
+    static constexpr std::uint8_t vec_sz = ProjHS::vec_sz;
+    static constexpr std::uint8_t
n_vecs = ProjHS::n_vecs; + + return elementwise_common::unary_contig_impl< + argTy, ProjOutputType, ProjContigFunctor, proj_contig_kernel, vec_sz, + n_vecs>(exec_q, nelems, arg_p, res_p, depends); +} + +template +struct ProjContigFactory +{ + fnT get() + { + if constexpr (!ProjOutputType::is_defined) { + fnT fn = nullptr; + return fn; + } + else { + if constexpr (std::is_same_v>) { + fnT fn = proj_contig_impl; + return fn; + } + else { + fnT fn = proj_contig_impl; + return fn; + } + } + } +}; + +template +struct ProjTypeMapFactory +{ + /*! @brief get typeid for output type of std::proj(T x) */ + std::enable_if_t::value, int> get() + { + using rT = typename ProjOutputType::value_type; + return td_ns::GetTypeid{}.get(); + } +}; + +template +class proj_strided_kernel; + +template +sycl::event + proj_strided_impl(sycl::queue &exec_q, + std::size_t nelems, + int nd, + const ssize_t *shape_and_strides, + const char *arg_p, + ssize_t arg_offset, + char *res_p, + ssize_t res_offset, + const std::vector &depends, + const std::vector &additional_depends) +{ + return elementwise_common::unary_strided_impl< + argTy, ProjOutputType, ProjStridedFunctor, proj_strided_kernel>( + exec_q, nelems, nd, shape_and_strides, arg_p, arg_offset, res_p, + res_offset, depends, additional_depends); +} + +template +struct ProjStridedFactory +{ + fnT get() + { + if constexpr (!ProjOutputType::is_defined) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = proj_strided_impl; + return fn; + } + } +}; + +} // namespace dpctl::tensor::kernels::proj diff --git a/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/real.hpp b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/real.hpp new file mode 100644 index 000000000000..d21a9e6baa7d --- /dev/null +++ b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/real.hpp @@ -0,0 +1,231 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. 
+//*****************************************************************************
+//
+//===---------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines kernels for elementwise evaluation of REAL(x) function.
+//===---------------------------------------------------------------------===//
+
+#pragma once
+#include <cmath>
+#include <complex>
+#include <cstddef>
+#include <cstdint>
+#include <type_traits>
+
+#include <sycl/sycl.hpp>
+
+#include "vec_size_util.hpp"
+
+#include "kernels/dpctl_tensor_types.hpp"
+#include "kernels/elementwise_functions/common.hpp"
+
+#include "utils/type_dispatch_building.hpp"
+#include "utils/type_utils.hpp"
+
+namespace dpctl::tensor::kernels::real
+{
+
+using dpctl::tensor::ssize_t;
+namespace td_ns = dpctl::tensor::type_dispatch;
+
+using dpctl::tensor::type_utils::is_complex;
+using dpctl::tensor::type_utils::is_complex_v;
+
+template <typename argT, typename resT> struct RealFunctor
+{
+
+    // is function constant for given argT
+    using is_constant = typename std::false_type;
+    // constant value, if constant
+    // constexpr resT constant_value = resT{};
+    // is function defined for sycl::vec
+    using supports_vec = typename std::false_type;
+    // do both argTy and resTy support subgroup store/load operation
+    using supports_sg_loadstore = typename std::negation<
+        std::disjunction<is_complex<resT>, is_complex<argT>>>;
+
+    resT operator()(const argT &in) const
+    {
+        if constexpr (is_complex_v<argT>) {
+            return std::real(in);
+        }
+        else {
+            static_assert(std::is_same_v<resT, argT>);
+            return in;
+        }
+    }
+};
+
+template <typename argTy,
+          typename resTy = argTy,
+          std::uint8_t vec_sz = 4u,
+          std::uint8_t n_vecs = 2u,
+          bool enable_sg_loadstore = true>
+using RealContigFunctor =
+    elementwise_common::UnaryContigFunctor<argTy,
+                                           resTy,
+                                           RealFunctor<argTy, resTy>,
+                                           vec_sz,
+                                           n_vecs,
+                                           enable_sg_loadstore>;
+
+template <typename argTy, typename resTy, typename IndexerT>
+using RealStridedFunctor = elementwise_common::
+    UnaryStridedFunctor<argTy, resTy, IndexerT, RealFunctor<argTy, resTy>>;
+
+template <typename T> struct RealOutputType
+{
+    using value_type = typename std::disjunction<
+        td_ns::TypeMapResultEntry<T, bool>,
+        td_ns::TypeMapResultEntry<T, std::uint8_t>,
+        td_ns::TypeMapResultEntry<T, std::int8_t>,
+        td_ns::TypeMapResultEntry<T, std::uint16_t>,
+        td_ns::TypeMapResultEntry<T, std::int16_t>,
+        td_ns::TypeMapResultEntry<T, std::uint32_t>,
+        td_ns::TypeMapResultEntry<T, std::int32_t>,
+        td_ns::TypeMapResultEntry<T, std::uint64_t>,
+        td_ns::TypeMapResultEntry<T, std::int64_t>,
+        td_ns::TypeMapResultEntry<T, sycl::half>,
+        td_ns::TypeMapResultEntry<T, float>,
+        td_ns::TypeMapResultEntry<T, double>,
+        td_ns::TypeMapResultEntry<T, std::complex<float>, float>,
+        td_ns::TypeMapResultEntry<T, std::complex<double>, double>,
+        td_ns::DefaultResultEntry<void>>::result_type;
+
+    static constexpr bool is_defined = !std::is_same_v<value_type, void>;
+};
+
+namespace hyperparam_detail
+{
+
+namespace vsu_ns = dpctl::tensor::kernels::vec_size_utils;
+
+using vsu_ns::ContigHyperparameterSetDefault;
+using vsu_ns::UnaryContigHyperparameterSetEntry;
+
+template <typename argTy> struct RealContigHyperparameterSet
+{
+    using value_type =
+        typename std::disjunction<ContigHyperparameterSetDefault<4u, 2u>>;
+
+    constexpr static auto vec_sz = value_type::vec_sz;
+    constexpr static auto n_vecs = value_type::n_vecs;
+};
+
+} // end of namespace hyperparam_detail
+
+template <typename T1, typename T2, std::uint8_t vec_sz, std::uint8_t n_vecs>
+class real_contig_kernel;
+
+template <typename argTy>
+sycl::event real_contig_impl(sycl::queue &exec_q,
+                             std::size_t nelems,
+                             const char *arg_p,
+                             char *res_p,
+                             const std::vector<sycl::event> &depends = {})
+{
+    using RealHS = hyperparam_detail::RealContigHyperparameterSet<argTy>;
+    static constexpr std::uint8_t vec_sz = RealHS::vec_sz;
+    static constexpr std::uint8_t n_vecs = RealHS::n_vecs;
+
+    return elementwise_common::unary_contig_impl<
+        argTy, RealOutputType, RealContigFunctor, real_contig_kernel, vec_sz,
+        n_vecs>(exec_q, nelems, arg_p, res_p, depends);
+}
+
+template <typename fnT, typename T> struct RealContigFactory
+{
+    fnT get()
+    {
+        if constexpr (!RealOutputType<T>::is_defined) {
+            fnT fn = nullptr;
+            return fn;
+        }
+        else {
+            fnT fn = real_contig_impl<T>;
+            return fn;
+        }
+    }
+};
+
+template <typename fnT, typename T> struct RealTypeMapFactory
+{
+    /*!
@brief get typeid for output type of std::real(T x) */ + std::enable_if_t::value, int> get() + { + using rT = typename RealOutputType::value_type; + return td_ns::GetTypeid{}.get(); + } +}; + +template +class real_strided_kernel; + +template +sycl::event + real_strided_impl(sycl::queue &exec_q, + std::size_t nelems, + int nd, + const ssize_t *shape_and_strides, + const char *arg_p, + ssize_t arg_offset, + char *res_p, + ssize_t res_offset, + const std::vector &depends, + const std::vector &additional_depends) +{ + return elementwise_common::unary_strided_impl< + argTy, RealOutputType, RealStridedFunctor, real_strided_kernel>( + exec_q, nelems, nd, shape_and_strides, arg_p, arg_offset, res_p, + res_offset, depends, additional_depends); +} + +template +struct RealStridedFactory +{ + fnT get() + { + if constexpr (!RealOutputType::is_defined) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = real_strided_impl; + return fn; + } + } +}; + +} // namespace dpctl::tensor::kernels::real diff --git a/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/round.hpp b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/round.hpp new file mode 100644 index 000000000000..b20166a4d505 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/round.hpp @@ -0,0 +1,241 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines kernels for elementwise evaluation of ROUND(x) function. 
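+/// Rounding is performed with sycl::rint, i.e. halfway cases round to the
+/// nearest even value (banker's rounding), consistent with numpy.round.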
+//===---------------------------------------------------------------------===//
+
+#pragma once
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include "vec_size_util.hpp"
+
+#include "kernels/dpctl_tensor_types.hpp"
+#include "kernels/elementwise_functions/common.hpp"
+
+#include "utils/type_dispatch_building.hpp"
+#include "utils/type_utils.hpp"
+
+namespace dpctl::tensor::kernels::round
+{
+
+using dpctl::tensor::ssize_t;
+namespace td_ns = dpctl::tensor::type_dispatch;
+
+using dpctl::tensor::type_utils::is_complex;
+
+template
+struct RoundFunctor
+{
+
+    // is function constant for given argT
+    using is_constant = typename std::false_type;
+    // constant value, if constant
+    // constexpr resT constant_value = resT{};
+    // is function defined for sycl::vec
+    using supports_vec = typename std::false_type;
+    // do both argTy and resTy support subgroup store/load operation
+    using supports_sg_loadstore = typename std::negation<
+        std::disjunction, is_complex>>;
+
+    resT operator()(const argT &in) const
+    {
+
+        if constexpr (std::is_integral_v) {
+            return in;
+        }
+        else if constexpr (is_complex::value) {
+            using realT = typename argT::value_type;
+            return resT{round_func(std::real(in)),
+                        round_func(std::imag(in))};
+        }
+        else {
+            return round_func(in);
+        }
+    }
+
+private:
+    template
+    T round_func(const T &input) const
+    {
+        return sycl::rint(input);
+    }
+};
+
+template
+using RoundContigFunctor =
+    elementwise_common::UnaryContigFunctor,
+                                           vec_sz,
+                                           n_vecs,
+                                           enable_sg_loadstore>;
+
+template
+using RoundStridedFunctor = elementwise_common::
+    UnaryStridedFunctor>;
+
+template
+struct RoundOutputType
+{
+    using value_type = typename std::disjunction<
+        td_ns::TypeMapResultEntry,
+        td_ns::TypeMapResultEntry,
+        td_ns::TypeMapResultEntry,
+        td_ns::TypeMapResultEntry,
+        td_ns::TypeMapResultEntry,
+        td_ns::TypeMapResultEntry,
+        td_ns::TypeMapResultEntry,
+        td_ns::TypeMapResultEntry,
+        td_ns::TypeMapResultEntry,
+        td_ns::TypeMapResultEntry,
+        td_ns::TypeMapResultEntry,
+        td_ns::TypeMapResultEntry>,
+        td_ns::TypeMapResultEntry>,
+        td_ns::DefaultResultEntry>::result_type;
+
+    static constexpr bool is_defined = !std::is_same_v;
+};
+
+namespace hyperparam_detail
+{
+
+namespace vsu_ns = dpctl::tensor::kernels::vec_size_utils;
+
+using vsu_ns::ContigHyperparameterSetDefault;
+using vsu_ns::UnaryContigHyperparameterSetEntry;
+
+template
+struct RoundContigHyperparameterSet
+{
+    using value_type =
+        typename std::disjunction>;
+
+    constexpr static auto vec_sz = value_type::vec_sz;
+    constexpr static auto n_vecs = value_type::n_vecs;
+};
+
+} // end of namespace hyperparam_detail
+
+template
+class round_contig_kernel;
+
+template
+sycl::event round_contig_impl(sycl::queue &exec_q,
+                              std::size_t nelems,
+                              const char *arg_p,
+                              char *res_p,
+                              const std::vector &depends = {})
+{
+    using RoundHS = hyperparam_detail::RoundContigHyperparameterSet;
+    static constexpr std::uint8_t vec_sz = RoundHS::vec_sz;
+    static constexpr std::uint8_t n_vecs = RoundHS::n_vecs;
+
+    return elementwise_common::unary_contig_impl<
+        argTy, RoundOutputType, RoundContigFunctor, round_contig_kernel, vec_sz,
+        n_vecs>(exec_q, nelems, arg_p, res_p, depends);
+}
+
+template
+struct RoundContigFactory
+{
+    fnT get()
+    {
+        if constexpr (!RoundOutputType::is_defined) {
+            fnT fn = nullptr;
+            return fn;
+        }
+        else {
+            fnT fn = round_contig_impl;
+            return fn;
+        }
+    }
+};
+
+template
+struct RoundTypeMapFactory
+{
+    /*!
@brief get typeid for output type of sycl::round(T x) */ + std::enable_if_t::value, int> get() + { + using rT = typename RoundOutputType::value_type; + return td_ns::GetTypeid{}.get(); + } +}; + +template +class round_strided_kernel; + +template +sycl::event + round_strided_impl(sycl::queue &exec_q, + std::size_t nelems, + int nd, + const ssize_t *shape_and_strides, + const char *arg_p, + ssize_t arg_offset, + char *res_p, + ssize_t res_offset, + const std::vector &depends, + const std::vector &additional_depends) +{ + return elementwise_common::unary_strided_impl< + argTy, RoundOutputType, RoundStridedFunctor, round_strided_kernel>( + exec_q, nelems, nd, shape_and_strides, arg_p, arg_offset, res_p, + res_offset, depends, additional_depends); +} + +template +struct RoundStridedFactory +{ + fnT get() + { + if constexpr (!RoundOutputType::is_defined) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = round_strided_impl; + return fn; + } + } +}; + +} // namespace dpctl::tensor::kernels::round diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/elementwise_common.cpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/elementwise_common.cpp index 0a0c02f7ed31..0cb0c190eb6a 100644 --- a/dpctl_ext/tensor/libtensor/source/elementwise_functions/elementwise_common.cpp +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/elementwise_common.cpp @@ -89,11 +89,11 @@ // #include "not_equal.hpp" #include "positive.hpp" // #include "pow.hpp" -// #include "proj.hpp" -// #include "real.hpp" +#include "proj.hpp" +#include "real.hpp" // #include "reciprocal.hpp" // #include "remainder.hpp" -// #include "round.hpp" +#include "round.hpp" // #include "rsqrt.hpp" // #include "sign.hpp" // #include "signbit.hpp" @@ -170,11 +170,11 @@ void init_elementwise_functions(py::module_ m) // init_not_equal(m); init_positive(m); // init_pow(m); - // init_proj(m); - // init_real(m); + init_proj(m); + init_real(m); // init_reciprocal(m); // init_remainder(m); - // init_round(m); + init_round(m); // init_rsqrt(m); // init_sign(m); // init_signbit(m); diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/proj.cpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/proj.cpp new file mode 100644 index 000000000000..fddc5030f665 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/proj.cpp @@ -0,0 +1,124 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. +//===---------------------------------------------------------------------===// + +#include + +#include + +#include "dpnp4pybind11.hpp" +#include +#include +#include + +#include "elementwise_functions.hpp" +#include "proj.hpp" +#include "utils/type_dispatch.hpp" + +#include "kernels/elementwise_functions/common.hpp" +#include "kernels/elementwise_functions/proj.hpp" + +namespace dpctl::tensor::py_internal +{ + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +namespace ew_cmn_ns = dpctl::tensor::kernels::elementwise_common; +using ew_cmn_ns::unary_contig_impl_fn_ptr_t; +using ew_cmn_ns::unary_strided_impl_fn_ptr_t; + +// U40: ==== PROJ (x) +namespace impl +{ + +namespace proj_fn_ns = dpctl::tensor::kernels::proj; + +static unary_contig_impl_fn_ptr_t proj_contig_dispatch_vector[td_ns::num_types]; +static int proj_output_typeid_vector[td_ns::num_types]; +static unary_strided_impl_fn_ptr_t + proj_strided_dispatch_vector[td_ns::num_types]; + +void populate_proj_dispatch_vectors(void) +{ + using namespace td_ns; + namespace fn_ns = proj_fn_ns; + + using fn_ns::ProjContigFactory; + DispatchVectorBuilder + dvb1; + dvb1.populate_dispatch_vector(proj_contig_dispatch_vector); + + using fn_ns::ProjStridedFactory; + DispatchVectorBuilder + dvb2; + dvb2.populate_dispatch_vector(proj_strided_dispatch_vector); + + using fn_ns::ProjTypeMapFactory; + DispatchVectorBuilder dvb3; + dvb3.populate_dispatch_vector(proj_output_typeid_vector); +}; + +} // namespace impl + +void init_proj(py::module_ m) +{ + using arrayT = dpctl::tensor::usm_ndarray; + using event_vecT = std::vector; + { + impl::populate_proj_dispatch_vectors(); + using impl::proj_contig_dispatch_vector; + using impl::proj_output_typeid_vector; + using impl::proj_strided_dispatch_vector; + + auto proj_pyapi = [&](const arrayT &src, const arrayT &dst, + sycl::queue &exec_q, + const event_vecT &depends = {}) { + return py_unary_ufunc( + src, dst, exec_q, depends, proj_output_typeid_vector, + proj_contig_dispatch_vector, proj_strided_dispatch_vector); + }; + m.def("_proj", proj_pyapi, "", py::arg("src"), py::arg("dst"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); + + auto proj_result_type_pyapi = [&](const py::dtype &dtype) { + return py_unary_ufunc_result_type(dtype, proj_output_typeid_vector); + }; + m.def("_proj_result_type", proj_result_type_pyapi); + } +} + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/proj.hpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/proj.hpp new file mode 100644 index 000000000000..3cdc0e8271b0 --- /dev/null +++ 
b/dpctl_ext/tensor/libtensor/source/elementwise_functions/proj.hpp @@ -0,0 +1,46 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. +//===---------------------------------------------------------------------===// + +#pragma once +#include + +namespace py = pybind11; + +namespace dpctl::tensor::py_internal +{ + +extern void init_proj(py::module_ m); + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/real.cpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/real.cpp new file mode 100644 index 000000000000..de3ec82260aa --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/real.cpp @@ -0,0 +1,124 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. +//===---------------------------------------------------------------------===// + +#include + +#include + +#include "dpnp4pybind11.hpp" +#include +#include +#include + +#include "elementwise_functions.hpp" +#include "real.hpp" +#include "utils/type_dispatch.hpp" + +#include "kernels/elementwise_functions/common.hpp" +#include "kernels/elementwise_functions/real.hpp" + +namespace dpctl::tensor::py_internal +{ + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +namespace ew_cmn_ns = dpctl::tensor::kernels::elementwise_common; +using ew_cmn_ns::unary_contig_impl_fn_ptr_t; +using ew_cmn_ns::unary_strided_impl_fn_ptr_t; + +// U27: ==== REAL (x) +namespace impl +{ + +namespace real_fn_ns = dpctl::tensor::kernels::real; + +static unary_contig_impl_fn_ptr_t real_contig_dispatch_vector[td_ns::num_types]; +static int real_output_typeid_vector[td_ns::num_types]; +static unary_strided_impl_fn_ptr_t + real_strided_dispatch_vector[td_ns::num_types]; + +void populate_real_dispatch_vectors(void) +{ + using namespace td_ns; + namespace fn_ns = real_fn_ns; + + using fn_ns::RealContigFactory; + DispatchVectorBuilder + dvb1; + dvb1.populate_dispatch_vector(real_contig_dispatch_vector); + + using fn_ns::RealStridedFactory; + DispatchVectorBuilder + dvb2; + dvb2.populate_dispatch_vector(real_strided_dispatch_vector); + + using fn_ns::RealTypeMapFactory; + DispatchVectorBuilder dvb3; + dvb3.populate_dispatch_vector(real_output_typeid_vector); +}; + +} // namespace impl + +void init_real(py::module_ m) +{ + using arrayT = dpctl::tensor::usm_ndarray; + using event_vecT = std::vector; + { + impl::populate_real_dispatch_vectors(); + using impl::real_contig_dispatch_vector; + using impl::real_output_typeid_vector; + using impl::real_strided_dispatch_vector; + + auto real_pyapi = [&](const arrayT &src, const arrayT &dst, + sycl::queue &exec_q, + const event_vecT &depends = {}) { + return py_unary_ufunc( + src, dst, exec_q, depends, real_output_typeid_vector, + real_contig_dispatch_vector, real_strided_dispatch_vector); + }; + m.def("_real", real_pyapi, "", py::arg("src"), py::arg("dst"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); + + auto real_result_type_pyapi = [&](const py::dtype &dtype) { + return py_unary_ufunc_result_type(dtype, real_output_typeid_vector); + }; + m.def("_real_result_type", real_result_type_pyapi); + } +} + +} // namespace 
dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/real.hpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/real.hpp new file mode 100644 index 000000000000..81f4743e823b --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/real.hpp @@ -0,0 +1,46 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. +//===---------------------------------------------------------------------===// + +#pragma once +#include + +namespace py = pybind11; + +namespace dpctl::tensor::py_internal +{ + +extern void init_real(py::module_ m); + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/round.cpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/round.cpp new file mode 100644 index 000000000000..d906ecdf07af --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/round.cpp @@ -0,0 +1,125 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. +//===---------------------------------------------------------------------===// + +#include + +#include + +#include "dpnp4pybind11.hpp" +#include +#include +#include + +#include "elementwise_functions.hpp" +#include "round.hpp" +#include "utils/type_dispatch.hpp" + +#include "kernels/elementwise_functions/common.hpp" +#include "kernels/elementwise_functions/round.hpp" + +namespace dpctl::tensor::py_internal +{ +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +namespace ew_cmn_ns = dpctl::tensor::kernels::elementwise_common; +using ew_cmn_ns::unary_contig_impl_fn_ptr_t; +using ew_cmn_ns::unary_strided_impl_fn_ptr_t; + +// U28: ==== ROUND (x) +namespace impl +{ + +namespace round_fn_ns = dpctl::tensor::kernels::round; + +static unary_contig_impl_fn_ptr_t + round_contig_dispatch_vector[td_ns::num_types]; +static int round_output_typeid_vector[td_ns::num_types]; +static unary_strided_impl_fn_ptr_t + round_strided_dispatch_vector[td_ns::num_types]; + +void populate_round_dispatch_vectors(void) +{ + using namespace td_ns; + namespace fn_ns = round_fn_ns; + + using fn_ns::RoundContigFactory; + DispatchVectorBuilder + dvb1; + dvb1.populate_dispatch_vector(round_contig_dispatch_vector); + + using fn_ns::RoundStridedFactory; + DispatchVectorBuilder + dvb2; + dvb2.populate_dispatch_vector(round_strided_dispatch_vector); + + using fn_ns::RoundTypeMapFactory; + DispatchVectorBuilder dvb3; + dvb3.populate_dispatch_vector(round_output_typeid_vector); +}; + +} // namespace impl + +void init_round(py::module_ m) +{ + using arrayT = dpctl::tensor::usm_ndarray; + using event_vecT = std::vector; + { + impl::populate_round_dispatch_vectors(); + using impl::round_contig_dispatch_vector; + using impl::round_output_typeid_vector; + using impl::round_strided_dispatch_vector; + + auto round_pyapi = [&](const arrayT &src, const arrayT &dst, + sycl::queue &exec_q, + const event_vecT &depends = {}) { + return py_unary_ufunc( + src, dst, exec_q, depends, round_output_typeid_vector, + round_contig_dispatch_vector, round_strided_dispatch_vector); + }; + m.def("_round", round_pyapi, "", py::arg("src"), py::arg("dst"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); + + auto 
round_result_type_pyapi = [&](const py::dtype &dtype) { + return py_unary_ufunc_result_type(dtype, + round_output_typeid_vector); + }; + m.def("_round_result_type", round_result_type_pyapi); + } +} + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/round.hpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/round.hpp new file mode 100644 index 000000000000..ca56e110eec5 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/round.hpp @@ -0,0 +1,46 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. 
+//===---------------------------------------------------------------------===// + +#pragma once +#include + +namespace py = pybind11; + +namespace dpctl::tensor::py_internal +{ + +extern void init_round(py::module_ m); + +} // namespace dpctl::tensor::py_internal diff --git a/dpnp/dpnp_iface_mathematical.py b/dpnp/dpnp_iface_mathematical.py index 49e65fa10151..5e3bb23d2261 100644 --- a/dpnp/dpnp_iface_mathematical.py +++ b/dpnp/dpnp_iface_mathematical.py @@ -4250,8 +4250,8 @@ def prod( proj = DPNPUnaryFunc( "proj", - ti._proj_result_type, - ti._proj, + ti_ext._proj_result_type, + ti_ext._proj, _PROJ_DOCSTRING, ) @@ -4313,8 +4313,8 @@ def prod( real = DPNPReal( "real", - ti._real_result_type, - ti._real, + ti_ext._real_result_type, + ti_ext._real, _REAL_DOCSTRING, ) @@ -4596,8 +4596,8 @@ def real_if_close(a, tol=100): round = DPNPRound( "round", - ti._round_result_type, - ti._round, + ti_ext._round_result_type, + ti_ext._round, _ROUND_DOCSTRING, mkl_fn_to_call="_mkl_round_to_call", mkl_impl_fn="_round", From 0bc8973d173cea788861240e1725e5456966ba36 Mon Sep 17 00:00:00 2001 From: Vladislav Perevezentsev Date: Wed, 4 Mar 2026 07:28:50 -0800 Subject: [PATCH 05/11] Move ti.sign()/signbit()/sin()/sinh() and reuse them --- dpctl_ext/tensor/CMakeLists.txt | 8 +- dpctl_ext/tensor/__init__.py | 8 + dpctl_ext/tensor/_elementwise_funcs.py | 116 ++++++ .../kernels/elementwise_functions/sign.hpp | 258 ++++++++++++++ .../kernels/elementwise_functions/signbit.hpp | 223 ++++++++++++ .../kernels/elementwise_functions/sin.hpp | 333 ++++++++++++++++++ .../kernels/elementwise_functions/sinh.hpp | 302 ++++++++++++++++ .../elementwise_common.cpp | 16 +- .../source/elementwise_functions/sign.cpp | 124 +++++++ .../source/elementwise_functions/sign.hpp | 46 +++ .../source/elementwise_functions/signbit.cpp | 127 +++++++ .../source/elementwise_functions/signbit.hpp | 46 +++ .../source/elementwise_functions/sin.cpp | 124 +++++++ .../source/elementwise_functions/sin.hpp | 46 +++ .../source/elementwise_functions/sinh.cpp | 124 +++++++ .../source/elementwise_functions/sinh.hpp | 46 +++ dpnp/dpnp_iface_mathematical.py | 8 +- dpnp/dpnp_iface_trigonometric.py | 8 +- 18 files changed, 1943 insertions(+), 20 deletions(-) create mode 100644 dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/sign.hpp create mode 100644 dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/signbit.hpp create mode 100644 dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/sin.hpp create mode 100644 dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/sinh.hpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/sign.cpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/sign.hpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/signbit.cpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/signbit.hpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/sin.cpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/sin.hpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/sinh.cpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/sinh.hpp diff --git a/dpctl_ext/tensor/CMakeLists.txt b/dpctl_ext/tensor/CMakeLists.txt index cd38f97a28ed..85f52f8665ba 100644 --- a/dpctl_ext/tensor/CMakeLists.txt +++ b/dpctl_ext/tensor/CMakeLists.txt @@ -132,10 +132,10 @@ set(_elementwise_sources 
#${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/remainder.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/round.cpp #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/rsqrt.cpp - #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/sign.cpp - #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/signbit.cpp - #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/sin.cpp - #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/sinh.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/sign.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/signbit.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/sin.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/sinh.cpp #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/sqrt.cpp #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/square.cpp #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/subtract.cpp diff --git a/dpctl_ext/tensor/__init__.py b/dpctl_ext/tensor/__init__.py index 1949cfe99cbd..9d391fdd1bbc 100644 --- a/dpctl_ext/tensor/__init__.py +++ b/dpctl_ext/tensor/__init__.py @@ -114,6 +114,10 @@ proj, real, round, + sign, + signbit, + sin, + sinh, ) from ._reduction import ( argmax, @@ -221,6 +225,10 @@ "roll", "round", "searchsorted", + "sign", + "signbit", + "sin", + "sinh", "sort", "squeeze", "stack", diff --git a/dpctl_ext/tensor/_elementwise_funcs.py b/dpctl_ext/tensor/_elementwise_funcs.py index 2b90640d0faa..dcdd1e53326a 100644 --- a/dpctl_ext/tensor/_elementwise_funcs.py +++ b/dpctl_ext/tensor/_elementwise_funcs.py @@ -844,6 +844,93 @@ ) del _round_docstring +# U29: ==== SIGN (x) +_sign_docstring = r""" +sign(x, /, \*, out=None, order='K') + +Computes an indication of the sign of each element `x_i` of input array `x` +using the signum function. + +The signum function returns `-1` if `x_i` is less than `0`, +`0` if `x_i` is equal to `0`, and `1` if `x_i` is greater than `0`. + +Args: + x (usm_ndarray): + Input array, expected to have a numeric data type. + out (Union[usm_ndarray, None], optional): + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C","F","A","K", optional): + Memory layout of the new output array, if parameter + `out` is ``None``. + Default: "K". + +Returns: + usm_ndarray: + An array containing the element-wise result of the signum function. The + data type of the returned array is determined by the Type Promotion + Rules. +""" + +sign = UnaryElementwiseFunc( + "sign", ti._sign_result_type, ti._sign, _sign_docstring +) +del _sign_docstring + +# U30: ==== SIN (x) +_sin_docstring = r""" +sin(x, /, \*, out=None, order='K') + +Computes sine for each element `x_i` of input array `x`. + +Args: + x (usm_ndarray): + Input array, expected to have a real-valued floating-point data type. + out (Union[usm_ndarray, None], optional): + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C","F","A","K", optional): + Memory layout of the new output array, if parameter + `out` is ``None``. + Default: "K". + +Returns: + usm_ndarray: + An array containing the element-wise sine. The data type of the + returned array is determined by the Type Promotion Rules. 
+""" + +sin = UnaryElementwiseFunc("sin", ti._sin_result_type, ti._sin, _sin_docstring) +del _sin_docstring + +# U31: ==== SINH (x) +_sinh_docstring = r""" +sinh(x, /, \*, out=None, order='K') + +Computes hyperbolic sine for each element `x_i` for input array `x`. + +Args: + x (usm_ndarray): + Input array, expected to have a floating-point data type. + out (Union[usm_ndarray, None], optional): + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C","F","A","K", optional): + Memory layout of the new output array, if parameter + `out` is ``None``. + Default: "K". + +Returns: + usm_ndarray: + An array containing the element-wise hyperbolic sine. The data type + of the returned array is determined by the Type Promotion Rules. +""" + +sinh = UnaryElementwiseFunc( + "sinh", ti._sinh_result_type, ti._sinh, _sinh_docstring +) +del _sinh_docstring + # U40: ==== PROJ (x) _proj_docstring = r""" proj(x, /, \*, out=None, order='K') @@ -871,6 +958,35 @@ ) del _proj_docstring +# U41: ==== SIGNBIT (x) +_signbit_docstring = r""" +signbit(x, /, \*, out=None, order='K') + +Computes an indication of whether the sign bit of each element `x_i` of +input array `x` is set. + +Args: + x (usm_ndarray): + Input array, expected to have a real-valued floating-point data type. + out (Union[usm_ndarray, None], optional): + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C","F","A","K", optional): + Memory layout of the new output array, if parameter + `out` is ``None``. + Default: "K". + +Returns: + usm_ndarray: + An array containing the element-wise signbit results. The returned array + must have a data type of `bool`. +""" + +signbit = UnaryElementwiseFunc( + "signbit", ti._signbit_result_type, ti._signbit, _signbit_docstring +) +del _signbit_docstring + # U43: ==== ANGLE (x) _angle_docstring = r""" angle(x, /, \*, out=None, order='K') diff --git a/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/sign.hpp b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/sign.hpp new file mode 100644 index 000000000000..ceb3d1320f9c --- /dev/null +++ b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/sign.hpp @@ -0,0 +1,258 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines kernels for elementwise evaluation of SIGN(x) function. +//===---------------------------------------------------------------------===// + +#pragma once +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "cabs_impl.hpp" +#include "vec_size_util.hpp" + +#include "kernels/dpctl_tensor_types.hpp" +#include "kernels/elementwise_functions/common.hpp" + +#include "utils/type_dispatch_building.hpp" +#include "utils/type_utils.hpp" + +namespace dpctl::tensor::kernels::sign +{ + +using dpctl::tensor::ssize_t; +namespace td_ns = dpctl::tensor::type_dispatch; + +using dpctl::tensor::type_utils::is_complex; +using dpctl::tensor::type_utils::vec_cast; + +template +struct SignFunctor +{ + static_assert(std::is_same_v); + using is_constant = typename std::false_type; + // constexpr resT constant_value = resT{}; + using supports_vec = typename std::negation< + std::disjunction, is_complex>>; + using supports_sg_loadstore = std::false_type; + + resT operator()(const argT &in) const + { + if constexpr (std::is_integral_v) { + if constexpr (std::is_unsigned_v) { + return resT(0 < in); + } + else { + return sign_impl(in); + } + } + else { + if constexpr (is_complex::value) { + using realT = typename argT::value_type; + + if (in == argT(0)) { + return resT(0); + } + else { + auto z = exprm_ns::complex(in); + return (z / detail::cabs(in)); + } + } + else { + if (std::isnan(in)) { + return std::numeric_limits::quiet_NaN(); + } + else { + return sign_impl(in); + } + } + } + } + +private: + template + T sign_impl(const T &v) const + { + return (T(0) < v) - (v < T(0)); + } +}; + +template +using SignContigFunctor = + elementwise_common::UnaryContigFunctor, + vec_sz, + n_vecs, + enable_sg_loadstore>; + +template +struct SignOutputType +{ + using value_type = typename std::disjunction< + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry>, + td_ns::TypeMapResultEntry>, + td_ns::DefaultResultEntry>::result_type; + + static constexpr bool is_defined = !std::is_same_v; +}; + +namespace hyperparam_detail +{ + +namespace vsu_ns = dpctl::tensor::kernels::vec_size_utils; + +using vsu_ns::ContigHyperparameterSetDefault; +using vsu_ns::UnaryContigHyperparameterSetEntry; + +template +struct SignContigHyperparameterSet +{ + using value_type = + typename std::disjunction>; + + constexpr static auto vec_sz = value_type::vec_sz; + constexpr static auto n_vecs = value_type::n_vecs; +}; + +} // end of namespace hyperparam_detail + +template +class sign_contig_kernel; + 
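+
+// Illustrative summary of the SignFunctor semantics above (editorial sketch;
+// the values shown are examples, not code exercised by the dispatch below):
+//   signed integral:   sign(-3) == -1, sign(0) == 0, sign(7) == 1
+//   unsigned integral: sign(0) == 0, sign(5) == 1 (computed as resT(0 < in))
+//   real floating:     NaN inputs propagate as quiet NaN
+//   complex:           sign(z) == z / |z| for z != 0, and 0 otherwise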
+template +sycl::event sign_contig_impl(sycl::queue &exec_q, + std::size_t nelems, + const char *arg_p, + char *res_p, + const std::vector &depends = {}) +{ + using SignHS = hyperparam_detail::SignContigHyperparameterSet; + static constexpr std::uint8_t vec_sz = SignHS::vec_sz; + static constexpr std::uint8_t n_vecs = SignHS::n_vecs; + + return elementwise_common::unary_contig_impl< + argTy, SignOutputType, SignContigFunctor, sign_contig_kernel, vec_sz, + n_vecs>(exec_q, nelems, arg_p, res_p, depends); +} + +template +struct SignContigFactory +{ + fnT get() + { + if constexpr (!SignOutputType::is_defined) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = sign_contig_impl; + return fn; + } + } +}; + +template +struct SignTypeMapFactory +{ + /*! @brief get typeid for output type of sign(T x) */ + std::enable_if_t::value, int> get() + { + using rT = typename SignOutputType::value_type; + return td_ns::GetTypeid{}.get(); + } +}; + +template +using SignStridedFunctor = elementwise_common:: + UnaryStridedFunctor>; + +template +class sign_strided_kernel; + +template +sycl::event + sign_strided_impl(sycl::queue &exec_q, + std::size_t nelems, + int nd, + const ssize_t *shape_and_strides, + const char *arg_p, + ssize_t arg_offset, + char *res_p, + ssize_t res_offset, + const std::vector &depends, + const std::vector &additional_depends) +{ + return elementwise_common::unary_strided_impl< + argTy, SignOutputType, SignStridedFunctor, sign_strided_kernel>( + exec_q, nelems, nd, shape_and_strides, arg_p, arg_offset, res_p, + res_offset, depends, additional_depends); +} + +template +struct SignStridedFactory +{ + fnT get() + { + if constexpr (!SignOutputType::is_defined) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = sign_strided_impl; + return fn; + } + } +}; + +} // namespace dpctl::tensor::kernels::sign diff --git a/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/signbit.hpp b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/signbit.hpp new file mode 100644 index 000000000000..d67120633efd --- /dev/null +++ b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/signbit.hpp @@ -0,0 +1,223 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines kernels for elementwise evaluation of SIGNBIT(x) +/// function that tests whether the sign bit of the tensor element is set. +//===---------------------------------------------------------------------===// + +#pragma once +#include +#include +#include +#include +#include + +#include + +#include "vec_size_util.hpp" + +#include "kernels/dpctl_tensor_types.hpp" +#include "kernels/elementwise_functions/common.hpp" + +#include "utils/type_dispatch_building.hpp" +#include "utils/type_utils.hpp" + +namespace dpctl::tensor::kernels::signbit +{ + +using dpctl::tensor::ssize_t; +namespace td_ns = dpctl::tensor::type_dispatch; + +using dpctl::tensor::type_utils::is_complex; +using dpctl::tensor::type_utils::vec_cast; + +template +struct SignbitFunctor +{ + static_assert(std::is_same_v); + + using is_constant = std::false_type; + static constexpr resT constant_value = false; + using supports_vec = std::true_type; + using supports_sg_loadstore = std::true_type; + + resT operator()(const argT &in) const + { + return std::signbit(in); + } + + template + sycl::vec operator()(const sycl::vec &in) const + { + auto const &res_vec = sycl::signbit(in); + + using deducedT = typename std::remove_cv_t< + std::remove_reference_t>::element_type; + + return vec_cast(res_vec); + } +}; + +template +using SignbitContigFunctor = + elementwise_common::UnaryContigFunctor, + vec_sz, + n_vecs, + enable_sg_loadstore>; + +template +using SignbitStridedFunctor = elementwise_common:: + UnaryStridedFunctor>; + +template +struct SignbitOutputType +{ + using value_type = typename std::disjunction< + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::DefaultResultEntry>::result_type; + + static constexpr bool is_defined = !std::is_same_v; +}; + +namespace hyperparam_detail +{ + +namespace vsu_ns = dpctl::tensor::kernels::vec_size_utils; + +using vsu_ns::ContigHyperparameterSetDefault; +using vsu_ns::UnaryContigHyperparameterSetEntry; + +template +struct SignbitContigHyperparameterSet +{ + using value_type = + typename std::disjunction>; + + constexpr static auto vec_sz = value_type::vec_sz; + constexpr static auto n_vecs = value_type::n_vecs; +}; + +} // end of namespace hyperparam_detail + +template +class signbit_contig_kernel; + +template +sycl::event signbit_contig_impl(sycl::queue &exec_q, + std::size_t nelems, + const char *arg_p, + char *res_p, + const std::vector &depends = {}) +{ + using SignbitHS = hyperparam_detail::SignbitContigHyperparameterSet; + static constexpr std::uint8_t vec_sz = SignbitHS::vec_sz; + static constexpr std::uint8_t n_vecs = SignbitHS::n_vecs; + + return elementwise_common::unary_contig_impl< + argTy, SignbitOutputType, SignbitContigFunctor, signbit_contig_kernel, + vec_sz, n_vecs>(exec_q, nelems, arg_p, res_p, depends); +} 
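+
+// Editorial note: sycl::signbit inspects the raw sign bit rather than
+// comparing against zero, so signbit(-0.0) == true and signbit(NaN) reports
+// whatever sign bit that NaN happens to carry; this matches the behavior of
+// numpy.signbit.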
+
+template
+struct SignbitContigFactory
+{
+    fnT get()
+    {
+        if constexpr (!SignbitOutputType::is_defined) {
+            fnT fn = nullptr;
+            return fn;
+        }
+        else {
+            fnT fn = signbit_contig_impl;
+            return fn;
+        }
+    }
+};
+
+template
+struct SignbitTypeMapFactory
+{
+    /*! @brief get typeid for output type of sycl::signbit(T x) */
+    std::enable_if_t::value, int> get()
+    {
+        using rT = typename SignbitOutputType::value_type;
+        return td_ns::GetTypeid{}.get();
+    }
+};
+
+template
+class signbit_strided_kernel;
+
+template
+sycl::event
+    signbit_strided_impl(sycl::queue &exec_q,
+                         std::size_t nelems,
+                         int nd,
+                         const ssize_t *shape_and_strides,
+                         const char *arg_p,
+                         ssize_t arg_offset,
+                         char *res_p,
+                         ssize_t res_offset,
+                         const std::vector &depends,
+                         const std::vector &additional_depends)
+{
+    return elementwise_common::unary_strided_impl(
+        exec_q, nelems, nd, shape_and_strides, arg_p, arg_offset, res_p,
+        res_offset, depends, additional_depends);
+}
+
+template
+struct SignbitStridedFactory
+{
+    fnT get()
+    {
+        if constexpr (!SignbitOutputType::is_defined) {
+            fnT fn = nullptr;
+            return fn;
+        }
+        else {
+            fnT fn = signbit_strided_impl;
+            return fn;
+        }
+    }
+};
+
+} // namespace dpctl::tensor::kernels::signbit
diff --git a/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/sin.hpp b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/sin.hpp
new file mode 100644
index 000000000000..d1e3caa9effe
--- /dev/null
+++ b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/sin.hpp
@@ -0,0 +1,333 @@
+//*****************************************************************************
+// Copyright (c) 2026, Intel Corporation
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// - Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// - Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// - Neither the name of the copyright holder nor the names of its contributors
+//   may be used to endorse or promote products derived from this software
+//   without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+// THE POSSIBILITY OF SUCH DAMAGE.
+//*****************************************************************************
+//
+//===---------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines kernels for elementwise evaluation of SIN(x) function.
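+/// For complex input, special values are handled through the identity
+/// sin(z) = -i * sinh(i * z); see the inline special-value case analysis
+/// in SinFunctor below.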
+//===---------------------------------------------------------------------===//
+
+#pragma once
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include "sycl_complex.hpp"
+#include "vec_size_util.hpp"
+
+#include "kernels/dpctl_tensor_types.hpp"
+#include "kernels/elementwise_functions/common.hpp"
+
+#include "utils/type_dispatch_building.hpp"
+#include "utils/type_utils.hpp"
+
+namespace dpctl::tensor::kernels::sin
+{
+
+using dpctl::tensor::ssize_t;
+namespace td_ns = dpctl::tensor::type_dispatch;
+
+using dpctl::tensor::type_utils::is_complex;
+
+template
+struct SinFunctor
+{
+    // is function constant for given argT
+    using is_constant = typename std::false_type;
+    // constant value, if constant
+    // constexpr resT constant_value = resT{};
+    // is function defined for sycl::vec
+    using supports_vec = typename std::false_type;
+    // do both argTy and resTy support subgroup store/load operation
+    using supports_sg_loadstore = typename std::negation<
+        std::disjunction, is_complex>>;
+
+    resT operator()(const argT &in) const
+    {
+        if constexpr (is_complex::value) {
+            using realT = typename argT::value_type;
+
+            static constexpr realT q_nan =
+                std::numeric_limits::quiet_NaN();
+
+            realT const &in_re = std::real(in);
+            realT const &in_im = std::imag(in);
+
+            const bool in_re_finite = std::isfinite(in_re);
+            const bool in_im_finite = std::isfinite(in_im);
+            /*
+             * Handle the nearly-non-exceptional cases where
+             * real and imaginary parts of input are finite.
+             */
+            if (in_re_finite && in_im_finite) {
+                resT res =
+                    exprm_ns::sin(exprm_ns::complex(in)); // sin(in);
+                if (in_re == realT(0)) {
+                    res.real(sycl::copysign(realT(0), in_re));
+                }
+                return res;
+            }
+
+            /*
+             * since sin(in) = -I * sinh(I * in), for special cases,
+             * we calculate real and imaginary parts of z = sinh(I * in) and
+             * then return { imag(z) , -real(z) } which is sin(in).
+             */
+            const realT x = -in_im;
+            const realT y = in_re;
+            const bool xfinite = in_im_finite;
+            const bool yfinite = in_re_finite;
+            /*
+             * sinh(+-0 +- I Inf) = sign(d(+-0, dNaN))0 + I dNaN.
+             * The sign of 0 in the result is unspecified. Choice = normally
+             * the same as dNaN.
+             *
+             * sinh(+-0 +- I NaN) = sign(d(+-0, NaN))0 + I d(NaN).
+             * The sign of 0 in the result is unspecified. Choice = normally
+             * the same as d(NaN).
+             */
+            if (x == realT(0) && !yfinite) {
+                const realT sinh_im = q_nan;
+                const realT sinh_re = sycl::copysign(realT(0), x * sinh_im);
+                return resT{sinh_im, -sinh_re};
+            }
+
+            /*
+             * sinh(+-Inf +- I 0) = +-Inf + I +-0.
+             *
+             * sinh(NaN +- I 0) = d(NaN) + I +-0.
+             */
+            if (y == realT(0) && !xfinite) {
+                if (std::isnan(x)) {
+                    const realT sinh_re = x;
+                    const realT sinh_im = y;
+                    return resT{sinh_im, -sinh_re};
+                }
+                const realT sinh_re = x;
+                const realT sinh_im = sycl::copysign(realT(0), y);
+                return resT{sinh_im, -sinh_re};
+            }
+
+            /*
+             * sinh(x +- I Inf) = dNaN + I dNaN.
+             *
+             * sinh(x + I NaN) = d(NaN) + I d(NaN).
+             */
+            if (xfinite && !yfinite) {
+                const realT sinh_re = q_nan;
+                const realT sinh_im = x * sinh_re;
+                return resT{sinh_im, -sinh_re};
+            }
+
+            /*
+             * sinh(+-Inf + I NaN) = +-Inf + I d(NaN).
+             * The sign of Inf in the result is unspecified. Choice = normally
+             * the same as d(NaN).
+             *
+             * sinh(+-Inf +- I Inf) = +Inf + I dNaN.
+             * The sign of Inf in the result is unspecified.
+             * Choice = always - here for sinh to have positive result for
+             * imaginary part of sin.
+ * + * sinh(+-Inf + I y) = +-Inf cos(y) + I Inf sin(y) + */ + if (std::isinf(x)) { + if (!yfinite) { + const realT sinh_re = -x * x; + const realT sinh_im = x * (y - y); + return resT{sinh_im, -sinh_re}; + } + const realT sinh_re = x * sycl::cos(y); + const realT sinh_im = + std::numeric_limits::infinity() * sycl::sin(y); + return resT{sinh_im, -sinh_re}; + } + + /* + * sinh(NaN + I NaN) = d(NaN) + I d(NaN). + * + * sinh(NaN +- I Inf) = d(NaN) + I d(NaN). + * + * sinh(NaN + I y) = d(NaN) + I d(NaN). + */ + const realT y_m_y = (y - y); + const realT sinh_re = (x * x) * y_m_y; + const realT sinh_im = (x + x) * y_m_y; + return resT{sinh_im, -sinh_re}; + } + else { + static_assert(std::is_same_v); + if (in == 0) { + return in; + } + return sycl::sin(in); + } + } +}; + +template +using SinContigFunctor = + elementwise_common::UnaryContigFunctor, + vec_sz, + n_vecs, + enable_sg_loadstore>; + +template +using SinStridedFunctor = elementwise_common:: + UnaryStridedFunctor>; + +template +struct SinOutputType +{ + using value_type = typename std::disjunction< + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry>, + td_ns::TypeMapResultEntry>, + td_ns::DefaultResultEntry>::result_type; + + static constexpr bool is_defined = !std::is_same_v; +}; + +namespace hyperparam_detail +{ + +namespace vsu_ns = dpctl::tensor::kernels::vec_size_utils; + +using vsu_ns::ContigHyperparameterSetDefault; +using vsu_ns::UnaryContigHyperparameterSetEntry; + +template +struct SinContigHyperparameterSet +{ + using value_type = + typename std::disjunction>; + + constexpr static auto vec_sz = value_type::vec_sz; + constexpr static auto n_vecs = value_type::n_vecs; +}; + +} // end of namespace hyperparam_detail + +template +class sin_contig_kernel; + +template +sycl::event sin_contig_impl(sycl::queue &exec_q, + std::size_t nelems, + const char *arg_p, + char *res_p, + const std::vector &depends = {}) +{ + using SinHS = hyperparam_detail::SinContigHyperparameterSet; + static constexpr std::uint8_t vec_sz = SinHS::vec_sz; + static constexpr std::uint8_t n_vecs = SinHS::n_vecs; + + return elementwise_common::unary_contig_impl< + argTy, SinOutputType, SinContigFunctor, sin_contig_kernel, vec_sz, + n_vecs>(exec_q, nelems, arg_p, res_p, depends); +} + +template +struct SinContigFactory +{ + fnT get() + { + if constexpr (!SinOutputType::is_defined) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = sin_contig_impl; + return fn; + } + } +}; + +template +struct SinTypeMapFactory +{ + /*! 
@brief get typeid for output type of sycl::sin(T x) */ + std::enable_if_t::value, int> get() + { + using rT = typename SinOutputType::value_type; + return td_ns::GetTypeid{}.get(); + } +}; + +template +class sin_strided_kernel; + +template +sycl::event sin_strided_impl(sycl::queue &exec_q, + std::size_t nelems, + int nd, + const ssize_t *shape_and_strides, + const char *arg_p, + ssize_t arg_offset, + char *res_p, + ssize_t res_offset, + const std::vector &depends, + const std::vector &additional_depends) +{ + return elementwise_common::unary_strided_impl< + argTy, SinOutputType, SinStridedFunctor, sin_strided_kernel>( + exec_q, nelems, nd, shape_and_strides, arg_p, arg_offset, res_p, + res_offset, depends, additional_depends); +} + +template +struct SinStridedFactory +{ + fnT get() + { + if constexpr (!SinOutputType::is_defined) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = sin_strided_impl; + return fn; + } + } +}; + +} // namespace dpctl::tensor::kernels::sin diff --git a/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/sinh.hpp b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/sinh.hpp new file mode 100644 index 000000000000..f81a2730fd17 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/sinh.hpp @@ -0,0 +1,302 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines kernels for elementwise evaluation of SINH(x) function. 
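+///
+/// For complex input, special values (zeros, infinities, NaNs) are
+/// handled explicitly; the conventions used are spelled out in the
+/// comments inside SinhFunctor below.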
+//===---------------------------------------------------------------------===//
+
+#pragma once
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include "sycl_complex.hpp"
+#include "vec_size_util.hpp"
+
+#include "kernels/dpctl_tensor_types.hpp"
+#include "kernels/elementwise_functions/common.hpp"
+
+#include "utils/type_dispatch_building.hpp"
+#include "utils/type_utils.hpp"
+
+namespace dpctl::tensor::kernels::sinh
+{
+
+using dpctl::tensor::ssize_t;
+namespace td_ns = dpctl::tensor::type_dispatch;
+
+using dpctl::tensor::type_utils::is_complex;
+
+template
+struct SinhFunctor
+{
+
+    // is function constant for given argT
+    using is_constant = typename std::false_type;
+    // constant value, if constant
+    // constexpr resT constant_value = resT{};
+    // is function defined for sycl::vec
+    using supports_vec = typename std::false_type;
+    // do both argTy and resTy support subgroup store/load operation
+    using supports_sg_loadstore = typename std::negation<
+        std::disjunction, is_complex>>;
+
+    resT operator()(const argT &in) const
+    {
+        if constexpr (is_complex::value) {
+            using realT = typename argT::value_type;
+
+            const realT x = std::real(in);
+            const realT y = std::imag(in);
+
+            const bool xfinite = std::isfinite(x);
+            const bool yfinite = std::isfinite(y);
+
+            /*
+             * Handle the nearly-non-exceptional cases where
+             * real and imaginary parts of input are finite.
+             */
+            if (xfinite && yfinite) {
+                return exprm_ns::sinh(exprm_ns::complex(in));
+            }
+            /*
+             * sinh(+-0 +- I Inf) = sign(d(+-0, dNaN))0 + I dNaN.
+             * The sign of 0 in the result is unspecified. Choice = normally
+             * the same as dNaN.
+             *
+             * sinh(+-0 +- I NaN) = sign(d(+-0, NaN))0 + I d(NaN).
+             * The sign of 0 in the result is unspecified. Choice = normally
+             * the same as d(NaN).
+             */
+            if (x == realT(0) && !yfinite) {
+                const realT res_re = sycl::copysign(realT(0), x * (y - y));
+                return resT{res_re, y - y};
+            }
+
+            /*
+             * sinh(+-Inf +- I 0) = +-Inf + I +-0.
+             *
+             * sinh(NaN +- I 0) = d(NaN) + I +-0.
+             */
+            if (y == realT(0) && !xfinite) {
+                if (std::isnan(x)) {
+                    return resT{x, y};
+                }
+                const realT res_im = sycl::copysign(realT(0), y);
+                return resT{x, res_im};
+            }
+
+            /*
+             * sinh(x +- I Inf) = dNaN + I dNaN.
+             *
+             * sinh(x + I NaN) = d(NaN) + I d(NaN).
+             */
+            if (xfinite && !yfinite) {
+                return resT{y - y, x * (y - y)};
+            }
+
+            /*
+             * sinh(+-Inf + I NaN) = +-Inf + I d(NaN).
+             * The sign of Inf in the result is unspecified. Choice = normally
+             * the same as d(NaN).
+             *
+             * sinh(+-Inf +- I Inf) = +Inf + I dNaN.
+             * The sign of Inf in the result is unspecified. Choice = always +.
+             *
+             * sinh(+-Inf + I y) = +-Inf cos(y) + I Inf sin(y)
+             */
+            if (!xfinite && !std::isnan(x)) {
+                if (!yfinite) {
+                    return resT{x * x, x * (y - y)};
+                }
+                return resT{x * sycl::cos(y),
+                            std::numeric_limits::infinity() *
+                                sycl::sin(y)};
+            }
+
+            /*
+             * sinh(NaN + I NaN) = d(NaN) + I d(NaN).
+             *
+             * sinh(NaN +- I Inf) = d(NaN) + I d(NaN).
+             *
+             * sinh(NaN + I y) = d(NaN) + I d(NaN).
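+             *
+             * Illustrative check (editorial example): with x = NaN and
+             * finite y, y - y == 0 and NaN * 0 == NaN, so the return
+             * below evaluates to NaN + I*NaN, as required.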
+ */ + return resT{(x * x) * (y - y), (x + x) * (y - y)}; + } + else { + static_assert(std::is_floating_point_v || + std::is_same_v); + return sycl::sinh(in); + } + } +}; + +template +using SinhContigFunctor = + elementwise_common::UnaryContigFunctor, + vec_sz, + n_vecs, + enable_sg_loadstore>; + +template +using SinhStridedFunctor = elementwise_common:: + UnaryStridedFunctor>; + +template +struct SinhOutputType +{ + using value_type = typename std::disjunction< + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry>, + td_ns::TypeMapResultEntry>, + td_ns::DefaultResultEntry>::result_type; + + static constexpr bool is_defined = !std::is_same_v; +}; + +namespace hyperparam_detail +{ + +namespace vsu_ns = dpctl::tensor::kernels::vec_size_utils; + +using vsu_ns::ContigHyperparameterSetDefault; +using vsu_ns::UnaryContigHyperparameterSetEntry; + +template +struct SinhContigHyperparameterSet +{ + using value_type = + typename std::disjunction>; + + constexpr static auto vec_sz = value_type::vec_sz; + constexpr static auto n_vecs = value_type::n_vecs; +}; + +} // end of namespace hyperparam_detail + +template +class sinh_contig_kernel; + +template +sycl::event sinh_contig_impl(sycl::queue &exec_q, + std::size_t nelems, + const char *arg_p, + char *res_p, + const std::vector &depends = {}) +{ + using SinhHS = hyperparam_detail::SinhContigHyperparameterSet; + static constexpr std::uint8_t vec_sz = SinhHS::vec_sz; + static constexpr std::uint8_t n_vecs = SinhHS::n_vecs; + + return elementwise_common::unary_contig_impl< + argTy, SinhOutputType, SinhContigFunctor, sinh_contig_kernel, vec_sz, + n_vecs>(exec_q, nelems, arg_p, res_p, depends); +} + +template +struct SinhContigFactory +{ + fnT get() + { + if constexpr (!SinhOutputType::is_defined) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = sinh_contig_impl; + return fn; + } + } +}; + +template +struct SinhTypeMapFactory +{ + /*! 
@brief get typeid for output type of sycl::sinh(T x) */ + std::enable_if_t::value, int> get() + { + using rT = typename SinhOutputType::value_type; + return td_ns::GetTypeid{}.get(); + } +}; + +template +class sinh_strided_kernel; + +template +sycl::event + sinh_strided_impl(sycl::queue &exec_q, + std::size_t nelems, + int nd, + const ssize_t *shape_and_strides, + const char *arg_p, + ssize_t arg_offset, + char *res_p, + ssize_t res_offset, + const std::vector &depends, + const std::vector &additional_depends) +{ + return elementwise_common::unary_strided_impl< + argTy, SinhOutputType, SinhStridedFunctor, sinh_strided_kernel>( + exec_q, nelems, nd, shape_and_strides, arg_p, arg_offset, res_p, + res_offset, depends, additional_depends); +} + +template +struct SinhStridedFactory +{ + fnT get() + { + if constexpr (!SinhOutputType::is_defined) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = sinh_strided_impl; + return fn; + } + } +}; + +} // namespace dpctl::tensor::kernels::sinh diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/elementwise_common.cpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/elementwise_common.cpp index 0cb0c190eb6a..d3437991d6d0 100644 --- a/dpctl_ext/tensor/libtensor/source/elementwise_functions/elementwise_common.cpp +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/elementwise_common.cpp @@ -95,10 +95,10 @@ // #include "remainder.hpp" #include "round.hpp" // #include "rsqrt.hpp" -// #include "sign.hpp" -// #include "signbit.hpp" -// #include "sin.hpp" -// #include "sinh.hpp" +#include "sign.hpp" +#include "signbit.hpp" +#include "sin.hpp" +#include "sinh.hpp" // #include "sqrt.hpp" // #include "square.hpp" // #include "subtract.hpp" @@ -176,10 +176,10 @@ void init_elementwise_functions(py::module_ m) // init_remainder(m); init_round(m); // init_rsqrt(m); - // init_sign(m); - // init_signbit(m); - // init_sin(m); - // init_sinh(m); + init_sign(m); + init_signbit(m); + init_sin(m); + init_sinh(m); // init_sqrt(m); // init_square(m); // init_subtract(m); diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/sign.cpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/sign.cpp new file mode 100644 index 000000000000..deb5360bcdd1 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/sign.cpp @@ -0,0 +1,124 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. +//===---------------------------------------------------------------------===// + +#include + +#include + +#include "dpnp4pybind11.hpp" +#include +#include +#include + +#include "elementwise_functions.hpp" +#include "sign.hpp" +#include "utils/type_dispatch.hpp" + +#include "kernels/elementwise_functions/common.hpp" +#include "kernels/elementwise_functions/sign.hpp" + +namespace dpctl::tensor::py_internal +{ + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +namespace ew_cmn_ns = dpctl::tensor::kernels::elementwise_common; +using ew_cmn_ns::unary_contig_impl_fn_ptr_t; +using ew_cmn_ns::unary_strided_impl_fn_ptr_t; + +// U29: ==== SIGN (x) +namespace impl +{ + +namespace sign_fn_ns = dpctl::tensor::kernels::sign; + +static unary_contig_impl_fn_ptr_t sign_contig_dispatch_vector[td_ns::num_types]; +static int sign_output_typeid_vector[td_ns::num_types]; +static unary_strided_impl_fn_ptr_t + sign_strided_dispatch_vector[td_ns::num_types]; + +void populate_sign_dispatch_vectors(void) +{ + using namespace td_ns; + namespace fn_ns = sign_fn_ns; + + using fn_ns::SignContigFactory; + DispatchVectorBuilder + dvb1; + dvb1.populate_dispatch_vector(sign_contig_dispatch_vector); + + using fn_ns::SignStridedFactory; + DispatchVectorBuilder + dvb2; + dvb2.populate_dispatch_vector(sign_strided_dispatch_vector); + + using fn_ns::SignTypeMapFactory; + DispatchVectorBuilder dvb3; + dvb3.populate_dispatch_vector(sign_output_typeid_vector); +}; + +} // namespace impl + +void init_sign(py::module_ m) +{ + using arrayT = dpctl::tensor::usm_ndarray; + using event_vecT = std::vector; + { + impl::populate_sign_dispatch_vectors(); + using impl::sign_contig_dispatch_vector; + using impl::sign_output_typeid_vector; + using impl::sign_strided_dispatch_vector; + + auto sign_pyapi = [&](const arrayT &src, const arrayT &dst, + sycl::queue &exec_q, + const event_vecT &depends = {}) { + return py_unary_ufunc( + src, dst, exec_q, depends, sign_output_typeid_vector, + sign_contig_dispatch_vector, sign_strided_dispatch_vector); + }; + m.def("_sign", sign_pyapi, "", py::arg("src"), py::arg("dst"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); + + auto sign_result_type_pyapi = [&](const py::dtype &dtype) { + return py_unary_ufunc_result_type(dtype, sign_output_typeid_vector); + }; + m.def("_sign_result_type", sign_result_type_pyapi); + } +} + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/sign.hpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/sign.hpp new file mode 100644 index 000000000000..19686ada3dbf --- /dev/null +++ 
b/dpctl_ext/tensor/libtensor/source/elementwise_functions/sign.hpp @@ -0,0 +1,46 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. +//===---------------------------------------------------------------------===// + +#pragma once +#include + +namespace py = pybind11; + +namespace dpctl::tensor::py_internal +{ + +extern void init_sign(py::module_ m); + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/signbit.cpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/signbit.cpp new file mode 100644 index 000000000000..3ed9eba46ea1 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/signbit.cpp @@ -0,0 +1,127 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. +//===---------------------------------------------------------------------===// + +#include + +#include + +#include "dpnp4pybind11.hpp" +#include +#include +#include + +#include "elementwise_functions.hpp" +#include "signbit.hpp" +#include "utils/type_dispatch.hpp" + +#include "kernels/elementwise_functions/common.hpp" +#include "kernels/elementwise_functions/signbit.hpp" + +namespace dpctl::tensor::py_internal +{ + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +namespace ew_cmn_ns = dpctl::tensor::kernels::elementwise_common; +using ew_cmn_ns::unary_contig_impl_fn_ptr_t; +using ew_cmn_ns::unary_strided_impl_fn_ptr_t; + +// U41: ==== SIGNBIT (x) +namespace impl +{ + +namespace signbit_fn_ns = dpctl::tensor::kernels::signbit; + +static unary_contig_impl_fn_ptr_t + signbit_contig_dispatch_vector[td_ns::num_types]; +static int signbit_output_typeid_vector[td_ns::num_types]; +static unary_strided_impl_fn_ptr_t + signbit_strided_dispatch_vector[td_ns::num_types]; + +void populate_signbit_dispatch_vectors(void) +{ + using namespace td_ns; + namespace fn_ns = signbit_fn_ns; + + using fn_ns::SignbitContigFactory; + DispatchVectorBuilder + dvb1; + dvb1.populate_dispatch_vector(signbit_contig_dispatch_vector); + + using fn_ns::SignbitStridedFactory; + DispatchVectorBuilder + dvb2; + dvb2.populate_dispatch_vector(signbit_strided_dispatch_vector); + + using fn_ns::SignbitTypeMapFactory; + DispatchVectorBuilder dvb3; + dvb3.populate_dispatch_vector(signbit_output_typeid_vector); +}; + +} // namespace impl + +void init_signbit(py::module_ m) +{ + using arrayT = dpctl::tensor::usm_ndarray; + using event_vecT = std::vector; + { + impl::populate_signbit_dispatch_vectors(); + using impl::signbit_contig_dispatch_vector; + using impl::signbit_output_typeid_vector; + using impl::signbit_strided_dispatch_vector; + + auto signbit_pyapi = [&](const arrayT &src, const arrayT &dst, + sycl::queue &exec_q, + const event_vecT &depends = {}) { + return py_unary_ufunc(src, dst, exec_q, depends, + signbit_output_typeid_vector, + signbit_contig_dispatch_vector, + signbit_strided_dispatch_vector); + }; + m.def("_signbit", signbit_pyapi, "", py::arg("src"), py::arg("dst"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); + + auto signbit_result_type_pyapi = [&](const py::dtype &dtype) { + return py_unary_ufunc_result_type(dtype, + signbit_output_typeid_vector); + }; + 
m.def("_signbit_result_type", signbit_result_type_pyapi); + } +} + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/signbit.hpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/signbit.hpp new file mode 100644 index 000000000000..292386b174fc --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/signbit.hpp @@ -0,0 +1,46 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. +//===---------------------------------------------------------------------===// + +#pragma once +#include + +namespace py = pybind11; + +namespace dpctl::tensor::py_internal +{ + +extern void init_signbit(py::module_ m); + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/sin.cpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/sin.cpp new file mode 100644 index 000000000000..7f8d0e79a42c --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/sin.cpp @@ -0,0 +1,124 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. +//===---------------------------------------------------------------------===// + +#include + +#include + +#include "dpnp4pybind11.hpp" +#include +#include +#include + +#include "elementwise_functions.hpp" +#include "sin.hpp" +#include "utils/type_dispatch.hpp" + +#include "kernels/elementwise_functions/common.hpp" +#include "kernels/elementwise_functions/sin.hpp" + +namespace dpctl::tensor::py_internal +{ + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +namespace ew_cmn_ns = dpctl::tensor::kernels::elementwise_common; +using ew_cmn_ns::unary_contig_impl_fn_ptr_t; +using ew_cmn_ns::unary_strided_impl_fn_ptr_t; + +// U30: ==== SIN (x) +namespace impl +{ + +namespace sin_fn_ns = dpctl::tensor::kernels::sin; + +static unary_contig_impl_fn_ptr_t sin_contig_dispatch_vector[td_ns::num_types]; +static int sin_output_typeid_vector[td_ns::num_types]; +static unary_strided_impl_fn_ptr_t + sin_strided_dispatch_vector[td_ns::num_types]; + +void populate_sin_dispatch_vectors(void) +{ + using namespace td_ns; + namespace fn_ns = sin_fn_ns; + + using fn_ns::SinContigFactory; + DispatchVectorBuilder + dvb1; + dvb1.populate_dispatch_vector(sin_contig_dispatch_vector); + + using fn_ns::SinStridedFactory; + DispatchVectorBuilder + dvb2; + dvb2.populate_dispatch_vector(sin_strided_dispatch_vector); + + using fn_ns::SinTypeMapFactory; + DispatchVectorBuilder dvb3; + dvb3.populate_dispatch_vector(sin_output_typeid_vector); +}; + +} // namespace impl + +void init_sin(py::module_ m) +{ + using arrayT = dpctl::tensor::usm_ndarray; + using event_vecT = std::vector; + { + impl::populate_sin_dispatch_vectors(); + using impl::sin_contig_dispatch_vector; + using impl::sin_output_typeid_vector; + using impl::sin_strided_dispatch_vector; + + auto sin_pyapi = [&](const arrayT &src, const arrayT &dst, + sycl::queue &exec_q, + const event_vecT &depends = {}) { + return py_unary_ufunc( + src, dst, exec_q, depends, sin_output_typeid_vector, + sin_contig_dispatch_vector, 
sin_strided_dispatch_vector); + }; + m.def("_sin", sin_pyapi, "", py::arg("src"), py::arg("dst"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); + + auto sin_result_type_pyapi = [&](const py::dtype &dtype) { + return py_unary_ufunc_result_type(dtype, sin_output_typeid_vector); + }; + m.def("_sin_result_type", sin_result_type_pyapi); + } +} + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/sin.hpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/sin.hpp new file mode 100644 index 000000000000..a4b3da08b7fc --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/sin.hpp @@ -0,0 +1,46 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. +//===---------------------------------------------------------------------===// + +#pragma once +#include + +namespace py = pybind11; + +namespace dpctl::tensor::py_internal +{ + +extern void init_sin(py::module_ m); + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/sinh.cpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/sinh.cpp new file mode 100644 index 000000000000..d63335fa5408 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/sinh.cpp @@ -0,0 +1,124 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. 
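+/// Here it registers the `_sinh` kernel entry point and the
+/// `_sinh_result_type` dtype-mapping helper.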
+//===---------------------------------------------------------------------===// + +#include + +#include + +#include "dpnp4pybind11.hpp" +#include +#include +#include + +#include "elementwise_functions.hpp" +#include "sinh.hpp" +#include "utils/type_dispatch.hpp" + +#include "kernels/elementwise_functions/common.hpp" +#include "kernels/elementwise_functions/sinh.hpp" + +namespace dpctl::tensor::py_internal +{ + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +namespace ew_cmn_ns = dpctl::tensor::kernels::elementwise_common; +using ew_cmn_ns::unary_contig_impl_fn_ptr_t; +using ew_cmn_ns::unary_strided_impl_fn_ptr_t; + +// U31: ==== SINH (x) +namespace impl +{ + +namespace sinh_fn_ns = dpctl::tensor::kernels::sinh; + +static unary_contig_impl_fn_ptr_t sinh_contig_dispatch_vector[td_ns::num_types]; +static int sinh_output_typeid_vector[td_ns::num_types]; +static unary_strided_impl_fn_ptr_t + sinh_strided_dispatch_vector[td_ns::num_types]; + +void populate_sinh_dispatch_vectors(void) +{ + using namespace td_ns; + namespace fn_ns = sinh_fn_ns; + + using fn_ns::SinhContigFactory; + DispatchVectorBuilder + dvb1; + dvb1.populate_dispatch_vector(sinh_contig_dispatch_vector); + + using fn_ns::SinhStridedFactory; + DispatchVectorBuilder + dvb2; + dvb2.populate_dispatch_vector(sinh_strided_dispatch_vector); + + using fn_ns::SinhTypeMapFactory; + DispatchVectorBuilder dvb3; + dvb3.populate_dispatch_vector(sinh_output_typeid_vector); +}; + +} // namespace impl + +void init_sinh(py::module_ m) +{ + using arrayT = dpctl::tensor::usm_ndarray; + using event_vecT = std::vector; + { + impl::populate_sinh_dispatch_vectors(); + using impl::sinh_contig_dispatch_vector; + using impl::sinh_output_typeid_vector; + using impl::sinh_strided_dispatch_vector; + + auto sinh_pyapi = [&](const arrayT &src, const arrayT &dst, + sycl::queue &exec_q, + const event_vecT &depends = {}) { + return py_unary_ufunc( + src, dst, exec_q, depends, sinh_output_typeid_vector, + sinh_contig_dispatch_vector, sinh_strided_dispatch_vector); + }; + m.def("_sinh", sinh_pyapi, "", py::arg("src"), py::arg("dst"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); + + auto sinh_result_type_pyapi = [&](const py::dtype &dtype) { + return py_unary_ufunc_result_type(dtype, sinh_output_typeid_vector); + }; + m.def("_sinh_result_type", sinh_result_type_pyapi); + } +} + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/sinh.hpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/sinh.hpp new file mode 100644 index 000000000000..4a0d90d24c8c --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/sinh.hpp @@ -0,0 +1,46 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. +//===---------------------------------------------------------------------===// + +#pragma once +#include + +namespace py = pybind11; + +namespace dpctl::tensor::py_internal +{ + +extern void init_sinh(py::module_ m); + +} // namespace dpctl::tensor::py_internal diff --git a/dpnp/dpnp_iface_mathematical.py b/dpnp/dpnp_iface_mathematical.py index 5e3bb23d2261..218a05598423 100644 --- a/dpnp/dpnp_iface_mathematical.py +++ b/dpnp/dpnp_iface_mathematical.py @@ -4668,8 +4668,8 @@ def real_if_close(a, tol=100): sign = DPNPUnaryFunc( "sign", - ti._sign_result_type, - ti._sign, + ti_ext._sign_result_type, + ti_ext._sign, _SIGN_DOCSTRING, acceptance_fn=acceptance_fn_sign, ) @@ -4730,8 +4730,8 @@ def real_if_close(a, tol=100): signbit = DPNPUnaryFunc( "signbit", - ti._signbit_result_type, - ti._signbit, + ti_ext._signbit_result_type, + ti_ext._signbit, _SIGNBIT_DOCSTRING, ) diff --git a/dpnp/dpnp_iface_trigonometric.py b/dpnp/dpnp_iface_trigonometric.py index d459a3392311..f063df5c598d 100644 --- a/dpnp/dpnp_iface_trigonometric.py +++ b/dpnp/dpnp_iface_trigonometric.py @@ -2309,8 +2309,8 @@ def reduce_hypot(x, /, *, axis=None, dtype=None, keepdims=False, out=None): sin = DPNPUnaryFunc( "sin", - ti._sin_result_type, - ti._sin, + ti_ext._sin_result_type, + ti_ext._sin, _SIN_DOCSTRING, mkl_fn_to_call="_mkl_sin_to_call", mkl_impl_fn="_sin", @@ -2372,8 +2372,8 @@ def reduce_hypot(x, /, *, axis=None, dtype=None, keepdims=False, out=None): sinh = DPNPUnaryFunc( "sinh", - ti._sinh_result_type, - ti._sinh, + ti_ext._sinh_result_type, + ti_ext._sinh, _SINH_DOCSTRING, mkl_fn_to_call="_mkl_sinh_to_call", mkl_impl_fn="_sinh", From a1707b265e74dcd3301eb9f44271d196e5d860d3 Mon Sep 17 00:00:00 2001 From: Vladislav Perevezentsev Date: Wed, 4 Mar 2026 08:42:32 -0800 Subject: [PATCH 06/11] Move ti.square()/sqrt()/tan()/tanh() and reuse them --- dpctl_ext/tensor/CMakeLists.txt | 8 +- dpctl_ext/tensor/__init__.py | 8 + dpctl_ext/tensor/_elementwise_funcs.py | 111 +++++++ .../kernels/elementwise_functions/sqrt.hpp | 224 ++++++++++++++ .../kernels/elementwise_functions/square.hpp | 251 ++++++++++++++++ .../kernels/elementwise_functions/tan.hpp | 276 ++++++++++++++++++ .../kernels/elementwise_functions/tanh.hpp | 270 
+++++++++++++++++ .../elementwise_common.cpp | 16 +- .../source/elementwise_functions/sqrt.cpp | 124 ++++++++ .../source/elementwise_functions/sqrt.hpp | 46 +++ .../source/elementwise_functions/square.cpp | 126 ++++++++ .../source/elementwise_functions/square.hpp | 46 +++ .../source/elementwise_functions/tan.cpp | 124 ++++++++ .../source/elementwise_functions/tan.hpp | 46 +++ .../source/elementwise_functions/tanh.cpp | 124 ++++++++ .../source/elementwise_functions/tanh.hpp | 46 +++ dpnp/dpnp_iface_trigonometric.py | 16 +- 17 files changed, 1842 insertions(+), 20 deletions(-) create mode 100644 dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/sqrt.hpp create mode 100644 dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/square.hpp create mode 100644 dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/tan.hpp create mode 100644 dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/tanh.hpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/sqrt.cpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/sqrt.hpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/square.cpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/square.hpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/tan.cpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/tan.hpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/tanh.cpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/tanh.hpp diff --git a/dpctl_ext/tensor/CMakeLists.txt b/dpctl_ext/tensor/CMakeLists.txt index 85f52f8665ba..7de341e6654c 100644 --- a/dpctl_ext/tensor/CMakeLists.txt +++ b/dpctl_ext/tensor/CMakeLists.txt @@ -136,11 +136,11 @@ set(_elementwise_sources ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/signbit.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/sin.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/sinh.cpp - #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/sqrt.cpp - #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/square.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/sqrt.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/square.cpp #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/subtract.cpp - #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/tan.cpp - #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/tanh.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/tan.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/tanh.cpp #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/true_divide.cpp #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/trunc.cpp ) diff --git a/dpctl_ext/tensor/__init__.py b/dpctl_ext/tensor/__init__.py index 9d391fdd1bbc..9ea61267e344 100644 --- a/dpctl_ext/tensor/__init__.py +++ b/dpctl_ext/tensor/__init__.py @@ -118,6 +118,10 @@ signbit, sin, sinh, + sqrt, + square, + tan, + tanh, ) from ._reduction import ( argmax, @@ -230,12 +234,16 @@ "sin", "sinh", "sort", + "sqrt", + "square", "squeeze", "stack", "sum", "swapaxes", "take", "take_along_axis", + "tan", + "tanh", "tile", "top_k", "to_numpy", diff --git a/dpctl_ext/tensor/_elementwise_funcs.py b/dpctl_ext/tensor/_elementwise_funcs.py index 
dcdd1e53326a..9d23044db714 100644
--- a/dpctl_ext/tensor/_elementwise_funcs.py
+++ b/dpctl_ext/tensor/_elementwise_funcs.py
@@ -931,6 +931,117 @@
 )
 del _sinh_docstring
 
+# U32: ==== SQUARE (x)
+_square_docstring_ = r"""
+square(x, /, \*, out=None, order='K')
+
+Squares each element `x_i` of input array `x`.
+
+Args:
+    x (usm_ndarray):
+        Input array. May have any data type.
+    out (Union[usm_ndarray, None], optional):
+        Output array to populate.
+        Array must have the correct shape and the expected data type.
+    order ("C","F","A","K", optional):
+        Memory layout of the new output array, if parameter
+        `out` is ``None``.
+        Default: "K".
+
+Returns:
+    usm_ndarray:
+        An array containing the element-wise squares of `x`. The data type of
+        the returned array is determined by the Type Promotion Rules.
+"""
+
+square = UnaryElementwiseFunc(
+    "square", ti._square_result_type, ti._square, _square_docstring_
+)
+del _square_docstring_
+
+# U33: ==== SQRT (x)
+_sqrt_docstring_ = r"""
+sqrt(x, /, \*, out=None, order='K')
+
+Computes the positive square-root for each element `x_i` of input array `x`.
+
+Args:
+    x (usm_ndarray):
+        Input array, expected to have a floating-point data type.
+    out (Union[usm_ndarray, None], optional):
+        Output array to populate.
+        Array must have the correct shape and the expected data type.
+    order ("C","F","A","K", optional):
+        Memory layout of the new output array, if parameter
+        `out` is ``None``.
+        Default: "K".
+
+Returns:
+    usm_ndarray:
+        An array containing the element-wise positive square-roots of `x`. The
+        data type of the returned array is determined by the Type Promotion
+        Rules.
+"""
+
+sqrt = UnaryElementwiseFunc(
+    "sqrt", ti._sqrt_result_type, ti._sqrt, _sqrt_docstring_
+)
+del _sqrt_docstring_
+
+# U34: ==== TAN (x)
+_tan_docstring = r"""
+tan(x, /, \*, out=None, order='K')
+
+Computes tangent for each element `x_i` of input array `x`.
+
+Args:
+    x (usm_ndarray):
+        Input array, expected to have a floating-point data type.
+    out (Union[usm_ndarray, None], optional):
+        Output array to populate.
+        Array must have the correct shape and the expected data type.
+    order ("C","F","A","K", optional):
+        Memory layout of the new output array, if parameter
+        `out` is ``None``.
+        Default: "K".
+
+Returns:
+    usm_ndarray:
+        An array containing the element-wise tangent. The data type
+        of the returned array is determined by the Type Promotion Rules.
+"""
+
+tan = UnaryElementwiseFunc("tan", ti._tan_result_type, ti._tan, _tan_docstring)
+del _tan_docstring
+
+# U35: ==== TANH (x)
+_tanh_docstring = r"""
+tanh(x, /, \*, out=None, order='K')
+
+Computes hyperbolic tangent for each element `x_i` of input array `x`.
+
+Args:
+    x (usm_ndarray):
+        Input array, expected to have a floating-point data type.
+    out (Union[usm_ndarray, None], optional):
+        Output array to populate.
+        Array must have the correct shape and the expected data type.
+    order ("C","F","A","K", optional):
+        Memory layout of the new output array, if parameter
+        `out` is ``None``.
+        Default: "K".
+
+Returns:
+    usm_ndarray:
+        An array containing the element-wise hyperbolic tangent. The data type
+        of the returned array is determined by the Type Promotion Rules.
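+
+Example:
+    A minimal usage sketch (illustrative; assumes ``dpctl_ext`` is
+    importable from this build and a SYCL device is available):
+
+    >>> import dpctl.tensor as dpt
+    >>> from dpctl_ext import tensor as dpt_ext
+    >>> x = dpt.asarray([0.0, 1.0], dtype="f4")
+    >>> y = dpt_ext.tanh(x)  # elementwise hyperbolic tangent of x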
+""" + +tanh = UnaryElementwiseFunc( + "tanh", ti._tanh_result_type, ti._tanh, _tanh_docstring +) +del _tanh_docstring + # U40: ==== PROJ (x) _proj_docstring = r""" proj(x, /, \*, out=None, order='K') diff --git a/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/sqrt.hpp b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/sqrt.hpp new file mode 100644 index 000000000000..db092ca29595 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/sqrt.hpp @@ -0,0 +1,224 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines kernels for elementwise evaluation of SQRT(x) +/// function that compute a square root. 
+//===---------------------------------------------------------------------===//
+
+#pragma once
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include "sycl_complex.hpp"
+#include "vec_size_util.hpp"
+
+#include "kernels/dpctl_tensor_types.hpp"
+#include "kernels/elementwise_functions/common.hpp"
+
+#include "utils/type_dispatch_building.hpp"
+#include "utils/type_utils.hpp"
+
+namespace dpctl::tensor::kernels::sqrt
+{
+
+using dpctl::tensor::ssize_t;
+namespace td_ns = dpctl::tensor::type_dispatch;
+
+using dpctl::tensor::type_utils::is_complex;
+
+template
+struct SqrtFunctor
+{
+
+    // is function constant for given argT
+    using is_constant = typename std::false_type;
+    // constant value, if constant
+    // constexpr resT constant_value = resT{};
+    // is function defined for sycl::vec
+    using supports_vec = typename std::false_type;
+    // do both argTy and resTy support subgroup store/load operation
+    using supports_sg_loadstore = typename std::negation<
+        std::disjunction, is_complex>>;
+
+    resT operator()(const argT &in) const
+    {
+        if constexpr (is_complex::value) {
+            using realT = typename argT::value_type;
+            return exprm_ns::sqrt(exprm_ns::complex(in));
+        }
+        else {
+            return sycl::sqrt(in);
+        }
+    }
+};
+
+template
+using SqrtContigFunctor =
+    elementwise_common::UnaryContigFunctor,
+                                           vec_sz,
+                                           n_vecs,
+                                           enable_sg_loadstore>;
+
+template
+using SqrtStridedFunctor = elementwise_common::
+    UnaryStridedFunctor>;
+
+template
+struct SqrtOutputType
+{
+    using value_type = typename std::disjunction<
+        td_ns::TypeMapResultEntry,
+        td_ns::TypeMapResultEntry,
+        td_ns::TypeMapResultEntry,
+        td_ns::TypeMapResultEntry, std::complex>,
+        td_ns::
+            TypeMapResultEntry, std::complex>,
+        td_ns::DefaultResultEntry>::result_type;
+
+    static constexpr bool is_defined = !std::is_same_v;
+};
+
+namespace hyperparam_detail
+{
+
+namespace vsu_ns = dpctl::tensor::kernels::vec_size_utils;
+
+using vsu_ns::ContigHyperparameterSetDefault;
+using vsu_ns::UnaryContigHyperparameterSetEntry;
+
+template
+struct SqrtContigHyperparameterSet
+{
+    using value_type =
+        typename std::disjunction>;
+
+    constexpr static auto vec_sz = value_type::vec_sz;
+    constexpr static auto n_vecs = value_type::n_vecs;
+};
+
+} // end of namespace hyperparam_detail
+
+template
+class sqrt_contig_kernel;
+
+template
+sycl::event sqrt_contig_impl(sycl::queue &exec_q,
+                             std::size_t nelems,
+                             const char *arg_p,
+                             char *res_p,
+                             const std::vector &depends = {})
+{
+    using SqrtHS = hyperparam_detail::SqrtContigHyperparameterSet;
+    static constexpr std::uint8_t vec_sz = SqrtHS::vec_sz;
+    static constexpr std::uint8_t n_vecs = SqrtHS::n_vecs;
+
+    return elementwise_common::unary_contig_impl<
+        argTy, SqrtOutputType, SqrtContigFunctor, sqrt_contig_kernel, vec_sz,
+        n_vecs>(exec_q, nelems, arg_p, res_p, depends);
+}
+
+template
+struct SqrtContigFactory
+{
+    fnT get()
+    {
+        if constexpr (!SqrtOutputType::is_defined) {
+            fnT fn = nullptr;
+            return fn;
+        }
+        else {
+            fnT fn = sqrt_contig_impl;
+            return fn;
+        }
+    }
+};
+
+template
+struct SqrtTypeMapFactory
+{
+    /*!
@brief get typeid for output type of std::sqrt(T x) */ + std::enable_if_t::value, int> get() + { + using rT = typename SqrtOutputType::value_type; + return td_ns::GetTypeid{}.get(); + } +}; + +template +class sqrt_strided_kernel; + +template +sycl::event + sqrt_strided_impl(sycl::queue &exec_q, + std::size_t nelems, + int nd, + const ssize_t *shape_and_strides, + const char *arg_p, + ssize_t arg_offset, + char *res_p, + ssize_t res_offset, + const std::vector &depends, + const std::vector &additional_depends) +{ + return elementwise_common::unary_strided_impl< + argTy, SqrtOutputType, SqrtStridedFunctor, sqrt_strided_kernel>( + exec_q, nelems, nd, shape_and_strides, arg_p, arg_offset, res_p, + res_offset, depends, additional_depends); +} + +template +struct SqrtStridedFactory +{ + fnT get() + { + if constexpr (!SqrtOutputType::is_defined) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = sqrt_strided_impl; + return fn; + } + } +}; + +} // namespace dpctl::tensor::kernels::sqrt diff --git a/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/square.hpp b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/square.hpp new file mode 100644 index 000000000000..de3007acfbea --- /dev/null +++ b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/square.hpp @@ -0,0 +1,251 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. 
+//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines kernels for elementwise evaluation of SQUARE(x) +/// +//===---------------------------------------------------------------------===// + +#pragma once +#include +#include +#include +#include +#include + +#include + +#include "sycl_complex.hpp" +#include "vec_size_util.hpp" + +#include "kernels/dpctl_tensor_types.hpp" +#include "kernels/elementwise_functions/common.hpp" + +#include "utils/type_dispatch_building.hpp" +#include "utils/type_utils.hpp" + +namespace dpctl::tensor::kernels::square +{ + +using dpctl::tensor::ssize_t; +namespace td_ns = dpctl::tensor::type_dispatch; + +using dpctl::tensor::type_utils::is_complex; +using dpctl::tensor::type_utils::vec_cast; + +template +struct SquareFunctor +{ + + // is function constant for given argT + using is_constant = typename std::false_type; + // constant value, if constant + // constexpr resT constant_value = resT{}; + // is function defined for sycl::vec + using supports_vec = typename std::negation< + std::disjunction, is_complex>>; + // do both argTy and resTy support sugroup store/load operation + using supports_sg_loadstore = typename std::negation< + std::disjunction, is_complex>>; + + resT operator()(const argT &in) const + { + if constexpr (is_complex::value) { + using realT = typename argT::value_type; + + auto z = exprm_ns::complex(in); + + return z * z; + } + else { + return in * in; + } + } + + template + sycl::vec operator()(const sycl::vec &in) const + { + auto const &res_vec = in * in; + using deducedT = typename std::remove_cv_t< + std::remove_reference_t>::element_type; + if constexpr (std::is_same_v) { + return res_vec; + } + else { + return vec_cast(res_vec); + } + } +}; + +template +using SquareContigFunctor = + elementwise_common::UnaryContigFunctor, + vec_sz, + n_vecs, + enable_sg_loadstore>; + +template +using SquareStridedFunctor = elementwise_common:: + UnaryStridedFunctor>; + +template +struct SquareOutputType +{ + using value_type = typename std::disjunction< + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry>, + td_ns::TypeMapResultEntry>, + td_ns::DefaultResultEntry>::result_type; + + static constexpr bool is_defined = !std::is_same_v; +}; + +namespace hyperparam_detail +{ + +namespace vsu_ns = dpctl::tensor::kernels::vec_size_utils; + +using vsu_ns::ContigHyperparameterSetDefault; +using vsu_ns::UnaryContigHyperparameterSetEntry; + +template +struct SquareContigHyperparameterSet +{ + using value_type = + typename std::disjunction>; + + constexpr static auto vec_sz = value_type::vec_sz; + constexpr static auto n_vecs = value_type::n_vecs; +}; + +} // end of namespace hyperparam_detail + +template +class square_contig_kernel; + +template +sycl::event square_contig_impl(sycl::queue &exec_q, + std::size_t nelems, + const char *arg_p, + char *res_p, + const std::vector &depends = {}) +{ + using SquareHS = hyperparam_detail::SquareContigHyperparameterSet; + static constexpr std::uint8_t vec_sz = SquareHS::vec_sz; + static constexpr std::uint8_t n_vecs = SquareHS::n_vecs; + + return 
elementwise_common::unary_contig_impl< + argTy, SquareOutputType, SquareContigFunctor, square_contig_kernel, + vec_sz, n_vecs>(exec_q, nelems, arg_p, res_p, depends); +} + +template +struct SquareContigFactory +{ + fnT get() + { + if constexpr (!SquareOutputType::is_defined) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = square_contig_impl; + return fn; + } + } +}; + +template +struct SquareTypeMapFactory +{ + /*! @brief get typeid for output type of x * x */ + std::enable_if_t::value, int> get() + { + using rT = typename SquareOutputType::value_type; + return td_ns::GetTypeid{}.get(); + } +}; + +template +class square_strided_kernel; + +template +sycl::event + square_strided_impl(sycl::queue &exec_q, + std::size_t nelems, + int nd, + const ssize_t *shape_and_strides, + const char *arg_p, + ssize_t arg_offset, + char *res_p, + ssize_t res_offset, + const std::vector &depends, + const std::vector &additional_depends) +{ + return elementwise_common::unary_strided_impl< + argTy, SquareOutputType, SquareStridedFunctor, square_strided_kernel>( + exec_q, nelems, nd, shape_and_strides, arg_p, arg_offset, res_p, + res_offset, depends, additional_depends); +} + +template +struct SquareStridedFactory +{ + fnT get() + { + if constexpr (!SquareOutputType::is_defined) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = square_strided_impl; + return fn; + } + } +}; + +} // namespace dpctl::tensor::kernels::square diff --git a/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/tan.hpp b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/tan.hpp new file mode 100644 index 000000000000..2db2a6b5fbf8 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/tan.hpp @@ -0,0 +1,276 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. 
+//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines kernels for elementwise evaluation of TAN(x) function. +//===---------------------------------------------------------------------===// + +#pragma once +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "sycl_complex.hpp" +#include "vec_size_util.hpp" + +#include "kernels/dpctl_tensor_types.hpp" +#include "kernels/elementwise_functions/common.hpp" + +#include "utils/type_dispatch_building.hpp" +#include "utils/type_utils.hpp" + +namespace dpctl::tensor::kernels::tan +{ + +using dpctl::tensor::ssize_t; +namespace td_ns = dpctl::tensor::type_dispatch; + +using dpctl::tensor::type_utils::is_complex; + +template +struct TanFunctor +{ + + // is function constant for given argT + using is_constant = typename std::false_type; + // constant value, if constant + // constexpr resT constant_value = resT{}; + // is function defined for sycl::vec + using supports_vec = typename std::false_type; + // do both argTy and resTy support sugroup store/load operation + using supports_sg_loadstore = typename std::negation< + std::disjunction, is_complex>>; + + resT operator()(const argT &in) const + { + if constexpr (is_complex::value) { + + using realT = typename argT::value_type; + + static constexpr realT q_nan = + std::numeric_limits::quiet_NaN(); + /* + * since tan(in) = -I * tanh(I * in), for special cases, + * we calculate real and imaginary parts of z = tanh(I * in) and + * return { imag(z) , -real(z) } which is tan(in). + */ + const realT x = -std::imag(in); + const realT y = std::real(in); + /* + * tanh(NaN + i 0) = NaN + i 0 + * + * tanh(NaN + i y) = NaN + i NaN for y != 0 + * + * The imaginary part has the sign of x*sin(2*y), but there's no + * special effort to get this right. + * + * tanh(+-Inf +- i Inf) = +-1 +- 0 + * + * tanh(+-Inf + i y) = +-1 + 0 sin(2y) for y finite + * + * The imaginary part of the sign is unspecified. This special + * case is only needed to avoid a spurious invalid exception when + * y is infinite. + */ + if (!std::isfinite(x)) { + if (std::isnan(x)) { + const realT tanh_re = x; + const realT tanh_im = (y == realT(0) ? y : x * y); + return resT{tanh_im, -tanh_re}; + } + const realT tanh_re = sycl::copysign(realT(1), x); + const realT tanh_im = sycl::copysign( + realT(0), std::isinf(y) ? 
y : sycl::sin(y) * sycl::cos(y)); + return resT{tanh_im, -tanh_re}; + } + /* + * tanh(x + i NAN) = NaN + i NaN for non-zero x + * tanh(x +- i Inf) = NaN + i NaN for non-zero x + * tanh(0 + i NAN) = 0 + i NaN + * tanh(0 +- i Inf) = 0 + i NaN + */ + if (!std::isfinite(y)) { + if (x == realT(0)) { + return resT{q_nan, x}; + } + return resT{q_nan, q_nan}; + } + /* ordinary cases */ + return exprm_ns::tan(exprm_ns::complex(in)); // tan(in); + } + else { + static_assert(std::is_floating_point_v || + std::is_same_v); + return sycl::tan(in); + } + } +}; + +template +using TanContigFunctor = + elementwise_common::UnaryContigFunctor, + vec_sz, + n_vecs, + enable_sg_loadstore>; + +template +using TanStridedFunctor = elementwise_common:: + UnaryStridedFunctor>; + +template +struct TanOutputType +{ + using value_type = typename std::disjunction< + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry>, + td_ns::TypeMapResultEntry>, + td_ns::DefaultResultEntry>::result_type; + + static constexpr bool is_defined = !std::is_same_v; +}; + +namespace hyperparam_detail +{ + +namespace vsu_ns = dpctl::tensor::kernels::vec_size_utils; + +using vsu_ns::ContigHyperparameterSetDefault; +using vsu_ns::UnaryContigHyperparameterSetEntry; + +template +struct TanContigHyperparameterSet +{ + using value_type = + typename std::disjunction>; + + constexpr static auto vec_sz = value_type::vec_sz; + constexpr static auto n_vecs = value_type::n_vecs; +}; + +} // end of namespace hyperparam_detail + +template +class tan_contig_kernel; + +template +sycl::event tan_contig_impl(sycl::queue &exec_q, + std::size_t nelems, + const char *arg_p, + char *res_p, + const std::vector &depends = {}) +{ + using TanHS = hyperparam_detail::TanContigHyperparameterSet; + static constexpr std::uint8_t vec_sz = TanHS::vec_sz; + static constexpr std::uint8_t n_vecs = TanHS::n_vecs; + + return elementwise_common::unary_contig_impl< + argTy, TanOutputType, TanContigFunctor, tan_contig_kernel, vec_sz, + n_vecs>(exec_q, nelems, arg_p, res_p, depends); +} + +template +struct TanContigFactory +{ + fnT get() + { + if constexpr (!TanOutputType::is_defined) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = tan_contig_impl; + return fn; + } + } +}; + +template +struct TanTypeMapFactory +{ + /*! 
@brief get typeid for output type of sycl::tan(T x) */ + std::enable_if_t::value, int> get() + { + using rT = typename TanOutputType::value_type; + return td_ns::GetTypeid{}.get(); + } +}; + +template +class tan_strided_kernel; + +template +sycl::event tan_strided_impl(sycl::queue &exec_q, + std::size_t nelems, + int nd, + const ssize_t *shape_and_strides, + const char *arg_p, + ssize_t arg_offset, + char *res_p, + ssize_t res_offset, + const std::vector &depends, + const std::vector &additional_depends) +{ + return elementwise_common::unary_strided_impl< + argTy, TanOutputType, TanStridedFunctor, tan_strided_kernel>( + exec_q, nelems, nd, shape_and_strides, arg_p, arg_offset, res_p, + res_offset, depends, additional_depends); +} + +template +struct TanStridedFactory +{ + fnT get() + { + if constexpr (!TanOutputType::is_defined) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = tan_strided_impl; + return fn; + } + } +}; + +} // namespace dpctl::tensor::kernels::tan diff --git a/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/tanh.hpp b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/tanh.hpp new file mode 100644 index 000000000000..dde16128fb1a --- /dev/null +++ b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/tanh.hpp @@ -0,0 +1,270 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines kernels for elementwise evaluation of TANH(x) function. 
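+///
+/// A hedged illustration of the special-value handling implemented by
+/// TanhFunctor below, expressed through the dpnp-level `tanh` wrapper that
+/// this PR rebinds to these kernels (the array contents are assumptions
+/// used only for demonstration):
+///
+/// \code{.py}
+/// import dpnp
+/// z = dpnp.array([complex(float("inf"), 1.0), complex(0.0, float("inf"))])
+/// # per the C99-style rules below: tanh(+inf + i*y) -> 1 + 0j for finite y,
+/// # and tanh(0 + i*inf) -> 0 + NaN*1j
+/// dpnp.tanh(z)
+/// \endcode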
+//===---------------------------------------------------------------------===// + +#pragma once +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "sycl_complex.hpp" +#include "vec_size_util.hpp" + +#include "kernels/dpctl_tensor_types.hpp" +#include "kernels/elementwise_functions/common.hpp" + +#include "utils/type_dispatch_building.hpp" +#include "utils/type_utils.hpp" + +namespace dpctl::tensor::kernels::tanh +{ + +using dpctl::tensor::ssize_t; +namespace td_ns = dpctl::tensor::type_dispatch; + +using dpctl::tensor::type_utils::is_complex; + +template +struct TanhFunctor +{ + + // is function constant for given argT + using is_constant = typename std::false_type; + // constant value, if constant + // constexpr resT constant_value = resT{}; + // is function defined for sycl::vec + using supports_vec = typename std::false_type; + // do both argTy and resTy support sugroup store/load operation + using supports_sg_loadstore = typename std::negation< + std::disjunction, is_complex>>; + + resT operator()(const argT &in) const + { + if constexpr (is_complex::value) { + using realT = typename argT::value_type; + + static constexpr realT q_nan = + std::numeric_limits::quiet_NaN(); + + const realT x = std::real(in); + const realT y = std::imag(in); + /* + * tanh(NaN + i 0) = NaN + i 0 + * + * tanh(NaN + i y) = NaN + i NaN for y != 0 + * + * The imaginary part has the sign of x*sin(2*y), but there's no + * special effort to get this right. + * + * tanh(+-Inf +- i Inf) = +-1 +- 0 + * + * tanh(+-Inf + i y) = +-1 + 0 sin(2y) for y finite + * + * The imaginary part of the sign is unspecified. This special + * case is only needed to avoid a spurious invalid exception when + * y is infinite. + */ + if (!std::isfinite(x)) { + if (std::isnan(x)) { + return resT{q_nan, (y == realT(0) ? y : q_nan)}; + } + const realT res_re = sycl::copysign(realT(1), x); + const realT res_im = sycl::copysign( + realT(0), std::isinf(y) ? 
y : sycl::sin(y) * sycl::cos(y)); + return resT{res_re, res_im}; + } + /* + * tanh(x + i NAN) = NaN + i NaN for non-zero x + * tanh(x +- i Inf) = NaN + i NaN for non-zero x + * tanh(0 + i NAN) = 0 + i NaN + * tanh(0 +- i Inf) = 0 + i NaN + */ + if (!std::isfinite(y)) { + if (x == realT(0)) { + return resT{x, q_nan}; + } + return resT{q_nan, q_nan}; + } + /* ordinary cases */ + return exprm_ns::tanh(exprm_ns::complex(in)); // tanh(in); + } + else { + static_assert(std::is_floating_point_v || + std::is_same_v); + return sycl::tanh(in); + } + } +}; + +template +using TanhContigFunctor = + elementwise_common::UnaryContigFunctor, + vec_sz, + n_vecs, + enable_sg_loadstore>; + +template +using TanhStridedFunctor = elementwise_common:: + UnaryStridedFunctor>; + +template +struct TanhOutputType +{ + using value_type = typename std::disjunction< + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry, + td_ns::TypeMapResultEntry>, + td_ns::TypeMapResultEntry>, + td_ns::DefaultResultEntry>::result_type; + + static constexpr bool is_defined = !std::is_same_v; +}; + +namespace hyperparam_detail +{ + +namespace vsu_ns = dpctl::tensor::kernels::vec_size_utils; + +using vsu_ns::ContigHyperparameterSetDefault; +using vsu_ns::UnaryContigHyperparameterSetEntry; + +template +struct TanhContigHyperparameterSet +{ + using value_type = + typename std::disjunction>; + + constexpr static auto vec_sz = value_type::vec_sz; + constexpr static auto n_vecs = value_type::n_vecs; +}; + +} // end of namespace hyperparam_detail + +template +class tanh_contig_kernel; + +template +sycl::event tanh_contig_impl(sycl::queue &exec_q, + std::size_t nelems, + const char *arg_p, + char *res_p, + const std::vector &depends = {}) +{ + using TanhHS = hyperparam_detail::TanhContigHyperparameterSet; + static constexpr std::uint8_t vec_sz = TanhHS::vec_sz; + static constexpr std::uint8_t n_vecs = TanhHS::n_vecs; + + return elementwise_common::unary_contig_impl< + argTy, TanhOutputType, TanhContigFunctor, tanh_contig_kernel, vec_sz, + n_vecs>(exec_q, nelems, arg_p, res_p, depends); +} + +template +struct TanhContigFactory +{ + fnT get() + { + if constexpr (!TanhOutputType::is_defined) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = tanh_contig_impl; + return fn; + } + } +}; + +template +struct TanhTypeMapFactory +{ + /*! 
@brief get typeid for output type of sycl::tanh(T x) */ + std::enable_if_t::value, int> get() + { + using rT = typename TanhOutputType::value_type; + return td_ns::GetTypeid{}.get(); + } +}; + +template +class tanh_strided_kernel; + +template +sycl::event + tanh_strided_impl(sycl::queue &exec_q, + std::size_t nelems, + int nd, + const ssize_t *shape_and_strides, + const char *arg_p, + ssize_t arg_offset, + char *res_p, + ssize_t res_offset, + const std::vector &depends, + const std::vector &additional_depends) +{ + return elementwise_common::unary_strided_impl< + argTy, TanhOutputType, TanhStridedFunctor, tanh_strided_kernel>( + exec_q, nelems, nd, shape_and_strides, arg_p, arg_offset, res_p, + res_offset, depends, additional_depends); +} + +template +struct TanhStridedFactory +{ + fnT get() + { + if constexpr (!TanhOutputType::is_defined) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = tanh_strided_impl; + return fn; + } + } +}; + +} // namespace dpctl::tensor::kernels::tanh diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/elementwise_common.cpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/elementwise_common.cpp index d3437991d6d0..69ada184c49d 100644 --- a/dpctl_ext/tensor/libtensor/source/elementwise_functions/elementwise_common.cpp +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/elementwise_common.cpp @@ -99,11 +99,11 @@ #include "signbit.hpp" #include "sin.hpp" #include "sinh.hpp" -// #include "sqrt.hpp" -// #include "square.hpp" +#include "sqrt.hpp" +#include "square.hpp" // #include "subtract.hpp" -// #include "tan.hpp" -// #include "tanh.hpp" +#include "tan.hpp" +#include "tanh.hpp" // #include "true_divide.hpp" // #include "trunc.hpp" @@ -180,11 +180,11 @@ void init_elementwise_functions(py::module_ m) init_signbit(m); init_sin(m); init_sinh(m); - // init_sqrt(m); - // init_square(m); + init_sqrt(m); + init_square(m); // init_subtract(m); - // init_tan(m); - // init_tanh(m); + init_tan(m); + init_tanh(m); // init_trunc(m); } diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/sqrt.cpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/sqrt.cpp new file mode 100644 index 000000000000..f5483f5a05a9 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/sqrt.cpp @@ -0,0 +1,124 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. +//===---------------------------------------------------------------------===// + +#include + +#include + +#include "dpnp4pybind11.hpp" +#include +#include +#include + +#include "elementwise_functions.hpp" +#include "sqrt.hpp" +#include "utils/type_dispatch.hpp" + +#include "kernels/elementwise_functions/common.hpp" +#include "kernels/elementwise_functions/sqrt.hpp" + +namespace dpctl::tensor::py_internal +{ + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +namespace ew_cmn_ns = dpctl::tensor::kernels::elementwise_common; +using ew_cmn_ns::unary_contig_impl_fn_ptr_t; +using ew_cmn_ns::unary_strided_impl_fn_ptr_t; + +// U33: ==== SQRT (x) +namespace impl +{ + +namespace sqrt_fn_ns = dpctl::tensor::kernels::sqrt; + +static unary_contig_impl_fn_ptr_t sqrt_contig_dispatch_vector[td_ns::num_types]; +static int sqrt_output_typeid_vector[td_ns::num_types]; +static unary_strided_impl_fn_ptr_t + sqrt_strided_dispatch_vector[td_ns::num_types]; + +void populate_sqrt_dispatch_vectors(void) +{ + using namespace td_ns; + namespace fn_ns = sqrt_fn_ns; + + using fn_ns::SqrtContigFactory; + DispatchVectorBuilder + dvb1; + dvb1.populate_dispatch_vector(sqrt_contig_dispatch_vector); + + using fn_ns::SqrtStridedFactory; + DispatchVectorBuilder + dvb2; + dvb2.populate_dispatch_vector(sqrt_strided_dispatch_vector); + + using fn_ns::SqrtTypeMapFactory; + DispatchVectorBuilder dvb3; + dvb3.populate_dispatch_vector(sqrt_output_typeid_vector); +}; + +} // namespace impl + +void init_sqrt(py::module_ m) +{ + using arrayT = dpctl::tensor::usm_ndarray; + using event_vecT = std::vector; + { + impl::populate_sqrt_dispatch_vectors(); + using impl::sqrt_contig_dispatch_vector; + using impl::sqrt_output_typeid_vector; + using impl::sqrt_strided_dispatch_vector; + + auto sqrt_pyapi = [&](const arrayT &src, const arrayT &dst, + sycl::queue &exec_q, + const event_vecT &depends = {}) { + return py_unary_ufunc( + src, dst, exec_q, depends, sqrt_output_typeid_vector, + sqrt_contig_dispatch_vector, sqrt_strided_dispatch_vector); + }; + m.def("_sqrt", sqrt_pyapi, "", py::arg("src"), py::arg("dst"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); + + auto sqrt_result_type_pyapi = [&](const py::dtype &dtype) { + return py_unary_ufunc_result_type(dtype, sqrt_output_typeid_vector); + }; + m.def("_sqrt_result_type", sqrt_result_type_pyapi); + } +} + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/sqrt.hpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/sqrt.hpp new file mode 100644 index 000000000000..e8f7014c1afc --- /dev/null +++ 
b/dpctl_ext/tensor/libtensor/source/elementwise_functions/sqrt.hpp @@ -0,0 +1,46 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. +//===---------------------------------------------------------------------===// + +#pragma once +#include + +namespace py = pybind11; + +namespace dpctl::tensor::py_internal +{ + +extern void init_sqrt(py::module_ m); + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/square.cpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/square.cpp new file mode 100644 index 000000000000..b7116bc38bfc --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/square.cpp @@ -0,0 +1,126 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. +//===---------------------------------------------------------------------===// + +#include + +#include + +#include "dpnp4pybind11.hpp" +#include +#include +#include + +#include "elementwise_functions.hpp" +#include "square.hpp" +#include "utils/type_dispatch.hpp" + +#include "kernels/elementwise_functions/common.hpp" +#include "kernels/elementwise_functions/square.hpp" + +namespace dpctl::tensor::py_internal +{ + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +namespace ew_cmn_ns = dpctl::tensor::kernels::elementwise_common; +using ew_cmn_ns::unary_contig_impl_fn_ptr_t; +using ew_cmn_ns::unary_strided_impl_fn_ptr_t; + +// U32: ==== SQUARE (x) +namespace impl +{ + +namespace square_fn_ns = dpctl::tensor::kernels::square; + +static unary_contig_impl_fn_ptr_t + square_contig_dispatch_vector[td_ns::num_types]; +static int square_output_typeid_vector[td_ns::num_types]; +static unary_strided_impl_fn_ptr_t + square_strided_dispatch_vector[td_ns::num_types]; + +void populate_square_dispatch_vectors(void) +{ + using namespace td_ns; + namespace fn_ns = square_fn_ns; + + using fn_ns::SquareContigFactory; + DispatchVectorBuilder + dvb1; + dvb1.populate_dispatch_vector(square_contig_dispatch_vector); + + using fn_ns::SquareStridedFactory; + DispatchVectorBuilder + dvb2; + dvb2.populate_dispatch_vector(square_strided_dispatch_vector); + + using fn_ns::SquareTypeMapFactory; + DispatchVectorBuilder dvb3; + dvb3.populate_dispatch_vector(square_output_typeid_vector); +}; + +} // namespace impl + +void init_square(py::module_ m) +{ + using arrayT = dpctl::tensor::usm_ndarray; + using event_vecT = std::vector; + { + impl::populate_square_dispatch_vectors(); + using impl::square_contig_dispatch_vector; + using impl::square_output_typeid_vector; + using impl::square_strided_dispatch_vector; + + auto square_pyapi = [&](const arrayT &src, const arrayT &dst, + sycl::queue &exec_q, + const event_vecT &depends = {}) { + return py_unary_ufunc( + src, dst, exec_q, depends, square_output_typeid_vector, + square_contig_dispatch_vector, square_strided_dispatch_vector); + }; + m.def("_square", square_pyapi, "", py::arg("src"), py::arg("dst"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); + + auto square_result_type_pyapi = [&](const py::dtype &dtype) { + return py_unary_ufunc_result_type(dtype, + square_output_typeid_vector); + }; + m.def("_square_result_type", 
square_result_type_pyapi); + } +} + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/square.hpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/square.hpp new file mode 100644 index 000000000000..3f23f184499c --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/square.hpp @@ -0,0 +1,46 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. +//===---------------------------------------------------------------------===// + +#pragma once +#include + +namespace py = pybind11; + +namespace dpctl::tensor::py_internal +{ + +extern void init_square(py::module_ m); + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/tan.cpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/tan.cpp new file mode 100644 index 000000000000..d8e8116bafeb --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/tan.cpp @@ -0,0 +1,124 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. +//===---------------------------------------------------------------------===// + +#include + +#include + +#include "dpnp4pybind11.hpp" +#include +#include +#include + +#include "elementwise_functions.hpp" +#include "tan.hpp" +#include "utils/type_dispatch.hpp" + +#include "kernels/elementwise_functions/common.hpp" +#include "kernels/elementwise_functions/tan.hpp" + +namespace dpctl::tensor::py_internal +{ + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +namespace ew_cmn_ns = dpctl::tensor::kernels::elementwise_common; +using ew_cmn_ns::unary_contig_impl_fn_ptr_t; +using ew_cmn_ns::unary_strided_impl_fn_ptr_t; + +// U34: ==== TAN (x) +namespace impl +{ + +namespace tan_fn_ns = dpctl::tensor::kernels::tan; + +static unary_contig_impl_fn_ptr_t tan_contig_dispatch_vector[td_ns::num_types]; +static int tan_output_typeid_vector[td_ns::num_types]; +static unary_strided_impl_fn_ptr_t + tan_strided_dispatch_vector[td_ns::num_types]; + +void populate_tan_dispatch_vectors(void) +{ + using namespace td_ns; + namespace fn_ns = tan_fn_ns; + + using fn_ns::TanContigFactory; + DispatchVectorBuilder + dvb1; + dvb1.populate_dispatch_vector(tan_contig_dispatch_vector); + + using fn_ns::TanStridedFactory; + DispatchVectorBuilder + dvb2; + dvb2.populate_dispatch_vector(tan_strided_dispatch_vector); + + using fn_ns::TanTypeMapFactory; + DispatchVectorBuilder dvb3; + dvb3.populate_dispatch_vector(tan_output_typeid_vector); +}; + +} // namespace impl + +void init_tan(py::module_ m) +{ + using arrayT = dpctl::tensor::usm_ndarray; + using event_vecT = std::vector; + { + impl::populate_tan_dispatch_vectors(); + using impl::tan_contig_dispatch_vector; + using impl::tan_output_typeid_vector; + using impl::tan_strided_dispatch_vector; + + auto tan_pyapi = [&](const arrayT &src, const arrayT &dst, + sycl::queue &exec_q, + const event_vecT &depends = {}) { + return py_unary_ufunc( + src, dst, exec_q, depends, tan_output_typeid_vector, + tan_contig_dispatch_vector, 
tan_strided_dispatch_vector); + }; + m.def("_tan", tan_pyapi, "", py::arg("src"), py::arg("dst"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); + + auto tan_result_type_pyapi = [&](const py::dtype &dtype) { + return py_unary_ufunc_result_type(dtype, tan_output_typeid_vector); + }; + m.def("_tan_result_type", tan_result_type_pyapi); + } +} + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/tan.hpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/tan.hpp new file mode 100644 index 000000000000..b0818a9a85c2 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/tan.hpp @@ -0,0 +1,46 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. +//===---------------------------------------------------------------------===// + +#pragma once +#include + +namespace py = pybind11; + +namespace dpctl::tensor::py_internal +{ + +extern void init_tan(py::module_ m); + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/tanh.cpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/tanh.cpp new file mode 100644 index 000000000000..c6546730a52a --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/tanh.cpp @@ -0,0 +1,124 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. 
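+///
+/// Rough sketch of driving the `_tanh` binding defined below from Python.
+/// The import path for the extension module is an assumption; the keyword
+/// names and the (host-task event, computation event) return pair follow
+/// the m.def call in this file:
+///
+/// \code{.py}
+/// import dpctl.tensor as dpt
+/// from dpctl_ext.tensor import _tensor_elementwise_impl as tei
+///
+/// x = dpt.linspace(-2, 2, num=8)
+/// y = dpt.empty_like(x)
+/// ht_ev, _ = tei._tanh(src=x, dst=y, sycl_queue=x.sycl_queue)
+/// ht_ev.wait()  # block until the host task releases the arrays
+/// \endcode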
+//===---------------------------------------------------------------------===// + +#include + +#include + +#include "dpnp4pybind11.hpp" +#include +#include +#include + +#include "elementwise_functions.hpp" +#include "tanh.hpp" +#include "utils/type_dispatch.hpp" + +#include "kernels/elementwise_functions/common.hpp" +#include "kernels/elementwise_functions/tanh.hpp" + +namespace dpctl::tensor::py_internal +{ + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +namespace ew_cmn_ns = dpctl::tensor::kernels::elementwise_common; +using ew_cmn_ns::unary_contig_impl_fn_ptr_t; +using ew_cmn_ns::unary_strided_impl_fn_ptr_t; + +// U35: ==== TANH (x) +namespace impl +{ + +namespace tanh_fn_ns = dpctl::tensor::kernels::tanh; + +static unary_contig_impl_fn_ptr_t tanh_contig_dispatch_vector[td_ns::num_types]; +static int tanh_output_typeid_vector[td_ns::num_types]; +static unary_strided_impl_fn_ptr_t + tanh_strided_dispatch_vector[td_ns::num_types]; + +void populate_tanh_dispatch_vectors(void) +{ + using namespace td_ns; + namespace fn_ns = tanh_fn_ns; + + using fn_ns::TanhContigFactory; + DispatchVectorBuilder + dvb1; + dvb1.populate_dispatch_vector(tanh_contig_dispatch_vector); + + using fn_ns::TanhStridedFactory; + DispatchVectorBuilder + dvb2; + dvb2.populate_dispatch_vector(tanh_strided_dispatch_vector); + + using fn_ns::TanhTypeMapFactory; + DispatchVectorBuilder dvb3; + dvb3.populate_dispatch_vector(tanh_output_typeid_vector); +}; + +} // namespace impl + +void init_tanh(py::module_ m) +{ + using arrayT = dpctl::tensor::usm_ndarray; + using event_vecT = std::vector; + { + impl::populate_tanh_dispatch_vectors(); + using impl::tanh_contig_dispatch_vector; + using impl::tanh_output_typeid_vector; + using impl::tanh_strided_dispatch_vector; + + auto tanh_pyapi = [&](const arrayT &src, const arrayT &dst, + sycl::queue &exec_q, + const event_vecT &depends = {}) { + return py_unary_ufunc( + src, dst, exec_q, depends, tanh_output_typeid_vector, + tanh_contig_dispatch_vector, tanh_strided_dispatch_vector); + }; + m.def("_tanh", tanh_pyapi, "", py::arg("src"), py::arg("dst"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); + + auto tanh_result_type_pyapi = [&](const py::dtype &dtype) { + return py_unary_ufunc_result_type(dtype, tanh_output_typeid_vector); + }; + m.def("_tanh_result_type", tanh_result_type_pyapi); + } +} + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/tanh.hpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/tanh.hpp new file mode 100644 index 000000000000..d29c924d5e73 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/tanh.hpp @@ -0,0 +1,46 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. +//===---------------------------------------------------------------------===// + +#pragma once +#include + +namespace py = pybind11; + +namespace dpctl::tensor::py_internal +{ + +extern void init_tanh(py::module_ m); + +} // namespace dpctl::tensor::py_internal diff --git a/dpnp/dpnp_iface_trigonometric.py b/dpnp/dpnp_iface_trigonometric.py index f063df5c598d..608eef55ae56 100644 --- a/dpnp/dpnp_iface_trigonometric.py +++ b/dpnp/dpnp_iface_trigonometric.py @@ -2449,8 +2449,8 @@ def reduce_hypot(x, /, *, axis=None, dtype=None, keepdims=False, out=None): sqrt = DPNPUnaryFunc( "sqrt", - ti._sqrt_result_type, - ti._sqrt, + ti_ext._sqrt_result_type, + ti_ext._sqrt, _SQRT_DOCSTRING, mkl_fn_to_call="_mkl_sqrt_to_call", mkl_impl_fn="_sqrt", @@ -2508,8 +2508,8 @@ def reduce_hypot(x, /, *, axis=None, dtype=None, keepdims=False, out=None): square = DPNPUnaryFunc( "square", - ti._square_result_type, - ti._square, + ti_ext._square_result_type, + ti_ext._square, _SQUARE_DOCSTRING, mkl_fn_to_call="_mkl_sqr_to_call", mkl_impl_fn="_sqr", @@ -2567,8 +2567,8 @@ def reduce_hypot(x, /, *, axis=None, dtype=None, keepdims=False, out=None): tan = DPNPUnaryFunc( "tan", - ti._tan_result_type, - ti._tan, + ti_ext._tan_result_type, + ti_ext._tan, _TAN_DOCSTRING, mkl_fn_to_call="_mkl_tan_to_call", mkl_impl_fn="_tan", @@ -2632,8 +2632,8 @@ def reduce_hypot(x, /, *, axis=None, dtype=None, keepdims=False, out=None): tanh = DPNPUnaryFunc( "tanh", - ti._tanh_result_type, - ti._tanh, + ti_ext._tanh_result_type, + ti_ext._tanh, _TANH_DOCSTRING, mkl_fn_to_call="_mkl_tanh_to_call", mkl_impl_fn="_tanh", From 0cdc3e4d73c67414aa4a66f2e826839f96352d05 Mon Sep 17 00:00:00 2001 From: Vladislav Perevezentsev Date: Wed, 4 Mar 2026 09:11:23 -0800 Subject: [PATCH 07/11] Move ti.cbrt()/exp2()/reciprocal()/rsqrt()/trunc() and reuse them --- dpctl_ext/tensor/CMakeLists.txt | 10 +- dpctl_ext/tensor/__init__.py | 10 + dpctl_ext/tensor/_elementwise_funcs.py | 152 ++++++++++ .../kernels/elementwise_functions/cbrt.hpp | 209 ++++++++++++++ .../kernels/elementwise_functions/exp2.hpp | 271 ++++++++++++++++++ .../elementwise_functions/reciprocal.hpp | 229 +++++++++++++++ .../kernels/elementwise_functions/rsqrt.hpp | 209 ++++++++++++++ 
.../kernels/elementwise_functions/trunc.hpp | 226 +++++++++++++++ .../source/elementwise_functions/cbrt.cpp | 124 ++++++++ .../source/elementwise_functions/cbrt.hpp | 48 ++++ .../elementwise_common.cpp | 20 +- .../source/elementwise_functions/exp2.cpp | 124 ++++++++ .../source/elementwise_functions/exp2.hpp | 46 +++ .../elementwise_functions/reciprocal.cpp | 128 +++++++++ .../elementwise_functions/reciprocal.hpp | 46 +++ .../source/elementwise_functions/rsqrt.cpp | 126 ++++++++ .../source/elementwise_functions/rsqrt.hpp | 46 +++ .../source/elementwise_functions/trunc.cpp | 126 ++++++++ .../source/elementwise_functions/trunc.hpp | 46 +++ dpnp/dpnp_iface_mathematical.py | 4 +- dpnp/dpnp_iface_trigonometric.py | 16 +- 21 files changed, 2191 insertions(+), 25 deletions(-) create mode 100644 dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/cbrt.hpp create mode 100644 dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/exp2.hpp create mode 100644 dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/reciprocal.hpp create mode 100644 dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/rsqrt.hpp create mode 100644 dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/trunc.hpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/cbrt.cpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/cbrt.hpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/exp2.cpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/exp2.hpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/reciprocal.cpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/reciprocal.hpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/rsqrt.cpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/rsqrt.hpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/trunc.cpp create mode 100644 dpctl_ext/tensor/libtensor/source/elementwise_functions/trunc.hpp diff --git a/dpctl_ext/tensor/CMakeLists.txt b/dpctl_ext/tensor/CMakeLists.txt index 7de341e6654c..ef3565f9827e 100644 --- a/dpctl_ext/tensor/CMakeLists.txt +++ b/dpctl_ext/tensor/CMakeLists.txt @@ -88,7 +88,7 @@ set(_elementwise_sources #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/bitwise_or.cpp #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/bitwise_right_shift.cpp #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/bitwise_xor.cpp - #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/cbrt.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/cbrt.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/ceil.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/conj.cpp #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/copysign.cpp @@ -96,7 +96,7 @@ set(_elementwise_sources ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/cosh.cpp #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/equal.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/exp.cpp - #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/exp2.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/exp2.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/expm1.cpp 
#${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/floor_divide.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/floor.cpp @@ -128,10 +128,10 @@ set(_elementwise_sources #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/pow.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/proj.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/real.cpp - #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/reciprocal.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/reciprocal.cpp #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/remainder.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/round.cpp - #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/rsqrt.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/rsqrt.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/sign.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/signbit.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/sin.cpp @@ -142,7 +142,7 @@ set(_elementwise_sources ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/tan.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/tanh.cpp #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/true_divide.cpp - #${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/trunc.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/elementwise_functions/trunc.cpp ) set(_reduction_sources ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/reductions/reduction_common.cpp diff --git a/dpctl_ext/tensor/__init__.py b/dpctl_ext/tensor/__init__.py index 9ea61267e344..5ec5c037d927 100644 --- a/dpctl_ext/tensor/__init__.py +++ b/dpctl_ext/tensor/__init__.py @@ -93,11 +93,13 @@ atan, atanh, bitwise_invert, + cbrt, ceil, conj, cos, cosh, exp, + exp2, expm1, floor, imag, @@ -113,7 +115,9 @@ positive, proj, real, + reciprocal, round, + rsqrt, sign, signbit, sin, @@ -122,6 +126,7 @@ square, tan, tanh, + trunc, ) from ._reduction import ( argmax, @@ -167,6 +172,7 @@ "broadcast_arrays", "broadcast_to", "can_cast", + "cbrt", "ceil", "concat", "conj", @@ -185,6 +191,7 @@ "expand_dims", "eye", "exp", + "exp2", "expm1", "finfo", "flip", @@ -222,12 +229,14 @@ "put", "put_along_axis", "real", + "reciprocal", "reduce_hypot", "repeat", "reshape", "result_type", "roll", "round", + "rsqrt", "searchsorted", "sign", "signbit", @@ -249,6 +258,7 @@ "to_numpy", "tril", "triu", + "trunc", "unique_all", "unique_counts", "unique_inverse", diff --git a/dpctl_ext/tensor/_elementwise_funcs.py b/dpctl_ext/tensor/_elementwise_funcs.py index 9d23044db714..ae0ef8aa3496 100644 --- a/dpctl_ext/tensor/_elementwise_funcs.py +++ b/dpctl_ext/tensor/_elementwise_funcs.py @@ -33,6 +33,7 @@ from ._elementwise_common import UnaryElementwiseFunc from ._type_utils import ( _acceptance_fn_negative, + _acceptance_fn_reciprocal, ) # U01: ==== ABS (x) @@ -1042,6 +1043,124 @@ ) del _tanh_docstring +# U36: ==== TRUNC (x) +_trunc_docstring = r""" +trunc(x, /, \*, out=None, order='K') + +Returns the truncated value for each element `x_i` for input array `x`. + +The truncated value of the scalar `x` is the nearest integer i which is +closer to zero than `x` is. In short, the fractional part of the +signed number `x` is discarded. + +Args: + x (usm_ndarray): + Input array, expected to have a boolean or real-valued data type. 
+    out (Union[usm_ndarray, None], optional):
+        Output array to populate.
+        Array must have the correct shape and the expected data type.
+    order ("C","F","A","K", optional):
+        Memory layout of the new output array, if parameter
+        `out` is ``None``.
+        Default: "K".
+
+Returns:
+    usm_ndarray:
+        An array containing the truncated value of each element.
+        The data type of the returned array is determined by
+        the Type Promotion Rules.
+"""
+trunc = UnaryElementwiseFunc(
+    "trunc", ti._trunc_result_type, ti._trunc, _trunc_docstring
+)
+del _trunc_docstring
+
+# U37: ==== CBRT (x)
+_cbrt_docstring_ = r"""
+cbrt(x, /, \*, out=None, order='K')
+
+Computes the cube-root for each element `x_i` for input array `x`.
+
+Args:
+    x (usm_ndarray):
+        Input array, expected to have a real-valued floating-point data type.
+    out (Union[usm_ndarray, None], optional):
+        Output array to populate.
+        Array must have the correct shape and the expected data type.
+    order ("C","F","A","K", optional):
+        Memory layout of the new output array, if parameter
+        `out` is ``None``.
+        Default: "K".
+
+Returns:
+    usm_ndarray:
+        An array containing the element-wise cube-root.
+        The data type of the returned array is determined by
+        the Type Promotion Rules.
+"""
+
+cbrt = UnaryElementwiseFunc(
+    "cbrt", ti._cbrt_result_type, ti._cbrt, _cbrt_docstring_
+)
+del _cbrt_docstring_
+
+# U38: ==== EXP2 (x)
+_exp2_docstring_ = r"""
+exp2(x, /, \*, out=None, order='K')
+
+Computes the base-2 exponential for each element `x_i` for input array `x`.
+
+Args:
+    x (usm_ndarray):
+        Input array, expected to have a floating-point data type.
+    out (Union[usm_ndarray, None], optional):
+        Output array to populate.
+        Array must have the correct shape and the expected data type.
+    order ("C","F","A","K", optional):
+        Memory layout of the new output array, if parameter
+        `out` is ``None``.
+        Default: "K".
+
+Returns:
+    usm_ndarray:
+        An array containing the element-wise base-2 exponentials.
+        The data type of the returned array is determined by
+        the Type Promotion Rules.
+"""
+
+exp2 = UnaryElementwiseFunc(
+    "exp2", ti._exp2_result_type, ti._exp2, _exp2_docstring_
+)
+del _exp2_docstring_
+
+# U39: ==== RSQRT (x)
+_rsqrt_docstring_ = r"""
+rsqrt(x, /, \*, out=None, order='K')
+
+Computes the reciprocal square-root for each element `x_i` for input array `x`.
+
+Args:
+    x (usm_ndarray):
+        Input array, expected to have a real-valued floating-point data type.
+    out (Union[usm_ndarray, None], optional):
+        Output array to populate.
+        Array must have the correct shape and the expected data type.
+    order ("C","F","A","K", optional):
+        Memory layout of the new output array, if parameter
+        `out` is ``None``.
+        Default: "K".
+
+Returns:
+    usm_ndarray:
+        An array containing the element-wise reciprocal square-root.
+        The returned array has a floating-point data type determined by
+        the Type Promotion Rules.
+"""
+
+rsqrt = UnaryElementwiseFunc(
+    "rsqrt", ti._rsqrt_result_type, ti._rsqrt, _rsqrt_docstring_
+)
+del _rsqrt_docstring_
+
 # U40: ==== PROJ (x)
 _proj_docstring = r"""
 proj(x, /, \*, out=None, order='K')
@@ -1098,6 +1217,39 @@
 )
 del _signbit_docstring
 
+# U42: ==== RECIPROCAL (x)
+_reciprocal_docstring = r"""
+reciprocal(x, /, \*, out=None, order='K')
+
+Computes the reciprocal of each element `x_i` for input array `x`.
+
+Args:
+    x (usm_ndarray):
+        Input array, expected to have a floating-point data type.
+    out (Union[usm_ndarray, None], optional):
+        Output array to populate.
+        Array must have the correct shape and the expected data type.
+    order ("C","F","A","K", optional):
+        Memory layout of the new output array, if parameter
+        `out` is ``None``.
+        Default: "K".
+
+Returns:
+    usm_ndarray:
+        An array containing the element-wise reciprocals.
+        The returned array has a floating-point data type determined
+        by the Type Promotion Rules.
+"""
+
+reciprocal = UnaryElementwiseFunc(
+    "reciprocal",
+    ti._reciprocal_result_type,
+    ti._reciprocal,
+    _reciprocal_docstring,
+    acceptance_fn=_acceptance_fn_reciprocal,
+)
+del _reciprocal_docstring
+
 # U43: ==== ANGLE (x)
 _angle_docstring = r"""
 angle(x, /, \*, out=None, order='K')
diff --git a/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/cbrt.hpp b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/cbrt.hpp
new file mode 100644
index 000000000000..072ee2b153ba
--- /dev/null
+++ b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/cbrt.hpp
@@ -0,0 +1,209 @@
+//*****************************************************************************
+// Copyright (c) 2026, Intel Corporation
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// - Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// - Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// - Neither the name of the copyright holder nor the names of its contributors
+//   may be used to endorse or promote products derived from this software
+//   without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+// THE POSSIBILITY OF SUCH DAMAGE.
+//*****************************************************************************
+//
+//===---------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines kernels for elementwise evaluation of CBRT(x)
+/// function that computes the cube root.
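+///
+/// Illustrative note (an editorial sketch, not part of the kernel API):
+/// each work-item simply applies sycl::cbrt to one element, e.g.
+///
+///     float y = sycl::cbrt(27.0f); // y == 3.0f
+///
+/// The contiguous and strided functors below differ only in how the
+/// element's memory location is computed.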
+//===---------------------------------------------------------------------===//
+
+#pragma once
+#include
+#include
+#include
+#include
+
+#include
+
+#include "vec_size_util.hpp"
+
+#include "kernels/dpctl_tensor_types.hpp"
+#include "kernels/elementwise_functions/common.hpp"
+
+#include "utils/type_dispatch_building.hpp"
+
+namespace dpctl::tensor::kernels::cbrt
+{
+
+using dpctl::tensor::ssize_t;
+namespace td_ns = dpctl::tensor::type_dispatch;
+
+template
+struct CbrtFunctor
+{
+
+    // is function constant for given argT
+    using is_constant = typename std::false_type;
+    // constant value, if constant
+    // constexpr resT constant_value = resT{};
+    // is function defined for sycl::vec
+    using supports_vec = typename std::false_type;
+    // do both argTy and resTy support subgroup store/load operation
+    using supports_sg_loadstore = typename std::true_type;
+
+    resT operator()(const argT &in) const
+    {
+        return sycl::cbrt(in);
+    }
+};
+
+template
+using CbrtContigFunctor =
+    elementwise_common::UnaryContigFunctor,
+                                           vec_sz,
+                                           n_vecs,
+                                           enable_sg_loadstore>;
+
+template
+using CbrtStridedFunctor = elementwise_common::
+    UnaryStridedFunctor>;
+
+template
+struct CbrtOutputType
+{
+    using value_type = typename std::disjunction<
+        td_ns::TypeMapResultEntry,
+        td_ns::TypeMapResultEntry,
+        td_ns::TypeMapResultEntry,
+        td_ns::DefaultResultEntry>::result_type;
+
+    static constexpr bool is_defined = !std::is_same_v;
+};
+
+namespace hyperparam_detail
+{
+
+namespace vsu_ns = dpctl::tensor::kernels::vec_size_utils;
+
+using vsu_ns::ContigHyperparameterSetDefault;
+using vsu_ns::UnaryContigHyperparameterSetEntry;
+
+template
+struct CbrtContigHyperparameterSet
+{
+    using value_type =
+        typename std::disjunction>;
+
+    constexpr static auto vec_sz = value_type::vec_sz;
+    constexpr static auto n_vecs = value_type::n_vecs;
+};
+
+} // end of namespace hyperparam_detail
+
+template
+class cbrt_contig_kernel;
+
+template
+sycl::event cbrt_contig_impl(sycl::queue &exec_q,
+                             std::size_t nelems,
+                             const char *arg_p,
+                             char *res_p,
+                             const std::vector &depends = {})
+{
+    using CbrtHS = hyperparam_detail::CbrtContigHyperparameterSet;
+    static constexpr std::uint8_t vec_sz = CbrtHS::vec_sz;
+    static constexpr std::uint8_t n_vecs = CbrtHS::n_vecs;
+
+    return elementwise_common::unary_contig_impl<
+        argTy, CbrtOutputType, CbrtContigFunctor, cbrt_contig_kernel, vec_sz,
+        n_vecs>(exec_q, nelems, arg_p, res_p, depends);
+}
+
+template
+struct CbrtContigFactory
+{
+    fnT get()
+    {
+        if constexpr (!CbrtOutputType::is_defined) {
+            fnT fn = nullptr;
+            return fn;
+        }
+        else {
+            fnT fn = cbrt_contig_impl;
+            return fn;
+        }
+    }
+};
+
+template
+struct CbrtTypeMapFactory
+{
+    /*!
@brief get typeid for output type of std::cbrt(T x) */ + std::enable_if_t::value, int> get() + { + using rT = typename CbrtOutputType::value_type; + return td_ns::GetTypeid{}.get(); + } +}; + +template +class cbrt_strided_kernel; + +template +sycl::event + cbrt_strided_impl(sycl::queue &exec_q, + std::size_t nelems, + int nd, + const ssize_t *shape_and_strides, + const char *arg_p, + ssize_t arg_offset, + char *res_p, + ssize_t res_offset, + const std::vector &depends, + const std::vector &additional_depends) +{ + return elementwise_common::unary_strided_impl< + argTy, CbrtOutputType, CbrtStridedFunctor, cbrt_strided_kernel>( + exec_q, nelems, nd, shape_and_strides, arg_p, arg_offset, res_p, + res_offset, depends, additional_depends); +} + +template +struct CbrtStridedFactory +{ + fnT get() + { + if constexpr (!CbrtOutputType::is_defined) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = cbrt_strided_impl; + return fn; + } + } +}; + +} // namespace dpctl::tensor::kernels::cbrt diff --git a/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/exp2.hpp b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/exp2.hpp new file mode 100644 index 000000000000..39a5a4906a27 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/exp2.hpp @@ -0,0 +1,271 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines kernels for elementwise evaluation of EXP2(x) function. 
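+///
+/// Real-valued inputs map directly onto sycl::exp2; complex inputs are
+/// evaluated via the identity (a sketch of what Exp2Functor implements):
+///
+///     2**z == e**(z * ln 2)
+///
+/// with explicit handling of non-finite real and imaginary parts.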
+//===---------------------------------------------------------------------===//
+
+#pragma once
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include "sycl_complex.hpp"
+#include "vec_size_util.hpp"
+
+#include "kernels/dpctl_tensor_types.hpp"
+#include "kernels/elementwise_functions/common.hpp"
+
+#include "utils/type_dispatch_building.hpp"
+#include "utils/type_utils.hpp"
+
+namespace dpctl::tensor::kernels::exp2
+{
+
+using dpctl::tensor::ssize_t;
+namespace td_ns = dpctl::tensor::type_dispatch;
+
+using dpctl::tensor::type_utils::is_complex;
+
+template
+struct Exp2Functor
+{
+    // is function constant for given argT
+    using is_constant = typename std::false_type;
+    // constant value, if constant
+    // constexpr resT constant_value = resT{};
+    // is function defined for sycl::vec
+    using supports_vec = typename std::false_type;
+    // do both argTy and resTy support subgroup store/load operation
+    using supports_sg_loadstore = typename std::negation<
+        std::disjunction, is_complex>>;
+
+    resT operator()(const argT &in) const
+    {
+        if constexpr (is_complex::value) {
+            using realT = typename argT::value_type;
+
+            const argT tmp = in * sycl::log(realT(2));
+
+            static constexpr realT q_nan =
+                std::numeric_limits::quiet_NaN();
+
+            const realT x = std::real(tmp);
+            const realT y = std::imag(tmp);
+            if (std::isfinite(x)) {
+                if (std::isfinite(y)) {
+                    return exprm_ns::exp(exprm_ns::complex(tmp));
+                }
+                else {
+                    return resT{q_nan, q_nan};
+                }
+            }
+            else if (std::isnan(x)) {
+                /* x is nan */
+                if (y == realT(0)) {
+                    return resT{in};
+                }
+                else {
+                    return resT{x, q_nan};
+                }
+            }
+            else {
+                if (!sycl::signbit(x)) { /* x is +inf */
+                    if (y == realT(0)) {
+                        return resT{x, y};
+                    }
+                    else if (std::isfinite(y)) {
+                        return resT{x * sycl::cos(y), x * sycl::sin(y)};
+                    }
+                    else {
+                        /* x = +inf, y = +-inf || nan */
+                        return resT{x, q_nan};
+                    }
+                }
+                else { /* x is -inf */
+                    if (std::isfinite(y)) {
+                        realT exp_x = sycl::exp(x);
+                        return resT{exp_x * sycl::cos(y), exp_x * sycl::sin(y)};
+                    }
+                    else {
+                        /* x = -inf, y = +-inf || nan */
+                        return resT{0, 0};
+                    }
+                }
+            }
+        }
+        else {
+            return sycl::exp2(in);
+        }
+    }
+};
+
+template
+using Exp2ContigFunctor =
+    elementwise_common::UnaryContigFunctor,
+                                           vec_sz,
+                                           n_vecs,
+                                           enable_sg_loadstore>;
+
+template
+using Exp2StridedFunctor = elementwise_common::
+    UnaryStridedFunctor>;
+
+template
+struct Exp2OutputType
+{
+    using value_type = typename std::disjunction<
+        td_ns::TypeMapResultEntry,
+        td_ns::TypeMapResultEntry,
+        td_ns::TypeMapResultEntry,
+        td_ns::TypeMapResultEntry>,
+        td_ns::TypeMapResultEntry>,
+        td_ns::DefaultResultEntry>::result_type;
+
+    static constexpr bool is_defined = !std::is_same_v;
+};
+
+namespace hyperparam_detail
+{
+
+namespace vsu_ns = dpctl::tensor::kernels::vec_size_utils;
+
+using vsu_ns::ContigHyperparameterSetDefault;
+using vsu_ns::UnaryContigHyperparameterSetEntry;
+
+template
+struct Exp2ContigHyperparameterSet
+{
+    using value_type =
+        typename std::disjunction>;
+
+    constexpr static auto vec_sz = value_type::vec_sz;
+    constexpr static auto n_vecs = value_type::n_vecs;
+};
+
+} // end of namespace hyperparam_detail
+
+template
+class exp2_contig_kernel;
+
+template
+sycl::event exp2_contig_impl(sycl::queue &exec_q,
+                             std::size_t nelems,
+                             const char *arg_p,
+                             char *res_p,
+                             const std::vector &depends = {})
+{
+    using Exp2HS = hyperparam_detail::Exp2ContigHyperparameterSet;
+
+    static constexpr std::uint8_t vec_sz = Exp2HS::vec_sz;
+    static constexpr std::uint8_t n_vecs = Exp2HS::n_vecs;
+
+    return
elementwise_common::unary_contig_impl< + argTy, Exp2OutputType, Exp2ContigFunctor, exp2_contig_kernel, vec_sz, + n_vecs>(exec_q, nelems, arg_p, res_p, depends); +} + +template +struct Exp2ContigFactory +{ + fnT get() + { + if constexpr (!Exp2OutputType::is_defined) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = exp2_contig_impl; + return fn; + } + } +}; + +template +struct Exp2TypeMapFactory +{ + /*! @brief get typeid for output type of sycl::exp2(T x) */ + std::enable_if_t::value, int> get() + { + using rT = typename Exp2OutputType::value_type; + return td_ns::GetTypeid{}.get(); + } +}; + +template +class exp2_strided_kernel; + +template +sycl::event + exp2_strided_impl(sycl::queue &exec_q, + std::size_t nelems, + int nd, + const ssize_t *shape_and_strides, + const char *arg_p, + ssize_t arg_offset, + char *res_p, + ssize_t res_offset, + const std::vector &depends, + const std::vector &additional_depends) +{ + return elementwise_common::unary_strided_impl< + argTy, Exp2OutputType, Exp2StridedFunctor, exp2_strided_kernel>( + exec_q, nelems, nd, shape_and_strides, arg_p, arg_offset, res_p, + res_offset, depends, additional_depends); +} + +template +struct Exp2StridedFactory +{ + fnT get() + { + if constexpr (!Exp2OutputType::is_defined) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = exp2_strided_impl; + return fn; + } + } +}; + +} // namespace dpctl::tensor::kernels::exp2 diff --git a/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/reciprocal.hpp b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/reciprocal.hpp new file mode 100644 index 000000000000..f26f4043c9ab --- /dev/null +++ b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/reciprocal.hpp @@ -0,0 +1,229 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. 
+//*****************************************************************************
+//
+//===---------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines kernels for elementwise evaluation of RECIPROCAL(x)
+/// function.
+//===---------------------------------------------------------------------===//
+
+#pragma once
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include "sycl_complex.hpp"
+#include "vec_size_util.hpp"
+
+#include "kernels/dpctl_tensor_types.hpp"
+#include "kernels/elementwise_functions/common.hpp"
+
+#include "utils/type_dispatch_building.hpp"
+#include "utils/type_utils.hpp"
+
+namespace dpctl::tensor::kernels::reciprocal
+{
+
+using dpctl::tensor::ssize_t;
+namespace td_ns = dpctl::tensor::type_dispatch;
+
+using dpctl::tensor::type_utils::is_complex;
+
+template
+struct ReciprocalFunctor
+{
+    // is function constant for given argT
+    using is_constant = typename std::false_type;
+    // constant value, if constant
+    // constexpr resT constant_value = resT{};
+    // is function defined for sycl::vec
+    using supports_vec = typename std::false_type;
+    // do both argTy and resTy support subgroup store/load operation
+    using supports_sg_loadstore = typename std::negation<
+        std::disjunction, is_complex>>;
+
+    resT operator()(const argT &in) const
+    {
+        if constexpr (is_complex::value) {
+
+            using realT = typename argT::value_type;
+
+            return realT(1) / exprm_ns::complex(in);
+        }
+        else {
+            return argT(1) / in;
+        }
+    }
+};
+
+template
+using ReciprocalContigFunctor =
+    elementwise_common::UnaryContigFunctor,
+                                           vec_sz,
+                                           n_vecs,
+                                           enable_sg_loadstore>;
+
+template
+using ReciprocalStridedFunctor =
+    elementwise_common::UnaryStridedFunctor>;
+
+template
+struct ReciprocalOutputType
+{
+    using value_type = typename std::disjunction<
+        td_ns::TypeMapResultEntry,
+        td_ns::TypeMapResultEntry,
+        td_ns::TypeMapResultEntry,
+        td_ns::TypeMapResultEntry>,
+        td_ns::TypeMapResultEntry>,
+        td_ns::DefaultResultEntry>::result_type;
+
+    static constexpr bool is_defined = !std::is_same_v;
+};
+
+namespace hyperparam_detail
+{
+
+namespace vsu_ns = dpctl::tensor::kernels::vec_size_utils;
+
+using vsu_ns::ContigHyperparameterSetDefault;
+using vsu_ns::UnaryContigHyperparameterSetEntry;
+
+template
+struct ReciprocalContigHyperparameterSet
+{
+    using value_type =
+        typename std::disjunction>;
+
+    constexpr static auto vec_sz = value_type::vec_sz;
+    constexpr static auto n_vecs = value_type::n_vecs;
+};
+
+} // end of namespace hyperparam_detail
+
+template
+class reciprocal_contig_kernel;
+
+template
+sycl::event reciprocal_contig_impl(sycl::queue &exec_q,
+                                   std::size_t nelems,
+                                   const char *arg_p,
+                                   char *res_p,
+                                   const std::vector &depends = {})
+{
+    using RecipHS = hyperparam_detail::ReciprocalContigHyperparameterSet;
+    static constexpr std::uint8_t vec_sz = RecipHS::vec_sz;
+    static constexpr std::uint8_t n_vecs = RecipHS::n_vecs;
+
+    return elementwise_common::unary_contig_impl<
+        argTy, ReciprocalOutputType, ReciprocalContigFunctor,
+        reciprocal_contig_kernel, vec_sz, n_vecs>(exec_q, nelems, arg_p, res_p,
+                                                  depends);
+}
+
+template
+struct ReciprocalContigFactory
+{
+    fnT get()
+    {
+        if constexpr (!ReciprocalOutputType::is_defined) {
+            fnT fn = nullptr;
+            return fn;
+        }
+        else {
+            fnT fn = reciprocal_contig_impl;
+            return fn;
+        }
+    }
+};
+
+template
+struct ReciprocalTypeMapFactory
+{
+    /*!
@brief get typeid for output type of 1 / x */ + std::enable_if_t::value, int> get() + { + using rT = typename ReciprocalOutputType::value_type; + return td_ns::GetTypeid{}.get(); + } +}; + +template +class reciprocal_strided_kernel; + +template +sycl::event + reciprocal_strided_impl(sycl::queue &exec_q, + std::size_t nelems, + int nd, + const ssize_t *shape_and_strides, + const char *arg_p, + ssize_t arg_offset, + char *res_p, + ssize_t res_offset, + const std::vector &depends, + const std::vector &additional_depends) +{ + return elementwise_common::unary_strided_impl( + exec_q, nelems, nd, shape_and_strides, arg_p, arg_offset, res_p, + res_offset, depends, additional_depends); +} + +template +struct ReciprocalStridedFactory +{ + fnT get() + { + if constexpr (!ReciprocalOutputType::is_defined) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = reciprocal_strided_impl; + return fn; + } + } +}; + +} // namespace dpctl::tensor::kernels::reciprocal diff --git a/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/rsqrt.hpp b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/rsqrt.hpp new file mode 100644 index 000000000000..0228aecdca67 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/rsqrt.hpp @@ -0,0 +1,209 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines kernels for elementwise evaluation of RSQRT(x) +/// function that computes the reciprocal square root. 
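+///
+/// Illustrative note (an editorial sketch): each element maps through
+/// sycl::rsqrt, i.e.
+///
+///     float y = sycl::rsqrt(4.0f); // y == 0.5f, since 1/sqrt(4) == 0.5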
+//===---------------------------------------------------------------------===//
+
+#pragma once
+#include
+#include
+#include
+#include
+
+#include
+
+#include "vec_size_util.hpp"
+
+#include "kernels/dpctl_tensor_types.hpp"
+#include "kernels/elementwise_functions/common.hpp"
+
+#include "utils/type_dispatch_building.hpp"
+
+namespace dpctl::tensor::kernels::rsqrt
+{
+
+using dpctl::tensor::ssize_t;
+namespace td_ns = dpctl::tensor::type_dispatch;
+
+template
+struct RsqrtFunctor
+{
+
+    // is function constant for given argT
+    using is_constant = typename std::false_type;
+    // constant value, if constant
+    // constexpr resT constant_value = resT{};
+    // is function defined for sycl::vec
+    using supports_vec = typename std::false_type;
+    // do both argTy and resTy support subgroup store/load operation
+    using supports_sg_loadstore = typename std::true_type;
+
+    resT operator()(const argT &in) const
+    {
+        return sycl::rsqrt(in);
+    }
+};
+
+template
+using RsqrtContigFunctor =
+    elementwise_common::UnaryContigFunctor,
+                                           vec_sz,
+                                           n_vecs,
+                                           enable_sg_loadstore>;
+
+template
+using RsqrtStridedFunctor = elementwise_common::
+    UnaryStridedFunctor>;
+
+template
+struct RsqrtOutputType
+{
+    using value_type = typename std::disjunction<
+        td_ns::TypeMapResultEntry,
+        td_ns::TypeMapResultEntry,
+        td_ns::TypeMapResultEntry,
+        td_ns::DefaultResultEntry>::result_type;
+
+    static constexpr bool is_defined = !std::is_same_v;
+};
+
+namespace hyperparam_detail
+{
+
+namespace vsu_ns = dpctl::tensor::kernels::vec_size_utils;
+
+using vsu_ns::ContigHyperparameterSetDefault;
+using vsu_ns::UnaryContigHyperparameterSetEntry;
+
+template
+struct RsqrtContigHyperparameterSet
+{
+    using value_type =
+        typename std::disjunction>;
+
+    constexpr static auto vec_sz = value_type::vec_sz;
+    constexpr static auto n_vecs = value_type::n_vecs;
+};
+
+} // namespace hyperparam_detail
+
+template
+class rsqrt_contig_kernel;
+
+template
+sycl::event rsqrt_contig_impl(sycl::queue &exec_q,
+                              std::size_t nelems,
+                              const char *arg_p,
+                              char *res_p,
+                              const std::vector &depends = {})
+{
+    using RsqrtHS = hyperparam_detail::RsqrtContigHyperparameterSet;
+    static constexpr std::uint8_t vec_sz = RsqrtHS::vec_sz;
+    static constexpr std::uint8_t n_vecs = RsqrtHS::n_vecs;
+
+    return elementwise_common::unary_contig_impl<
+        argTy, RsqrtOutputType, RsqrtContigFunctor, rsqrt_contig_kernel,
+        vec_sz, n_vecs>(exec_q, nelems, arg_p, res_p, depends);
+}
+
+template
+struct RsqrtContigFactory
+{
+    fnT get()
+    {
+        if constexpr (!RsqrtOutputType::is_defined) {
+            fnT fn = nullptr;
+            return fn;
+        }
+        else {
+            fnT fn = rsqrt_contig_impl;
+            return fn;
+        }
+    }
+};
+
+template
+struct RsqrtTypeMapFactory
+{
+    /*!
@brief get typeid for output type of sycl::rsqrt(T x) */ + std::enable_if_t::value, int> get() + { + using rT = typename RsqrtOutputType::value_type; + return td_ns::GetTypeid{}.get(); + } +}; + +template +class rsqrt_strided_kernel; + +template +sycl::event + rsqrt_strided_impl(sycl::queue &exec_q, + std::size_t nelems, + int nd, + const ssize_t *shape_and_strides, + const char *arg_p, + ssize_t arg_offset, + char *res_p, + ssize_t res_offset, + const std::vector &depends, + const std::vector &additional_depends) +{ + return elementwise_common::unary_strided_impl< + argTy, RsqrtOutputType, RsqrtStridedFunctor, rsqrt_strided_kernel>( + exec_q, nelems, nd, shape_and_strides, arg_p, arg_offset, res_p, + res_offset, depends, additional_depends); +} + +template +struct RsqrtStridedFactory +{ + fnT get() + { + if constexpr (!RsqrtOutputType::is_defined) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = rsqrt_strided_impl; + return fn; + } + } +}; + +} // namespace dpctl::tensor::kernels::rsqrt diff --git a/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/trunc.hpp b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/trunc.hpp new file mode 100644 index 000000000000..6fae9c4f27e5 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/trunc.hpp @@ -0,0 +1,226 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines kernels for elementwise evaluation of TRUNC(x) function. 
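+///
+/// Illustrative note (an editorial sketch): integral inputs pass through
+/// unchanged, while floating-point inputs round toward zero, e.g.
+///
+///     float y = sycl::trunc(-1.7f); // y == -1.0f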
+//===---------------------------------------------------------------------===//
+
+#pragma once
+#include
+#include
+#include
+#include
+
+#include
+
+#include "vec_size_util.hpp"
+
+#include "kernels/dpctl_tensor_types.hpp"
+#include "kernels/elementwise_functions/common.hpp"
+
+#include "utils/type_dispatch_building.hpp"
+#include "utils/type_utils.hpp"
+
+namespace dpctl::tensor::kernels::trunc
+{
+
+using dpctl::tensor::ssize_t;
+namespace td_ns = dpctl::tensor::type_dispatch;
+
+using dpctl::tensor::type_utils::is_complex;
+
+template
+struct TruncFunctor
+{
+
+    // is function constant for given argT
+    using is_constant = typename std::false_type;
+    // constant value, if constant
+    // constexpr resT constant_value = resT{};
+    // is function defined for sycl::vec
+    using supports_vec = typename std::false_type;
+    // do both argTy and resTy support subgroup store/load operation
+    using supports_sg_loadstore = typename std::negation<
+        std::disjunction, is_complex>>;
+
+    resT operator()(const argT &in) const
+    {
+        if constexpr (std::is_integral_v) {
+            return in;
+        }
+        else {
+            return sycl::trunc(in);
+        }
+    }
+};
+
+template
+using TruncContigFunctor =
+    elementwise_common::UnaryContigFunctor,
+                                           vec_sz,
+                                           n_vecs,
+                                           enable_sg_loadstore>;
+
+template
+using TruncStridedFunctor = elementwise_common::
+    UnaryStridedFunctor>;
+
+template
+struct TruncOutputType
+{
+    using value_type =
+        typename std::disjunction,
+                                  td_ns::TypeMapResultEntry,
+                                  td_ns::TypeMapResultEntry,
+                                  td_ns::TypeMapResultEntry,
+                                  td_ns::TypeMapResultEntry,
+                                  td_ns::TypeMapResultEntry,
+                                  td_ns::TypeMapResultEntry,
+                                  td_ns::TypeMapResultEntry,
+                                  td_ns::TypeMapResultEntry,
+                                  td_ns::TypeMapResultEntry,
+                                  td_ns::TypeMapResultEntry,
+                                  td_ns::TypeMapResultEntry,
+                                  td_ns::DefaultResultEntry>::result_type;
+
+    static constexpr bool is_defined = !std::is_same_v;
+};
+
+namespace hyperparam_detail
+{
+
+namespace vsu_ns = dpctl::tensor::kernels::vec_size_utils;
+
+using vsu_ns::ContigHyperparameterSetDefault;
+using vsu_ns::UnaryContigHyperparameterSetEntry;
+
+template
+struct TruncContigHyperparameterSet
+{
+    using value_type =
+        typename std::disjunction>;
+
+    constexpr static auto vec_sz = value_type::vec_sz;
+    constexpr static auto n_vecs = value_type::n_vecs;
+};
+
+} // end of namespace hyperparam_detail
+
+template
+class trunc_contig_kernel;
+
+template
+sycl::event trunc_contig_impl(sycl::queue &exec_q,
+                              std::size_t nelems,
+                              const char *arg_p,
+                              char *res_p,
+                              const std::vector &depends = {})
+{
+    using TruncHS = hyperparam_detail::TruncContigHyperparameterSet;
+    static constexpr std::uint8_t vec_sz = TruncHS::vec_sz;
+    static constexpr std::uint8_t n_vecs = TruncHS::n_vecs;
+
+    return elementwise_common::unary_contig_impl<
+        argTy, TruncOutputType, TruncContigFunctor, trunc_contig_kernel,
+        vec_sz, n_vecs>(exec_q, nelems, arg_p, res_p, depends);
+}
+
+template
+struct TruncContigFactory
+{
+    fnT get()
+    {
+        if constexpr (!TruncOutputType::is_defined) {
+            fnT fn = nullptr;
+            return fn;
+        }
+        else {
+            fnT fn = trunc_contig_impl;
+            return fn;
+        }
+    }
+};
+
+template
+struct TruncTypeMapFactory
+{
+    /*!
@brief get typeid for output type of sycl::trunc(T x) */ + std::enable_if_t::value, int> get() + { + using rT = typename TruncOutputType::value_type; + return td_ns::GetTypeid{}.get(); + } +}; + +template +class trunc_strided_kernel; + +template +sycl::event + trunc_strided_impl(sycl::queue &exec_q, + std::size_t nelems, + int nd, + const ssize_t *shape_and_strides, + const char *arg_p, + ssize_t arg_offset, + char *res_p, + ssize_t res_offset, + const std::vector &depends, + const std::vector &additional_depends) +{ + return elementwise_common::unary_strided_impl< + argTy, TruncOutputType, TruncStridedFunctor, trunc_strided_kernel>( + exec_q, nelems, nd, shape_and_strides, arg_p, arg_offset, res_p, + res_offset, depends, additional_depends); +} + +template +struct TruncStridedFactory +{ + fnT get() + { + if constexpr (!TruncOutputType::is_defined) { + fnT fn = nullptr; + return fn; + } + else { + fnT fn = trunc_strided_impl; + return fn; + } + } +}; + +} // namespace dpctl::tensor::kernels::trunc diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/cbrt.cpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/cbrt.cpp new file mode 100644 index 000000000000..3b8e1e85e9ff --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/cbrt.cpp @@ -0,0 +1,124 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. 
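+///
+/// Illustrative usage from Python (an editorial sketch; `x` and `y` are
+/// assumed to be preallocated usm_ndarray objects on the same queue `q`,
+/// and `ti` is the built _tensor_impl module):
+///
+///     ht_ev, comp_ev = ti._cbrt(src=x, dst=y, sycl_queue=q, depends=[])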
+//===---------------------------------------------------------------------===// + +#include + +#include + +#include "dpnp4pybind11.hpp" +#include +#include +#include + +#include "cbrt.hpp" +#include "elementwise_functions.hpp" +#include "utils/type_dispatch.hpp" + +#include "kernels/elementwise_functions/cbrt.hpp" +#include "kernels/elementwise_functions/common.hpp" + +namespace dpctl::tensor::py_internal +{ + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +namespace ew_cmn_ns = dpctl::tensor::kernels::elementwise_common; +using ew_cmn_ns::unary_contig_impl_fn_ptr_t; +using ew_cmn_ns::unary_strided_impl_fn_ptr_t; + +// U37: ==== CBRT (x) +namespace impl +{ + +namespace cbrt_fn_ns = dpctl::tensor::kernels::cbrt; + +static unary_contig_impl_fn_ptr_t cbrt_contig_dispatch_vector[td_ns::num_types]; +static int cbrt_output_typeid_vector[td_ns::num_types]; +static unary_strided_impl_fn_ptr_t + cbrt_strided_dispatch_vector[td_ns::num_types]; + +void populate_cbrt_dispatch_vectors(void) +{ + using namespace td_ns; + namespace fn_ns = cbrt_fn_ns; + + using fn_ns::CbrtContigFactory; + DispatchVectorBuilder + dvb1; + dvb1.populate_dispatch_vector(cbrt_contig_dispatch_vector); + + using fn_ns::CbrtStridedFactory; + DispatchVectorBuilder + dvb2; + dvb2.populate_dispatch_vector(cbrt_strided_dispatch_vector); + + using fn_ns::CbrtTypeMapFactory; + DispatchVectorBuilder dvb3; + dvb3.populate_dispatch_vector(cbrt_output_typeid_vector); +}; + +} // namespace impl + +void init_cbrt(py::module_ m) +{ + using arrayT = dpctl::tensor::usm_ndarray; + using event_vecT = std::vector; + { + impl::populate_cbrt_dispatch_vectors(); + using impl::cbrt_contig_dispatch_vector; + using impl::cbrt_output_typeid_vector; + using impl::cbrt_strided_dispatch_vector; + + auto cbrt_pyapi = [&](const arrayT &src, const arrayT &dst, + sycl::queue &exec_q, + const event_vecT &depends = {}) { + return py_unary_ufunc( + src, dst, exec_q, depends, cbrt_output_typeid_vector, + cbrt_contig_dispatch_vector, cbrt_strided_dispatch_vector); + }; + m.def("_cbrt", cbrt_pyapi, "", py::arg("src"), py::arg("dst"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); + + auto cbrt_result_type_pyapi = [&](const py::dtype &dtype) { + return py_unary_ufunc_result_type(dtype, cbrt_output_typeid_vector); + }; + m.def("_cbrt_result_type", cbrt_result_type_pyapi); + } +} + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/cbrt.hpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/cbrt.hpp new file mode 100644 index 000000000000..0d52f48420f1 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/cbrt.hpp @@ -0,0 +1,48 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// - Neither the name of the copyright holder nor the names of its contributors
+//   may be used to endorse or promote products derived from this software
+//   without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+// THE POSSIBILITY OF SUCH DAMAGE.
+//*****************************************************************************
+//
+//===---------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines functions of dpctl.tensor._tensor_elementwise_impl
+/// extension, specifically functions for elementwise operations.
+//===---------------------------------------------------------------------===//
+
+#pragma once
+#include
+
+namespace py = pybind11;
+
+namespace dpctl::tensor::py_internal
+{
+
+extern void init_cbrt(py::module_ m);
+
+} // namespace dpctl::tensor::py_internal
diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/elementwise_common.cpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/elementwise_common.cpp
index 69ada184c49d..144e39be252f 100644
--- a/dpctl_ext/tensor/libtensor/source/elementwise_functions/elementwise_common.cpp
+++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/elementwise_common.cpp
@@ -51,7 +51,7 @@
 // #include "bitwise_or.hpp"
 // #include "bitwise_right_shift.hpp"
 // #include "bitwise_xor.hpp"
-// #include "cbrt.hpp"
+#include "cbrt.hpp"
 #include "ceil.hpp"
 #include "conj.hpp"
 // #include "copysign.hpp"
@@ -59,7 +59,7 @@
 #include "cosh.hpp"
 // #include "equal.hpp"
 #include "exp.hpp"
-// #include "exp2.hpp"
+#include "exp2.hpp"
 #include "expm1.hpp"
 #include "floor.hpp"
 // #include "floor_divide.hpp"
@@ -91,10 +91,10 @@
 // #include "pow.hpp"
 #include "proj.hpp"
 #include "real.hpp"
-// #include "reciprocal.hpp"
+#include "reciprocal.hpp"
 // #include "remainder.hpp"
 #include "round.hpp"
-// #include "rsqrt.hpp"
+#include "rsqrt.hpp"
 #include "sign.hpp"
 #include "signbit.hpp"
 #include "sin.hpp"
@@ -105,7 +105,7 @@
 #include "tan.hpp"
 #include "tanh.hpp"
 // #include "true_divide.hpp"
-// #include "trunc.hpp"
+#include "trunc.hpp"
 
 namespace dpctl::tensor::py_internal
 {
@@ -131,7 +131,7 @@ void init_elementwise_functions(py::module_ m)
     // init_bitwise_or(m);
     // init_bitwise_right_shift(m);
     // init_bitwise_xor(m);
-    // init_cbrt(m);
+    init_cbrt(m);
     init_ceil(m);
     init_conj(m);
     // init_copysign(m);
@@ -140,7 +140,7 @@ void init_elementwise_functions(py::module_ m)
     // init_divide(m);
     // init_equal(m);
     init_exp(m);
-    // init_exp2(m);
+    init_exp2(m);
    init_expm1(m);
     init_floor(m);
     // init_floor_divide(m);
@@ -172,10 +172,10 @@ void init_elementwise_functions(py::module_ m)
     // init_pow(m);
     init_proj(m);
     init_real(m);
-    // init_reciprocal(m);
+    init_reciprocal(m);
     // init_remainder(m);
     init_round(m);
-    // 
init_rsqrt(m); + init_rsqrt(m); init_sign(m); init_signbit(m); init_sin(m); @@ -185,7 +185,7 @@ void init_elementwise_functions(py::module_ m) // init_subtract(m); init_tan(m); init_tanh(m); - // init_trunc(m); + init_trunc(m); } } // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/exp2.cpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/exp2.cpp new file mode 100644 index 000000000000..5e77d25e7b0c --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/exp2.cpp @@ -0,0 +1,124 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. 
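+///
+/// Dispatch note (an editorial sketch): populate_exp2_dispatch_vectors()
+/// fills per-dtype function-pointer tables once at module initialization;
+/// the _exp2 binding then selects an implementation by the source array's
+/// typeid, conceptually:
+///
+///     auto fn = exp2_contig_dispatch_vector[src_typeid];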
+//===---------------------------------------------------------------------===// + +#include + +#include + +#include "dpnp4pybind11.hpp" +#include +#include +#include + +#include "elementwise_functions.hpp" +#include "exp2.hpp" +#include "utils/type_dispatch.hpp" + +#include "kernels/elementwise_functions/common.hpp" +#include "kernels/elementwise_functions/exp2.hpp" + +namespace dpctl::tensor::py_internal +{ + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +namespace ew_cmn_ns = dpctl::tensor::kernels::elementwise_common; +using ew_cmn_ns::unary_contig_impl_fn_ptr_t; +using ew_cmn_ns::unary_strided_impl_fn_ptr_t; + +// U38: ==== EXP2 (x) +namespace impl +{ + +namespace exp2_fn_ns = dpctl::tensor::kernels::exp2; + +static unary_contig_impl_fn_ptr_t exp2_contig_dispatch_vector[td_ns::num_types]; +static int exp2_output_typeid_vector[td_ns::num_types]; +static unary_strided_impl_fn_ptr_t + exp2_strided_dispatch_vector[td_ns::num_types]; + +void populate_exp2_dispatch_vectors(void) +{ + using namespace td_ns; + namespace fn_ns = exp2_fn_ns; + + using fn_ns::Exp2ContigFactory; + DispatchVectorBuilder + dvb1; + dvb1.populate_dispatch_vector(exp2_contig_dispatch_vector); + + using fn_ns::Exp2StridedFactory; + DispatchVectorBuilder + dvb2; + dvb2.populate_dispatch_vector(exp2_strided_dispatch_vector); + + using fn_ns::Exp2TypeMapFactory; + DispatchVectorBuilder dvb3; + dvb3.populate_dispatch_vector(exp2_output_typeid_vector); +}; + +} // namespace impl + +void init_exp2(py::module_ m) +{ + using arrayT = dpctl::tensor::usm_ndarray; + using event_vecT = std::vector; + { + impl::populate_exp2_dispatch_vectors(); + using impl::exp2_contig_dispatch_vector; + using impl::exp2_output_typeid_vector; + using impl::exp2_strided_dispatch_vector; + + auto exp2_pyapi = [&](const arrayT &src, const arrayT &dst, + sycl::queue &exec_q, + const event_vecT &depends = {}) { + return py_unary_ufunc( + src, dst, exec_q, depends, exp2_output_typeid_vector, + exp2_contig_dispatch_vector, exp2_strided_dispatch_vector); + }; + m.def("_exp2", exp2_pyapi, "", py::arg("src"), py::arg("dst"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); + + auto exp2_result_type_pyapi = [&](const py::dtype &dtype) { + return py_unary_ufunc_result_type(dtype, exp2_output_typeid_vector); + }; + m.def("_exp2_result_type", exp2_result_type_pyapi); + } +} + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/exp2.hpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/exp2.hpp new file mode 100644 index 000000000000..f9f315d14383 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/exp2.hpp @@ -0,0 +1,46 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. +//===---------------------------------------------------------------------===// + +#pragma once +#include + +namespace py = pybind11; + +namespace dpctl::tensor::py_internal +{ + +extern void init_exp2(py::module_ m); + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/reciprocal.cpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/reciprocal.cpp new file mode 100644 index 000000000000..81ce427ade92 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/reciprocal.cpp @@ -0,0 +1,128 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+// THE POSSIBILITY OF SUCH DAMAGE.
+//*****************************************************************************
+//
+//===---------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines functions of dpctl.tensor._tensor_elementwise_impl
+/// extension, specifically functions for elementwise operations.
+//===---------------------------------------------------------------------===//
+
+#include
+
+#include
+
+#include "dpnp4pybind11.hpp"
+#include
+#include
+#include
+
+#include "elementwise_functions.hpp"
+#include "reciprocal.hpp"
+#include "utils/type_dispatch.hpp"
+
+#include "kernels/elementwise_functions/common.hpp"
+#include "kernels/elementwise_functions/reciprocal.hpp"
+
+namespace dpctl::tensor::py_internal
+{
+
+namespace py = pybind11;
+namespace td_ns = dpctl::tensor::type_dispatch;
+
+namespace ew_cmn_ns = dpctl::tensor::kernels::elementwise_common;
+using ew_cmn_ns::unary_contig_impl_fn_ptr_t;
+using ew_cmn_ns::unary_strided_impl_fn_ptr_t;
+
+// U42: ==== RECIPROCAL (x)
+namespace impl
+{
+
+namespace reciprocal_fn_ns = dpctl::tensor::kernels::reciprocal;
+
+static unary_contig_impl_fn_ptr_t
+    reciprocal_contig_dispatch_vector[td_ns::num_types];
+static int reciprocal_output_typeid_vector[td_ns::num_types];
+static unary_strided_impl_fn_ptr_t
+    reciprocal_strided_dispatch_vector[td_ns::num_types];
+
+void populate_reciprocal_dispatch_vectors(void)
+{
+    using namespace td_ns;
+    namespace fn_ns = reciprocal_fn_ns;
+
+    using fn_ns::ReciprocalContigFactory;
+    DispatchVectorBuilder
+        dvb1;
+    dvb1.populate_dispatch_vector(reciprocal_contig_dispatch_vector);
+
+    using fn_ns::ReciprocalStridedFactory;
+    DispatchVectorBuilder
+        dvb2;
+    dvb2.populate_dispatch_vector(reciprocal_strided_dispatch_vector);
+
+    using fn_ns::ReciprocalTypeMapFactory;
+    DispatchVectorBuilder dvb3;
+    dvb3.populate_dispatch_vector(reciprocal_output_typeid_vector);
+};
+
+} // namespace impl
+
+void init_reciprocal(py::module_ m)
+{
+    using arrayT = dpctl::tensor::usm_ndarray;
+    using event_vecT = std::vector;
+    {
+        impl::populate_reciprocal_dispatch_vectors();
+        using impl::reciprocal_contig_dispatch_vector;
+        using impl::reciprocal_output_typeid_vector;
+        using impl::reciprocal_strided_dispatch_vector;
+
+        auto reciprocal_pyapi = [&](const arrayT &src, const arrayT &dst,
+                                    sycl::queue &exec_q,
+                                    const event_vecT &depends = {}) {
+            return py_unary_ufunc(src, dst, exec_q, depends,
+                                  reciprocal_output_typeid_vector,
+                                  reciprocal_contig_dispatch_vector,
+                                  reciprocal_strided_dispatch_vector);
+        };
+        m.def("_reciprocal", reciprocal_pyapi, "", py::arg("src"),
+              py::arg("dst"), py::arg("sycl_queue"),
+              py::arg("depends") = py::list());
+
+        auto reciprocal_result_type_pyapi = [&](const py::dtype &dtype) {
+            return py_unary_ufunc_result_type(dtype,
+                                              reciprocal_output_typeid_vector);
+        };
+        m.def("_reciprocal_result_type", reciprocal_result_type_pyapi);
+    }
+}
+
+} // namespace dpctl::tensor::py_internal
diff --git
a/dpctl_ext/tensor/libtensor/source/elementwise_functions/reciprocal.hpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/reciprocal.hpp new file mode 100644 index 000000000000..1d2156f3464e --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/reciprocal.hpp @@ -0,0 +1,46 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. +//===---------------------------------------------------------------------===// + +#pragma once +#include <pybind11/pybind11.h> + +namespace py = pybind11; + +namespace dpctl::tensor::py_internal +{ + +extern void init_reciprocal(py::module_ m); + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/rsqrt.cpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/rsqrt.cpp new file mode 100644 index 000000000000..61a5c8bb94d5 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/rsqrt.cpp @@ -0,0 +1,126 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. +//===---------------------------------------------------------------------===// + +#include <sycl/sycl.hpp> + +#include <vector> + +#include "dpnp4pybind11.hpp" +#include <pybind11/numpy.h> +#include <pybind11/pybind11.h> +#include <pybind11/stl.h> + +#include "elementwise_functions.hpp" +#include "rsqrt.hpp" +#include "utils/type_dispatch.hpp" + +#include "kernels/elementwise_functions/common.hpp" +#include "kernels/elementwise_functions/rsqrt.hpp" + +namespace dpctl::tensor::py_internal +{ + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +namespace ew_cmn_ns = dpctl::tensor::kernels::elementwise_common; +using ew_cmn_ns::unary_contig_impl_fn_ptr_t; +using ew_cmn_ns::unary_strided_impl_fn_ptr_t; + +// U39: ==== RSQRT (x) +namespace impl +{ + +namespace rsqrt_fn_ns = dpctl::tensor::kernels::rsqrt; + +static unary_contig_impl_fn_ptr_t + rsqrt_contig_dispatch_vector[td_ns::num_types]; +static int rsqrt_output_typeid_vector[td_ns::num_types]; +static unary_strided_impl_fn_ptr_t + rsqrt_strided_dispatch_vector[td_ns::num_types]; + +void populate_rsqrt_dispatch_vectors(void) +{ + using namespace td_ns; + namespace fn_ns = rsqrt_fn_ns; + + using fn_ns::RsqrtContigFactory; + DispatchVectorBuilder<unary_contig_impl_fn_ptr_t, RsqrtContigFactory, num_types> + dvb1; + dvb1.populate_dispatch_vector(rsqrt_contig_dispatch_vector); + + using fn_ns::RsqrtStridedFactory; + DispatchVectorBuilder<unary_strided_impl_fn_ptr_t, RsqrtStridedFactory, num_types> + dvb2; + dvb2.populate_dispatch_vector(rsqrt_strided_dispatch_vector); + + using fn_ns::RsqrtTypeMapFactory; + DispatchVectorBuilder<int, RsqrtTypeMapFactory, num_types> dvb3; + dvb3.populate_dispatch_vector(rsqrt_output_typeid_vector); +}; + +} // namespace impl + +void init_rsqrt(py::module_ m) +{ + using arrayT = dpctl::tensor::usm_ndarray; + using event_vecT = std::vector<sycl::event>; + { + impl::populate_rsqrt_dispatch_vectors(); + using impl::rsqrt_contig_dispatch_vector; + using impl::rsqrt_output_typeid_vector; + using impl::rsqrt_strided_dispatch_vector; + + auto rsqrt_pyapi = [&](const arrayT &src, const arrayT &dst, + sycl::queue &exec_q, + const event_vecT &depends = {}) { + return py_unary_ufunc( + src, dst, exec_q, depends, rsqrt_output_typeid_vector, + rsqrt_contig_dispatch_vector, rsqrt_strided_dispatch_vector); + }; + m.def("_rsqrt", rsqrt_pyapi, "", py::arg("src"), py::arg("dst"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); + + auto 
rsqrt_result_type_pyapi = [&](const py::dtype &dtype) { + return py_unary_ufunc_result_type(dtype, + rsqrt_output_typeid_vector); + }; + m.def("_rsqrt_result_type", rsqrt_result_type_pyapi); + } +} + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/rsqrt.hpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/rsqrt.hpp new file mode 100644 index 000000000000..4ba740a31777 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/rsqrt.hpp @@ -0,0 +1,46 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. +//===---------------------------------------------------------------------===// + +#pragma once +#include <pybind11/pybind11.h> + +namespace py = pybind11; + +namespace dpctl::tensor::py_internal +{ + +extern void init_rsqrt(py::module_ m); + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/trunc.cpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/trunc.cpp new file mode 100644 index 000000000000..9014dc3800ba --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/trunc.cpp @@ -0,0 +1,126 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. +//===---------------------------------------------------------------------===// + +#include <sycl/sycl.hpp> + +#include <vector> + +#include "dpnp4pybind11.hpp" +#include <pybind11/numpy.h> +#include <pybind11/pybind11.h> +#include <pybind11/stl.h> + +#include "elementwise_functions.hpp" +#include "trunc.hpp" +#include "utils/type_dispatch.hpp" + +#include "kernels/elementwise_functions/common.hpp" +#include "kernels/elementwise_functions/trunc.hpp" + +namespace dpctl::tensor::py_internal +{ + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +namespace ew_cmn_ns = dpctl::tensor::kernels::elementwise_common; +using ew_cmn_ns::unary_contig_impl_fn_ptr_t; +using ew_cmn_ns::unary_strided_impl_fn_ptr_t; + +// U36: ==== TRUNC (x) +namespace impl +{ + +namespace trunc_fn_ns = dpctl::tensor::kernels::trunc; + +static unary_contig_impl_fn_ptr_t + trunc_contig_dispatch_vector[td_ns::num_types]; +static int trunc_output_typeid_vector[td_ns::num_types]; +static unary_strided_impl_fn_ptr_t + trunc_strided_dispatch_vector[td_ns::num_types]; + +void populate_trunc_dispatch_vectors(void) +{ + using namespace td_ns; + namespace fn_ns = trunc_fn_ns; + + using fn_ns::TruncContigFactory; + DispatchVectorBuilder<unary_contig_impl_fn_ptr_t, TruncContigFactory, num_types> + dvb1; + dvb1.populate_dispatch_vector(trunc_contig_dispatch_vector); + + using fn_ns::TruncStridedFactory; + DispatchVectorBuilder<unary_strided_impl_fn_ptr_t, TruncStridedFactory, num_types> + dvb2; + dvb2.populate_dispatch_vector(trunc_strided_dispatch_vector); + + using fn_ns::TruncTypeMapFactory; + DispatchVectorBuilder<int, TruncTypeMapFactory, num_types> dvb3; + dvb3.populate_dispatch_vector(trunc_output_typeid_vector); +}; + +} // namespace impl + +void init_trunc(py::module_ m) +{ + using arrayT = dpctl::tensor::usm_ndarray; + using event_vecT = std::vector<sycl::event>; + { + impl::populate_trunc_dispatch_vectors(); + using impl::trunc_contig_dispatch_vector; + using impl::trunc_output_typeid_vector; + using impl::trunc_strided_dispatch_vector; + + auto trunc_pyapi = [&](const arrayT &src, const arrayT &dst, + sycl::queue &exec_q, + const event_vecT &depends = {}) { + return py_unary_ufunc( + src, dst, exec_q, depends, 
trunc_output_typeid_vector, + trunc_contig_dispatch_vector, trunc_strided_dispatch_vector); + }; + m.def("_trunc", trunc_pyapi, "", py::arg("src"), py::arg("dst"), + py::arg("sycl_queue"), py::arg("depends") = py::list()); + + auto trunc_result_type_pyapi = [&](const py::dtype &dtype) { + return py_unary_ufunc_result_type(dtype, + trunc_output_typeid_vector); + }; + m.def("_trunc_result_type", trunc_result_type_pyapi); + } +} + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/elementwise_functions/trunc.hpp b/dpctl_ext/tensor/libtensor/source/elementwise_functions/trunc.hpp new file mode 100644 index 000000000000..79ed6b5ded14 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/elementwise_functions/trunc.hpp @@ -0,0 +1,46 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_elementwise_impl +/// extension, specifically functions for elementwise operations. 
+//===---------------------------------------------------------------------===// + +#pragma once +#include <pybind11/pybind11.h> + +namespace py = pybind11; + +namespace dpctl::tensor::py_internal +{ + +extern void init_trunc(py::module_ m); + +} // namespace dpctl::tensor::py_internal diff --git a/dpnp/dpnp_iface_mathematical.py b/dpnp/dpnp_iface_mathematical.py index 218a05598423..e0cbda70d000 100644 --- a/dpnp/dpnp_iface_mathematical.py +++ b/dpnp/dpnp_iface_mathematical.py @@ -5229,8 +5229,8 @@ def trapezoid(y, x=None, dx=1.0, axis=-1): trunc = DPNPUnaryFunc( "trunc", - ti._trunc_result_type, - ti._trunc, + ti_ext._trunc_result_type, + ti_ext._trunc, _TRUNC_DOCSTRING, mkl_fn_to_call="_mkl_trunc_to_call", mkl_impl_fn="_trunc", diff --git a/dpnp/dpnp_iface_trigonometric.py b/dpnp/dpnp_iface_trigonometric.py index 608eef55ae56..6deab3a8876c 100644 --- a/dpnp/dpnp_iface_trigonometric.py +++ b/dpnp/dpnp_iface_trigonometric.py @@ -718,8 +718,8 @@ def _get_accumulation_res_dt(a, dtype): cbrt = DPNPUnaryFunc( "cbrt", - ti._cbrt_result_type, - ti._cbrt, + ti_ext._cbrt_result_type, + ti_ext._cbrt, _CBRT_DOCSTRING, mkl_fn_to_call="_mkl_cbrt_to_call", mkl_impl_fn="_cbrt", @@ -1187,8 +1187,8 @@ def cumlogsumexp( exp2 = DPNPUnaryFunc( "exp2", - ti._exp2_result_type, - ti._exp2, + ti_ext._exp2_result_type, + ti_ext._exp2, _EXP2_DOCSTRING, mkl_fn_to_call="_mkl_exp2_to_call", mkl_impl_fn="_exp2", @@ -2107,8 +2107,8 @@ def logsumexp(x, /, *, axis=None, dtype=None, keepdims=False, out=None): reciprocal = DPNPUnaryFunc( "reciprocal", - ti._reciprocal_result_type, - ti._reciprocal, + ti_ext._reciprocal_result_type, + ti_ext._reciprocal, _RECIPROCAL_DOCSTRING, mkl_fn_to_call="_mkl_inv_to_call", mkl_impl_fn="_inv", @@ -2252,8 +2252,8 @@ def reduce_hypot(x, /, *, axis=None, dtype=None, keepdims=False, out=None): rsqrt = DPNPUnaryFunc( "rsqrt", - ti._rsqrt_result_type, - ti._rsqrt, + ti_ext._rsqrt_result_type, + ti_ext._rsqrt, _RSQRT_DOCSTRING, ) From ecd4991fe7eba4fa5c6c58a89152250c53d8f6b3 Mon Sep 17 00:00:00 2001 From: Anton <100830759+antonwolfy@users.noreply.github.com> Date: Thu, 5 Mar 2026 10:42:03 +0100 Subject: [PATCH 08/11] Clean up dpctl.tensor code (#2797) This PR adds a small clean-up to the already ported dpctl.tensor code: * remove unused includes * add missing includes * remove redundant namespace qualifications when calling functions from the same namespace --- .../include/kernels/constructors.hpp | 2 +- .../kernels/elementwise_functions/common.hpp | 3 -- .../elementwise_functions/logaddexp.hpp | 1 - .../kernels/elementwise_functions/maximum.hpp | 1 - .../kernels/elementwise_functions/minimum.hpp | 1 - .../kernels/integer_advanced_indexing.hpp | 2 +- .../tensor/libtensor/source/accumulators.cpp | 13 +++--- .../tensor/libtensor/source/accumulators.hpp | 1 - .../source/boolean_advanced_indexing.cpp | 40 ++++++++---------- .../source/copy_and_cast_usm_to_usm.cpp | 9 ++-- .../libtensor/source/copy_as_contig.cpp | 42 +++++++++---------- .../tensor/libtensor/source/copy_for_roll.cpp | 9 ++-- .../source/device_support_queries.cpp | 2 +- .../tensor/libtensor/source/full_ctor.cpp | 5 ++- .../source/integer_advanced_indexing.cpp | 3 -- .../tensor/libtensor/source/tensor_ctors.cpp | 3 -- .../tensor/libtensor/source/zeros_ctor.cpp | 2 - .../tensor/libtensor/source/zeros_ctor.hpp | 1 - 18 files changed, 59 insertions(+), 81 deletions(-) diff --git a/dpctl_ext/tensor/libtensor/include/kernels/constructors.hpp b/dpctl_ext/tensor/libtensor/include/kernels/constructors.hpp index 26ae46707a6b..f48dfa4d4077 --- 
a/dpctl_ext/tensor/libtensor/include/kernels/constructors.hpp +++ b/dpctl_ext/tensor/libtensor/include/kernels/constructors.hpp @@ -33,8 +33,8 @@ //===----------------------------------------------------------------------===// #pragma once + #include -#include #include #include diff --git a/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/common.hpp b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/common.hpp index d19930b722a9..e83426df8aa9 100644 --- a/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/common.hpp +++ b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/common.hpp @@ -33,11 +33,8 @@ #pragma once #include -#include -#include #include #include -#include #include #include diff --git a/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/logaddexp.hpp b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/logaddexp.hpp index 8565df2cf528..7337b6e43eab 100644 --- a/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/logaddexp.hpp +++ b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/logaddexp.hpp @@ -46,7 +46,6 @@ #include "vec_size_util.hpp" #include "utils/math_utils.hpp" -#include "utils/offset_utils.hpp" #include "utils/type_dispatch_building.hpp" #include "utils/type_utils.hpp" diff --git a/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/maximum.hpp b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/maximum.hpp index 067ccd84f059..f204b6640042 100644 --- a/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/maximum.hpp +++ b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/maximum.hpp @@ -45,7 +45,6 @@ #include "vec_size_util.hpp" #include "utils/math_utils.hpp" -#include "utils/offset_utils.hpp" #include "utils/type_dispatch_building.hpp" #include "utils/type_utils.hpp" diff --git a/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/minimum.hpp b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/minimum.hpp index a38945f89a25..d18577a5cf4e 100644 --- a/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/minimum.hpp +++ b/dpctl_ext/tensor/libtensor/include/kernels/elementwise_functions/minimum.hpp @@ -44,7 +44,6 @@ #include "vec_size_util.hpp" #include "utils/math_utils.hpp" -#include "utils/offset_utils.hpp" #include "utils/type_dispatch_building.hpp" #include "utils/type_utils.hpp" diff --git a/dpctl_ext/tensor/libtensor/include/kernels/integer_advanced_indexing.hpp b/dpctl_ext/tensor/libtensor/include/kernels/integer_advanced_indexing.hpp index 7be2b3ea8591..f6d2f0175ce8 100644 --- a/dpctl_ext/tensor/libtensor/include/kernels/integer_advanced_indexing.hpp +++ b/dpctl_ext/tensor/libtensor/include/kernels/integer_advanced_indexing.hpp @@ -33,7 +33,7 @@ //===----------------------------------------------------------------------===// #pragma once -#include + #include #include #include diff --git a/dpctl_ext/tensor/libtensor/source/accumulators.cpp b/dpctl_ext/tensor/libtensor/source/accumulators.cpp index 82913010755a..c6ab96418d47 100644 --- a/dpctl_ext/tensor/libtensor/source/accumulators.cpp +++ b/dpctl_ext/tensor/libtensor/source/accumulators.cpp @@ -32,17 +32,18 @@ /// This file defines functions of dpctl.tensor._tensor_impl extensions //===----------------------------------------------------------------------===// -#include #include #include #include +#include +#include +#include #include #include #include "dpnp4pybind11.hpp" #include -#include 
#include "kernels/accumulators.hpp" #include "simplify_iteration_space.hpp" @@ -196,8 +197,8 @@ std::size_t py_mask_positions(const dpctl::tensor::usm_ndarray &mask, int mask_nd = mask.get_ndim(); int nd = mask_nd; - dpctl::tensor::py_internal::compact_iteration_space( - nd, shape, strides_vector, compact_shape, compact_strides); + compact_iteration_space(nd, shape, strides_vector, compact_shape, + compact_strides); // Strided implementation auto strided_fn = @@ -351,8 +352,8 @@ std::size_t py_cumsum_1d(const dpctl::tensor::usm_ndarray &src, int src_nd = src.get_ndim(); int nd = src_nd; - dpctl::tensor::py_internal::compact_iteration_space( - nd, shape, strides_vector, compact_shape, compact_strides); + compact_iteration_space(nd, shape, strides_vector, compact_shape, + compact_strides); // Strided implementation auto strided_fn = cumsum_1d_strided_dispatch_vector[src_typeid]; diff --git a/dpctl_ext/tensor/libtensor/source/accumulators.hpp b/dpctl_ext/tensor/libtensor/source/accumulators.hpp index 42503093789b..e400aad2dceb 100644 --- a/dpctl_ext/tensor/libtensor/source/accumulators.hpp +++ b/dpctl_ext/tensor/libtensor/source/accumulators.hpp @@ -39,7 +39,6 @@ #include #include "dpnp4pybind11.hpp" -#include namespace dpctl::tensor::py_internal { diff --git a/dpctl_ext/tensor/libtensor/source/boolean_advanced_indexing.cpp b/dpctl_ext/tensor/libtensor/source/boolean_advanced_indexing.cpp index a78cb1750b81..4c46e1e2fec8 100644 --- a/dpctl_ext/tensor/libtensor/source/boolean_advanced_indexing.cpp +++ b/dpctl_ext/tensor/libtensor/source/boolean_advanced_indexing.cpp @@ -336,21 +336,19 @@ std::pair shT masked_src_shape; shT ortho_src_strides; shT masked_src_strides; - dpctl::tensor::py_internal::split_iteration_space( - src_shape_vec, src_strides_vec, axis_start, axis_end, - ortho_src_shape, - masked_src_shape, // 4 vectors modified - ortho_src_strides, masked_src_strides); + split_iteration_space(src_shape_vec, src_strides_vec, axis_start, + axis_end, ortho_src_shape, + masked_src_shape, // 4 vectors modified + ortho_src_strides, masked_src_strides); shT ortho_dst_shape; shT masked_dst_shape; shT ortho_dst_strides; shT masked_dst_strides; - dpctl::tensor::py_internal::split_iteration_space( - dst_shape_vec, dst_strides_vec, axis_start, axis_start + 1, - ortho_dst_shape, - masked_dst_shape, // 4 vectors modified - ortho_dst_strides, masked_dst_strides); + split_iteration_space(dst_shape_vec, dst_strides_vec, axis_start, + axis_start + 1, ortho_dst_shape, + masked_dst_shape, // 4 vectors modified + ortho_dst_strides, masked_dst_strides); assert(ortho_src_shape.size() == static_cast(ortho_nd)); assert(ortho_dst_shape.size() == static_cast(ortho_nd)); @@ -366,7 +364,7 @@ std::pair py::ssize_t ortho_src_offset(0); py::ssize_t ortho_dst_offset(0); - dpctl::tensor::py_internal::simplify_iteration_space( + simplify_iteration_space( ortho_nd, _shape, ortho_src_strides, ortho_dst_strides, // output simplified_ortho_shape, simplified_ortho_src_strides, @@ -646,21 +644,19 @@ std::pair shT masked_dst_shape; shT ortho_dst_strides; shT masked_dst_strides; - dpctl::tensor::py_internal::split_iteration_space( - dst_shape_vec, dst_strides_vec, axis_start, axis_end, - ortho_dst_shape, - masked_dst_shape, // 4 vectors modified - ortho_dst_strides, masked_dst_strides); + split_iteration_space(dst_shape_vec, dst_strides_vec, axis_start, + axis_end, ortho_dst_shape, + masked_dst_shape, // 4 vectors modified + ortho_dst_strides, masked_dst_strides); shT ortho_rhs_shape; shT masked_rhs_shape; shT ortho_rhs_strides; 
shT masked_rhs_strides; - dpctl::tensor::py_internal::split_iteration_space( - rhs_shape_vec, rhs_strides_vec, axis_start, axis_start + 1, - ortho_rhs_shape, - masked_rhs_shape, // 4 vectors modified - ortho_rhs_strides, masked_rhs_strides); + split_iteration_space(rhs_shape_vec, rhs_strides_vec, axis_start, + axis_start + 1, ortho_rhs_shape, + masked_rhs_shape, // 4 vectors modified + ortho_rhs_strides, masked_rhs_strides); assert(ortho_dst_shape.size() == static_cast(ortho_nd)); assert(ortho_rhs_shape.size() == static_cast(ortho_nd)); @@ -676,7 +672,7 @@ std::pair py::ssize_t ortho_dst_offset(0); py::ssize_t ortho_rhs_offset(0); - dpctl::tensor::py_internal::simplify_iteration_space( + simplify_iteration_space( ortho_nd, _shape, ortho_dst_strides, ortho_rhs_strides, simplified_ortho_shape, simplified_ortho_dst_strides, simplified_ortho_rhs_strides, ortho_dst_offset, ortho_rhs_offset); diff --git a/dpctl_ext/tensor/libtensor/source/copy_and_cast_usm_to_usm.cpp b/dpctl_ext/tensor/libtensor/source/copy_and_cast_usm_to_usm.cpp index 3d20be02f885..9ea49ae1d88b 100644 --- a/dpctl_ext/tensor/libtensor/source/copy_and_cast_usm_to_usm.cpp +++ b/dpctl_ext/tensor/libtensor/source/copy_and_cast_usm_to_usm.cpp @@ -188,11 +188,10 @@ std::pair copy_usm_ndarray_into_usm_ndarray( const py::ssize_t *shape = src_shape; // nd, simplified_* and *_offset are modified by reference - dpctl::tensor::py_internal::simplify_iteration_space( - nd, shape, src_strides, dst_strides, - // output - simplified_shape, simplified_src_strides, simplified_dst_strides, - src_offset, dst_offset); + simplify_iteration_space(nd, shape, src_strides, dst_strides, + // output + simplified_shape, simplified_src_strides, + simplified_dst_strides, src_offset, dst_offset); if (nd < 2) { if (nd == 1) { diff --git a/dpctl_ext/tensor/libtensor/source/copy_as_contig.cpp b/dpctl_ext/tensor/libtensor/source/copy_as_contig.cpp index bbee24c95d4d..5d78862651fc 100644 --- a/dpctl_ext/tensor/libtensor/source/copy_as_contig.cpp +++ b/dpctl_ext/tensor/libtensor/source/copy_as_contig.cpp @@ -225,11 +225,11 @@ std::pair int nd = src_nd; // nd, simplified_* and *_offset are modified by reference - dpctl::tensor::py_internal::simplify_iteration_space( - nd, src_shape_vec.data(), src_strides_vec, dst.get_strides_vector(), - // output - simplified_shape, simplified_src_strides, simplified_dst_strides, - src_offset, dst_offset); + simplify_iteration_space(nd, src_shape_vec.data(), src_strides_vec, + dst.get_strides_vector(), + // output + simplified_shape, simplified_src_strides, + simplified_dst_strides, src_offset, dst_offset); if (!((0 == src_offset) && (0 == dst_offset))) { throw std::runtime_error( @@ -359,11 +359,11 @@ std::pair int nd = src_nd; // nd, simplified_* and *_offset are modified by reference - dpctl::tensor::py_internal::simplify_iteration_space( - nd, src_shape_vec.data(), src_strides_vec, dst.get_strides_vector(), - // output - simplified_shape, simplified_src_strides, simplified_dst_strides, - src_offset, dst_offset); + simplify_iteration_space(nd, src_shape_vec.data(), src_strides_vec, + dst.get_strides_vector(), + // output + simplified_shape, simplified_src_strides, + simplified_dst_strides, src_offset, dst_offset); if (!((0 == src_offset) && (0 == dst_offset))) { throw std::runtime_error( @@ -521,12 +521,11 @@ std::pair int nd = static_cast(batch_shape_vec.size()); // nd, simplified_* and *_offset are modified by reference - dpctl::tensor::py_internal::simplify_iteration_space( - nd, batch_shape_vec.data(), src_batch_strides_vec, 
- dst_batch_strides_vec, - // output - simplified_shape, simplified_src_strides, simplified_dst_strides, - src_offset, dst_offset); + simplify_iteration_space(nd, batch_shape_vec.data(), src_batch_strides_vec, + dst_batch_strides_vec, + // output + simplified_shape, simplified_src_strides, + simplified_dst_strides, src_offset, dst_offset); if (!((0 == src_offset) && (0 == dst_offset))) { throw std::runtime_error( @@ -714,12 +713,11 @@ std::pair int nd = static_cast(batch_shape_vec.size()); // nd, simplified_* and *_offset are modified by reference - dpctl::tensor::py_internal::simplify_iteration_space( - nd, batch_shape_vec.data(), src_batch_strides_vec, - dst_batch_strides_vec, - // output - simplified_shape, simplified_src_strides, simplified_dst_strides, - src_offset, dst_offset); + simplify_iteration_space(nd, batch_shape_vec.data(), src_batch_strides_vec, + dst_batch_strides_vec, + // output + simplified_shape, simplified_src_strides, + simplified_dst_strides, src_offset, dst_offset); if (!((0 == src_offset) && (0 == dst_offset))) { throw std::runtime_error( diff --git a/dpctl_ext/tensor/libtensor/source/copy_for_roll.cpp b/dpctl_ext/tensor/libtensor/source/copy_for_roll.cpp index a187b2247677..7742c1c96a4e 100644 --- a/dpctl_ext/tensor/libtensor/source/copy_for_roll.cpp +++ b/dpctl_ext/tensor/libtensor/source/copy_for_roll.cpp @@ -197,11 +197,10 @@ std::pair const py::ssize_t *shape = src_shape_ptr; // nd, simplified_* and *_offset are modified by reference - dpctl::tensor::py_internal::simplify_iteration_space( - nd, shape, src_strides, dst_strides, - // output - simplified_shape, simplified_src_strides, simplified_dst_strides, - src_offset, dst_offset); + simplify_iteration_space(nd, shape, src_strides, dst_strides, + // output + simplified_shape, simplified_src_strides, + simplified_dst_strides, src_offset, dst_offset); if (nd == 1 && simplified_src_strides[0] == 1 && simplified_dst_strides[0] == 1) { diff --git a/dpctl_ext/tensor/libtensor/source/device_support_queries.cpp b/dpctl_ext/tensor/libtensor/source/device_support_queries.cpp index 97a8ba83831e..3cc0952c2080 100644 --- a/dpctl_ext/tensor/libtensor/source/device_support_queries.cpp +++ b/dpctl_ext/tensor/libtensor/source/device_support_queries.cpp @@ -36,7 +36,7 @@ #include "dpnp4pybind11.hpp" #include -#include + #include namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/full_ctor.cpp b/dpctl_ext/tensor/libtensor/source/full_ctor.cpp index aef57836666e..dfe1d25b769c 100644 --- a/dpctl_ext/tensor/libtensor/source/full_ctor.cpp +++ b/dpctl_ext/tensor/libtensor/source/full_ctor.cpp @@ -32,7 +32,6 @@ /// This file defines functions of dpctl.tensor._tensor_impl extensions //===--------------------------------------------------------------------===// -#include #include #include #include @@ -42,11 +41,13 @@ #include #include "dpnp4pybind11.hpp" -#include +#include // py::cast> #include #include "kernels/constructors.hpp" +#include "utils/offset_utils.hpp" #include "utils/output_validation.hpp" +#include "utils/sycl_alloc_utils.hpp" #include "utils/type_dispatch.hpp" #include "utils/type_utils.hpp" diff --git a/dpctl_ext/tensor/libtensor/source/integer_advanced_indexing.cpp b/dpctl_ext/tensor/libtensor/source/integer_advanced_indexing.cpp index 925cc2e895ed..c6021bdfd2d1 100644 --- a/dpctl_ext/tensor/libtensor/source/integer_advanced_indexing.cpp +++ b/dpctl_ext/tensor/libtensor/source/integer_advanced_indexing.cpp @@ -34,7 +34,6 @@ 
//===----------------------------------------------------------------------===// #include -#include #include #include #include @@ -47,9 +47,7 @@ #include #include "dpnp4pybind11.hpp" -#include #include -#include #include "kernels/integer_advanced_indexing.hpp" #include "utils/memory_overlap.hpp" diff --git a/dpctl_ext/tensor/libtensor/source/tensor_ctors.cpp b/dpctl_ext/tensor/libtensor/source/tensor_ctors.cpp index 98ab488e5879..7b151c773fe0 100644 --- a/dpctl_ext/tensor/libtensor/source/tensor_ctors.cpp +++ b/dpctl_ext/tensor/libtensor/source/tensor_ctors.cpp @@ -72,9 +72,6 @@ static_assert(std::is_same_v<py::ssize_t, dpctl::tensor::ssize_t>); namespace { -using dpctl::tensor::c_contiguous_strides; -using dpctl::tensor::f_contiguous_strides; - using dpctl::tensor::overlap::MemoryOverlap; using dpctl::tensor::overlap::SameLogicalTensors; diff --git a/dpctl_ext/tensor/libtensor/source/zeros_ctor.cpp b/dpctl_ext/tensor/libtensor/source/zeros_ctor.cpp index 2eb05e49f382..b9a2e01bea4a 100644 --- a/dpctl_ext/tensor/libtensor/source/zeros_ctor.cpp +++ b/dpctl_ext/tensor/libtensor/source/zeros_ctor.cpp @@ -32,7 +32,6 @@ /// This file defines functions of dpctl.tensor._tensor_impl extensions //===--------------------------------------------------------------------===// -#include #include #include #include @@ -41,7 +40,6 @@ #include #include "dpnp4pybind11.hpp" -#include #include #include "utils/output_validation.hpp" diff --git a/dpctl_ext/tensor/libtensor/source/zeros_ctor.hpp b/dpctl_ext/tensor/libtensor/source/zeros_ctor.hpp index 51a1903a0f36..d104e37f5533 100644 --- a/dpctl_ext/tensor/libtensor/source/zeros_ctor.hpp +++ b/dpctl_ext/tensor/libtensor/source/zeros_ctor.hpp @@ -39,7 +39,6 @@ #include #include "dpnp4pybind11.hpp" -#include namespace dpctl::tensor::py_internal { From 0edd3b18eb128a8ed45ca81cb54d4c143df9d206 Mon Sep 17 00:00:00 2001 From: vlad-perevezentsev Date: Thu, 5 Mar 2026 13:27:40 +0100 Subject: [PATCH 09/11] Extend `._tensor_impl` with where(), clip() and type utils functions (#2778) This PR extends `_tensor_impl` in `dpctl_ext.tensor` with the `_where` and `_clip` functions and the repeat functions (`_repeat_by_sequence`, `_repeat_by_scalar`). It also adds `repeat()`, `where()`, `clip()` and `can_cast`, `finfo`, `iinfo`, `isdtype`, `result_type` from `_type_utils.py` to `dpctl_ext.tensor` and updates the corresponding dpnp functions to use these implementations internally.
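---
[Patch annotation, not part of the commit message: a minimal, untested usage sketch of the API this patch exports from `dpctl_ext.tensor`. It assumes the exported functions mirror the public signatures of the corresponding `dpctl.tensor` counterparts being ported here.]
import dpctl.tensor as dpt
import dpctl_ext.tensor as dpt_ext
x = dpt.arange(10, dtype="f4")
y = dpt_ext.clip(x, 2, 7)  # clamp each element to the range [2, 7]
z = dpt_ext.where(x > 5, x, y)  # elementwise selection between x and y
r = dpt_ext.repeat(x, 2)  # each element repeated twice
dt = dpt_ext.result_type(x, dpt.int64)  # promoted dtype of the operands
dpctl_ext/tensor/CMakeLists.txt | 12 +- dpctl_ext/tensor/__init__.py | 22 +- dpctl_ext/tensor/_clip.py | 781 ++++++++++++++ dpctl_ext/tensor/_copy_utils.py | 6 +- dpctl_ext/tensor/_manipulation_functions.py | 273 ++++- dpctl_ext/tensor/_scalar_utils.py | 122 +++ dpctl_ext/tensor/_search_functions.py | 419 ++++++++ dpctl_ext/tensor/_type_utils.py | 999 ++++++++++++++++++ .../tensor/libtensor/include/kernels/clip.hpp | 357 +++++++ .../libtensor/include/kernels/repeat.hpp | 460 ++++++++ .../libtensor/include/kernels/where.hpp | 338 ++++++ dpctl_ext/tensor/libtensor/source/clip.cpp | 265 +++++ dpctl_ext/tensor/libtensor/source/clip.hpp | 57 + dpctl_ext/tensor/libtensor/source/repeat.cpp | 820 ++++++++++++++ dpctl_ext/tensor/libtensor/source/repeat.hpp | 83 ++ .../tensor/libtensor/source/tensor_ctors.cpp | 120 ++- dpctl_ext/tensor/libtensor/source/where.cpp | 265 +++++ dpctl_ext/tensor/libtensor/source/where.hpp | 57 + dpnp/dpnp_algo/dpnp_arraycreation.py | 2 +- dpnp/dpnp_algo/dpnp_elementwise_common.py | 2 +- dpnp/dpnp_array.py | 4 +- dpnp/dpnp_iface_functional.py | 8 +- dpnp/dpnp_iface_indexing.py | 2 +- dpnp/dpnp_iface_manipulation.py | 16 +- dpnp/dpnp_iface_mathematical.py 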
| 19 +- dpnp/dpnp_iface_searching.py | 5 +- dpnp/dpnp_iface_sorting.py | 2 +- dpnp/dpnp_iface_statistics.py | 2 +- dpnp/dpnp_iface_trigonometric.py | 4 +- dpnp/dpnp_iface_types.py | 9 +- dpnp/dpnp_utils/dpnp_utils_common.py | 5 +- dpnp/dpnp_utils/dpnp_utils_linearalgebra.py | 10 +- dpnp/dpnp_utils/dpnp_utils_statistics.py | 5 +- dpnp/fft/dpnp_utils_fft.py | 8 +- dpnp/linalg/dpnp_iface_linalg.py | 5 +- dpnp/linalg/dpnp_utils_linalg.py | 2 +- dpnp/tests/test_arraymanipulation.py | 5 +- dpnp/tests/test_counting.py | 5 +- dpnp/tests/test_flipping.py | 5 +- dpnp/tests/test_indexing.py | 7 +- dpnp/tests/test_linalg.py | 5 +- dpnp/tests/test_manipulation.py | 5 +- dpnp/tests/test_mathematical.py | 11 +- dpnp/tests/test_product.py | 5 +- dpnp/tests/test_sort.py | 5 +- .../cupy/core_tests/test_ndarray.py | 5 +- .../cupy/lib_tests/test_shape_base.py | 5 +- .../cupy/manipulation_tests/test_dims.py | 5 +- .../cupy/manipulation_tests/test_transpose.py | 5 +- .../cupy/math_tests/test_sumprod.py | 5 +- .../cupy/sorting_tests/test_sort.py | 5 +- .../cupy/statistics_tests/test_meanvar.py | 5 +- dpnp/tests/third_party/cupy/testing/_loops.py | 5 +- 53 files changed, 5517 insertions(+), 142 deletions(-) create mode 100644 dpctl_ext/tensor/_clip.py create mode 100644 dpctl_ext/tensor/_scalar_utils.py create mode 100644 dpctl_ext/tensor/_search_functions.py create mode 100644 dpctl_ext/tensor/_type_utils.py create mode 100644 dpctl_ext/tensor/libtensor/include/kernels/clip.hpp create mode 100644 dpctl_ext/tensor/libtensor/include/kernels/repeat.hpp create mode 100644 dpctl_ext/tensor/libtensor/include/kernels/where.hpp create mode 100644 dpctl_ext/tensor/libtensor/source/clip.cpp create mode 100644 dpctl_ext/tensor/libtensor/source/clip.hpp create mode 100644 dpctl_ext/tensor/libtensor/source/repeat.cpp create mode 100644 dpctl_ext/tensor/libtensor/source/repeat.hpp create mode 100644 dpctl_ext/tensor/libtensor/source/where.cpp create mode 100644 dpctl_ext/tensor/libtensor/source/where.hpp diff --git a/dpctl_ext/tensor/CMakeLists.txt b/dpctl_ext/tensor/CMakeLists.txt index 0b166a202735..6f823a818ce7 100644 --- a/dpctl_ext/tensor/CMakeLists.txt +++ b/dpctl_ext/tensor/CMakeLists.txt @@ -58,10 +58,10 @@ set(_tensor_impl_sources ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/full_ctor.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/zeros_ctor.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/triul_ctor.cpp - # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/where.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/where.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/device_support_queries.cpp - # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/repeat.cpp - # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/clip.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/repeat.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/clip.cpp ) set(_static_lib_trgt simplify_iteration_space) @@ -92,10 +92,10 @@ endif() set(_no_fast_math_sources ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/copy_and_cast_usm_to_usm.cpp - # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/full_ctor.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/full_ctor.cpp # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/linear_sequences.cpp - # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/clip.cpp - # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/where.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/clip.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/where.cpp ) #list( #APPEND _no_fast_math_sources diff --git a/dpctl_ext/tensor/__init__.py b/dpctl_ext/tensor/__init__.py 
index fa76faccc632..2c1e761beb3b 100644 --- a/dpctl_ext/tensor/__init__.py +++ b/dpctl_ext/tensor/__init__.py @@ -27,20 +27,21 @@ # ***************************************************************************** -from dpctl_ext.tensor._copy_utils import ( +from ._clip import clip +from ._copy_utils import ( asnumpy, astype, copy, from_numpy, to_numpy, ) -from dpctl_ext.tensor._ctors import ( +from ._ctors import ( eye, full, tril, triu, ) -from dpctl_ext.tensor._indexing_functions import ( +from ._indexing_functions import ( extract, nonzero, place, @@ -49,28 +50,39 @@ take, take_along_axis, ) -from dpctl_ext.tensor._manipulation_functions import ( +from ._manipulation_functions import ( + repeat, roll, ) -from dpctl_ext.tensor._reshape import reshape +from ._reshape import reshape +from ._search_functions import where +from ._type_utils import can_cast, finfo, iinfo, isdtype, result_type __all__ = [ "asnumpy", "astype", + "can_cast", "copy", + "clip", "extract", "eye", + "finfo", "from_numpy", "full", + "iinfo", + "isdtype", "nonzero", "place", "put", "put_along_axis", + "repeat", "reshape", + "result_type", "roll", "take", "take_along_axis", "to_numpy", "tril", "triu", + "where", ] diff --git a/dpctl_ext/tensor/_clip.py b/dpctl_ext/tensor/_clip.py new file mode 100644 index 000000000000..f145e9f2d98d --- /dev/null +++ b/dpctl_ext/tensor/_clip.py @@ -0,0 +1,781 @@ +# ***************************************************************************** +# Copyright (c) 2026, Intel Corporation +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# - Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# - Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# - Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +# THE POSSIBILITY OF SUCH DAMAGE. 
+# ***************************************************************************** + +import dpctl +import dpctl.tensor as dpt +import dpctl.tensor._tensor_elementwise_impl as tei +from dpctl.utils import ExecutionPlacementError, SequentialOrderManager + +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt_ext +import dpctl_ext.tensor._tensor_impl as ti + +from ._copy_utils import ( + _empty_like_orderK, + _empty_like_pair_orderK, + _empty_like_triple_orderK, +) +from ._manipulation_functions import _broadcast_shape_impl +from ._scalar_utils import ( + _get_dtype, + _get_queue_usm_type, + _get_shape, + _validate_dtype, +) +from ._type_utils import ( + _can_cast, + _resolve_one_strong_one_weak_types, + _resolve_one_strong_two_weak_types, +) + + +def _check_clip_dtypes(res_dtype, arg1_dtype, arg2_dtype, sycl_dev): + """ + Checks if both types `arg1_dtype` and `arg2_dtype` can be + cast to `res_dtype` according to the rule `safe` + """ + if arg1_dtype == res_dtype and arg2_dtype == res_dtype: + return None, None, res_dtype + + _fp16 = sycl_dev.has_aspect_fp16 + _fp64 = sycl_dev.has_aspect_fp64 + if _can_cast(arg1_dtype, res_dtype, _fp16, _fp64) and _can_cast( + arg2_dtype, res_dtype, _fp16, _fp64 + ): + # prevent unnecessary casting + ret_buf1_dt = None if res_dtype == arg1_dtype else res_dtype + ret_buf2_dt = None if res_dtype == arg2_dtype else res_dtype + return ret_buf1_dt, ret_buf2_dt, res_dtype + else: + return None, None, None + + +def _clip_none(x, val, out, order, _binary_fn): + q1, x_usm_type = x.sycl_queue, x.usm_type + q2, val_usm_type = _get_queue_usm_type(val) + if q2 is None: + exec_q = q1 + res_usm_type = x_usm_type + else: + exec_q = dpctl.utils.get_execution_queue((q1, q2)) + if exec_q is None: + raise ExecutionPlacementError( + "Execution placement can not be unambiguously inferred " + "from input arguments." + ) + res_usm_type = dpctl.utils.get_coerced_usm_type( + ( + x_usm_type, + val_usm_type, + ) + ) + dpctl.utils.validate_usm_type(res_usm_type, allow_none=False) + x_shape = x.shape + val_shape = _get_shape(val) + if not isinstance(val_shape, (tuple, list)): + raise TypeError( + "Shape of arguments can not be inferred. " + "Arguments are expected to be " + "lists, tuples, or both" + ) + try: + res_shape = _broadcast_shape_impl( + [ + x_shape, + val_shape, + ] + ) + except ValueError: + raise ValueError( + "operands could not be broadcast together with shapes " + f"{x_shape} and {val_shape}" + ) + sycl_dev = exec_q.sycl_device + x_dtype = x.dtype + val_dtype = _get_dtype(val, sycl_dev) + if not _validate_dtype(val_dtype): + raise ValueError("Operands have unsupported data types") + + val_dtype = _resolve_one_strong_one_weak_types(x_dtype, val_dtype, sycl_dev) + + res_dt = x.dtype + _fp16 = sycl_dev.has_aspect_fp16 + _fp64 = sycl_dev.has_aspect_fp64 + if not _can_cast(val_dtype, res_dt, _fp16, _fp64): + raise ValueError( + f"function 'clip' does not support input types " + f"({x_dtype}, {val_dtype}), " + "and the inputs could not be safely coerced to any " + "supported types according to the casting rule ''safe''." + ) + + orig_out = out + if out is not None: + if not isinstance(out, dpt.usm_ndarray): + raise TypeError( + f"output array must be of usm_ndarray type, got {type(out)}" + ) + + if not out.flags.writable: + raise ValueError("provided `out` array is read-only") + + if out.shape != res_shape: + raise ValueError( + "The shape of input and output arrays are inconsistent. 
" + f"Expected output shape is {res_shape}, got {out.shape}" + ) + + if res_dt != out.dtype: + raise ValueError( + f"Output array of type {res_dt} is needed, got {out.dtype}" + ) + + if dpctl.utils.get_execution_queue((exec_q, out.sycl_queue)) is None: + raise ExecutionPlacementError( + "Input and output allocation queues are not compatible" + ) + + if ti._array_overlap(x, out): + if not ti._same_logical_tensors(x, out): + out = dpt.empty_like(out) + + if isinstance(val, dpt.usm_ndarray): + if ( + ti._array_overlap(val, out) + and not ti._same_logical_tensors(val, out) + and val_dtype == res_dt + ): + out = dpt.empty_like(out) + + if isinstance(val, dpt.usm_ndarray): + val_ary = val + else: + val_ary = dpt.asarray(val, dtype=val_dtype, sycl_queue=exec_q) + + if order == "A": + order = ( + "F" + if all( + arr.flags.f_contiguous + for arr in ( + x, + val_ary, + ) + ) + else "C" + ) + if val_dtype == res_dt: + if out is None: + if order == "K": + out = _empty_like_pair_orderK( + x, val_ary, res_dt, res_shape, res_usm_type, exec_q + ) + else: + out = dpt.empty( + res_shape, + dtype=res_dt, + usm_type=res_usm_type, + sycl_queue=exec_q, + order=order, + ) + if x_shape != res_shape: + x = dpt.broadcast_to(x, res_shape) + if val_ary.shape != res_shape: + val_ary = dpt.broadcast_to(val_ary, res_shape) + _manager = SequentialOrderManager[exec_q] + dep_evs = _manager.submitted_events + ht_binary_ev, binary_ev = _binary_fn( + src1=x, src2=val_ary, dst=out, sycl_queue=exec_q, depends=dep_evs + ) + _manager.add_event_pair(ht_binary_ev, binary_ev) + if not (orig_out is None or orig_out is out): + # Copy the out data from temporary buffer to original memory + ht_copy_out_ev, copy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=out, + dst=orig_out, + sycl_queue=exec_q, + depends=[binary_ev], + ) + _manager.add_event_pair(ht_copy_out_ev, copy_ev) + out = orig_out + return out + else: + if order == "K": + buf = _empty_like_orderK(val_ary, res_dt) + else: + buf = dpt.empty_like(val_ary, dtype=res_dt, order=order) + _manager = SequentialOrderManager[exec_q] + dep_evs = _manager.submitted_events + ht_copy_ev, copy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=val_ary, dst=buf, sycl_queue=exec_q, depends=dep_evs + ) + _manager.add_event_pair(ht_copy_ev, copy_ev) + if out is None: + if order == "K": + out = _empty_like_pair_orderK( + x, buf, res_dt, res_shape, res_usm_type, exec_q + ) + else: + out = dpt.empty( + res_shape, + dtype=res_dt, + usm_type=res_usm_type, + sycl_queue=exec_q, + order=order, + ) + + if x_shape != res_shape: + x = dpt.broadcast_to(x, res_shape) + buf = dpt.broadcast_to(buf, res_shape) + ht_binary_ev, binary_ev = _binary_fn( + src1=x, + src2=buf, + dst=out, + sycl_queue=exec_q, + depends=[copy_ev], + ) + _manager.add_event_pair(ht_binary_ev, binary_ev) + if not (orig_out is None or orig_out is out): + # Copy the out data from temporary buffer to original memory + ht_copy_out_ev, cpy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=out, + dst=orig_out, + sycl_queue=exec_q, + depends=[binary_ev], + ) + _manager.add_event_pair(ht_copy_out_ev, cpy_ev) + out = orig_out + return out + + +def clip(x, /, min=None, max=None, out=None, order="K"): + """clip(x, min=None, max=None, out=None, order="K") + + Clips to the range [`min_i`, `max_i`] for each element `x_i` + in `x`. + + Args: + x (usm_ndarray): Array containing elements to clip. + Must be compatible with `min` and `max` according + to broadcasting rules. 
+ min ({None, Union[usm_ndarray, bool, int, float, complex]}, optional): + Array containing minimum values. + Must be compatible with `x` and `max` according + to broadcasting rules. + max ({None, Union[usm_ndarray, bool, int, float, complex]}, optional): + Array containing maximum values. + Must be compatible with `x` and `min` according + to broadcasting rules. + out ({None, usm_ndarray}, optional): + Output array to populate. + Array must have the correct shape and the expected data type. + order ("C","F","A","K", optional): + Memory layout of the newly output array, if parameter `out` is + `None`. + Default: "K". + + Returns: + usm_ndarray: + An array with elements clipped to the range [`min`, `max`]. + The returned array has the same data type as `x`. + """ + if not isinstance(x, dpt.usm_ndarray): + raise TypeError( + "Expected `x` to be of dpctl.tensor.usm_ndarray type, got " + f"{type(x)}" + ) + if order not in ["K", "C", "F", "A"]: + order = "K" + if x.dtype.kind in "iu": + if isinstance(min, int) and min <= dpt_ext.iinfo(x.dtype).min: + min = None + if isinstance(max, int) and max >= dpt_ext.iinfo(x.dtype).max: + max = None + if min is None and max is None: + exec_q = x.sycl_queue + orig_out = out + if out is not None: + if not isinstance(out, dpt.usm_ndarray): + raise TypeError( + "output array must be of usm_ndarray type, got " + f"{type(out)}" + ) + + if not out.flags.writable: + raise ValueError("provided `out` array is read-only") + + if out.shape != x.shape: + raise ValueError( + "The shape of input and output arrays are " + f"inconsistent. Expected output shape is {x.shape}, " + f"got {out.shape}" + ) + + if x.dtype != out.dtype: + raise ValueError( + f"Output array of type {x.dtype} is needed, " + f"got {out.dtype}" + ) + + if ( + dpctl.utils.get_execution_queue((exec_q, out.sycl_queue)) + is None + ): + raise ExecutionPlacementError( + "Input and output allocation queues are not compatible" + ) + + if ti._array_overlap(x, out): + if not ti._same_logical_tensors(x, out): + out = dpt.empty_like(out) + else: + return out + else: + if order == "K": + out = _empty_like_orderK(x, x.dtype) + else: + out = dpt.empty_like(x, order=order) + + _manager = SequentialOrderManager[exec_q] + dep_evs = _manager.submitted_events + ht_copy_ev, copy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=x, dst=out, sycl_queue=exec_q, depends=dep_evs + ) + _manager.add_event_pair(ht_copy_ev, copy_ev) + if not (orig_out is None or orig_out is out): + # Copy the out data from temporary buffer to original memory + ht_copy_out_ev, cpy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=out, + dst=orig_out, + sycl_queue=exec_q, + depends=[copy_ev], + ) + _manager.add_event_pair(ht_copy_ev, cpy_ev) + out = orig_out + return out + elif max is None: + return _clip_none(x, min, out, order, tei._maximum) + elif min is None: + return _clip_none(x, max, out, order, tei._minimum) + else: + q1, x_usm_type = x.sycl_queue, x.usm_type + q2, min_usm_type = _get_queue_usm_type(min) + q3, max_usm_type = _get_queue_usm_type(max) + if q2 is None and q3 is None: + exec_q = q1 + res_usm_type = x_usm_type + elif q3 is None: + exec_q = dpctl.utils.get_execution_queue((q1, q2)) + if exec_q is None: + raise ExecutionPlacementError( + "Execution placement can not be unambiguously inferred " + "from input arguments." 
+ ) + res_usm_type = dpctl.utils.get_coerced_usm_type( + ( + x_usm_type, + min_usm_type, + ) + ) + elif q2 is None: + exec_q = dpctl.utils.get_execution_queue((q1, q3)) + if exec_q is None: + raise ExecutionPlacementError( + "Execution placement can not be unambiguously inferred " + "from input arguments." + ) + res_usm_type = dpctl.utils.get_coerced_usm_type( + ( + x_usm_type, + max_usm_type, + ) + ) + else: + exec_q = dpctl.utils.get_execution_queue((q1, q2, q3)) + if exec_q is None: + raise ExecutionPlacementError( + "Execution placement can not be unambiguously inferred " + "from input arguments." + ) + res_usm_type = dpctl.utils.get_coerced_usm_type( + ( + x_usm_type, + min_usm_type, + max_usm_type, + ) + ) + dpctl.utils.validate_usm_type(res_usm_type, allow_none=False) + x_shape = x.shape + min_shape = _get_shape(min) + max_shape = _get_shape(max) + if not all( + isinstance(s, (tuple, list)) + for s in ( + min_shape, + max_shape, + ) + ): + raise TypeError( + "Shape of arguments can not be inferred. " + "Arguments are expected to be " + "lists, tuples, or both" + ) + try: + res_shape = _broadcast_shape_impl( + [ + x_shape, + min_shape, + max_shape, + ] + ) + except ValueError: + raise ValueError( + "operands could not be broadcast together with shapes " + f"{x_shape}, {min_shape}, and {max_shape}" + ) + sycl_dev = exec_q.sycl_device + x_dtype = x.dtype + min_dtype = _get_dtype(min, sycl_dev) + max_dtype = _get_dtype(max, sycl_dev) + if not all(_validate_dtype(o) for o in (min_dtype, max_dtype)): + raise ValueError("Operands have unsupported data types") + + min_dtype, max_dtype = _resolve_one_strong_two_weak_types( + x_dtype, min_dtype, max_dtype, sycl_dev + ) + + buf1_dt, buf2_dt, res_dt = _check_clip_dtypes( + x_dtype, + min_dtype, + max_dtype, + sycl_dev, + ) + + if res_dt is None: + raise ValueError( + f"function '{clip}' does not support input types " + f"({x_dtype}, {min_dtype}, {max_dtype}), " + "and the inputs could not be safely coerced to any " + "supported types according to the casting rule ''safe''." + ) + + orig_out = out + if out is not None: + if not isinstance(out, dpt.usm_ndarray): + raise TypeError( + "output array must be of usm_ndarray type, got " + f"{type(out)}" + ) + + if not out.flags.writable: + raise ValueError("provided `out` array is read-only") + + if out.shape != res_shape: + raise ValueError( + "The shape of input and output arrays are " + f"inconsistent. 
Expected output shape is {res_shape}, " + f"got {out.shape}" + ) + + if res_dt != out.dtype: + raise ValueError( + f"Output array of type {res_dt} is needed, " + f"got {out.dtype}" + ) + + if ( + dpctl.utils.get_execution_queue((exec_q, out.sycl_queue)) + is None + ): + raise ExecutionPlacementError( + "Input and output allocation queues are not compatible" + ) + + if ti._array_overlap(x, out): + if not ti._same_logical_tensors(x, out): + out = dpt.empty_like(out) + + if isinstance(min, dpt.usm_ndarray): + if ( + ti._array_overlap(min, out) + and not ti._same_logical_tensors(min, out) + and buf1_dt is None + ): + out = dpt.empty_like(out) + + if isinstance(max, dpt.usm_ndarray): + if ( + ti._array_overlap(max, out) + and not ti._same_logical_tensors(max, out) + and buf2_dt is None + ): + out = dpt.empty_like(out) + + if isinstance(min, dpt.usm_ndarray): + a_min = min + else: + a_min = dpt.asarray(min, dtype=min_dtype, sycl_queue=exec_q) + if isinstance(max, dpt.usm_ndarray): + a_max = max + else: + a_max = dpt.asarray(max, dtype=max_dtype, sycl_queue=exec_q) + + if order == "A": + order = ( + "F" + if all( + arr.flags.f_contiguous + for arr in ( + x, + a_min, + a_max, + ) + ) + else "C" + ) + if buf1_dt is None and buf2_dt is None: + if out is None: + if order == "K": + out = _empty_like_triple_orderK( + x, + a_min, + a_max, + res_dt, + res_shape, + res_usm_type, + exec_q, + ) + else: + out = dpt.empty( + res_shape, + dtype=res_dt, + usm_type=res_usm_type, + sycl_queue=exec_q, + order=order, + ) + if x_shape != res_shape: + x = dpt.broadcast_to(x, res_shape) + if a_min.shape != res_shape: + a_min = dpt.broadcast_to(a_min, res_shape) + if a_max.shape != res_shape: + a_max = dpt.broadcast_to(a_max, res_shape) + _manager = SequentialOrderManager[exec_q] + dep_ev = _manager.submitted_events + ht_binary_ev, binary_ev = ti._clip( + src=x, + min=a_min, + max=a_max, + dst=out, + sycl_queue=exec_q, + depends=dep_ev, + ) + _manager.add_event_pair(ht_binary_ev, binary_ev) + if not (orig_out is None or orig_out is out): + # Copy the out data from temporary buffer to original memory + ht_copy_out_ev, cpy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=out, + dst=orig_out, + sycl_queue=exec_q, + depends=[binary_ev], + ) + _manager.add_event_pair(ht_copy_out_ev, cpy_ev) + out = orig_out + return out + + elif buf1_dt is None: + if order == "K": + buf2 = _empty_like_orderK(a_max, buf2_dt) + else: + buf2 = dpt.empty_like(a_max, dtype=buf2_dt, order=order) + _manager = SequentialOrderManager[exec_q] + dep_ev = _manager.submitted_events + ht_copy_ev, copy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=a_max, dst=buf2, sycl_queue=exec_q, depends=dep_ev + ) + _manager.add_event_pair(ht_copy_ev, copy_ev) + if out is None: + if order == "K": + out = _empty_like_triple_orderK( + x, + a_min, + buf2, + res_dt, + res_shape, + res_usm_type, + exec_q, + ) + else: + out = dpt.empty( + res_shape, + dtype=res_dt, + usm_type=res_usm_type, + sycl_queue=exec_q, + order=order, + ) + + x = dpt.broadcast_to(x, res_shape) + if a_min.shape != res_shape: + a_min = dpt.broadcast_to(a_min, res_shape) + buf2 = dpt.broadcast_to(buf2, res_shape) + ht_binary_ev, binary_ev = ti._clip( + src=x, + min=a_min, + max=buf2, + dst=out, + sycl_queue=exec_q, + depends=[copy_ev], + ) + _manager.add_event_pair(ht_binary_ev, binary_ev) + if not (orig_out is None or orig_out is out): + # Copy the out data from temporary buffer to original memory + ht_copy_out_ev, cpy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=out, + dst=orig_out, + 
sycl_queue=exec_q, + depends=[binary_ev], + ) + _manager.add_event_pair(ht_copy_out_ev, cpy_ev) + out = orig_out + return out + + elif buf2_dt is None: + if order == "K": + buf1 = _empty_like_orderK(a_min, buf1_dt) + else: + buf1 = dpt.empty_like(a_min, dtype=buf1_dt, order=order) + _manager = SequentialOrderManager[exec_q] + dep_ev = _manager.submitted_events + ht_copy_ev, copy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=a_min, dst=buf1, sycl_queue=exec_q, depends=dep_ev + ) + _manager.add_event_pair(ht_copy_ev, copy_ev) + if out is None: + if order == "K": + out = _empty_like_triple_orderK( + x, + buf1, + a_max, + res_dt, + res_shape, + res_usm_type, + exec_q, + ) + else: + out = dpt.empty( + res_shape, + dtype=res_dt, + usm_type=res_usm_type, + sycl_queue=exec_q, + order=order, + ) + + x = dpt.broadcast_to(x, res_shape) + buf1 = dpt.broadcast_to(buf1, res_shape) + if a_max.shape != res_shape: + a_max = dpt.broadcast_to(a_max, res_shape) + ht_binary_ev, binary_ev = ti._clip( + src=x, + min=buf1, + max=a_max, + dst=out, + sycl_queue=exec_q, + depends=[copy_ev], + ) + _manager.add_event_pair(ht_binary_ev, binary_ev) + if not (orig_out is None or orig_out is out): + # Copy the out data from temporary buffer to original memory + ht_copy_out_ev, cpy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=out, + dst=orig_out, + sycl_queue=exec_q, + depends=[binary_ev], + ) + _manager.add_event_pair(ht_copy_out_ev, cpy_ev) + out = orig_out + return out + + if order == "K": + if ( + x.flags.c_contiguous + and a_min.flags.c_contiguous + and a_max.flags.c_contiguous + ): + order = "C" + elif ( + x.flags.f_contiguous + and a_min.flags.f_contiguous + and a_max.flags.f_contiguous + ): + order = "F" + if order == "K": + buf1 = _empty_like_orderK(a_min, buf1_dt) + else: + buf1 = dpt.empty_like(a_min, dtype=buf1_dt, order=order) + + _manager = SequentialOrderManager[exec_q] + dep_evs = _manager.submitted_events + ht_copy1_ev, copy1_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=a_min, dst=buf1, sycl_queue=exec_q, depends=dep_evs + ) + _manager.add_event_pair(ht_copy1_ev, copy1_ev) + if order == "K": + buf2 = _empty_like_orderK(a_max, buf2_dt) + else: + buf2 = dpt.empty_like(a_max, dtype=buf2_dt, order=order) + ht_copy2_ev, copy2_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=a_max, dst=buf2, sycl_queue=exec_q, depends=dep_evs + ) + _manager.add_event_pair(ht_copy2_ev, copy2_ev) + if out is None: + if order == "K": + out = _empty_like_triple_orderK( + x, buf1, buf2, res_dt, res_shape, res_usm_type, exec_q + ) + else: + out = dpt.empty( + res_shape, + dtype=res_dt, + usm_type=res_usm_type, + sycl_queue=exec_q, + order=order, + ) + + x = dpt.broadcast_to(x, res_shape) + buf1 = dpt.broadcast_to(buf1, res_shape) + buf2 = dpt.broadcast_to(buf2, res_shape) + ht_, clip_ev = ti._clip( + src=x, + min=buf1, + max=buf2, + dst=out, + sycl_queue=exec_q, + depends=[copy1_ev, copy2_ev], + ) + _manager.add_event_pair(ht_, clip_ev) + return out diff --git a/dpctl_ext/tensor/_copy_utils.py b/dpctl_ext/tensor/_copy_utils.py index 5d1ac209c86b..64689057eb84 100644 --- a/dpctl_ext/tensor/_copy_utils.py +++ b/dpctl_ext/tensor/_copy_utils.py @@ -37,7 +37,6 @@ import numpy as np from dpctl.tensor._data_types import _get_dtype from dpctl.tensor._device import normalize_queue_device -from dpctl.tensor._type_utils import _dtype_supported_by_device_impl # TODO: revert to `import dpctl.tensor...` # when dpnp fully migrates dpctl/tensor @@ -45,6 +44,7 @@ import dpctl_ext.tensor._tensor_impl as ti from ._numpy_helper import 
normalize_axis_index +from ._type_utils import _dtype_supported_by_device_impl __doc__ = ( "Implementation module for copy- and cast- operations on " @@ -291,7 +291,7 @@ def _prepare_indices_arrays(inds, q, usm_type): ) # promote to a common integral type if possible - ind_dt = dpt.result_type(*inds) + ind_dt = dpt_ext.result_type(*inds) if ind_dt.kind not in "ui": raise ValueError( "cannot safely promote indices to an integer data type" @@ -1013,7 +1013,7 @@ def astype( else: target_dtype = _get_dtype(newdtype, usm_ary.sycl_queue) - if not dpt.can_cast(ary_dtype, target_dtype, casting=casting): + if not dpt_ext.can_cast(ary_dtype, target_dtype, casting=casting): raise TypeError( f"Can not cast from {ary_dtype} to {newdtype} " f"according to rule {casting}." diff --git a/dpctl_ext/tensor/_manipulation_functions.py b/dpctl_ext/tensor/_manipulation_functions.py index fa8fc27876b3..f1b8b46dbcbc 100644 --- a/dpctl_ext/tensor/_manipulation_functions.py +++ b/dpctl_ext/tensor/_manipulation_functions.py @@ -26,17 +26,20 @@ # THE POSSIBILITY OF SUCH DAMAGE. # ***************************************************************************** +import itertools import operator +import dpctl import dpctl.tensor as dpt import dpctl.utils as dputils import numpy as np # TODO: revert to `import dpctl.tensor...` # when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt_ext import dpctl_ext.tensor._tensor_impl as ti -from ._numpy_helper import normalize_axis_tuple +from ._numpy_helper import normalize_axis_index, normalize_axis_tuple __doc__ = ( "Implementation module for array manipulation " @@ -44,6 +47,274 @@ ) +def _broadcast_shape_impl(shapes): + if len(set(shapes)) == 1: + return shapes[0] + mutable_shapes = False + nds = [len(s) for s in shapes] + biggest = max(nds) + sh_len = len(shapes) + for i in range(sh_len): + diff = biggest - nds[i] + if diff > 0: + ty = type(shapes[i]) + shapes[i] = ty( + itertools.chain(itertools.repeat(1, diff), shapes[i]) + ) + common_shape = [] + for axis in range(biggest): + lengths = [s[axis] for s in shapes] + unique = set(lengths + [1]) + if len(unique) > 2: + raise ValueError( + "Shape mismatch: two or more arrays have " + f"incompatible dimensions on axis ({axis},)" + ) + elif len(unique) == 2: + unique.remove(1) + new_length = unique.pop() + common_shape.append(new_length) + for i in range(sh_len): + if shapes[i][axis] == 1: + if not mutable_shapes: + shapes = [list(s) for s in shapes] + mutable_shapes = True + shapes[i][axis] = new_length + else: + common_shape.append(1) + + return tuple(common_shape) + + +def repeat(x, repeats, /, *, axis=None): + """repeat(x, repeats, axis=None) + + Repeat elements of an array on a per-element basis. + + Args: + x (usm_ndarray): input array + + repeats (Union[int, Sequence[int, ...], usm_ndarray]): + The number of repetitions for each element. + + `repeats` must be broadcast-compatible with `N` where `N` is + `prod(x.shape)` if `axis` is `None` and `x.shape[axis]` + otherwise. + + If `repeats` is an array, it must have an integer data type. + Otherwise, `repeats` must be a Python integer or sequence of + Python integers (i.e., a tuple, list, or range). + + axis (Optional[int]): + The axis along which to repeat values. If `axis` is `None`, the + function repeats elements of the flattened array. Default: `None`. + + Returns: + usm_ndarray: + output array with repeated elements. 
+
+            If `axis` is `None`, the returned array is one-dimensional,
+            otherwise, it has the same shape as `x`, except for the axis along
+            which elements were repeated.
+
+            The returned array will have the same data type as `x`.
+            The returned array will be located on the same device as `x` and
+            have the same USM allocation type as `x`.
+
+    Raises:
+        AxisError: if `axis` value is invalid.
+    """
+    if not isinstance(x, dpt.usm_ndarray):
+        raise TypeError(f"Expected usm_ndarray type, got {type(x)}.")
+
+    x_ndim = x.ndim
+    x_shape = x.shape
+    if axis is not None:
+        axis = normalize_axis_index(operator.index(axis), x_ndim)
+        axis_size = x_shape[axis]
+    else:
+        axis_size = x.size
+
+    scalar = False
+    if isinstance(repeats, int):
+        if repeats < 0:
+            raise ValueError("`repeats` must be a non-negative integer")
+        usm_type = x.usm_type
+        exec_q = x.sycl_queue
+        scalar = True
+    elif isinstance(repeats, dpt.usm_ndarray):
+        if repeats.ndim > 1:
+            raise ValueError(
+                "`repeats` array must be 0- or 1-dimensional, got "
+                f"{repeats.ndim}"
+            )
+        exec_q = dpctl.utils.get_execution_queue(
+            (x.sycl_queue, repeats.sycl_queue)
+        )
+        if exec_q is None:
+            raise dputils.ExecutionPlacementError(
+                "Execution placement can not be unambiguously inferred "
+                "from input arguments."
+            )
+        usm_type = dpctl.utils.get_coerced_usm_type(
+            (
+                x.usm_type,
+                repeats.usm_type,
+            )
+        )
+        dpctl.utils.validate_usm_type(usm_type, allow_none=False)
+        if not dpt_ext.can_cast(repeats.dtype, dpt.int64, casting="same_kind"):
+            raise TypeError(
+                f"'repeats' data type {repeats.dtype} cannot be cast to "
+                "'int64' according to the casting rule ''same_kind''."
+            )
+        if repeats.size == 1:
+            scalar = True
+            # bring the single element to the host
+            if repeats.ndim == 0:
+                repeats = int(repeats)
+            else:
+                # Get the single element explicitly
+                # since non-0D arrays can not be converted to scalars
+                repeats = int(repeats[0])
+            if repeats < 0:
+                raise ValueError("`repeats` elements must be non-negative")
+        else:
+            if repeats.size != axis_size:
+                raise ValueError(
+                    "'repeats' array must be broadcastable to the size of "
+                    "the repeated axis"
+                )
+            if not dpt.all(repeats >= 0):
+                raise ValueError("'repeats' elements must be non-negative")
+
+    elif isinstance(repeats, (tuple, list, range)):
+        usm_type = x.usm_type
+        exec_q = x.sycl_queue
+
+        len_reps = len(repeats)
+        if len_reps == 1:
+            repeats = repeats[0]
+            if repeats < 0:
+                raise ValueError("`repeats` elements must be non-negative")
+            scalar = True
+        else:
+            if len_reps != axis_size:
+                raise ValueError(
+                    "`repeats` sequence must have the same length as the "
+                    "repeated axis"
+                )
+            repeats = dpt.asarray(
+                repeats, dtype=dpt.int64, usm_type=usm_type, sycl_queue=exec_q
+            )
+            if not dpt.all(repeats >= 0):
+                raise ValueError("`repeats` elements must be non-negative")
+    else:
+        raise TypeError(
+            "Expected int, sequence, or `usm_ndarray` for second argument, "
+            f"got {type(repeats)}"
+        )
+
+    _manager = dputils.SequentialOrderManager[exec_q]
+    dep_evs = _manager.submitted_events
+    if scalar:
+        res_axis_size = repeats * axis_size
+        if axis is not None:
+            res_shape = x_shape[:axis] + (res_axis_size,) + x_shape[axis + 1 :]
+        else:
+            res_shape = (res_axis_size,)
+        res = dpt.empty(
+            res_shape, dtype=x.dtype, usm_type=usm_type, sycl_queue=exec_q
+        )
+        if res_axis_size > 0:
+            ht_rep_ev, rep_ev = ti._repeat_by_scalar(
+                src=x,
+                dst=res,
+                reps=repeats,
+                axis=axis,
+                sycl_queue=exec_q,
+                depends=dep_evs,
+            )
+            _manager.add_event_pair(ht_rep_ev, rep_ev)
+    else:
+        if repeats.dtype != dpt.int64:
+            rep_buf = dpt.empty(
+                
repeats.shape, + dtype=dpt.int64, + usm_type=usm_type, + sycl_queue=exec_q, + ) + ht_copy_ev, copy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=repeats, dst=rep_buf, sycl_queue=exec_q, depends=dep_evs + ) + _manager.add_event_pair(ht_copy_ev, copy_ev) + cumsum = dpt.empty( + (axis_size,), + dtype=dpt.int64, + usm_type=usm_type, + sycl_queue=exec_q, + ) + # _cumsum_1d synchronizes so `depends` ends here safely + res_axis_size = ti._cumsum_1d( + rep_buf, cumsum, sycl_queue=exec_q, depends=[copy_ev] + ) + if axis is not None: + res_shape = ( + x_shape[:axis] + (res_axis_size,) + x_shape[axis + 1 :] + ) + else: + res_shape = (res_axis_size,) + res = dpt.empty( + res_shape, + dtype=x.dtype, + usm_type=usm_type, + sycl_queue=exec_q, + ) + if res_axis_size > 0: + ht_rep_ev, rep_ev = ti._repeat_by_sequence( + src=x, + dst=res, + reps=rep_buf, + cumsum=cumsum, + axis=axis, + sycl_queue=exec_q, + ) + _manager.add_event_pair(ht_rep_ev, rep_ev) + else: + cumsum = dpt.empty( + (axis_size,), + dtype=dpt.int64, + usm_type=usm_type, + sycl_queue=exec_q, + ) + res_axis_size = ti._cumsum_1d( + repeats, cumsum, sycl_queue=exec_q, depends=dep_evs + ) + if axis is not None: + res_shape = ( + x_shape[:axis] + (res_axis_size,) + x_shape[axis + 1 :] + ) + else: + res_shape = (res_axis_size,) + res = dpt.empty( + res_shape, + dtype=x.dtype, + usm_type=usm_type, + sycl_queue=exec_q, + ) + if res_axis_size > 0: + ht_rep_ev, rep_ev = ti._repeat_by_sequence( + src=x, + dst=res, + reps=repeats, + cumsum=cumsum, + axis=axis, + sycl_queue=exec_q, + ) + _manager.add_event_pair(ht_rep_ev, rep_ev) + return res + + def roll(x, /, shift, *, axis=None): """ roll(x, shift, axis) diff --git a/dpctl_ext/tensor/_scalar_utils.py b/dpctl_ext/tensor/_scalar_utils.py new file mode 100644 index 000000000000..86787baea8cc --- /dev/null +++ b/dpctl_ext/tensor/_scalar_utils.py @@ -0,0 +1,122 @@ +# ***************************************************************************** +# Copyright (c) 2026, Intel Corporation +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# - Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# - Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# - Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +# THE POSSIBILITY OF SUCH DAMAGE. 
+# *****************************************************************************
+
+import numbers
+
+import dpctl.memory as dpm
+import dpctl.tensor as dpt
+import numpy as np
+from dpctl.tensor._usmarray import _is_object_with_buffer_protocol as _is_buffer
+
+from ._type_utils import (
+    WeakBooleanType,
+    WeakComplexType,
+    WeakFloatingType,
+    WeakIntegralType,
+    _to_device_supported_dtype,
+)
+
+
+def _get_queue_usm_type(o):
+    """Return the SYCL queue and USM type for object `o`, or (None, None)."""
+    if isinstance(o, dpt.usm_ndarray):
+        return o.sycl_queue, o.usm_type
+    elif hasattr(o, "__sycl_usm_array_interface__"):
+        try:
+            m = dpm.as_usm_memory(o)
+            return m.sycl_queue, m.get_usm_type()
+        except Exception:
+            return None, None
+    return None, None
+
+
+def _get_dtype(o, dev):
+    if isinstance(o, dpt.usm_ndarray):
+        return o.dtype
+    if hasattr(o, "__sycl_usm_array_interface__"):
+        return dpt.asarray(o).dtype
+    if _is_buffer(o):
+        host_dt = np.array(o).dtype
+        dev_dt = _to_device_supported_dtype(host_dt, dev)
+        return dev_dt
+    if hasattr(o, "dtype"):
+        dev_dt = _to_device_supported_dtype(o.dtype, dev)
+        return dev_dt
+    if isinstance(o, bool):
+        return WeakBooleanType(o)
+    if isinstance(o, int):
+        return WeakIntegralType(o)
+    if isinstance(o, float):
+        return WeakFloatingType(o)
+    if isinstance(o, complex):
+        return WeakComplexType(o)
+    return np.object_
+
+
+def _validate_dtype(dt) -> bool:
+    return isinstance(
+        dt,
+        (WeakBooleanType, WeakIntegralType, WeakFloatingType, WeakComplexType),
+    ) or (
+        isinstance(dt, dpt.dtype)
+        and dt
+        in [
+            dpt.bool,
+            dpt.int8,
+            dpt.uint8,
+            dpt.int16,
+            dpt.uint16,
+            dpt.int32,
+            dpt.uint32,
+            dpt.int64,
+            dpt.uint64,
+            dpt.float16,
+            dpt.float32,
+            dpt.float64,
+            dpt.complex64,
+            dpt.complex128,
+        ]
+    )
+
+
+def _get_shape(o):
+    if isinstance(o, dpt.usm_ndarray):
+        return o.shape
+    if _is_buffer(o):
+        return memoryview(o).shape
+    if isinstance(o, numbers.Number):
+        return ()
+    return getattr(o, "shape", tuple())
+
+
+__all__ = [
+    "_get_dtype",
+    "_get_queue_usm_type",
+    "_get_shape",
+    "_validate_dtype",
+]
diff --git a/dpctl_ext/tensor/_search_functions.py b/dpctl_ext/tensor/_search_functions.py
new file mode 100644
index 000000000000..a82845e3520c
--- /dev/null
+++ b/dpctl_ext/tensor/_search_functions.py
@@ -0,0 +1,419 @@
+# *****************************************************************************
+# Copyright (c) 2026, Intel Corporation
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+# - Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.
+# - Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+# - Neither the name of the copyright holder nor the names of its contributors
+#   may be used to endorse or promote products derived from this software
+#   without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +# THE POSSIBILITY OF SUCH DAMAGE. +# ***************************************************************************** + +import dpctl +import dpctl.tensor as dpt +from dpctl.utils import ExecutionPlacementError, SequentialOrderManager + +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt_ext +import dpctl_ext.tensor._tensor_impl as ti + +from ._copy_utils import _empty_like_orderK, _empty_like_triple_orderK +from ._manipulation_functions import _broadcast_shape_impl +from ._scalar_utils import ( + _get_dtype, + _get_queue_usm_type, + _get_shape, + _validate_dtype, +) +from ._type_utils import ( + WeakBooleanType, + WeakComplexType, + WeakFloatingType, + WeakIntegralType, + _all_data_types, + _can_cast, + _is_weak_dtype, + _strong_dtype_num_kind, + _to_device_supported_dtype, + _weak_type_num_kind, +) + + +def _default_dtype_from_weak_type(dt, dev): + if isinstance(dt, WeakBooleanType): + return dpt.bool + if isinstance(dt, WeakIntegralType): + return dpt.dtype(ti.default_device_int_type(dev)) + if isinstance(dt, WeakFloatingType): + return dpt.dtype(ti.default_device_fp_type(dev)) + if isinstance(dt, WeakComplexType): + return dpt.dtype(ti.default_device_complex_type(dev)) + + +def _resolve_two_weak_types(o1_dtype, o2_dtype, dev): + """Resolves two weak data types per NEP-0050""" + if _is_weak_dtype(o1_dtype): + if _is_weak_dtype(o2_dtype): + return _default_dtype_from_weak_type( + o1_dtype, dev + ), _default_dtype_from_weak_type(o2_dtype, dev) + o1_kind_num = _weak_type_num_kind(o1_dtype) + o2_kind_num = _strong_dtype_num_kind(o2_dtype) + if o1_kind_num > o2_kind_num: + if isinstance(o1_dtype, WeakIntegralType): + return dpt.dtype(ti.default_device_int_type(dev)), o2_dtype + if isinstance(o1_dtype, WeakComplexType): + if o2_dtype is dpt.float16 or o2_dtype is dpt.float32: + return dpt.complex64, o2_dtype + return ( + _to_device_supported_dtype(dpt.complex128, dev), + o2_dtype, + ) + return _to_device_supported_dtype(dpt.float64, dev), o2_dtype + else: + return o2_dtype, o2_dtype + elif _is_weak_dtype(o2_dtype): + o1_kind_num = _strong_dtype_num_kind(o1_dtype) + o2_kind_num = _weak_type_num_kind(o2_dtype) + if o2_kind_num > o1_kind_num: + if isinstance(o2_dtype, WeakIntegralType): + return o1_dtype, dpt.dtype(ti.default_device_int_type(dev)) + if isinstance(o2_dtype, WeakComplexType): + if o1_dtype is dpt.float16 or o1_dtype is dpt.float32: + return o1_dtype, dpt.complex64 + return o1_dtype, _to_device_supported_dtype(dpt.complex128, dev) + return ( + o1_dtype, + _to_device_supported_dtype(dpt.float64, dev), + ) + else: + return o1_dtype, o1_dtype + else: + return o1_dtype, o2_dtype + + +def _where_result_type(dt1, dt2, dev): + res_dtype = dpt_ext.result_type(dt1, dt2) + fp16 = dev.has_aspect_fp16 + fp64 = dev.has_aspect_fp64 + + all_dts = _all_data_types(fp16, fp64) + if res_dtype in all_dts: + return res_dtype + else: + for res_dtype_ in all_dts: + if _can_cast(dt1, res_dtype_, fp16, fp64) and _can_cast( + dt2, res_dtype_, fp16, fp64 
+            ):
+                return res_dtype_
+        return None
+
+
+def where(condition, x1, x2, /, *, order="K", out=None):
+    """
+    Returns :class:`dpctl.tensor.usm_ndarray` with elements chosen
+    from ``x1`` or ``x2`` depending on ``condition``.
+
+    Args:
+        condition (usm_ndarray): When ``True`` yields from ``x1``,
+            and otherwise yields from ``x2``.
+            Must be compatible with ``x1`` and ``x2`` according
+            to broadcasting rules.
+        x1 (Union[usm_ndarray, bool, int, float, complex]):
+            Array from which values are chosen when ``condition`` is ``True``.
+            Must be compatible with ``condition`` and ``x2`` according
+            to broadcasting rules.
+        x2 (Union[usm_ndarray, bool, int, float, complex]):
+            Array from which values are chosen when ``condition`` is not
+            ``True``.
+            Must be compatible with ``condition`` and ``x1`` according
+            to broadcasting rules.
+        order (``"K"``, ``"C"``, ``"F"``, ``"A"``, optional):
+            Memory layout of the new output array,
+            if parameter ``out`` is ``None``.
+            Default: ``"K"``.
+        out (Optional[usm_ndarray]):
+            the array into which the result is written.
+            The array must have the expected shape and the
+            expected data type of the result.
+            If ``None`` then a new array is returned. Default: ``None``.
+
+    Returns:
+        usm_ndarray:
+            An array with elements from ``x1`` where ``condition`` is ``True``,
+            and elements from ``x2`` elsewhere.
+
+            The data type of the returned array is determined by applying
+            the Type Promotion Rules to ``x1`` and ``x2``.
+    """
+    if not isinstance(condition, dpt.usm_ndarray):
+        raise TypeError(
+            "Expecting dpctl.tensor.usm_ndarray type, " f"got {type(condition)}"
+        )
+    if order not in ["K", "C", "F", "A"]:
+        order = "K"
+    q1, condition_usm_type = condition.sycl_queue, condition.usm_type
+    q2, x1_usm_type = _get_queue_usm_type(x1)
+    q3, x2_usm_type = _get_queue_usm_type(x2)
+    if q2 is None and q3 is None:
+        exec_q = q1
+        out_usm_type = condition_usm_type
+    elif q3 is None:
+        exec_q = dpctl.utils.get_execution_queue((q1, q2))
+        if exec_q is None:
+            raise ExecutionPlacementError(
+                "Execution placement can not be unambiguously inferred "
+                "from input arguments."
+            )
+        out_usm_type = dpctl.utils.get_coerced_usm_type(
+            (
+                condition_usm_type,
+                x1_usm_type,
+            )
+        )
+    elif q2 is None:
+        exec_q = dpctl.utils.get_execution_queue((q1, q3))
+        if exec_q is None:
+            raise ExecutionPlacementError(
+                "Execution placement can not be unambiguously inferred "
+                "from input arguments."
+            )
+        out_usm_type = dpctl.utils.get_coerced_usm_type(
+            (
+                condition_usm_type,
+                x2_usm_type,
+            )
+        )
+    else:
+        exec_q = dpctl.utils.get_execution_queue((q1, q2, q3))
+        if exec_q is None:
+            raise ExecutionPlacementError(
+                "Execution placement can not be unambiguously inferred "
+                "from input arguments."
+            )
+        out_usm_type = dpctl.utils.get_coerced_usm_type(
+            (
+                condition_usm_type,
+                x1_usm_type,
+                x2_usm_type,
+            )
+        )
+    dpctl.utils.validate_usm_type(out_usm_type, allow_none=False)
+    condition_shape = condition.shape
+    x1_shape = _get_shape(x1)
+    x2_shape = _get_shape(x2)
+    if not all(
+        isinstance(s, (tuple, list))
+        for s in (
+            x1_shape,
+            x2_shape,
+        )
+    ):
+        raise TypeError(
+            "Shape of arguments can not be inferred. 
" + "Arguments are expected to be " + "lists, tuples, or both" + ) + try: + res_shape = _broadcast_shape_impl( + [ + condition_shape, + x1_shape, + x2_shape, + ] + ) + except ValueError: + raise ValueError( + "operands could not be broadcast together with shapes " + f"{condition_shape}, {x1_shape}, and {x2_shape}" + ) + sycl_dev = exec_q.sycl_device + x1_dtype = _get_dtype(x1, sycl_dev) + x2_dtype = _get_dtype(x2, sycl_dev) + if not all(_validate_dtype(o) for o in (x1_dtype, x2_dtype)): + raise ValueError("Operands have unsupported data types") + x1_dtype, x2_dtype = _resolve_two_weak_types(x1_dtype, x2_dtype, sycl_dev) + out_dtype = _where_result_type(x1_dtype, x2_dtype, sycl_dev) + if out_dtype is None: + raise TypeError( + "function 'where' does not support input " + f"types ({x1_dtype}, {x2_dtype}), " + "and the inputs could not be safely coerced " + "to any supported types according to the casting rule ''safe''." + ) + + orig_out = out + if out is not None: + if not isinstance(out, dpt.usm_ndarray): + raise TypeError( + "output array must be of usm_ndarray type, got " f"{type(out)}" + ) + + if not out.flags.writable: + raise ValueError("provided `out` array is read-only") + + if out.shape != res_shape: + raise ValueError( + "The shape of input and output arrays are " + f"inconsistent. Expected output shape is {res_shape}, " + f"got {out.shape}" + ) + + if out_dtype != out.dtype: + raise ValueError( + f"Output array of type {out_dtype} is needed, " + f"got {out.dtype}" + ) + + if dpctl.utils.get_execution_queue((exec_q, out.sycl_queue)) is None: + raise ExecutionPlacementError( + "Input and output allocation queues are not compatible" + ) + + if ti._array_overlap(condition, out) and not ti._same_logical_tensors( + condition, out + ): + out = dpt.empty_like(out) + + if isinstance(x1, dpt.usm_ndarray): + if ( + ti._array_overlap(x1, out) + and not ti._same_logical_tensors(x1, out) + and x1_dtype == out_dtype + ): + out = dpt.empty_like(out) + + if isinstance(x2, dpt.usm_ndarray): + if ( + ti._array_overlap(x2, out) + and not ti._same_logical_tensors(x2, out) + and x2_dtype == out_dtype + ): + out = dpt.empty_like(out) + + if order == "A": + order = ( + "F" + if all( + arr.flags.f_contiguous + for arr in ( + condition, + x1, + x2, + ) + ) + else "C" + ) + if not isinstance(x1, dpt.usm_ndarray): + x1 = dpt.asarray(x1, dtype=x1_dtype, sycl_queue=exec_q) + if not isinstance(x2, dpt.usm_ndarray): + x2 = dpt.asarray(x2, dtype=x2_dtype, sycl_queue=exec_q) + + if condition.size == 0: + if out is not None: + return out + else: + if order == "K": + return _empty_like_triple_orderK( + condition, + x1, + x2, + out_dtype, + res_shape, + out_usm_type, + exec_q, + ) + else: + return dpt.empty( + res_shape, + dtype=out_dtype, + order=order, + usm_type=out_usm_type, + sycl_queue=exec_q, + ) + + _manager = SequentialOrderManager[exec_q] + dep_evs = _manager.submitted_events + if x1_dtype != out_dtype: + if order == "K": + _x1 = _empty_like_orderK(x1, out_dtype) + else: + _x1 = dpt.empty_like(x1, dtype=out_dtype, order=order) + ht_copy1_ev, copy1_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=x1, dst=_x1, sycl_queue=exec_q, depends=dep_evs + ) + x1 = _x1 + _manager.add_event_pair(ht_copy1_ev, copy1_ev) + + if x2_dtype != out_dtype: + if order == "K": + _x2 = _empty_like_orderK(x2, out_dtype) + else: + _x2 = dpt.empty_like(x2, dtype=out_dtype, order=order) + ht_copy2_ev, copy2_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=x2, dst=_x2, sycl_queue=exec_q, depends=dep_evs + ) + x2 = _x2 + 
_manager.add_event_pair(ht_copy2_ev, copy2_ev) + + if out is None: + if order == "K": + out = _empty_like_triple_orderK( + condition, x1, x2, out_dtype, res_shape, out_usm_type, exec_q + ) + else: + out = dpt.empty( + res_shape, + dtype=out_dtype, + order=order, + usm_type=out_usm_type, + sycl_queue=exec_q, + ) + + if condition_shape != res_shape: + condition = dpt.broadcast_to(condition, res_shape) + if x1_shape != res_shape: + x1 = dpt.broadcast_to(x1, res_shape) + if x2_shape != res_shape: + x2 = dpt.broadcast_to(x2, res_shape) + + dep_evs = _manager.submitted_events + hev, where_ev = ti._where( + condition=condition, + x1=x1, + x2=x2, + dst=out, + sycl_queue=exec_q, + depends=dep_evs, + ) + _manager.add_event_pair(hev, where_ev) + if not (orig_out is None or orig_out is out): + # Copy the out data from temporary buffer to original memory + ht_copy_out_ev, cpy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=out, + dst=orig_out, + sycl_queue=exec_q, + depends=[where_ev], + ) + _manager.add_event_pair(ht_copy_out_ev, cpy_ev) + out = orig_out + + return out diff --git a/dpctl_ext/tensor/_type_utils.py b/dpctl_ext/tensor/_type_utils.py new file mode 100644 index 000000000000..1e386e15dfa3 --- /dev/null +++ b/dpctl_ext/tensor/_type_utils.py @@ -0,0 +1,999 @@ +# ***************************************************************************** +# Copyright (c) 2026, Intel Corporation +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# - Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# - Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# - Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +# THE POSSIBILITY OF SUCH DAMAGE. 
+# ***************************************************************************** + +from __future__ import annotations + +import dpctl.tensor as dpt +import numpy as np + +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt_ext +import dpctl_ext.tensor._tensor_impl as ti + + +def _all_data_types(_fp16, _fp64): + _non_fp_types = [ + dpt.bool, + dpt.int8, + dpt.uint8, + dpt.int16, + dpt.uint16, + dpt.int32, + dpt.uint32, + dpt.int64, + dpt.uint64, + ] + if _fp64: + if _fp16: + return _non_fp_types + [ + dpt.float16, + dpt.float32, + dpt.float64, + dpt.complex64, + dpt.complex128, + ] + else: + return _non_fp_types + [ + dpt.float32, + dpt.float64, + dpt.complex64, + dpt.complex128, + ] + else: + if _fp16: + return _non_fp_types + [ + dpt.float16, + dpt.float32, + dpt.complex64, + ] + else: + return _non_fp_types + [ + dpt.float32, + dpt.complex64, + ] + + +def _acceptance_fn_default_binary( + arg1_dtype, arg2_dtype, ret_buf1_dt, ret_buf2_dt, res_dt, sycl_dev +): + return True + + +def _acceptance_fn_default_unary(arg_dtype, ret_buf_dt, res_dt, sycl_dev): + return True + + +def _acceptance_fn_divide( + arg1_dtype, arg2_dtype, ret_buf1_dt, ret_buf2_dt, res_dt, sycl_dev +): + # both are being promoted, if the kind of result is + # different than the kind of original input dtypes, + # we use default dtype for the resulting kind. + # This covers, e.g. (array_dtype_i1 / array_dtype_u1) + # result of which in divide is double (in NumPy), but + # regular type promotion rules peg at float16 + if (ret_buf1_dt.kind != arg1_dtype.kind) and ( + ret_buf2_dt.kind != arg2_dtype.kind + ): + default_dt = _get_device_default_dtype(res_dt.kind, sycl_dev) + if res_dt == default_dt: + return True + else: + return False + else: + return True + + +def _acceptance_fn_negative(arg_dtype, buf_dt, res_dt, sycl_dev): + # negative is not defined for boolean data type + if arg_dtype.char == "?": + raise ValueError( + "The `negative` function, the `-` operator, is not supported " + "for inputs of data type bool, use the `~` operator or the " + "`logical_not` function instead" + ) + else: + return True + + +def _acceptance_fn_reciprocal(arg_dtype, buf_dt, res_dt, sycl_dev): + # if the kind of result is different from the kind of input, we use the + # default floating-point dtype for the resulting kind. This guarantees + # alignment of reciprocal and divide output types. + if buf_dt.kind != arg_dtype.kind: + default_dt = _get_device_default_dtype(res_dt.kind, sycl_dev) + if res_dt == default_dt: + return True + else: + return False + else: + return True + + +def _acceptance_fn_subtract( + arg1_dtype, arg2_dtype, buf1_dt, buf2_dt, res_dt, sycl_dev +): + # subtract is not defined for boolean data type + if arg1_dtype.char == "?" and arg2_dtype.char == "?": + raise ValueError( + "The `subtract` function, the `-` operator, is not supported " + "for inputs of data type bool, use the `^` operator, the " + "`bitwise_xor`, or the `logical_xor` function instead" + ) + else: + return True + + +def _can_cast( + from_: dpt.dtype, to_: dpt.dtype, _fp16: bool, _fp64: bool, casting="safe" +) -> bool: + """ + Can `from_` be cast to `to_` safely on a device with + fp16 and fp64 aspects as given? 
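+
+    Returns ``False`` when ``to_`` is not supported by such a device.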
+ """ + if not _dtype_supported_by_device_impl(to_, _fp16, _fp64): + return False + can_cast_v = np.can_cast(from_, to_, casting=casting) # ask NumPy + if _fp16 and _fp64: + return can_cast_v + if not can_cast_v: + if ( + from_.kind in "biu" + and to_.kind in "fc" + and _is_maximal_inexact_type(to_, _fp16, _fp64) + ): + return True + + return can_cast_v + + +def _dtype_supported_by_device_impl( + dt: dpt.dtype, has_fp16: bool, has_fp64: bool +) -> bool: + if has_fp64: + if not has_fp16: + if dt is dpt.float16: + return False + else: + if dt is dpt.float64: + return False + elif dt is dpt.complex128: + return False + if not has_fp16 and dt is dpt.float16: + return False + return True + + +def _find_buf_dtype(arg_dtype, query_fn, sycl_dev, acceptance_fn): + res_dt = query_fn(arg_dtype) + if res_dt: + return None, res_dt + + _fp16 = sycl_dev.has_aspect_fp16 + _fp64 = sycl_dev.has_aspect_fp64 + all_dts = _all_data_types(_fp16, _fp64) + for buf_dt in all_dts: + if _can_cast(arg_dtype, buf_dt, _fp16, _fp64): + res_dt = query_fn(buf_dt) + if res_dt: + acceptable = acceptance_fn(arg_dtype, buf_dt, res_dt, sycl_dev) + if acceptable: + return buf_dt, res_dt + else: + continue + + return None, None + + +def _find_buf_dtype2(arg1_dtype, arg2_dtype, query_fn, sycl_dev, acceptance_fn): + res_dt = query_fn(arg1_dtype, arg2_dtype) + if res_dt: + return None, None, res_dt + + _fp16 = sycl_dev.has_aspect_fp16 + _fp64 = sycl_dev.has_aspect_fp64 + all_dts = _all_data_types(_fp16, _fp64) + for buf1_dt in all_dts: + for buf2_dt in all_dts: + if _can_cast(arg1_dtype, buf1_dt, _fp16, _fp64) and _can_cast( + arg2_dtype, buf2_dt, _fp16, _fp64 + ): + res_dt = query_fn(buf1_dt, buf2_dt) + if res_dt: + ret_buf1_dt = None if buf1_dt == arg1_dtype else buf1_dt + ret_buf2_dt = None if buf2_dt == arg2_dtype else buf2_dt + if ret_buf1_dt is None or ret_buf2_dt is None: + return ret_buf1_dt, ret_buf2_dt, res_dt + else: + acceptable = acceptance_fn( + arg1_dtype, + arg2_dtype, + ret_buf1_dt, + ret_buf2_dt, + res_dt, + sycl_dev, + ) + if acceptable: + return ret_buf1_dt, ret_buf2_dt, res_dt + else: + continue + + return None, None, None + + +def _find_buf_dtype_in_place_op(arg1_dtype, arg2_dtype, query_fn, sycl_dev): + res_dt = query_fn(arg1_dtype, arg2_dtype) + if res_dt: + return None, res_dt + + _fp16 = sycl_dev.has_aspect_fp16 + _fp64 = sycl_dev.has_aspect_fp64 + if _can_cast(arg2_dtype, arg1_dtype, _fp16, _fp64, casting="same_kind"): + res_dt = query_fn(arg1_dtype, arg1_dtype) + if res_dt: + return arg1_dtype, res_dt + + return None, None + + +def _get_device_default_dtype(dt_kind, sycl_dev): + if dt_kind == "b": + return dpt.dtype(ti.default_device_bool_type(sycl_dev)) + elif dt_kind == "i": + return dpt.dtype(ti.default_device_int_type(sycl_dev)) + elif dt_kind == "u": + return dpt.dtype(ti.default_device_uint_type(sycl_dev)) + elif dt_kind == "f": + return dpt.dtype(ti.default_device_fp_type(sycl_dev)) + elif dt_kind == "c": + return dpt.dtype(ti.default_device_complex_type(sycl_dev)) + raise RuntimeError + + +def _is_maximal_inexact_type(dt: dpt.dtype, _fp16: bool, _fp64: bool): + """ + Return True if data type `dt` is the + maximal size inexact data type + """ + if _fp64: + return dt in [dpt.float64, dpt.complex128] + return dt in [dpt.float32, dpt.complex64] + + +def _to_device_supported_dtype(dt, dev): + has_fp16 = dev.has_aspect_fp16 + has_fp64 = dev.has_aspect_fp64 + + return _to_device_supported_dtype_impl(dt, has_fp16, has_fp64) + + +def _to_device_supported_dtype_impl(dt, has_fp16, has_fp64): + if has_fp64: + 
if not has_fp16: + if dt is dpt.float16: + return dpt.float32 + else: + if dt is dpt.float64: + return dpt.float32 + elif dt is dpt.complex128: + return dpt.complex64 + if not has_fp16 and dt is dpt.float16: + return dpt.float32 + return dt + + +class WeakBooleanType: + """Python type representing type of Python boolean objects""" + + def __init__(self, o): + self.o_ = o + + def get(self): + return self.o_ + + +class WeakIntegralType: + """Python type representing type of Python integral objects""" + + def __init__(self, o): + self.o_ = o + + def get(self): + return self.o_ + + +class WeakFloatingType: + """Python type representing type of Python floating point objects""" + + def __init__(self, o): + self.o_ = o + + def get(self): + return self.o_ + + +class WeakComplexType: + """Python type representing type of Python complex floating point objects""" + + def __init__(self, o): + self.o_ = o + + def get(self): + return self.o_ + + +def _weak_type_num_kind(o): + _map = {"?": 0, "i": 1, "f": 2, "c": 3} + if isinstance(o, WeakBooleanType): + return _map["?"] + if isinstance(o, WeakIntegralType): + return _map["i"] + if isinstance(o, WeakFloatingType): + return _map["f"] + if isinstance(o, WeakComplexType): + return _map["c"] + raise TypeError( + f"Unexpected type {o} while expecting " + "`WeakBooleanType`, `WeakIntegralType`," + "`WeakFloatingType`, or `WeakComplexType`." + ) + + +def _strong_dtype_num_kind(o): + _map = {"b": 0, "i": 1, "u": 1, "f": 2, "c": 3} + if not isinstance(o, dpt.dtype): + raise TypeError + k = o.kind + if k in _map: + return _map[k] + raise ValueError(f"Unrecognized kind {k} for dtype {o}") + + +def _is_weak_dtype(dtype): + return isinstance( + dtype, + (WeakBooleanType, WeakIntegralType, WeakFloatingType, WeakComplexType), + ) + + +def _resolve_weak_types(o1_dtype, o2_dtype, dev): + """Resolves weak data type per NEP-0050""" + if _is_weak_dtype(o1_dtype): + if _is_weak_dtype(o2_dtype): + raise ValueError + o1_kind_num = _weak_type_num_kind(o1_dtype) + o2_kind_num = _strong_dtype_num_kind(o2_dtype) + if o1_kind_num > o2_kind_num: + if isinstance(o1_dtype, WeakIntegralType): + return dpt.dtype(ti.default_device_int_type(dev)), o2_dtype + if isinstance(o1_dtype, WeakComplexType): + if o2_dtype is dpt.float16 or o2_dtype is dpt.float32: + return dpt.complex64, o2_dtype + return ( + _to_device_supported_dtype(dpt.complex128, dev), + o2_dtype, + ) + return _to_device_supported_dtype(dpt.float64, dev), o2_dtype + else: + return o2_dtype, o2_dtype + elif _is_weak_dtype(o2_dtype): + o1_kind_num = _strong_dtype_num_kind(o1_dtype) + o2_kind_num = _weak_type_num_kind(o2_dtype) + if o2_kind_num > o1_kind_num: + if isinstance(o2_dtype, WeakIntegralType): + return o1_dtype, dpt.dtype(ti.default_device_int_type(dev)) + if isinstance(o2_dtype, WeakComplexType): + if o1_dtype is dpt.float16 or o1_dtype is dpt.float32: + return o1_dtype, dpt.complex64 + return o1_dtype, _to_device_supported_dtype(dpt.complex128, dev) + return ( + o1_dtype, + _to_device_supported_dtype(dpt.float64, dev), + ) + else: + return o1_dtype, o1_dtype + else: + return o1_dtype, o2_dtype + + +def _resolve_weak_types_all_py_ints(o1_dtype, o2_dtype, dev): + """ + Resolves weak data type per NEP-0050 for comparisons and + divide, where result type is known and special behavior + is needed to handle mixed integer kinds and Python integers + without overflow + """ + if _is_weak_dtype(o1_dtype): + if _is_weak_dtype(o2_dtype): + raise ValueError + o1_kind_num = _weak_type_num_kind(o1_dtype) + o2_kind_num = 
_strong_dtype_num_kind(o2_dtype)
+        if o1_kind_num > o2_kind_num:
+            if isinstance(o1_dtype, WeakIntegralType):
+                return dpt.dtype(ti.default_device_int_type(dev)), o2_dtype
+            if isinstance(o1_dtype, WeakComplexType):
+                if o2_dtype is dpt.float16 or o2_dtype is dpt.float32:
+                    return dpt.complex64, o2_dtype
+                return (
+                    _to_device_supported_dtype(dpt.complex128, dev),
+                    o2_dtype,
+                )
+            return _to_device_supported_dtype(dpt.float64, dev), o2_dtype
+        else:
+            if o1_kind_num == o2_kind_num and isinstance(
+                o1_dtype, WeakIntegralType
+            ):
+                o1_val = o1_dtype.get()
+                o2_iinfo = dpt_ext.iinfo(o2_dtype)
+                if (o1_val < o2_iinfo.min) or (o1_val > o2_iinfo.max):
+                    return dpt.dtype(np.min_scalar_type(o1_val)), o2_dtype
+            return o2_dtype, o2_dtype
+    elif _is_weak_dtype(o2_dtype):
+        o1_kind_num = _strong_dtype_num_kind(o1_dtype)
+        o2_kind_num = _weak_type_num_kind(o2_dtype)
+        if o2_kind_num > o1_kind_num:
+            if isinstance(o2_dtype, WeakIntegralType):
+                return o1_dtype, dpt.dtype(ti.default_device_int_type(dev))
+            if isinstance(o2_dtype, WeakComplexType):
+                if o1_dtype is dpt.float16 or o1_dtype is dpt.float32:
+                    return o1_dtype, dpt.complex64
+                return o1_dtype, _to_device_supported_dtype(dpt.complex128, dev)
+            return (
+                o1_dtype,
+                _to_device_supported_dtype(dpt.float64, dev),
+            )
+        else:
+            if o1_kind_num == o2_kind_num and isinstance(
+                o2_dtype, WeakIntegralType
+            ):
+                o2_val = o2_dtype.get()
+                o1_iinfo = dpt_ext.iinfo(o1_dtype)
+                if (o2_val < o1_iinfo.min) or (o2_val > o1_iinfo.max):
+                    return o1_dtype, dpt.dtype(np.min_scalar_type(o2_val))
+            return o1_dtype, o1_dtype
+    else:
+        return o1_dtype, o2_dtype
+
+
+def _resolve_one_strong_two_weak_types(st_dtype, dtype1, dtype2, dev):
+    """
+    Resolves weak data types per NEP-0050,
+    where the second and third arguments are
+    permitted to be weak types
+    """
+    if _is_weak_dtype(st_dtype):
+        raise ValueError
+    if _is_weak_dtype(dtype1):
+        if _is_weak_dtype(dtype2):
+            kind_num1 = _weak_type_num_kind(dtype1)
+            kind_num2 = _weak_type_num_kind(dtype2)
+            st_kind_num = _strong_dtype_num_kind(st_dtype)
+
+            if kind_num1 > st_kind_num:
+                if isinstance(dtype1, WeakIntegralType):
+                    ret_dtype1 = dpt.dtype(ti.default_device_int_type(dev))
+                elif isinstance(dtype1, WeakComplexType):
+                    if st_dtype is dpt.float16 or st_dtype is dpt.float32:
+                        ret_dtype1 = dpt.complex64
+                    else:
+                        ret_dtype1 = _to_device_supported_dtype(
+                            dpt.complex128, dev
+                        )
+                else:
+                    ret_dtype1 = _to_device_supported_dtype(dpt.float64, dev)
+            else:
+                ret_dtype1 = st_dtype
+
+            if kind_num2 > st_kind_num:
+                if isinstance(dtype2, WeakIntegralType):
+                    ret_dtype2 = dpt.dtype(ti.default_device_int_type(dev))
+                elif isinstance(dtype2, WeakComplexType):
+                    if st_dtype is dpt.float16 or st_dtype is dpt.float32:
+                        ret_dtype2 = dpt.complex64
+                    else:
+                        ret_dtype2 = _to_device_supported_dtype(
+                            dpt.complex128, dev
+                        )
+                else:
+                    ret_dtype2 = _to_device_supported_dtype(dpt.float64, dev)
+            else:
+                ret_dtype2 = st_dtype
+
+            return ret_dtype1, ret_dtype2
+
+        max_dt_num_kind, max_dtype = max(
+            [
+                (_strong_dtype_num_kind(st_dtype), st_dtype),
+                (_strong_dtype_num_kind(dtype2), dtype2),
+            ]
+        )
+        dt1_kind_num = _weak_type_num_kind(dtype1)
+        if dt1_kind_num > max_dt_num_kind:
+            if isinstance(dtype1, WeakIntegralType):
+                return dpt.dtype(ti.default_device_int_type(dev)), dtype2
+            if isinstance(dtype1, WeakComplexType):
+                if max_dtype is dpt.float16 or max_dtype is dpt.float32:
+                    return dpt.complex64, dtype2
+                return (
+                    _to_device_supported_dtype(dpt.complex128, dev),
+                    dtype2,
+                )
+            return _to_device_supported_dtype(dpt.float64, dev), dtype2
+        else:
+            return max_dtype, dtype2
+    elif _is_weak_dtype(dtype2):
+        max_dt_num_kind, max_dtype = max(
+            [
+                (_strong_dtype_num_kind(st_dtype), st_dtype),
+                (_strong_dtype_num_kind(dtype1), dtype1),
+            ]
+        )
+        dt2_kind_num = _weak_type_num_kind(dtype2)
+        if dt2_kind_num > max_dt_num_kind:
+            if isinstance(dtype2, WeakIntegralType):
+                return dtype1, dpt.dtype(ti.default_device_int_type(dev))
+            if isinstance(dtype2, WeakComplexType):
+                if max_dtype is dpt.float16 or max_dtype is dpt.float32:
+                    return dtype1, dpt.complex64
+                return (
+                    dtype1,
+                    _to_device_supported_dtype(dpt.complex128, dev),
+                )
+            return dtype1, _to_device_supported_dtype(dpt.float64, dev)
+        else:
+            return dtype1, max_dtype
+    else:
+        # both are strong dtypes
+        # return unmodified
+        return dtype1, dtype2
+
+
+def _resolve_one_strong_one_weak_types(st_dtype, dtype, dev):
+    """Resolves one weak data type with one strong data type per NEP-0050"""
+    if _is_weak_dtype(st_dtype):
+        raise ValueError
+    if _is_weak_dtype(dtype):
+        st_kind_num = _strong_dtype_num_kind(st_dtype)
+        kind_num = _weak_type_num_kind(dtype)
+        if kind_num > st_kind_num:
+            if isinstance(dtype, WeakIntegralType):
+                return dpt.dtype(ti.default_device_int_type(dev))
+            if isinstance(dtype, WeakComplexType):
+                if st_dtype is dpt.float16 or st_dtype is dpt.float32:
+                    return dpt.complex64
+                return _to_device_supported_dtype(dpt.complex128, dev)
+            return _to_device_supported_dtype(dpt.float64, dev)
+        else:
+            return st_dtype
+    else:
+        return dtype
+
+
+class finfo_object:
+    """
+    `numpy.finfo` subclass which returns Python floating-point scalars for
+    `eps`, `max`, `min`, and `smallest_normal` attributes.
+    """
+
+    def __init__(self, dtype):
+        _supported_dtype([dpt.dtype(dtype)])
+        self._finfo = np.finfo(dtype)
+
+    @property
+    def bits(self):
+        """Number of bits occupied by the real-valued floating-point data type."""
+        return int(self._finfo.bits)
+
+    @property
+    def smallest_normal(self):
+        """
+        Smallest positive real-valued floating-point number with full
+        precision.
+        """
+        return float(self._finfo.smallest_normal)
+
+    @property
+    def tiny(self):
+        """An alias for `smallest_normal`"""
+        return float(self._finfo.tiny)
+
+    @property
+    def eps(self):
+        """
+        Difference between 1.0 and the next smallest representable real-valued
+        floating-point number larger than 1.0 according to the IEEE-754
+        standard.
+        """
+        return float(self._finfo.eps)
+
+    @property
+    def epsneg(self):
+        """
+        Difference between 1.0 and the next smallest representable real-valued
+        floating-point number smaller than 1.0 according to the IEEE-754
+        standard.
+        """
+        return float(self._finfo.epsneg)
+
+    @property
+    def min(self):
+        """Smallest representable real-valued number."""
+        return float(self._finfo.min)
+
+    @property
+    def max(self):
+        """Largest representable real-valued number."""
+        return float(self._finfo.max)
+
+    @property
+    def resolution(self):
+        """The approximate decimal resolution of this type."""
+        return float(self._finfo.resolution)
+
+    @property
+    def precision(self):
+        """
+        The approximate number of decimal digits to which this kind of
+        floating point type is precise.
+        """
+        return float(self._finfo.precision)
+
+    @property
+    def dtype(self):
+        """
+        The dtype for which finfo returns information. For complex input, the
+        returned dtype is the associated floating point dtype for its real and
+        imaginary components.
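+
+        For example, ``finfo(dpt.complex64).dtype`` reports ``float32``.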
+ """ + return self._finfo.dtype + + def __str__(self): + return self._finfo.__str__() + + def __repr__(self): + return self._finfo.__repr__() + + +def can_cast(from_, to, /, *, casting="safe") -> bool: + """can_cast(from, to, casting="safe") + + Determines if one data type can be cast to another data type according \ + to Type Promotion Rules. + + Args: + from_ (Union[usm_ndarray, dtype]): + source data type. If `from_` is an array, a device-specific type + promotion rules apply. + to (dtype): + target data type + casting (Optional[str]): + controls what kind of data casting may occur. + + * "no" means data types should not be cast at all. + * "safe" means only casts that preserve values are allowed. + * "same_kind" means only safe casts and casts within a kind, + like `float64` to `float32`, are allowed. + * "unsafe" means any data conversion can be done. + + Default: `"safe"`. + + Returns: + bool: + Gives `True` if cast can occur according to the casting rule. + + Device-specific type promotion rules take into account which data type are + and are not supported by a specific device. + """ + if isinstance(to, dpt.usm_ndarray): + raise TypeError(f"Expected `dpt.dtype` type, got {type(to)}.") + + dtype_to = dpt.dtype(to) + _supported_dtype([dtype_to]) + + if isinstance(from_, dpt.usm_ndarray): + dtype_from = from_.dtype + return _can_cast( + dtype_from, + dtype_to, + from_.sycl_device.has_aspect_fp16, + from_.sycl_device.has_aspect_fp64, + casting=casting, + ) + else: + dtype_from = dpt.dtype(from_) + _supported_dtype([dtype_from]) + # query casting as if all dtypes are supported + return _can_cast(dtype_from, dtype_to, True, True, casting=casting) + + +def result_type(*arrays_and_dtypes): + """ + result_type(*arrays_and_dtypes) + + Returns the dtype that results from applying the Type Promotion Rules to \ + the arguments. + + Args: + arrays_and_dtypes (Union[usm_ndarray, dtype]): + An arbitrary length sequence of usm_ndarray objects or dtypes. + + Returns: + dtype: + The dtype resulting from an operation involving the + input arrays and dtypes. + """ + dtypes = [] + devices = [] + weak_dtypes = [] + for arg_i in arrays_and_dtypes: + if isinstance(arg_i, dpt.usm_ndarray): + devices.append(arg_i.sycl_device) + dtypes.append(arg_i.dtype) + elif isinstance(arg_i, int): + weak_dtypes.append(WeakIntegralType(arg_i)) + elif isinstance(arg_i, float): + weak_dtypes.append(WeakFloatingType(arg_i)) + elif isinstance(arg_i, complex): + weak_dtypes.append(WeakComplexType(arg_i)) + elif isinstance(arg_i, bool): + weak_dtypes.append(WeakBooleanType(arg_i)) + else: + dt = dpt.dtype(arg_i) + _supported_dtype([dt]) + dtypes.append(dt) + + has_fp16 = True + has_fp64 = True + target_dev = None + if devices: + inspected = False + for d in devices: + if inspected: + unsame_fp16_support = d.has_aspect_fp16 != has_fp16 + unsame_fp64_support = d.has_aspect_fp64 != has_fp64 + if unsame_fp16_support or unsame_fp64_support: + raise ValueError( + "Input arrays reside on devices " + "with different device supports; " + "unable to determine which " + "device-specific type promotion rules " + "to use." 
+                    )
+            else:
+                has_fp16 = d.has_aspect_fp16
+                has_fp64 = d.has_aspect_fp64
+                target_dev = d
+                inspected = True
+
+    if not dtypes and weak_dtypes:
+        dtypes.append(weak_dtypes[0].get())
+
+    if not (has_fp16 and has_fp64):
+        for dt in dtypes:
+            if not _dtype_supported_by_device_impl(dt, has_fp16, has_fp64):
+                raise ValueError(
+                    f"Argument {dt} is not supported by the device"
+                )
+        res_dt = np.result_type(*dtypes)
+        res_dt = _to_device_supported_dtype_impl(res_dt, has_fp16, has_fp64)
+        for wdt in weak_dtypes:
+            pair = _resolve_weak_types(wdt, res_dt, target_dev)
+            res_dt = np.result_type(*pair)
+            res_dt = _to_device_supported_dtype_impl(res_dt, has_fp16, has_fp64)
+    else:
+        res_dt = np.result_type(*dtypes)
+        if weak_dtypes:
+            weak_dt_obj = [wdt.get() for wdt in weak_dtypes]
+            res_dt = np.result_type(res_dt, *weak_dt_obj)
+
+    return res_dt
+
+
+def iinfo(dtype, /):
+    """iinfo(dtype)
+
+    Returns machine limits for integer data types.
+
+    Args:
+        dtype (dtype, usm_ndarray):
+            integer dtype or
+            an array with integer dtype.
+
+    Returns:
+        iinfo_object:
+            An object with the following attributes:
+
+            * bits: int
+                number of bits occupied by the data type
+            * max: int
+                largest representable number.
+            * min: int
+                smallest representable number.
+            * dtype: dtype
+                integer data type.
+    """
+    if isinstance(dtype, dpt.usm_ndarray):
+        dtype = dtype.dtype
+    _supported_dtype([dpt.dtype(dtype)])
+    return np.iinfo(dtype)
+
+
+def finfo(dtype, /):
+    """finfo(dtype)
+
+    Returns machine limits for floating-point data types.
+
+    Args:
+        dtype (dtype, usm_ndarray): floating-point dtype or
+            an array with floating point data type.
+            If complex, the information is about its component
+            data type.
+
+    Returns:
+        finfo_object:
+            an object with the following attributes:
+
+            * bits: int
+                number of bits occupied by dtype.
+            * eps: float
+                difference between 1.0 and the next smallest representable
+                real-valued floating-point number larger than 1.0 according
+                to the IEEE-754 standard.
+            * max: float
+                largest representable real-valued number.
+            * min: float
+                smallest representable real-valued number.
+            * smallest_normal: float
+                smallest positive real-valued floating-point number with
+                full precision.
+            * dtype: dtype
+                real-valued floating-point data type.
+
+    """
+    if isinstance(dtype, dpt.usm_ndarray):
+        dtype = dtype.dtype
+    _supported_dtype([dpt.dtype(dtype)])
+    return finfo_object(dtype)
+
+
+def _supported_dtype(dtypes):
+    for dtype in dtypes:
+        if dtype.char not in "?bBhHiIlLqQefdFD":
+            raise ValueError(f"Dpctl doesn't support dtype {dtype}.")
+    return True
+
+
+def isdtype(dtype, kind):
+    """isdtype(dtype, kind)
+
+    Returns a boolean indicating whether a provided `dtype` is
+    of a specified data type `kind`.
+
+    See [array API](array_api) for more information.
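+
+    A minimal illustration (using a NumPy dtype, which is what this function
+    checks for):
+
+        >>> import numpy as np
+        >>> isdtype(np.dtype("float32"), "real floating")
+        True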
+ + [array_api]: https://data-apis.org/array-api/latest/ + """ + + if not isinstance(dtype, np.dtype): + raise TypeError(f"Expected instance of `dpt.dtype`, got {dtype}") + + if isinstance(kind, np.dtype): + return dtype == kind + + elif isinstance(kind, str): + if kind == "bool": + return dtype == np.dtype("bool") + elif kind == "signed integer": + return dtype.kind == "i" + elif kind == "unsigned integer": + return dtype.kind == "u" + elif kind == "integral": + return dtype.kind in "iu" + elif kind == "real floating": + return dtype.kind == "f" + elif kind == "complex floating": + return dtype.kind == "c" + elif kind == "numeric": + return dtype.kind in "iufc" + else: + raise ValueError(f"Unrecognized data type kind: {kind}") + + elif isinstance(kind, tuple): + return any(isdtype(dtype, k) for k in kind) + + else: + raise TypeError(f"Unsupported data type kind: {kind}") + + +def _default_accumulation_dtype(inp_dt, q): + """Gives default output data type for given input data + type `inp_dt` when accumulation is performed on queue `q` + """ + inp_kind = inp_dt.kind + if inp_kind in "bi": + res_dt = dpt.dtype(ti.default_device_int_type(q)) + if inp_dt.itemsize > res_dt.itemsize: + res_dt = inp_dt + elif inp_kind in "u": + res_dt = dpt.dtype(ti.default_device_uint_type(q)) + res_ii = dpt_ext.iinfo(res_dt) + inp_ii = dpt_ext.iinfo(inp_dt) + if inp_ii.min >= res_ii.min and inp_ii.max <= res_ii.max: + pass + else: + res_dt = inp_dt + elif inp_kind in "fc": + res_dt = inp_dt + + return res_dt + + +def _default_accumulation_dtype_fp_types(inp_dt, q): + """Gives default output data type for given input data + type `inp_dt` when accumulation is performed on queue `q` + and the accumulation supports only floating-point data types + """ + inp_kind = inp_dt.kind + if inp_kind in "biu": + res_dt = dpt.dtype(ti.default_device_fp_type(q)) + can_cast_v = dpt_ext.can_cast(inp_dt, res_dt) + if not can_cast_v: + _fp64 = q.sycl_device.has_aspect_fp64 + res_dt = dpt.float64 if _fp64 else dpt.float32 + elif inp_kind in "f": + res_dt = inp_dt + elif inp_kind in "c": + raise ValueError("function not defined for complex types") + + return res_dt + + +__all__ = [ + "_find_buf_dtype", + "_find_buf_dtype2", + "_to_device_supported_dtype", + "_acceptance_fn_default_unary", + "_acceptance_fn_reciprocal", + "_acceptance_fn_default_binary", + "_acceptance_fn_divide", + "_acceptance_fn_negative", + "_acceptance_fn_subtract", + "_resolve_one_strong_one_weak_types", + "_resolve_one_strong_two_weak_types", + "_resolve_weak_types", + "_resolve_weak_types_all_py_ints", + "_weak_type_num_kind", + "_strong_dtype_num_kind", + "can_cast", + "finfo", + "iinfo", + "isdtype", + "result_type", + "WeakBooleanType", + "WeakIntegralType", + "WeakFloatingType", + "WeakComplexType", + "_default_accumulation_dtype", + "_default_accumulation_dtype_fp_types", + "_find_buf_dtype_in_place_op", +] diff --git a/dpctl_ext/tensor/libtensor/include/kernels/clip.hpp b/dpctl_ext/tensor/libtensor/include/kernels/clip.hpp new file mode 100644 index 000000000000..58a86a8f82d6 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/include/kernels/clip.hpp @@ -0,0 +1,357 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. 
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// - Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// - Neither the name of the copyright holder nor the names of its contributors
+// may be used to endorse or promote products derived from this software
+// without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+// THE POSSIBILITY OF SUCH DAMAGE.
+//*****************************************************************************
+//
+//===---------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines kernels for dpctl.tensor.clip.
+//===---------------------------------------------------------------------===//
+
+#pragma once
+#include <algorithm>
+#include <complex>
+#include <cstddef>
+#include <cstdint>
+#include <type_traits>
+#include <vector>
+
+#include <sycl/sycl.hpp>
+
+#include "dpctl_tensor_types.hpp"
+#include "kernels/alignment.hpp"
+#include "utils/math_utils.hpp"
+#include "utils/offset_utils.hpp"
+#include "utils/sycl_utils.hpp"
+#include "utils/type_utils.hpp"
+
+namespace dpctl::tensor::kernels::clip
+{
+
+using dpctl::tensor::ssize_t;
+using namespace dpctl::tensor::offset_utils;
+
+using dpctl::tensor::kernels::alignment_utils::
+    disabled_sg_loadstore_wrapper_krn;
+using dpctl::tensor::kernels::alignment_utils::is_aligned;
+using dpctl::tensor::kernels::alignment_utils::required_alignment;
+
+using dpctl::tensor::sycl_utils::sub_group_load;
+using dpctl::tensor::sycl_utils::sub_group_store;
+
+template <typename T>
+T clip(const T &x, const T &min, const T &max)
+{
+    using dpctl::tensor::type_utils::is_complex;
+    if constexpr (is_complex<T>::value) {
+        using dpctl::tensor::math_utils::max_complex;
+        using dpctl::tensor::math_utils::min_complex;
+        return min_complex(max_complex(x, min), max);
+    }
+    else if constexpr (std::is_floating_point_v<T> ||
+                       std::is_same_v<T, sycl::half>)
+    {
+        auto tmp = (std::isnan(x) || x > min) ? x : min;
+        return (std::isnan(tmp) || tmp < max) ? tmp : max;
+    }
+    else if constexpr (std::is_same_v<T, bool>) {
+        return (x || min) && max;
+    }
+    else {
+        auto tmp = (x > min) ? x : min;
+        return (tmp < max) ? tmp : max;
+    }
+}
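+
+// Contiguous variant: each work-item handles n_vecs * vec_sz consecutive
+// elements. When sub-group load/stores are enabled and all pointers meet the
+// required alignment, data moves through sycl::vec<T, vec_sz> registers;
+// complex types (and the unaligned fallback) use a per-lane strided loop.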
+
+template <typename T,
+          std::uint8_t vec_sz = 4u,
+          std::uint8_t n_vecs = 2u,
+          bool enable_sg_loadstore = true>
+class ClipContigFunctor
+{
+private:
+    std::size_t nelems = 0;
+    const T *x_p = nullptr;
+    const T *min_p = nullptr;
+    const T *max_p = nullptr;
+    T *dst_p = nullptr;
+
+public:
+    ClipContigFunctor(std::size_t nelems_,
+                      const T *x_p_,
+                      const T *min_p_,
+                      const T *max_p_,
+                      T *dst_p_)
+        : nelems(nelems_), x_p(x_p_), min_p(min_p_), max_p(max_p_),
+          dst_p(dst_p_)
+    {
+    }
+
+    void operator()(sycl::nd_item<1> ndit) const
+    {
+        static constexpr std::uint8_t nelems_per_wi = n_vecs * vec_sz;
+
+        using dpctl::tensor::type_utils::is_complex;
+        if constexpr (is_complex<T>::value || !enable_sg_loadstore) {
+            const std::uint16_t sgSize =
+                ndit.get_sub_group().get_local_range()[0];
+            const std::size_t gid = ndit.get_global_linear_id();
+            const std::uint16_t nelems_per_sg = sgSize * nelems_per_wi;
+
+            const std::size_t start =
+                (gid / sgSize) * (nelems_per_sg - sgSize) + gid;
+            const std::size_t end = std::min(nelems, start + nelems_per_sg);
+
+            for (std::size_t offset = start; offset < end; offset += sgSize) {
+                dst_p[offset] = clip(x_p[offset], min_p[offset], max_p[offset]);
+            }
+        }
+        else {
+            auto sg = ndit.get_sub_group();
+            const std::uint16_t sgSize = sg.get_max_local_range()[0];
+
+            const std::size_t base =
+                nelems_per_wi * (ndit.get_group(0) * ndit.get_local_range(0) +
+                                 sg.get_group_id()[0] * sgSize);
+
+            if (base + nelems_per_wi * sgSize < nelems) {
+                sycl::vec<T, vec_sz> dst_vec;
+#pragma unroll
+                for (std::uint8_t it = 0; it < n_vecs * vec_sz; it += vec_sz) {
+                    const std::size_t idx = base + it * sgSize;
+                    auto x_multi_ptr = sycl::address_space_cast<
+                        sycl::access::address_space::global_space,
+                        sycl::access::decorated::yes>(&x_p[idx]);
+                    auto min_multi_ptr = sycl::address_space_cast<
+                        sycl::access::address_space::global_space,
+                        sycl::access::decorated::yes>(&min_p[idx]);
+                    auto max_multi_ptr = sycl::address_space_cast<
+                        sycl::access::address_space::global_space,
+                        sycl::access::decorated::yes>(&max_p[idx]);
+                    auto dst_multi_ptr = sycl::address_space_cast<
+                        sycl::access::address_space::global_space,
+                        sycl::access::decorated::yes>(&dst_p[idx]);
+
+                    const sycl::vec<T, vec_sz> x_vec =
+                        sub_group_load<vec_sz>(sg, x_multi_ptr);
+                    const sycl::vec<T, vec_sz> min_vec =
+                        sub_group_load<vec_sz>(sg, min_multi_ptr);
+                    const sycl::vec<T, vec_sz> max_vec =
+                        sub_group_load<vec_sz>(sg, max_multi_ptr);
+#pragma unroll
+                    for (std::uint8_t vec_id = 0; vec_id < vec_sz; ++vec_id) {
+                        dst_vec[vec_id] = clip(x_vec[vec_id], min_vec[vec_id],
+                                               max_vec[vec_id]);
+                    }
+                    sub_group_store<vec_sz>(sg, dst_vec, dst_multi_ptr);
+                }
+            }
+            else {
+                const std::size_t lane_id = sg.get_local_id()[0];
+                for (std::size_t k = base + lane_id; k < nelems; k += sgSize) {
+                    dst_p[k] = clip(x_p[k], min_p[k], max_p[k]);
+                }
+            }
+        }
+    }
+};
+
+template <typename T,
+          std::uint8_t vec_sz,
+          std::uint8_t n_vecs,
+          bool enable_sg_loadstore>
+class clip_contig_kernel;
+
+typedef sycl::event (*clip_contig_impl_fn_ptr_t)(
+    sycl::queue &,
+    std::size_t,
+    const char *,
+    const char *,
+    const char *,
+    char *,
+    const std::vector<sycl::event> &);
+
+template <typename T>
+sycl::event clip_contig_impl(sycl::queue &q,
+                             std::size_t nelems,
+                             const char *x_cp,
+                             const char *min_cp,
+                             const char *max_cp,
+                             char *dst_cp,
+                             const std::vector<sycl::event> &depends)
+{
+    const T *x_tp = reinterpret_cast<const T *>(x_cp);
+    const T *min_tp = reinterpret_cast<const T *>(min_cp);
+    const T *max_tp = reinterpret_cast<const T *>(max_cp);
+    T *dst_tp = reinterpret_cast<T *>(dst_cp);
+
+    sycl::event clip_ev = q.submit([&](sycl::handler &cgh) {
+        cgh.depends_on(depends);
+
+        std::size_t lws = 64;
+        static constexpr std::uint8_t vec_sz = 4;
+        static constexpr std::uint8_t n_vecs = 2;
+        const std::size_t n_groups =
+            ((nelems + lws * n_vecs * vec_sz - 1) / (lws * n_vecs * vec_sz));
+        const auto gws_range = sycl::range<1>(n_groups * lws);
+        const auto lws_range = sycl::range<1>(lws);
+
+        if (is_aligned<required_alignment>(x_cp) &&
+            is_aligned<required_alignment>(min_cp) &&
+            is_aligned<required_alignment>(max_cp) &&
+            is_aligned<required_alignment>(dst_cp))
+        {
+            static constexpr bool enable_sg_loadstore = true;
+            using KernelName =
+                clip_contig_kernel<T, vec_sz, n_vecs, enable_sg_loadstore>;
+            using Impl =
+                ClipContigFunctor<T, vec_sz, n_vecs, enable_sg_loadstore>;
+
+            cgh.parallel_for<KernelName>(
+                sycl::nd_range<1>(gws_range, lws_range),
+                Impl(nelems, x_tp, min_tp, max_tp, dst_tp));
+        }
+        else {
+            static constexpr bool disable_sg_loadstore = false;
+            using InnerKernelName =
+                clip_contig_kernel<T, vec_sz, n_vecs, disable_sg_loadstore>;
+            using KernelName =
+                disabled_sg_loadstore_wrapper_krn<InnerKernelName>;
+            using Impl =
+                ClipContigFunctor<T, vec_sz, n_vecs, disable_sg_loadstore>;
+
+            cgh.parallel_for<KernelName>(
+                sycl::nd_range<1>(gws_range, lws_range),
+                Impl(nelems, x_tp, min_tp, max_tp, dst_tp));
+        }
+    });
+
+    return clip_ev;
+}
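+
+// Strided variant: a FourOffsets_StridedIndexer maps a flat index to
+// per-array offsets computed from the packed shape/strides, so arbitrary
+// (non-contiguous) layouts of x, min, max and dst are supported.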
+
+template <typename T, typename IndexerT>
+class ClipStridedFunctor
+{
+private:
+    const T *x_p = nullptr;
+    const T *min_p = nullptr;
+    const T *max_p = nullptr;
+    T *dst_p = nullptr;
+    IndexerT indexer;
+
+public:
+    ClipStridedFunctor(const T *x_p_,
+                       const T *min_p_,
+                       const T *max_p_,
+                       T *dst_p_,
+                       const IndexerT &indexer_)
+        : x_p(x_p_), min_p(min_p_), max_p(max_p_), dst_p(dst_p_),
+          indexer(indexer_)
+    {
+    }
+
+    void operator()(sycl::id<1> id) const
+    {
+        std::size_t gid = id[0];
+        auto offsets = indexer(static_cast<ssize_t>(gid));
+        dst_p[offsets.get_fourth_offset()] =
+            clip(x_p[offsets.get_first_offset()],
+                 min_p[offsets.get_second_offset()],
+                 max_p[offsets.get_third_offset()]);
+    }
+};
+
+template <typename T, typename IndexerT> class clip_strided_kernel;
+
+typedef sycl::event (*clip_strided_impl_fn_ptr_t)(
+    sycl::queue &,
+    std::size_t,
+    int,
+    const char *,
+    const char *,
+    const char *,
+    char *,
+    const ssize_t *,
+    ssize_t,
+    ssize_t,
+    ssize_t,
+    ssize_t,
+    const std::vector<sycl::event> &);
+
+template <typename T>
+sycl::event clip_strided_impl(sycl::queue &q,
+                              std::size_t nelems,
+                              int nd,
+                              const char *x_cp,
+                              const char *min_cp,
+                              const char *max_cp,
+                              char *dst_cp,
+                              const ssize_t *shape_strides,
+                              ssize_t x_offset,
+                              ssize_t min_offset,
+                              ssize_t max_offset,
+                              ssize_t dst_offset,
+                              const std::vector<sycl::event> &depends)
+{
+    const T *x_tp = reinterpret_cast<const T *>(x_cp);
+    const T *min_tp = reinterpret_cast<const T *>(min_cp);
+    const T *max_tp = reinterpret_cast<const T *>(max_cp);
+    T *dst_tp = reinterpret_cast<T *>(dst_cp);
+
+    sycl::event clip_ev = q.submit([&](sycl::handler &cgh) {
+        cgh.depends_on(depends);
+
+        const FourOffsets_StridedIndexer indexer{
+            nd, x_offset, min_offset, max_offset, dst_offset, shape_strides};
+
+        using KernelName = clip_strided_kernel<T, FourOffsets_StridedIndexer>;
+        using Impl = ClipStridedFunctor<T, FourOffsets_StridedIndexer>;
+
+        cgh.parallel_for<KernelName>(
+            sycl::range<1>(nelems),
+            Impl(x_tp, min_tp, max_tp, dst_tp, indexer));
+    });
+
+    return clip_ev;
+}
+
+template <typename fnT, typename T> struct ClipStridedFactory
+{
+    fnT get()
+    {
+        fnT fn = clip_strided_impl<T>;
+        return fn;
+    }
+};
+
+template <typename fnT, typename T> struct ClipContigFactory
+{
+    fnT get()
+    {
+        fnT fn = clip_contig_impl<T>;
+        return fn;
+    }
+};
+
+} // namespace dpctl::tensor::kernels::clip
diff --git a/dpctl_ext/tensor/libtensor/include/kernels/repeat.hpp b/dpctl_ext/tensor/libtensor/include/kernels/repeat.hpp
new file mode 100644
index 000000000000..83a520adb538
--- /dev/null
+++ b/dpctl_ext/tensor/libtensor/include/kernels/repeat.hpp
@@ -0,0 +1,460 @@
+//*****************************************************************************
+// Copyright (c) 2026, Intel Corporation
+// All rights reserved.
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file defines kernels for tensor repeating operations. +//===----------------------------------------------------------------------===// + +#pragma once +#include +#include +#include + +#include + +#include "dpctl_tensor_types.hpp" +#include "utils/offset_utils.hpp" + +namespace dpctl::tensor::kernels::repeat +{ + +using dpctl::tensor::ssize_t; +using namespace dpctl::tensor::offset_utils; + +template +class repeat_by_sequence_kernel; + +template +class RepeatSequenceFunctor +{ +private: + const T *src = nullptr; + T *dst = nullptr; + const repT *reps = nullptr; + const repT *cumsum = nullptr; + std::size_t src_axis_nelems = 1; + OrthogIndexer orthog_strider; + SrcAxisIndexer src_axis_strider; + DstAxisIndexer dst_axis_strider; + RepIndexer reps_strider; + +public: + RepeatSequenceFunctor(const T *src_, + T *dst_, + const repT *reps_, + const repT *cumsum_, + std::size_t src_axis_nelems_, + const OrthogIndexer &orthog_strider_, + const SrcAxisIndexer &src_axis_strider_, + const DstAxisIndexer &dst_axis_strider_, + const RepIndexer &reps_strider_) + : src(src_), dst(dst_), reps(reps_), cumsum(cumsum_), + src_axis_nelems(src_axis_nelems_), orthog_strider(orthog_strider_), + src_axis_strider(src_axis_strider_), + dst_axis_strider(dst_axis_strider_), reps_strider(reps_strider_) + { + } + + void operator()(sycl::id<1> idx) const + { + std::size_t id = idx[0]; + auto i_orthog = id / src_axis_nelems; + auto i_along = id - (i_orthog * src_axis_nelems); + + auto orthog_offsets = orthog_strider(i_orthog); + auto src_offset = orthog_offsets.get_first_offset(); + auto dst_offset = orthog_offsets.get_second_offset(); + + auto val = src[src_offset + src_axis_strider(i_along)]; + auto last = cumsum[i_along]; + auto first = last - reps[reps_strider(i_along)]; + for (auto i = first; i < last; ++i) { + dst[dst_offset + 
dst_axis_strider(i)] = val; + } + } +}; + +typedef sycl::event (*repeat_by_sequence_fn_ptr_t)( + sycl::queue &, + std::size_t, + std::size_t, + const char *, + char *, + const char *, + const char *, + int, + const ssize_t *, + ssize_t, + ssize_t, + ssize_t, + ssize_t, + ssize_t, + ssize_t, + ssize_t, + ssize_t, + const std::vector &); + +template +sycl::event + repeat_by_sequence_impl(sycl::queue &q, + std::size_t orthog_nelems, + std::size_t src_axis_nelems, + const char *src_cp, + char *dst_cp, + const char *reps_cp, + const char *cumsum_cp, + int orthog_nd, + const ssize_t *orthog_src_dst_shape_and_strides, + ssize_t src_offset, + ssize_t dst_offset, + ssize_t src_axis_shape, + ssize_t src_axis_stride, + ssize_t dst_axis_shape, + ssize_t dst_axis_stride, + ssize_t reps_shape, + ssize_t reps_stride, + const std::vector &depends) +{ + sycl::event repeat_ev = q.submit([&](sycl::handler &cgh) { + cgh.depends_on(depends); + + const T *src_tp = reinterpret_cast(src_cp); + const repT *reps_tp = reinterpret_cast(reps_cp); + const repT *cumsum_tp = reinterpret_cast(cumsum_cp); + T *dst_tp = reinterpret_cast(dst_cp); + + // orthog ndim indexer + const TwoOffsets_StridedIndexer orthog_indexer{ + orthog_nd, src_offset, dst_offset, + orthog_src_dst_shape_and_strides}; + // indexers along repeated axis + const Strided1DIndexer src_axis_indexer{/* size */ src_axis_shape, + /* step */ src_axis_stride}; + const Strided1DIndexer dst_axis_indexer{/* size */ dst_axis_shape, + /* step */ dst_axis_stride}; + // indexer along reps array + const Strided1DIndexer reps_indexer{/* size */ reps_shape, + /* step */ reps_stride}; + + const std::size_t gws = orthog_nelems * src_axis_nelems; + + cgh.parallel_for>( + sycl::range<1>(gws), + RepeatSequenceFunctor( + src_tp, dst_tp, reps_tp, cumsum_tp, src_axis_nelems, + orthog_indexer, src_axis_indexer, dst_axis_indexer, + reps_indexer)); + }); + + return repeat_ev; +} + +template +struct RepeatSequenceFactory +{ + fnT get() + { + fnT fn = repeat_by_sequence_impl; + return fn; + } +}; + +typedef sycl::event (*repeat_by_sequence_1d_fn_ptr_t)( + sycl::queue &, + std::size_t, + const char *, + char *, + const char *, + const char *, + int, + const ssize_t *, + ssize_t, + ssize_t, + ssize_t, + ssize_t, + const std::vector &); + +template +sycl::event repeat_by_sequence_1d_impl(sycl::queue &q, + std::size_t src_nelems, + const char *src_cp, + char *dst_cp, + const char *reps_cp, + const char *cumsum_cp, + int src_nd, + const ssize_t *src_shape_strides, + ssize_t dst_shape, + ssize_t dst_stride, + ssize_t reps_shape, + ssize_t reps_stride, + const std::vector &depends) +{ + sycl::event repeat_ev = q.submit([&](sycl::handler &cgh) { + cgh.depends_on(depends); + + const T *src_tp = reinterpret_cast(src_cp); + const repT *reps_tp = reinterpret_cast(reps_cp); + const repT *cumsum_tp = reinterpret_cast(cumsum_cp); + T *dst_tp = reinterpret_cast(dst_cp); + + // orthog ndim indexer + static constexpr TwoZeroOffsets_Indexer orthog_indexer{}; + // indexers along repeated axis + const StridedIndexer src_indexer{src_nd, 0, src_shape_strides}; + const Strided1DIndexer dst_indexer{/* size */ dst_shape, + /* step */ dst_stride}; + // indexer along reps array + const Strided1DIndexer reps_indexer{/* size */ reps_shape, + /* step */ reps_stride}; + + const std::size_t gws = src_nelems; + + cgh.parallel_for>( + sycl::range<1>(gws), + RepeatSequenceFunctor( + src_tp, dst_tp, reps_tp, cumsum_tp, src_nelems, orthog_indexer, + src_indexer, dst_indexer, reps_indexer)); + }); + + return repeat_ev; 
+} + +template +struct RepeatSequence1DFactory +{ + fnT get() + { + fnT fn = repeat_by_sequence_1d_impl; + return fn; + } +}; + +template +class repeat_by_scalar_kernel; + +template +class RepeatScalarFunctor +{ +private: + const T *src = nullptr; + T *dst = nullptr; + ssize_t reps = 1; + std::size_t dst_axis_nelems = 0; + OrthogIndexer orthog_strider; + SrcAxisIndexer src_axis_strider; + DstAxisIndexer dst_axis_strider; + +public: + RepeatScalarFunctor(const T *src_, + T *dst_, + const ssize_t reps_, + std::size_t dst_axis_nelems_, + const OrthogIndexer &orthog_strider_, + const SrcAxisIndexer &src_axis_strider_, + const DstAxisIndexer &dst_axis_strider_) + : src(src_), dst(dst_), reps(reps_), dst_axis_nelems(dst_axis_nelems_), + orthog_strider(orthog_strider_), src_axis_strider(src_axis_strider_), + dst_axis_strider(dst_axis_strider_) + { + } + + void operator()(sycl::id<1> idx) const + { + std::size_t id = idx[0]; + auto i_orthog = id / dst_axis_nelems; + auto i_along = id - (i_orthog * dst_axis_nelems); + + auto orthog_offsets = orthog_strider(i_orthog); + auto src_offset = orthog_offsets.get_first_offset(); + auto dst_offset = orthog_offsets.get_second_offset(); + + auto dst_axis_offset = dst_axis_strider(i_along); + auto src_axis_offset = src_axis_strider(i_along / reps); + dst[dst_offset + dst_axis_offset] = src[src_offset + src_axis_offset]; + } +}; + +typedef sycl::event (*repeat_by_scalar_fn_ptr_t)( + sycl::queue &, + std::size_t, + std::size_t, + const char *, + char *, + const ssize_t, + int, + const ssize_t *, + ssize_t, + ssize_t, + ssize_t, + ssize_t, + ssize_t, + ssize_t, + const std::vector &); + +template +sycl::event repeat_by_scalar_impl(sycl::queue &q, + std::size_t orthog_nelems, + std::size_t dst_axis_nelems, + const char *src_cp, + char *dst_cp, + const ssize_t reps, + int orthog_nd, + const ssize_t *orthog_shape_and_strides, + ssize_t src_offset, + ssize_t dst_offset, + ssize_t src_axis_shape, + ssize_t src_axis_stride, + ssize_t dst_axis_shape, + ssize_t dst_axis_stride, + const std::vector &depends) +{ + sycl::event repeat_ev = q.submit([&](sycl::handler &cgh) { + cgh.depends_on(depends); + + const T *src_tp = reinterpret_cast(src_cp); + T *dst_tp = reinterpret_cast(dst_cp); + + // orthog ndim indexer + const TwoOffsets_StridedIndexer orthog_indexer{ + orthog_nd, src_offset, dst_offset, orthog_shape_and_strides}; + // indexers along repeated axis + const Strided1DIndexer src_axis_indexer{/* size */ src_axis_shape, + /* step */ src_axis_stride}; + const Strided1DIndexer dst_axis_indexer{/* size */ dst_axis_shape, + /* step */ dst_axis_stride}; + + const std::size_t gws = orthog_nelems * dst_axis_nelems; + + cgh.parallel_for>( + sycl::range<1>(gws), + RepeatScalarFunctor( + src_tp, dst_tp, reps, dst_axis_nelems, orthog_indexer, + src_axis_indexer, dst_axis_indexer)); + }); + + return repeat_ev; +} + +template +struct RepeatScalarFactory +{ + fnT get() + { + fnT fn = repeat_by_scalar_impl; + return fn; + } +}; + +typedef sycl::event (*repeat_by_scalar_1d_fn_ptr_t)( + sycl::queue &, + std::size_t, + const char *, + char *, + const ssize_t, + int, + const ssize_t *, + ssize_t, + ssize_t, + const std::vector &); + +template +sycl::event repeat_by_scalar_1d_impl(sycl::queue &q, + std::size_t dst_nelems, + const char *src_cp, + char *dst_cp, + const ssize_t reps, + int src_nd, + const ssize_t *src_shape_strides, + ssize_t dst_shape, + ssize_t dst_stride, + const std::vector &depends) +{ + sycl::event repeat_ev = q.submit([&](sycl::handler &cgh) { + 
cgh.depends_on(depends); + + const T *src_tp = reinterpret_cast(src_cp); + T *dst_tp = reinterpret_cast(dst_cp); + + // orthog ndim indexer + static constexpr TwoZeroOffsets_Indexer orthog_indexer{}; + // indexers along repeated axis + const StridedIndexer src_indexer(src_nd, 0, src_shape_strides); + const Strided1DIndexer dst_indexer{/* size */ dst_shape, + /* step */ dst_stride}; + + const std::size_t gws = dst_nelems; + + cgh.parallel_for>( + sycl::range<1>(gws), + RepeatScalarFunctor(src_tp, dst_tp, reps, + dst_nelems, orthog_indexer, + src_indexer, dst_indexer)); + }); + + return repeat_ev; +} + +template +struct RepeatScalar1DFactory +{ + fnT get() + { + fnT fn = repeat_by_scalar_1d_impl; + return fn; + } +}; + +} // namespace dpctl::tensor::kernels::repeat diff --git a/dpctl_ext/tensor/libtensor/include/kernels/where.hpp b/dpctl_ext/tensor/libtensor/include/kernels/where.hpp new file mode 100644 index 000000000000..454e1e61fa0d --- /dev/null +++ b/dpctl_ext/tensor/libtensor/include/kernels/where.hpp @@ -0,0 +1,338 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines kernels for dpctl.tensor.where. 
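+///
+/// The kernels implement elementwise selection: `dst[i]` receives `x1[i]`
+/// when `cond[i]` converts to `true` and `x2[i]` otherwise, with dedicated
+/// code paths for contiguous and strided layouts.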
+//===---------------------------------------------------------------------===// + +#pragma once +#include +#include +#include +#include + +#include + +#include "dpctl_tensor_types.hpp" +#include "kernels/alignment.hpp" +#include "utils/offset_utils.hpp" +#include "utils/sycl_utils.hpp" +#include "utils/type_utils.hpp" + +namespace dpctl::tensor::kernels::search +{ + +using dpctl::tensor::ssize_t; +using namespace dpctl::tensor::offset_utils; + +using dpctl::tensor::kernels::alignment_utils:: + disabled_sg_loadstore_wrapper_krn; +using dpctl::tensor::kernels::alignment_utils::is_aligned; +using dpctl::tensor::kernels::alignment_utils::required_alignment; + +using dpctl::tensor::sycl_utils::sub_group_load; +using dpctl::tensor::sycl_utils::sub_group_store; + +template +class where_strided_kernel; +template +class where_contig_kernel; + +template +class WhereContigFunctor +{ +private: + std::size_t nelems = 0; + const condT *cond_p = nullptr; + const T *x1_p = nullptr; + const T *x2_p = nullptr; + T *dst_p = nullptr; + +public: + WhereContigFunctor(std::size_t nelems_, + const condT *cond_p_, + const T *x1_p_, + const T *x2_p_, + T *dst_p_) + : nelems(nelems_), cond_p(cond_p_), x1_p(x1_p_), x2_p(x2_p_), + dst_p(dst_p_) + { + } + + void operator()(sycl::nd_item<1> ndit) const + { + static constexpr std::uint8_t nelems_per_wi = n_vecs * vec_sz; + + using dpctl::tensor::type_utils::is_complex; + if constexpr (!enable_sg_loadstore || is_complex::value || + is_complex::value) + { + const std::uint16_t sgSize = + ndit.get_sub_group().get_local_range()[0]; + const std::size_t gid = ndit.get_global_linear_id(); + + const std::uint16_t nelems_per_sg = sgSize * nelems_per_wi; + const std::size_t start = + (gid / sgSize) * (nelems_per_sg - sgSize) + gid; + const std::size_t end = std::min(nelems, start + nelems_per_sg); + for (std::size_t offset = start; offset < end; offset += sgSize) { + using dpctl::tensor::type_utils::convert_impl; + const bool check = convert_impl(cond_p[offset]); + dst_p[offset] = check ? x1_p[offset] : x2_p[offset]; + } + } + else { + auto sg = ndit.get_sub_group(); + const std::uint16_t sgSize = sg.get_max_local_range()[0]; + + const std::size_t base = + nelems_per_wi * (ndit.get_group(0) * ndit.get_local_range(0) + + sg.get_group_id()[0] * sgSize); + + if (base + nelems_per_wi * sgSize < nelems) { + sycl::vec dst_vec; + +#pragma unroll + for (std::uint8_t it = 0; it < n_vecs * vec_sz; it += vec_sz) { + const std::size_t idx = base + it * sgSize; + auto x1_multi_ptr = sycl::address_space_cast< + sycl::access::address_space::global_space, + sycl::access::decorated::yes>(&x1_p[idx]); + auto x2_multi_ptr = sycl::address_space_cast< + sycl::access::address_space::global_space, + sycl::access::decorated::yes>(&x2_p[idx]); + auto cond_multi_ptr = sycl::address_space_cast< + sycl::access::address_space::global_space, + sycl::access::decorated::yes>(&cond_p[idx]); + auto dst_multi_ptr = sycl::address_space_cast< + sycl::access::address_space::global_space, + sycl::access::decorated::yes>(&dst_p[idx]); + + const sycl::vec x1_vec = + sub_group_load(sg, x1_multi_ptr); + const sycl::vec x2_vec = + sub_group_load(sg, x2_multi_ptr); + const sycl::vec cond_vec = + sub_group_load(sg, cond_multi_ptr); +#pragma unroll + for (std::uint8_t k = 0; k < vec_sz; ++k) { + dst_vec[k] = cond_vec[k] ? 
x1_vec[k] : x2_vec[k]; + } + sub_group_store(sg, dst_vec, dst_multi_ptr); + } + } + else { + const std::size_t lane_id = sg.get_local_id()[0]; + for (std::size_t k = base + lane_id; k < nelems; k += sgSize) { + dst_p[k] = cond_p[k] ? x1_p[k] : x2_p[k]; + } + } + } + } +}; + +typedef sycl::event (*where_contig_impl_fn_ptr_t)( + sycl::queue &, + std::size_t, + const char *, + const char *, + const char *, + char *, + const std::vector &); + +template +sycl::event where_contig_impl(sycl::queue &q, + std::size_t nelems, + const char *cond_cp, + const char *x1_cp, + const char *x2_cp, + char *dst_cp, + const std::vector &depends) +{ + const condT *cond_tp = reinterpret_cast(cond_cp); + const T *x1_tp = reinterpret_cast(x1_cp); + const T *x2_tp = reinterpret_cast(x2_cp); + T *dst_tp = reinterpret_cast(dst_cp); + + sycl::event where_ev = q.submit([&](sycl::handler &cgh) { + cgh.depends_on(depends); + + std::size_t lws = 64; + static constexpr std::uint8_t vec_sz = 4u; + static constexpr std::uint8_t n_vecs = 2u; + const std::size_t n_groups = + ((nelems + lws * n_vecs * vec_sz - 1) / (lws * n_vecs * vec_sz)); + const auto gws_range = sycl::range<1>(n_groups * lws); + const auto lws_range = sycl::range<1>(lws); + + if (is_aligned(cond_cp) && + is_aligned(x1_cp) && + is_aligned(x2_cp) && + is_aligned(dst_cp)) + { + static constexpr bool enable_sg_loadstore = true; + using KernelName = where_contig_kernel; + + cgh.parallel_for( + sycl::nd_range<1>(gws_range, lws_range), + WhereContigFunctor(nelems, cond_tp, x1_tp, + x2_tp, dst_tp)); + } + else { + static constexpr bool disable_sg_loadstore = false; + using InnerKernelName = + where_contig_kernel; + using KernelName = + disabled_sg_loadstore_wrapper_krn; + + cgh.parallel_for( + sycl::nd_range<1>(gws_range, lws_range), + WhereContigFunctor(nelems, cond_tp, x1_tp, + x2_tp, dst_tp)); + } + }); + + return where_ev; +} + +template +class WhereStridedFunctor +{ +private: + const T *x1_p = nullptr; + const T *x2_p = nullptr; + T *dst_p = nullptr; + const condT *cond_p = nullptr; + IndexerT indexer; + +public: + WhereStridedFunctor(const condT *cond_p_, + const T *x1_p_, + const T *x2_p_, + T *dst_p_, + const IndexerT &indexer_) + : x1_p(x1_p_), x2_p(x2_p_), dst_p(dst_p_), cond_p(cond_p_), + indexer(indexer_) + { + } + + void operator()(sycl::id<1> id) const + { + std::size_t gid = id[0]; + auto offsets = indexer(static_cast(gid)); + + using dpctl::tensor::type_utils::convert_impl; + bool check = + convert_impl(cond_p[offsets.get_first_offset()]); + + dst_p[offsets.get_fourth_offset()] = + check ? 
x1_p[offsets.get_second_offset()] + : x2_p[offsets.get_third_offset()]; + } +}; + +typedef sycl::event (*where_strided_impl_fn_ptr_t)( + sycl::queue &, + std::size_t, + int, + const char *, + const char *, + const char *, + char *, + const ssize_t *, + ssize_t, + ssize_t, + ssize_t, + ssize_t, + const std::vector &); + +template +sycl::event where_strided_impl(sycl::queue &q, + std::size_t nelems, + int nd, + const char *cond_cp, + const char *x1_cp, + const char *x2_cp, + char *dst_cp, + const ssize_t *shape_strides, + ssize_t x1_offset, + ssize_t x2_offset, + ssize_t cond_offset, + ssize_t dst_offset, + const std::vector &depends) +{ + const condT *cond_tp = reinterpret_cast(cond_cp); + const T *x1_tp = reinterpret_cast(x1_cp); + const T *x2_tp = reinterpret_cast(x2_cp); + T *dst_tp = reinterpret_cast(dst_cp); + + sycl::event where_ev = q.submit([&](sycl::handler &cgh) { + cgh.depends_on(depends); + + const FourOffsets_StridedIndexer indexer{ + nd, cond_offset, x1_offset, x2_offset, dst_offset, shape_strides}; + + cgh.parallel_for< + where_strided_kernel>( + sycl::range<1>(nelems), + WhereStridedFunctor( + cond_tp, x1_tp, x2_tp, dst_tp, indexer)); + }); + + return where_ev; +} + +template +struct WhereStridedFactory +{ + fnT get() + { + fnT fn = where_strided_impl; + return fn; + } +}; + +template +struct WhereContigFactory +{ + fnT get() + { + fnT fn = where_contig_impl; + return fn; + } +}; + +} // namespace dpctl::tensor::kernels::search diff --git a/dpctl_ext/tensor/libtensor/source/clip.cpp b/dpctl_ext/tensor/libtensor/source/clip.cpp new file mode 100644 index 000000000000..3e1c5e8cd262 --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/clip.cpp @@ -0,0 +1,265 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. 
+//***************************************************************************** +// +//===---------------------------------------------------------------------===// +/// +/// \file +/// This file defines Python API for implementation functions of +/// dpctl.tensor.clip +//===---------------------------------------------------------------------===// + +#include +#include +#include +#include +#include + +#include + +#include "dpnp4pybind11.hpp" +#include + +#include "clip.hpp" +#include "kernels/clip.hpp" +#include "simplify_iteration_space.hpp" +#include "utils/memory_overlap.hpp" +#include "utils/offset_utils.hpp" +#include "utils/output_validation.hpp" +#include "utils/sycl_alloc_utils.hpp" +#include "utils/type_dispatch.hpp" + +namespace dpctl::tensor::py_internal +{ + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +using dpctl::tensor::kernels::clip::clip_contig_impl_fn_ptr_t; +using dpctl::tensor::kernels::clip::clip_strided_impl_fn_ptr_t; + +static clip_contig_impl_fn_ptr_t clip_contig_dispatch_vector[td_ns::num_types]; +static clip_strided_impl_fn_ptr_t + clip_strided_dispatch_vector[td_ns::num_types]; + +void init_clip_dispatch_vectors(void) +{ + using namespace td_ns; + using dpctl::tensor::kernels::clip::ClipContigFactory; + DispatchVectorBuilder + dvb1; + dvb1.populate_dispatch_vector(clip_contig_dispatch_vector); + + using dpctl::tensor::kernels::clip::ClipStridedFactory; + DispatchVectorBuilder + dvb2; + dvb2.populate_dispatch_vector(clip_strided_dispatch_vector); +} + +using dpctl::utils::keep_args_alive; + +std::pair + py_clip(const dpctl::tensor::usm_ndarray &src, + const dpctl::tensor::usm_ndarray &min, + const dpctl::tensor::usm_ndarray &max, + const dpctl::tensor::usm_ndarray &dst, + sycl::queue &exec_q, + const std::vector &depends) +{ + + if (!dpctl::utils::queues_are_compatible(exec_q, {src, min, max, dst})) { + throw py::value_error( + "Execution queue is not compatible with allocation queues"); + } + + dpctl::tensor::validation::CheckWritable::throw_if_not_writable(dst); + + int nd = src.get_ndim(); + int min_nd = min.get_ndim(); + int max_nd = max.get_ndim(); + int dst_nd = dst.get_ndim(); + + if (nd != min_nd || nd != max_nd) { + throw py::value_error( + "Input arrays are not of appropriate dimension for clip kernel."); + } + + if (nd != dst_nd) { + throw py::value_error( + "Destination is not of appropriate dimension for clip kernel."); + } + + const py::ssize_t *src_shape = src.get_shape_raw(); + const py::ssize_t *min_shape = min.get_shape_raw(); + const py::ssize_t *max_shape = max.get_shape_raw(); + const py::ssize_t *dst_shape = dst.get_shape_raw(); + + bool shapes_equal(true); + std::size_t nelems(1); + for (int i = 0; i < nd; ++i) { + const auto &sh_i = dst_shape[i]; + nelems *= static_cast(sh_i); + shapes_equal = shapes_equal && (min_shape[i] == sh_i) && + (max_shape[i] == sh_i) && (src_shape[i] == sh_i); + } + + if (!shapes_equal) { + throw py::value_error("Arrays are not of matching shapes."); + } + + if (nelems == 0) { + return std::make_pair(sycl::event{}, sycl::event{}); + } + + auto const &overlap = dpctl::tensor::overlap::MemoryOverlap(); + auto const &same_logical_tensors = + dpctl::tensor::overlap::SameLogicalTensors(); + if ((overlap(dst, src) && !same_logical_tensors(dst, src)) || + (overlap(dst, min) && !same_logical_tensors(dst, min)) || + (overlap(dst, max) && !same_logical_tensors(dst, max))) + { + throw py::value_error("Destination array overlaps with input."); + } + + int min_typenum = min.get_typenum(); + 
int max_typenum = max.get_typenum(); + int src_typenum = src.get_typenum(); + int dst_typenum = dst.get_typenum(); + + auto const &array_types = td_ns::usm_ndarray_types(); + int src_typeid = array_types.typenum_to_lookup_id(src_typenum); + int min_typeid = array_types.typenum_to_lookup_id(min_typenum); + int max_typeid = array_types.typenum_to_lookup_id(max_typenum); + int dst_typeid = array_types.typenum_to_lookup_id(dst_typenum); + + if (src_typeid != dst_typeid || src_typeid != min_typeid || + src_typeid != max_typeid) + { + throw py::value_error("Input, min, max, and destination arrays must " + "have the same data type"); + } + + dpctl::tensor::validation::AmpleMemory::throw_if_not_ample(dst, nelems); + + char *src_data = src.get_data(); + char *min_data = min.get_data(); + char *max_data = max.get_data(); + char *dst_data = dst.get_data(); + + bool is_min_c_contig = min.is_c_contiguous(); + bool is_min_f_contig = min.is_f_contiguous(); + + bool is_max_c_contig = max.is_c_contiguous(); + bool is_max_f_contig = max.is_f_contiguous(); + + bool is_src_c_contig = src.is_c_contiguous(); + bool is_src_f_contig = src.is_f_contiguous(); + + bool is_dst_c_contig = dst.is_c_contiguous(); + bool is_dst_f_contig = dst.is_f_contiguous(); + + bool all_c_contig = (is_min_c_contig && is_max_c_contig && + is_src_c_contig && is_dst_c_contig); + bool all_f_contig = (is_min_f_contig && is_max_f_contig && + is_src_f_contig && is_dst_f_contig); + + if (all_c_contig || all_f_contig) { + auto fn = clip_contig_dispatch_vector[src_typeid]; + + sycl::event clip_ev = + fn(exec_q, nelems, src_data, min_data, max_data, dst_data, depends); + sycl::event ht_ev = + keep_args_alive(exec_q, {src, min, max, dst}, {clip_ev}); + + return std::make_pair(ht_ev, clip_ev); + } + + auto const &src_strides = src.get_strides_vector(); + auto const &min_strides = min.get_strides_vector(); + auto const &max_strides = max.get_strides_vector(); + auto const &dst_strides = dst.get_strides_vector(); + + using shT = std::vector; + shT simplified_shape; + shT simplified_src_strides; + shT simplified_min_strides; + shT simplified_max_strides; + shT simplified_dst_strides; + py::ssize_t src_offset(0); + py::ssize_t min_offset(0); + py::ssize_t max_offset(0); + py::ssize_t dst_offset(0); + + simplify_iteration_space_4( + nd, src_shape, src_strides, min_strides, max_strides, dst_strides, + // outputs + simplified_shape, simplified_src_strides, simplified_min_strides, + simplified_max_strides, simplified_dst_strides, src_offset, min_offset, + max_offset, dst_offset); + + auto fn = clip_strided_dispatch_vector[src_typeid]; + + std::vector host_task_events; + host_task_events.reserve(2); + + using dpctl::tensor::offset_utils::device_allocate_and_pack; + auto ptr_size_event_tuple = device_allocate_and_pack( + exec_q, host_task_events, + // common shape and strides + simplified_shape, simplified_src_strides, simplified_min_strides, + simplified_max_strides, simplified_dst_strides); + auto packed_shape_strides_owner = + std::move(std::get<0>(ptr_size_event_tuple)); + sycl::event copy_shape_strides_ev = std::get<2>(ptr_size_event_tuple); + const py::ssize_t *packed_shape_strides = packed_shape_strides_owner.get(); + + std::vector all_deps; + all_deps.reserve(depends.size() + 1); + all_deps.insert(all_deps.end(), depends.begin(), depends.end()); + all_deps.push_back(copy_shape_strides_ev); + + assert(all_deps.size() == depends.size() + 1); + + sycl::event clip_ev = fn(exec_q, nelems, nd, src_data, min_data, max_data, + dst_data, 
packed_shape_strides, src_offset, + min_offset, max_offset, dst_offset, all_deps); + + // free packed temporaries + sycl::event temporaries_cleanup_ev = + dpctl::tensor::alloc_utils::async_smart_free( + exec_q, {clip_ev}, packed_shape_strides_owner); + host_task_events.push_back(temporaries_cleanup_ev); + + sycl::event arg_cleanup_ev = + keep_args_alive(exec_q, {src, min, max, dst}, host_task_events); + + return std::make_pair(arg_cleanup_ev, clip_ev); +} + +} // namespace dpctl::tensor::py_internal diff --git a/dpctl_ext/tensor/libtensor/source/clip.hpp b/dpctl_ext/tensor/libtensor/source/clip.hpp new file mode 100644 index 000000000000..de8f0e559b6e --- /dev/null +++ b/dpctl_ext/tensor/libtensor/source/clip.hpp @@ -0,0 +1,57 @@ +//***************************************************************************** +// Copyright (c) 2026, Intel Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// - Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// - Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. 
+//*****************************************************************************
+//
+//===---------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines Python API for implementation functions of
+/// dpctl.tensor.clip
+//===---------------------------------------------------------------------===//
+
+#pragma once
+#include <utility>
+#include <vector>
+
+#include <sycl/sycl.hpp>
+
+#include "dpnp4pybind11.hpp"
+
+namespace dpctl::tensor::py_internal
+{
+
+extern std::pair<sycl::event, sycl::event>
+    py_clip(const dpctl::tensor::usm_ndarray &src,
+            const dpctl::tensor::usm_ndarray &min,
+            const dpctl::tensor::usm_ndarray &max,
+            const dpctl::tensor::usm_ndarray &dst,
+            sycl::queue &exec_q,
+            const std::vector<sycl::event> &depends);
+
+extern void init_clip_dispatch_vectors(void);
+
+} // namespace dpctl::tensor::py_internal
diff --git a/dpctl_ext/tensor/libtensor/source/repeat.cpp b/dpctl_ext/tensor/libtensor/source/repeat.cpp
new file mode 100644
index 000000000000..919f51f9a4d1
--- /dev/null
+++ b/dpctl_ext/tensor/libtensor/source/repeat.cpp
@@ -0,0 +1,820 @@
+//*****************************************************************************
+// Copyright (c) 2026, Intel Corporation
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// - Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// - Neither the name of the copyright holder nor the names of its contributors
+// may be used to endorse or promote products derived from this software
+// without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+// THE POSSIBILITY OF SUCH DAMAGE.
+//***************************************************************************** +// +//===--------------------------------------------------------------------===// +/// +/// \file +/// This file defines functions of dpctl.tensor._tensor_impl extensions +//===--------------------------------------------------------------------===// + +#include +#include +#include +#include +#include +#include + +#include + +#include "dpnp4pybind11.hpp" +#include + +#include "kernels/repeat.hpp" +#include "utils/memory_overlap.hpp" +#include "utils/offset_utils.hpp" +#include "utils/output_validation.hpp" +#include "utils/sycl_alloc_utils.hpp" +#include "utils/type_dispatch.hpp" + +#include "simplify_iteration_space.hpp" + +namespace dpctl::tensor::py_internal +{ + +namespace py = pybind11; +namespace td_ns = dpctl::tensor::type_dispatch; + +using dpctl::tensor::kernels::repeat::repeat_by_sequence_fn_ptr_t; +static repeat_by_sequence_fn_ptr_t + repeat_by_sequence_dispatch_vector[td_ns::num_types]; + +using dpctl::tensor::kernels::repeat::repeat_by_sequence_1d_fn_ptr_t; +static repeat_by_sequence_1d_fn_ptr_t + repeat_by_sequence_1d_dispatch_vector[td_ns::num_types]; + +using dpctl::tensor::kernels::repeat::repeat_by_scalar_fn_ptr_t; +static repeat_by_scalar_fn_ptr_t + repeat_by_scalar_dispatch_vector[td_ns::num_types]; + +using dpctl::tensor::kernels::repeat::repeat_by_scalar_1d_fn_ptr_t; +static repeat_by_scalar_1d_fn_ptr_t + repeat_by_scalar_1d_dispatch_vector[td_ns::num_types]; + +void init_repeat_dispatch_vectors(void) +{ + using dpctl::tensor::kernels::repeat::RepeatSequenceFactory; + td_ns::DispatchVectorBuilder + dvb1; + dvb1.populate_dispatch_vector(repeat_by_sequence_dispatch_vector); + + using dpctl::tensor::kernels::repeat::RepeatSequence1DFactory; + td_ns::DispatchVectorBuilder + dvb2; + dvb2.populate_dispatch_vector(repeat_by_sequence_1d_dispatch_vector); + + using dpctl::tensor::kernels::repeat::RepeatScalarFactory; + td_ns::DispatchVectorBuilder + dvb3; + dvb3.populate_dispatch_vector(repeat_by_scalar_dispatch_vector); + + using dpctl::tensor::kernels::repeat::RepeatScalar1DFactory; + td_ns::DispatchVectorBuilder + dvb4; + dvb4.populate_dispatch_vector(repeat_by_scalar_1d_dispatch_vector); +} + +std::pair + py_repeat_by_sequence(const dpctl::tensor::usm_ndarray &src, + const dpctl::tensor::usm_ndarray &dst, + const dpctl::tensor::usm_ndarray &reps, + const dpctl::tensor::usm_ndarray &cumsum, + int axis, + sycl::queue &exec_q, + const std::vector &depends) +{ + int src_nd = src.get_ndim(); + if (axis < 0 || (axis + 1 > src_nd && src_nd > 0) || + (axis > 0 && src_nd == 0)) { + throw py::value_error("Specified axis is invalid."); + } + + int dst_nd = dst.get_ndim(); + if ((src_nd != dst_nd && src_nd > 0) || (src_nd == 0 && dst_nd > 1)) { + throw py::value_error("Number of dimensions of source and destination " + "arrays is not consistent"); + } + + int reps_nd = reps.get_ndim(); + if (reps_nd != 1) { + throw py::value_error("`reps` array must be 1-dimensional"); + } + + if (cumsum.get_ndim() != 1) { + throw py::value_error("`cumsum` array must be 1-dimensional."); + } + + if (!cumsum.is_c_contiguous()) { + throw py::value_error("Expecting `cumsum` array to be C-contiguous."); + } + + if (!dpctl::utils::queues_are_compatible(exec_q, {src, reps, cumsum, dst})) + { + throw py::value_error( + "Execution queue is not compatible with allocation queues"); + } + + dpctl::tensor::validation::CheckWritable::throw_if_not_writable(dst); + + std::size_t reps_sz = reps.get_size(); + std::size_t cumsum_sz = 
cumsum.get_size(); + + const py::ssize_t *src_shape = src.get_shape_raw(); + const py::ssize_t *dst_shape = dst.get_shape_raw(); + bool same_orthog_dims(true); + std::size_t orthog_nelems(1); // number of orthogonal iterations + for (auto i = 0; i < axis; ++i) { + auto src_sh_i = src_shape[i]; + orthog_nelems *= src_sh_i; + same_orthog_dims = same_orthog_dims && (src_sh_i == dst_shape[i]); + } + for (auto i = axis + 1; i < src_nd; ++i) { + auto src_sh_i = src_shape[i]; + orthog_nelems *= src_sh_i; + same_orthog_dims = same_orthog_dims && (src_sh_i == dst_shape[i]); + } + + std::size_t src_axis_nelems(1); + if (src_nd > 0) { + src_axis_nelems = src_shape[axis]; + } + std::size_t dst_axis_nelems(dst_shape[axis]); + + // shape at repeated axis must be equal to the sum of reps + if (!same_orthog_dims || src_axis_nelems != reps_sz || + src_axis_nelems != cumsum_sz) + { + throw py::value_error("Inconsistent array dimensions"); + } + + if (orthog_nelems == 0 || src_axis_nelems == 0) { + return std::make_pair(sycl::event(), sycl::event()); + } + + dpctl::tensor::validation::AmpleMemory::throw_if_not_ample( + dst, orthog_nelems * dst_axis_nelems); + + auto const &overlap = dpctl::tensor::overlap::MemoryOverlap(); + // check that dst does not intersect with src or reps + if (overlap(dst, src) || overlap(dst, reps) || overlap(dst, cumsum)) { + throw py::value_error("Destination array overlaps with inputs"); + } + + int src_typenum = src.get_typenum(); + int dst_typenum = dst.get_typenum(); + int reps_typenum = reps.get_typenum(); + int cumsum_typenum = cumsum.get_typenum(); + + auto const &array_types = td_ns::usm_ndarray_types(); + int src_typeid = array_types.typenum_to_lookup_id(src_typenum); + int dst_typeid = array_types.typenum_to_lookup_id(dst_typenum); + int reps_typeid = array_types.typenum_to_lookup_id(reps_typenum); + int cumsum_typeid = array_types.typenum_to_lookup_id(cumsum_typenum); + + if (src_typeid != dst_typeid) { + throw py::value_error( + "Destination array must have the same elemental data type"); + } + + static constexpr int int64_typeid = + static_cast(td_ns::typenum_t::INT64); + if (cumsum_typeid != int64_typeid) { + throw py::value_error( + "Unexpected data type of `cumsum` array, expecting " + "'int64'"); + } + + if (reps_typeid != cumsum_typeid) { + throw py::value_error("`reps` array must have the same elemental " + "data type as cumsum"); + } + + const char *src_data_p = src.get_data(); + const char *reps_data_p = reps.get_data(); + const char *cumsum_data_p = cumsum.get_data(); + char *dst_data_p = dst.get_data(); + + auto src_shape_vec = src.get_shape_vector(); + auto src_strides_vec = src.get_strides_vector(); + + auto dst_shape_vec = dst.get_shape_vector(); + auto dst_strides_vec = dst.get_strides_vector(); + + auto reps_shape_vec = reps.get_shape_vector(); + auto reps_strides_vec = reps.get_strides_vector(); + + sycl::event repeat_ev; + std::vector host_task_events{}; + if (axis == 0 && src_nd < 2) { + // empty orthogonal directions + + auto fn = repeat_by_sequence_1d_dispatch_vector[src_typeid]; + + assert(dst_shape_vec.size() == 1); + assert(dst_strides_vec.size() == 1); + + if (src_nd == 0) { + src_shape_vec = {0}; + src_strides_vec = {0}; + } + + using dpctl::tensor::offset_utils::device_allocate_and_pack; + auto ptr_size_event_tuple1 = device_allocate_and_pack( + exec_q, host_task_events, src_shape_vec, src_strides_vec); + auto packed_src_shape_strides_owner = + std::move(std::get<0>(ptr_size_event_tuple1)); + sycl::event copy_shapes_strides_ev = 
std::get<2>(ptr_size_event_tuple1); + const py::ssize_t *packed_src_shape_strides = + packed_src_shape_strides_owner.get(); + + std::vector all_deps; + all_deps.reserve(depends.size() + 1); + all_deps.insert(all_deps.end(), depends.begin(), depends.end()); + all_deps.push_back(copy_shapes_strides_ev); + + assert(all_deps.size() == depends.size() + 1); + + repeat_ev = + fn(exec_q, src_axis_nelems, src_data_p, dst_data_p, reps_data_p, + cumsum_data_p, src_nd, packed_src_shape_strides, + dst_shape_vec[0], dst_strides_vec[0], reps_shape_vec[0], + reps_strides_vec[0], all_deps); + + sycl::event cleanup_tmp_allocations_ev = + dpctl::tensor::alloc_utils::async_smart_free( + exec_q, {repeat_ev}, packed_src_shape_strides_owner); + host_task_events.push_back(cleanup_tmp_allocations_ev); + } + else { + // non-empty orthogonal directions + + auto fn = repeat_by_sequence_dispatch_vector[src_typeid]; + + int orthog_nd = src_nd - 1; + + using shT = std::vector; + shT orthog_src_shape; + shT orthog_src_strides; + shT axis_src_shape; + shT axis_src_stride; + split_iteration_space(src_shape_vec, src_strides_vec, axis, axis + 1, + orthog_src_shape, axis_src_shape, + orthog_src_strides, axis_src_stride); + + shT orthog_dst_shape; + shT orthog_dst_strides; + shT axis_dst_shape; + shT axis_dst_stride; + split_iteration_space(dst_shape_vec, dst_strides_vec, axis, axis + 1, + orthog_dst_shape, axis_dst_shape, + orthog_dst_strides, axis_dst_stride); + + assert(orthog_src_shape.size() == static_cast(orthog_nd)); + assert(orthog_dst_shape.size() == static_cast(orthog_nd)); + assert(std::equal(orthog_src_shape.begin(), orthog_src_shape.end(), + orthog_dst_shape.begin())); + + shT simplified_orthog_shape; + shT simplified_orthog_src_strides; + shT simplified_orthog_dst_strides; + + const py::ssize_t *_shape = orthog_src_shape.data(); + + py::ssize_t orthog_src_offset(0); + py::ssize_t orthog_dst_offset(0); + simplify_iteration_space( + orthog_nd, _shape, orthog_src_strides, orthog_dst_strides, + // output + simplified_orthog_shape, simplified_orthog_src_strides, + simplified_orthog_dst_strides, orthog_src_offset, + orthog_dst_offset); + + using dpctl::tensor::offset_utils::device_allocate_and_pack; + auto ptr_size_event_tuple1 = device_allocate_and_pack( + exec_q, host_task_events, simplified_orthog_shape, + simplified_orthog_src_strides, simplified_orthog_dst_strides); + auto packed_shapes_strides_owner = + std::move(std::get<0>(ptr_size_event_tuple1)); + sycl::event copy_shapes_strides_ev = std::get<2>(ptr_size_event_tuple1); + const py::ssize_t *packed_shapes_strides = + packed_shapes_strides_owner.get(); + + std::vector all_deps; + all_deps.reserve(depends.size() + 1); + all_deps.insert(all_deps.end(), depends.begin(), depends.end()); + all_deps.push_back(copy_shapes_strides_ev); + + assert(all_deps.size() == depends.size() + 1); + + repeat_ev = fn(exec_q, orthog_nelems, src_axis_nelems, src_data_p, + dst_data_p, reps_data_p, cumsum_data_p, + // data to build orthog indexer + orthog_nd, packed_shapes_strides, orthog_src_offset, + orthog_dst_offset, + // data to build indexers along repeated axis in src + axis_src_shape[0], axis_src_stride[0], + // data to build indexer along repeated axis in dst + axis_dst_shape[0], axis_dst_stride[0], + // data to build indexer for reps array + reps_shape_vec[0], reps_strides_vec[0], all_deps); + + sycl::event cleanup_tmp_allocations_ev = + dpctl::tensor::alloc_utils::async_smart_free( + exec_q, {repeat_ev}, packed_shapes_strides_owner); + 
host_task_events.push_back(cleanup_tmp_allocations_ev); + } + + sycl::event py_obj_management_host_task_ev = dpctl::utils::keep_args_alive( + exec_q, {src, reps, cumsum, dst}, host_task_events); + + return std::make_pair(py_obj_management_host_task_ev, repeat_ev); +} + +std::pair + py_repeat_by_sequence(const dpctl::tensor::usm_ndarray &src, + const dpctl::tensor::usm_ndarray &dst, + const dpctl::tensor::usm_ndarray &reps, + const dpctl::tensor::usm_ndarray &cumsum, + sycl::queue &exec_q, + const std::vector &depends) +{ + + int dst_nd = dst.get_ndim(); + if (dst_nd != 1) { + throw py::value_error( + "`dst` array must be 1-dimensional when repeating a full array"); + } + + int reps_nd = reps.get_ndim(); + if (reps_nd != 1) { + throw py::value_error("`reps` array must be 1-dimensional"); + } + + if (cumsum.get_ndim() != 1) { + throw py::value_error("`cumsum` array must be 1-dimensional."); + } + + if (!cumsum.is_c_contiguous()) { + throw py::value_error("Expecting `cumsum` array to be C-contiguous."); + } + + if (!dpctl::utils::queues_are_compatible(exec_q, {src, reps, cumsum, dst})) + { + throw py::value_error( + "Execution queue is not compatible with allocation queues"); + } + + dpctl::tensor::validation::CheckWritable::throw_if_not_writable(dst); + + std::size_t src_sz = src.get_size(); + std::size_t reps_sz = reps.get_size(); + std::size_t cumsum_sz = cumsum.get_size(); + + // shape at repeated axis must be equal to the sum of reps + if (src_sz != reps_sz || src_sz != cumsum_sz) { + throw py::value_error("Inconsistent array dimensions"); + } + + if (src_sz == 0) { + return std::make_pair(sycl::event(), sycl::event()); + } + + dpctl::tensor::validation::AmpleMemory::throw_if_not_ample(dst, + dst.get_size()); + + auto const &overlap = dpctl::tensor::overlap::MemoryOverlap(); + // check that dst does not intersect with src, cumsum, or reps + if (overlap(dst, src) || overlap(dst, reps) || overlap(dst, cumsum)) { + throw py::value_error("Destination array overlaps with inputs"); + } + + int src_typenum = src.get_typenum(); + int dst_typenum = dst.get_typenum(); + int reps_typenum = reps.get_typenum(); + int cumsum_typenum = cumsum.get_typenum(); + + auto const &array_types = td_ns::usm_ndarray_types(); + int src_typeid = array_types.typenum_to_lookup_id(src_typenum); + int dst_typeid = array_types.typenum_to_lookup_id(dst_typenum); + int reps_typeid = array_types.typenum_to_lookup_id(reps_typenum); + int cumsum_typeid = array_types.typenum_to_lookup_id(cumsum_typenum); + + if (src_typeid != dst_typeid) { + throw py::value_error( + "Destination array must have the same elemental data type"); + } + + static constexpr int int64_typeid = + static_cast(td_ns::typenum_t::INT64); + if (cumsum_typeid != int64_typeid) { + throw py::value_error( + "Unexpected data type of `cumsum` array, expecting " + "'int64'"); + } + + if (reps_typeid != cumsum_typeid) { + throw py::value_error("`reps` array must have the same elemental " + "data type as cumsum"); + } + + const char *src_data_p = src.get_data(); + const char *reps_data_p = reps.get_data(); + const char *cumsum_data_p = cumsum.get_data(); + char *dst_data_p = dst.get_data(); + + int src_nd = src.get_ndim(); + auto src_shape_vec = src.get_shape_vector(); + auto src_strides_vec = src.get_strides_vector(); + if (src_nd == 0) { + src_shape_vec = {0}; + src_strides_vec = {0}; + } + + auto dst_shape_vec = dst.get_shape_vector(); + auto dst_strides_vec = dst.get_strides_vector(); + + auto reps_shape_vec = reps.get_shape_vector(); + auto reps_strides_vec = 
reps.get_strides_vector(); + + std::vector host_task_events{}; + + auto fn = repeat_by_sequence_1d_dispatch_vector[src_typeid]; + + using dpctl::tensor::offset_utils::device_allocate_and_pack; + auto ptr_size_event_tuple1 = device_allocate_and_pack( + exec_q, host_task_events, src_shape_vec, src_strides_vec); + auto packed_src_shapes_strides_owner = + std::move(std::get<0>(ptr_size_event_tuple1)); + sycl::event copy_shapes_strides_ev = std::get<2>(ptr_size_event_tuple1); + const py::ssize_t *packed_src_shapes_strides = + packed_src_shapes_strides_owner.get(); + + std::vector all_deps; + all_deps.reserve(depends.size() + 1); + all_deps.insert(all_deps.end(), depends.begin(), depends.end()); + all_deps.push_back(copy_shapes_strides_ev); + + assert(all_deps.size() == depends.size() + 1); + + sycl::event repeat_ev = fn( + exec_q, src_sz, src_data_p, dst_data_p, reps_data_p, cumsum_data_p, + src_nd, packed_src_shapes_strides, dst_shape_vec[0], dst_strides_vec[0], + reps_shape_vec[0], reps_strides_vec[0], all_deps); + + sycl::event cleanup_tmp_allocations_ev = + dpctl::tensor::alloc_utils::async_smart_free( + exec_q, {repeat_ev}, packed_src_shapes_strides_owner); + host_task_events.push_back(cleanup_tmp_allocations_ev); + + sycl::event py_obj_management_host_task_ev = dpctl::utils::keep_args_alive( + exec_q, {src, reps, cumsum, dst}, host_task_events); + + return std::make_pair(py_obj_management_host_task_ev, repeat_ev); +} + +std::pair + py_repeat_by_scalar(const dpctl::tensor::usm_ndarray &src, + const dpctl::tensor::usm_ndarray &dst, + const py::ssize_t reps, + int axis, + sycl::queue &exec_q, + const std::vector &depends) +{ + int src_nd = src.get_ndim(); + if (axis < 0 || (axis + 1 > src_nd && src_nd > 0) || + (axis > 0 && src_nd == 0)) { + throw py::value_error("Specified axis is invalid."); + } + + int dst_nd = dst.get_ndim(); + if ((src_nd != dst_nd && src_nd > 0) || (src_nd == 0 && dst_nd > 1)) { + throw py::value_error("Number of dimensions of source and destination " + "arrays is not consistent"); + } + + if (!dpctl::utils::queues_are_compatible(exec_q, {src, dst})) { + throw py::value_error( + "Execution queue is not compatible with allocation queues"); + } + + dpctl::tensor::validation::CheckWritable::throw_if_not_writable(dst); + + const py::ssize_t *src_shape = src.get_shape_raw(); + const py::ssize_t *dst_shape = dst.get_shape_raw(); + bool same_orthog_dims(true); + std::size_t orthog_nelems(1); // number of orthogonal iterations + for (auto i = 0; i < axis; ++i) { + auto src_sh_i = src_shape[i]; + orthog_nelems *= src_sh_i; + same_orthog_dims = same_orthog_dims && (src_sh_i == dst_shape[i]); + } + for (auto i = axis + 1; i < src_nd; ++i) { + auto src_sh_i = src_shape[i]; + orthog_nelems *= src_sh_i; + same_orthog_dims = same_orthog_dims && (src_sh_i == dst_shape[i]); + } + + std::size_t src_axis_nelems(1); + if (src_nd > 0) { + src_axis_nelems = src_shape[axis]; + } + std::size_t dst_axis_nelems(dst_shape[axis]); + + // shape at repeated axis must be equal to the shape of src at the axis * + // reps + if (!same_orthog_dims || (src_axis_nelems * reps) != dst_axis_nelems) { + throw py::value_error("Inconsistent array dimensions"); + } + + if (orthog_nelems == 0 || src_axis_nelems == 0) { + return std::make_pair(sycl::event(), sycl::event()); + } + + dpctl::tensor::validation::AmpleMemory::throw_if_not_ample( + dst, orthog_nelems * (src_axis_nelems * reps)); + + auto const &overlap = dpctl::tensor::overlap::MemoryOverlap(); + // check that dst does not intersect with src + if 
(overlap(dst, src)) { + throw py::value_error("Destination array overlaps with inputs"); + } + + int src_typenum = src.get_typenum(); + int dst_typenum = dst.get_typenum(); + + auto const &array_types = td_ns::usm_ndarray_types(); + int src_typeid = array_types.typenum_to_lookup_id(src_typenum); + int dst_typeid = array_types.typenum_to_lookup_id(dst_typenum); + + if (src_typeid != dst_typeid) { + throw py::value_error( + "Destination array must have the same elemental data type"); + } + + const char *src_data_p = src.get_data(); + char *dst_data_p = dst.get_data(); + + auto src_shape_vec = src.get_shape_vector(); + auto src_strides_vec = src.get_strides_vector(); + + auto dst_shape_vec = dst.get_shape_vector(); + auto dst_strides_vec = dst.get_strides_vector(); + + sycl::event repeat_ev; + std::vector host_task_events{}; + if (axis == 0 && src_nd < 2) { + // empty orthogonal directions + + auto fn = repeat_by_scalar_1d_dispatch_vector[src_typeid]; + + assert(dst_shape_vec.size() == 1); + assert(dst_strides_vec.size() == 1); + + if (src_nd == 0) { + src_shape_vec = {0}; + src_strides_vec = {0}; + } + + using dpctl::tensor::offset_utils::device_allocate_and_pack; + auto ptr_size_event_tuple1 = device_allocate_and_pack( + exec_q, host_task_events, src_shape_vec, src_strides_vec); + auto packed_src_shape_strides_owner = + std::move(std::get<0>(ptr_size_event_tuple1)); + sycl::event copy_shapes_strides_ev = std::get<2>(ptr_size_event_tuple1); + const py::ssize_t *packed_src_shape_strides = + packed_src_shape_strides_owner.get(); + + std::vector all_deps; + all_deps.reserve(depends.size() + 1); + all_deps.insert(all_deps.end(), depends.begin(), depends.end()); + all_deps.push_back(copy_shapes_strides_ev); + + assert(all_deps.size() == depends.size() + 1); + + repeat_ev = fn(exec_q, dst_axis_nelems, src_data_p, dst_data_p, reps, + src_nd, packed_src_shape_strides, dst_shape_vec[0], + dst_strides_vec[0], all_deps); + + sycl::event cleanup_tmp_allocations_ev = + dpctl::tensor::alloc_utils::async_smart_free( + exec_q, {repeat_ev}, packed_src_shape_strides_owner); + + host_task_events.push_back(cleanup_tmp_allocations_ev); + } + else { + // non-empty orthogonal directions + + auto fn = repeat_by_scalar_dispatch_vector[src_typeid]; + + int orthog_nd = src_nd - 1; + + using shT = std::vector; + shT orthog_src_shape; + shT orthog_src_strides; + shT axis_src_shape; + shT axis_src_stride; + split_iteration_space(src_shape_vec, src_strides_vec, axis, axis + 1, + orthog_src_shape, axis_src_shape, + orthog_src_strides, axis_src_stride); + + shT orthog_dst_shape; + shT orthog_dst_strides; + shT axis_dst_shape; + shT axis_dst_stride; + split_iteration_space(dst_shape_vec, dst_strides_vec, axis, axis + 1, + orthog_dst_shape, axis_dst_shape, + orthog_dst_strides, axis_dst_stride); + + assert(orthog_src_shape.size() == static_cast(orthog_nd)); + assert(orthog_dst_shape.size() == static_cast(orthog_nd)); + assert(std::equal(orthog_src_shape.begin(), orthog_src_shape.end(), + orthog_dst_shape.begin())); + + shT simplified_orthog_shape; + shT simplified_orthog_src_strides; + shT simplified_orthog_dst_strides; + + const py::ssize_t *_shape = orthog_src_shape.data(); + + py::ssize_t orthog_src_offset(0); + py::ssize_t orthog_dst_offset(0); + + simplify_iteration_space( + orthog_nd, _shape, orthog_src_strides, orthog_dst_strides, + // output + simplified_orthog_shape, simplified_orthog_src_strides, + simplified_orthog_dst_strides, orthog_src_offset, + orthog_dst_offset); + + using 
dpctl::tensor::offset_utils::device_allocate_and_pack; + auto ptr_size_event_tuple1 = device_allocate_and_pack( + exec_q, host_task_events, simplified_orthog_shape, + simplified_orthog_src_strides, simplified_orthog_dst_strides); + auto packed_shapes_strides_owner = + std::move(std::get<0>(ptr_size_event_tuple1)); + sycl::event copy_shapes_strides_ev = std::get<2>(ptr_size_event_tuple1); + const py::ssize_t *packed_shapes_strides = + packed_shapes_strides_owner.get(); + + std::vector all_deps; + all_deps.reserve(depends.size() + 1); + all_deps.insert(all_deps.end(), depends.begin(), depends.end()); + all_deps.push_back(copy_shapes_strides_ev); + + assert(all_deps.size() == depends.size() + 1); + + repeat_ev = fn(exec_q, orthog_nelems, dst_axis_nelems, src_data_p, + dst_data_p, reps, + // data to build orthog indexer + orthog_nd, packed_shapes_strides, orthog_src_offset, + orthog_dst_offset, + // data to build indexer along repeated axis in src + axis_src_shape[0], axis_src_stride[0], + // data to build indexer along repeated axis in dst + axis_dst_shape[0], axis_dst_stride[0], all_deps); + + sycl::event cleanup_tmp_allocations_ev = + dpctl::tensor::alloc_utils::async_smart_free( + exec_q, {repeat_ev}, packed_shapes_strides_owner); + host_task_events.push_back(cleanup_tmp_allocations_ev); + } + + sycl::event py_obj_management_host_task_ev = + dpctl::utils::keep_args_alive(exec_q, {src, dst}, host_task_events); + + return std::make_pair(py_obj_management_host_task_ev, repeat_ev); +} + +std::pair + py_repeat_by_scalar(const dpctl::tensor::usm_ndarray &src, + const dpctl::tensor::usm_ndarray &dst, + const py::ssize_t reps, + sycl::queue &exec_q, + const std::vector &depends) +{ + int dst_nd = dst.get_ndim(); + if (dst_nd != 1) { + throw py::value_error( + "`dst` array must be 1-dimensional when repeating a full array"); + } + + if (!dpctl::utils::queues_are_compatible(exec_q, {src, dst})) { + throw py::value_error( + "Execution queue is not compatible with allocation queues"); + } + + dpctl::tensor::validation::CheckWritable::throw_if_not_writable(dst); + + std::size_t src_sz = src.get_size(); + std::size_t dst_sz = dst.get_size(); + + // shape at repeated axis must be equal to the shape of src at the axis * + // reps + if ((src_sz * reps) != dst_sz) { + throw py::value_error("Inconsistent array dimensions"); + } + + if (src_sz == 0) { + return std::make_pair(sycl::event(), sycl::event()); + } + + dpctl::tensor::validation::AmpleMemory::throw_if_not_ample(dst, + src_sz * reps); + + auto const &overlap = dpctl::tensor::overlap::MemoryOverlap(); + // check that dst does not intersect with src + if (overlap(dst, src)) { + throw py::value_error("Destination array overlaps with inputs"); + } + + int src_typenum = src.get_typenum(); + int dst_typenum = dst.get_typenum(); + + auto const &array_types = td_ns::usm_ndarray_types(); + int src_typeid = array_types.typenum_to_lookup_id(src_typenum); + int dst_typeid = array_types.typenum_to_lookup_id(dst_typenum); + + if (src_typeid != dst_typeid) { + throw py::value_error( + "Destination array must have the same elemental data type"); + } + + const char *src_data_p = src.get_data(); + char *dst_data_p = dst.get_data(); + + int src_nd = src.get_ndim(); + auto src_shape_vec = src.get_shape_vector(); + auto src_strides_vec = src.get_strides_vector(); + + if (src_nd == 0) { + src_shape_vec = {0}; + src_strides_vec = {0}; + } + + auto dst_shape_vec = dst.get_shape_vector(); + auto dst_strides_vec = dst.get_strides_vector(); + + std::vector host_task_events{}; 
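// Semantics being implemented in this overload, on hypothetical values:
// repeating src = {a, b} with reps = 3 yields dst = {a, a, a, b, b, b}, so
// dst_sz == src_sz * reps, which is exactly what the dimension check above
// enforced. A host-side model of the kernel launched below:
//
//     for (std::size_t i = 0; i < src_sz; ++i)
//         for (py::ssize_t r = 0; r < reps; ++r)
//             dst[i * reps + r] = src[i];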
+
+    auto fn = repeat_by_scalar_1d_dispatch_vector[src_typeid];
+
+    using dpctl::tensor::offset_utils::device_allocate_and_pack;
+    auto ptr_size_event_tuple1 = device_allocate_and_pack<py::ssize_t>(
+        exec_q, host_task_events, src_shape_vec, src_strides_vec);
+    auto packed_src_shape_strides_owner =
+        std::move(std::get<0>(ptr_size_event_tuple1));
+    sycl::event copy_shapes_strides_ev = std::get<2>(ptr_size_event_tuple1);
+    const py::ssize_t *packed_src_shape_strides =
+        packed_src_shape_strides_owner.get();
+
+    std::vector<sycl::event> all_deps;
+    all_deps.reserve(depends.size() + 1);
+    all_deps.insert(all_deps.end(), depends.begin(), depends.end());
+    all_deps.push_back(copy_shapes_strides_ev);
+
+    assert(all_deps.size() == depends.size() + 1);
+
+    sycl::event repeat_ev = fn(exec_q, dst_sz, src_data_p, dst_data_p, reps,
+                               src_nd, packed_src_shape_strides,
+                               dst_shape_vec[0], dst_strides_vec[0], all_deps);
+
+    sycl::event cleanup_tmp_allocations_ev =
+        dpctl::tensor::alloc_utils::async_smart_free(
+            exec_q, {repeat_ev}, packed_src_shape_strides_owner);
+    host_task_events.push_back(cleanup_tmp_allocations_ev);
+
+    sycl::event py_obj_management_host_task_ev =
+        dpctl::utils::keep_args_alive(exec_q, {src, dst}, host_task_events);
+
+    return std::make_pair(py_obj_management_host_task_ev, repeat_ev);
+}
+
+} // namespace dpctl::tensor::py_internal
diff --git a/dpctl_ext/tensor/libtensor/source/repeat.hpp b/dpctl_ext/tensor/libtensor/source/repeat.hpp
new file mode 100644
index 000000000000..5835377fb29c
--- /dev/null
+++ b/dpctl_ext/tensor/libtensor/source/repeat.hpp
@@ -0,0 +1,83 @@
+//*****************************************************************************
+// Copyright (c) 2026, Intel Corporation
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// - Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// - Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// - Neither the name of the copyright holder nor the names of its contributors
+//   may be used to endorse or promote products derived from this software
+//   without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+// THE POSSIBILITY OF SUCH DAMAGE.
+//*****************************************************************************
+//
+//===--------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines functions of dpctl.tensor._tensor_impl extensions
+//===--------------------------------------------------------------------===//
+
+#pragma once
+#include <sycl/sycl.hpp>
+#include <utility>
+
+#include <vector>
+
+#include "dpnp4pybind11.hpp"
+#include <pybind11/pybind11.h>
+
+namespace py = pybind11;
+
+namespace dpctl::tensor::py_internal
+{
+
+extern void init_repeat_dispatch_vectors(void);
+
+extern std::pair<sycl::event, sycl::event>
+    py_repeat_by_sequence(const dpctl::tensor::usm_ndarray &src,
+                          const dpctl::tensor::usm_ndarray &dst,
+                          const dpctl::tensor::usm_ndarray &reps,
+                          const dpctl::tensor::usm_ndarray &cumsum,
+                          int axis,
+                          sycl::queue &exec_q,
+                          const std::vector<sycl::event> &depends);
+
+extern std::pair<sycl::event, sycl::event>
+    py_repeat_by_sequence(const dpctl::tensor::usm_ndarray &src,
+                          const dpctl::tensor::usm_ndarray &dst,
+                          const dpctl::tensor::usm_ndarray &reps,
+                          const dpctl::tensor::usm_ndarray &cumsum,
+                          sycl::queue &exec_q,
+                          const std::vector<sycl::event> &depends);
+
+extern std::pair<sycl::event, sycl::event>
+    py_repeat_by_scalar(const dpctl::tensor::usm_ndarray &src,
+                        const dpctl::tensor::usm_ndarray &dst,
+                        const py::ssize_t reps,
+                        int axis,
+                        sycl::queue &exec_q,
+                        const std::vector<sycl::event> &depends);
+
+extern std::pair<sycl::event, sycl::event>
+    py_repeat_by_scalar(const dpctl::tensor::usm_ndarray &src,
+                        const dpctl::tensor::usm_ndarray &dst,
+                        const py::ssize_t reps,
+                        sycl::queue &exec_q,
+                        const std::vector<sycl::event> &depends);
+
+} // namespace dpctl::tensor::py_internal
diff --git a/dpctl_ext/tensor/libtensor/source/tensor_ctors.cpp b/dpctl_ext/tensor/libtensor/source/tensor_ctors.cpp
index 7b151c773fe0..5e5b07c087f8 100644
--- a/dpctl_ext/tensor/libtensor/source/tensor_ctors.cpp
+++ b/dpctl_ext/tensor/libtensor/source/tensor_ctors.cpp
@@ -45,7 +45,7 @@
 #include "accumulators.hpp"
 #include "boolean_advanced_indexing.hpp"
-// #include "clip.hpp"
+#include "clip.hpp"
 #include "copy_and_cast_usm_to_usm.hpp"
 #include "copy_as_contig.hpp"
 #include "copy_for_reshape.hpp"
@@ -57,12 +57,12 @@
 #include "integer_advanced_indexing.hpp"
 #include "kernels/dpctl_tensor_types.hpp"
 // #include "linear_sequences.hpp"
-// #include "repeat.hpp"
+#include "repeat.hpp"
 #include "simplify_iteration_space.hpp"
 #include "triul_ctor.hpp"
 #include "utils/memory_overlap.hpp"
 #include "utils/strided_iters.hpp"
-// #include "where.hpp"
+#include "where.hpp"
 #include "zeros_ctor.hpp"
 
 namespace py = pybind11;
@@ -116,8 +116,8 @@ using dpctl::tensor::py_internal::py_place;
 
 /* ================= Repeat ====================*/
 
 using dpctl::tensor::py_internal::py_cumsum_1d;
-// using dpctl::tensor::py_internal::py_repeat_by_scalar;
-// using dpctl::tensor::py_internal::py_repeat_by_sequence;
+using dpctl::tensor::py_internal::py_repeat_by_scalar;
+using dpctl::tensor::py_internal::py_repeat_by_sequence;
 
 /* ================ Eye ================== */
 
@@ -129,10 +129,10 @@ using dpctl::tensor::py_internal::usm_ndarray_triul;
 
 /* =========================== Where ============================== */
 
-// using dpctl::tensor::py_internal::py_where;
+using dpctl::tensor::py_internal::py_where;
 
 /* =========================== Clip ============================== */
 
-// using dpctl::tensor::py_internal::py_clip;
+using dpctl::tensor::py_internal::py_clip;
 
 // populate dispatch tables
 void init_dispatch_tables(void)
 {
@@ -142,7 +142,7 @@ void init_dispatch_tables(void)
     init_copy_and_cast_usm_to_usm_dispatch_tables();
     init_copy_numpy_ndarray_into_usm_ndarray_dispatch_tables();
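    // Each init_*_dispatch_tables() call in this function fills a static
    // num_types x num_types table that maps a pair of type ids to a kernel
    // function pointer, so the bindings dispatch in O(1) per call. In outline
    // (a sketch, not part of the patch; the concrete builder is
    // td_ns::DispatchTableBuilder, used in where.cpp below):
    //
    //     static fn_ptr_t table[td_ns::num_types][td_ns::num_types];
    //     td_ns::DispatchTableBuilder<fn_ptr_t, KernelFactory,
    //                                 td_ns::num_types> dtb;
    //     dtb.populate_dispatch_table(table);
    //     auto fn = table[dst_typeid][src_typeid]; // lookup at call time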
     init_advanced_indexing_dispatch_tables();
-    // init_where_dispatch_tables();
+    init_where_dispatch_tables();
 
     return;
 }
 
@@ -166,9 +166,9 @@ void init_dispatch_vectors(void)
     populate_mask_positions_dispatch_vectors();
     populate_cumsum_1d_dispatch_vectors();
 
-    // init_repeat_dispatch_vectors();
+    init_repeat_dispatch_vectors();
 
-    // init_clip_dispatch_vectors();
+    init_clip_dispatch_vectors();
 
     return;
 }
 
@@ -443,55 +443,53 @@ PYBIND11_MODULE(_tensor_impl, m)
           py::arg("mask_shape"), py::arg("sycl_queue"),
           py::arg("depends") = py::list());
 
-    // m.def("_where", &py_where, "", py::arg("condition"), py::arg("x1"),
-    //       py::arg("x2"), py::arg("dst"), py::arg("sycl_queue"),
-    //       py::arg("depends") = py::list());
-
-    // auto repeat_sequence = [](const dpctl::tensor::usm_ndarray &src,
-    //                           const dpctl::tensor::usm_ndarray &dst,
-    //                           const dpctl::tensor::usm_ndarray &reps,
-    //                           const dpctl::tensor::usm_ndarray &cumsum,
-    //                           std::optional<int> axis, sycl::queue &exec_q,
-    //                           const std::vector<sycl::event> depends)
-    //     -> std::pair<sycl::event, sycl::event> {
-    //     if (axis) {
-    //         return py_repeat_by_sequence(src, dst, reps, cumsum,
-    //         axis.value(),
-    //                                      exec_q, depends);
-    //     }
-    //     else {
-    //         return py_repeat_by_sequence(src, dst, reps, cumsum, exec_q,
-    //                                      depends);
-    //     }
-    // };
-    // m.def("_repeat_by_sequence", repeat_sequence, py::arg("src"),
-    //       py::arg("dst"), py::arg("reps"), py::arg("cumsum"),
-    //       py::arg("axis"), py::arg("sycl_queue"), py::arg("depends") =
-    //       py::list());
-
-    // auto repeat_scalar = [](const dpctl::tensor::usm_ndarray &src,
-    //                         const dpctl::tensor::usm_ndarray &dst,
-    //                         const py::ssize_t reps, std::optional<int> axis,
-    //                         sycl::queue &exec_q,
-    //                         const std::vector<sycl::event> depends)
-    //     -> std::pair<sycl::event, sycl::event> {
-    //     if (axis) {
-    //         return py_repeat_by_scalar(src, dst, reps, axis.value(), exec_q,
-    //                                    depends);
-    //     }
-    //     else {
-    //         return py_repeat_by_scalar(src, dst, reps, exec_q, depends);
-    //     }
-    // };
-    // m.def("_repeat_by_scalar", repeat_scalar, py::arg("src"), py::arg("dst"),
-    //       py::arg("reps"), py::arg("axis"), py::arg("sycl_queue"),
-    //       py::arg("depends") = py::list());
-
-    // m.def("_clip", &py_clip,
-    //       "Clamps elements of array `x` to the range "
-    //       "[`min`, `max] and writes the result to the "
-    //       "array `dst` for each element of `x`, `min`, and `max`."
-    //       "Returns a tuple of events: (hev, ev)",
-    //       py::arg("src"), py::arg("min"), py::arg("max"), py::arg("dst"),
-    //       py::arg("sycl_queue"), py::arg("depends") = py::list());
+    m.def("_where", &py_where, "", py::arg("condition"), py::arg("x1"),
+          py::arg("x2"), py::arg("dst"), py::arg("sycl_queue"),
+          py::arg("depends") = py::list());
+
+    auto repeat_sequence = [](const dpctl::tensor::usm_ndarray &src,
+                              const dpctl::tensor::usm_ndarray &dst,
+                              const dpctl::tensor::usm_ndarray &reps,
+                              const dpctl::tensor::usm_ndarray &cumsum,
+                              std::optional<int> axis, sycl::queue &exec_q,
+                              const std::vector<sycl::event> depends)
+        -> std::pair<sycl::event, sycl::event> {
+        if (axis) {
+            return py_repeat_by_sequence(src, dst, reps, cumsum, axis.value(),
+                                         exec_q, depends);
+        }
+        else {
+            return py_repeat_by_sequence(src, dst, reps, cumsum, exec_q,
+                                         depends);
+        }
+    };
+    m.def("_repeat_by_sequence", repeat_sequence, py::arg("src"),
+          py::arg("dst"), py::arg("reps"), py::arg("cumsum"), py::arg("axis"),
+          py::arg("sycl_queue"), py::arg("depends") = py::list());
+
+    auto repeat_scalar = [](const dpctl::tensor::usm_ndarray &src,
+                            const dpctl::tensor::usm_ndarray &dst,
+                            const py::ssize_t reps, std::optional<int> axis,
+                            sycl::queue &exec_q,
+                            const std::vector<sycl::event> depends)
+        -> std::pair<sycl::event, sycl::event> {
+        if (axis) {
+            return py_repeat_by_scalar(src, dst, reps, axis.value(), exec_q,
+                                       depends);
+        }
+        else {
+            return py_repeat_by_scalar(src, dst, reps, exec_q, depends);
+        }
+    };
+    m.def("_repeat_by_scalar", repeat_scalar, py::arg("src"), py::arg("dst"),
+          py::arg("reps"), py::arg("axis"), py::arg("sycl_queue"),
+          py::arg("depends") = py::list());
+
+    m.def("_clip", &py_clip,
+          "Clamps elements of array `x` to the range "
+          "[`min`, `max`] and writes the result to the "
+          "array `dst` for each element of `x`, `min`, and `max`."
+          "Returns a tuple of events: (hev, ev)",
+          py::arg("src"), py::arg("min"), py::arg("max"), py::arg("dst"),
+          py::arg("sycl_queue"), py::arg("depends") = py::list());
 }
diff --git a/dpctl_ext/tensor/libtensor/source/where.cpp b/dpctl_ext/tensor/libtensor/source/where.cpp
new file mode 100644
index 000000000000..46c52cf83b34
--- /dev/null
+++ b/dpctl_ext/tensor/libtensor/source/where.cpp
@@ -0,0 +1,265 @@
+//*****************************************************************************
+// Copyright (c) 2026, Intel Corporation
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// - Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// - Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// - Neither the name of the copyright holder nor the names of its contributors
+//   may be used to endorse or promote products derived from this software
+//   without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+// THE POSSIBILITY OF SUCH DAMAGE.
+//*****************************************************************************
+//
+//===---------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines Python API for implementation functions of
+/// dpctl.tensor.where
+//===---------------------------------------------------------------------===//
+
+#include <complex>
+#include <cstddef>
+#include <cstdint>
+#include <utility>
+#include <vector>
+
+#include <sycl/sycl.hpp>
+
+#include "dpnp4pybind11.hpp"
+#include <pybind11/pybind11.h>
+
+#include "kernels/where.hpp"
+#include "utils/memory_overlap.hpp"
+#include "utils/offset_utils.hpp"
+#include "utils/output_validation.hpp"
+#include "utils/sycl_alloc_utils.hpp"
+#include "utils/type_dispatch.hpp"
+
+#include "simplify_iteration_space.hpp"
+#include "where.hpp"
+
+namespace dpctl::tensor::py_internal
+{
+
+namespace py = pybind11;
+namespace td_ns = dpctl::tensor::type_dispatch;
+
+using dpctl::tensor::kernels::search::where_contig_impl_fn_ptr_t;
+using dpctl::tensor::kernels::search::where_strided_impl_fn_ptr_t;
+
+static where_contig_impl_fn_ptr_t where_contig_dispatch_table[td_ns::num_types]
+                                                             [td_ns::num_types];
+static where_strided_impl_fn_ptr_t
+    where_strided_dispatch_table[td_ns::num_types][td_ns::num_types];
+
+using dpctl::utils::keep_args_alive;
+
+std::pair<sycl::event, sycl::event>
+    py_where(const dpctl::tensor::usm_ndarray &condition,
+             const dpctl::tensor::usm_ndarray &x1,
+             const dpctl::tensor::usm_ndarray &x2,
+             const dpctl::tensor::usm_ndarray &dst,
+             sycl::queue &exec_q,
+             const std::vector<sycl::event> &depends)
+{
+
+    if (!dpctl::utils::queues_are_compatible(exec_q, {x1, x2, condition, dst}))
+    {
+        throw py::value_error(
+            "Execution queue is not compatible with allocation queues");
+    }
+
+    dpctl::tensor::validation::CheckWritable::throw_if_not_writable(dst);
+
+    int nd = condition.get_ndim();
+    int x1_nd = x1.get_ndim();
+    int x2_nd = x2.get_ndim();
+    int dst_nd = dst.get_ndim();
+
+    if (nd != x1_nd || nd != x2_nd) {
+        throw py::value_error(
+            "Input arrays are not of appropriate dimension for where kernel.");
+    }
+
+    if (nd != dst_nd) {
+        throw py::value_error(
+            "Destination is not of appropriate dimension for where kernel.");
+    }
+
+    const py::ssize_t *x1_shape = x1.get_shape_raw();
+    const py::ssize_t *x2_shape = x2.get_shape_raw();
+    const py::ssize_t *dst_shape = dst.get_shape_raw();
+    const py::ssize_t *cond_shape = condition.get_shape_raw();
+
+    bool shapes_equal(true);
+    std::size_t nelems(1);
+    for (int i = 0; i < nd; ++i) {
+        const auto &sh_i = dst_shape[i];
+        nelems *= static_cast<std::size_t>(sh_i);
+        shapes_equal = shapes_equal && (x1_shape[i] == sh_i) &&
+                       (x2_shape[i] == sh_i) && (cond_shape[i] == sh_i);
+    }
+
+    if (!shapes_equal) {
+        throw py::value_error("Axes are not of matching shapes.");
+    }
+
+    if (nelems == 0) {
+        return std::make_pair(sycl::event{}, sycl::event{});
+    }
+
+    auto const &overlap = dpctl::tensor::overlap::MemoryOverlap();
+    auto const &same_logical_tensors =
+        dpctl::tensor::overlap::SameLogicalTensors();
+    if ((overlap(dst, condition) && !same_logical_tensors(dst, condition)) ||
+        (overlap(dst, x1) && !same_logical_tensors(dst, x1)) ||
+        (overlap(dst, x2) && !same_logical_tensors(dst, x2)))
+    {
+        throw py::value_error("Destination array overlaps with input.");
+    }
+
+    int x1_typenum = x1.get_typenum();
+    int x2_typenum = x2.get_typenum();
+    int cond_typenum = condition.get_typenum();
+    int dst_typenum = dst.get_typenum();
+
+    auto const &array_types = td_ns::usm_ndarray_types();
+    int cond_typeid = array_types.typenum_to_lookup_id(cond_typenum);
+    int x1_typeid = array_types.typenum_to_lookup_id(x1_typenum);
+    int x2_typeid = array_types.typenum_to_lookup_id(x2_typenum);
+    int dst_typeid = array_types.typenum_to_lookup_id(dst_typenum);
+
+    if (x1_typeid != x2_typeid || x1_typeid != dst_typeid) {
+        throw py::value_error("Value arrays must have the same data type");
+    }
+
+    dpctl::tensor::validation::AmpleMemory::throw_if_not_ample(dst, nelems);
+
+    char *cond_data = condition.get_data();
+    char *x1_data = x1.get_data();
+    char *x2_data = x2.get_data();
+    char *dst_data = dst.get_data();
+
+    bool is_x1_c_contig = x1.is_c_contiguous();
+    bool is_x1_f_contig = x1.is_f_contiguous();
+
+    bool is_x2_c_contig = x2.is_c_contiguous();
+    bool is_x2_f_contig = x2.is_f_contiguous();
+
+    bool is_cond_c_contig = condition.is_c_contiguous();
+    bool is_cond_f_contig = condition.is_f_contiguous();
+
+    bool is_dst_c_contig = dst.is_c_contiguous();
+    bool is_dst_f_contig = dst.is_f_contiguous();
+
+    bool all_c_contig = (is_x1_c_contig && is_x2_c_contig && is_cond_c_contig &&
+                         is_dst_c_contig);
+    bool all_f_contig = (is_x1_f_contig && is_x2_f_contig && is_cond_f_contig &&
+                         is_dst_f_contig);
+
+    if (all_c_contig || all_f_contig) {
+        auto contig_fn = where_contig_dispatch_table[x1_typeid][cond_typeid];
+
+        auto where_ev = contig_fn(exec_q, nelems, cond_data, x1_data, x2_data,
+                                  dst_data, depends);
+        sycl::event ht_ev =
+            keep_args_alive(exec_q, {x1, x2, dst, condition}, {where_ev});
+
+        return std::make_pair(ht_ev, where_ev);
+    }
+
+    auto const &cond_strides = condition.get_strides_vector();
+    auto const &x1_strides = x1.get_strides_vector();
+    auto const &x2_strides = x2.get_strides_vector();
+    auto const &dst_strides = dst.get_strides_vector();
+
+    using shT = std::vector<py::ssize_t>;
+    shT simplified_shape;
+    shT simplified_cond_strides;
+    shT simplified_x1_strides;
+    shT simplified_x2_strides;
+    shT simplified_dst_strides;
+    py::ssize_t cond_offset(0);
+    py::ssize_t x1_offset(0);
+    py::ssize_t x2_offset(0);
+    py::ssize_t dst_offset(0);
+
+    simplify_iteration_space_4(
+        nd, x1_shape, cond_strides, x1_strides, x2_strides, dst_strides,
+        // outputs
+        simplified_shape, simplified_cond_strides, simplified_x1_strides,
+        simplified_x2_strides, simplified_dst_strides, cond_offset, x1_offset,
+        x2_offset, dst_offset);
+
+    auto fn = where_strided_dispatch_table[x1_typeid][cond_typeid];
+
+    std::vector<sycl::event> host_task_events;
+    host_task_events.reserve(2);
+
+    using dpctl::tensor::offset_utils::device_allocate_and_pack;
+    auto ptr_size_event_tuple = device_allocate_and_pack<py::ssize_t>(
+        exec_q, host_task_events,
+        // common shape and strides
+        simplified_shape, simplified_cond_strides, simplified_x1_strides,
+        simplified_x2_strides, simplified_dst_strides);
+    auto packed_shape_strides_owner =
+        std::move(std::get<0>(ptr_size_event_tuple));
+    sycl::event copy_shape_strides_ev = std::get<2>(ptr_size_event_tuple);
+    const py::ssize_t *packed_shape_strides = packed_shape_strides_owner.get();
+
+    std::vector<sycl::event> all_deps;
+    all_deps.reserve(depends.size() + 1);
+    all_deps.insert(all_deps.end(), depends.begin(), depends.end());
+    all_deps.push_back(copy_shape_strides_ev);
+
+    assert(all_deps.size() == depends.size() + 1);
+
+    sycl::event where_ev = fn(exec_q, nelems, nd, cond_data, x1_data, x2_data,
+                              dst_data, packed_shape_strides, cond_offset,
+                              x1_offset, x2_offset, dst_offset, all_deps);
+
+    // free packed temporaries
+    sycl::event temporaries_cleanup_ev =
+        dpctl::tensor::alloc_utils::async_smart_free(
+            exec_q, {where_ev}, packed_shape_strides_owner);
+    host_task_events.push_back(temporaries_cleanup_ev);
+
+    sycl::event arg_cleanup_ev =
+        keep_args_alive(exec_q, {x1, x2, condition, dst}, host_task_events);
+
+    return std::make_pair(arg_cleanup_ev, where_ev);
+}
+
+void init_where_dispatch_tables(void)
+{
+    using namespace td_ns;
+    using dpctl::tensor::kernels::search::WhereContigFactory;
+    DispatchTableBuilder<where_contig_impl_fn_ptr_t, WhereContigFactory,
+                         num_types>
+        dtb1;
+    dtb1.populate_dispatch_table(where_contig_dispatch_table);
+
+    using dpctl::tensor::kernels::search::WhereStridedFactory;
+    DispatchTableBuilder<where_strided_impl_fn_ptr_t, WhereStridedFactory,
+                         num_types>
+        dtb2;
+    dtb2.populate_dispatch_table(where_strided_dispatch_table);
+}
+
+} // namespace dpctl::tensor::py_internal
diff --git a/dpctl_ext/tensor/libtensor/source/where.hpp b/dpctl_ext/tensor/libtensor/source/where.hpp
new file mode 100644
index 000000000000..ba81d8b11642
--- /dev/null
+++ b/dpctl_ext/tensor/libtensor/source/where.hpp
@@ -0,0 +1,57 @@
+//*****************************************************************************
+// Copyright (c) 2026, Intel Corporation
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// - Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// - Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// - Neither the name of the copyright holder nor the names of its contributors
+//   may be used to endorse or promote products derived from this software
+//   without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+// THE POSSIBILITY OF SUCH DAMAGE.
+//*****************************************************************************
+//
+//===---------------------------------------------------------------------===//
+///
+/// \file
+/// This file declares Python API for implementation functions of
+/// dpctl.tensor.where
+//===---------------------------------------------------------------------===//
+
+#pragma once
+#include <sycl/sycl.hpp>
+#include <utility>
+
+#include <vector>
+
+#include "dpnp4pybind11.hpp"
+
+namespace dpctl::tensor::py_internal
+{
+
+extern std::pair<sycl::event, sycl::event>
+    py_where(const dpctl::tensor::usm_ndarray &,
+             const dpctl::tensor::usm_ndarray &,
+             const dpctl::tensor::usm_ndarray &,
+             const dpctl::tensor::usm_ndarray &,
+             sycl::queue &,
+             const std::vector<sycl::event> &);
+
+extern void init_where_dispatch_tables(void);
+
+} // namespace dpctl::tensor::py_internal
diff --git a/dpnp/dpnp_algo/dpnp_arraycreation.py b/dpnp/dpnp_algo/dpnp_arraycreation.py
index 47edf63a68b4..f3dd18153563 100644
--- a/dpnp/dpnp_algo/dpnp_arraycreation.py
+++ b/dpnp/dpnp_algo/dpnp_arraycreation.py
@@ -243,7 +243,7 @@ def dpnp_linspace(
         # Needed a special handling for denormal numbers (when step == 0),
         # see numpy#5437 for more details.
         # Note, dpt.where() is used to avoid a synchronization branch.
-        usm_res = dpt.where(
+        usm_res = dpt_ext.where(
             step == 0, (usm_res / step_num) * delta, usm_res * step
         )
     else:
diff --git a/dpnp/dpnp_algo/dpnp_elementwise_common.py b/dpnp/dpnp_algo/dpnp_elementwise_common.py
index 55d74e8c1803..722b8cb3b3f0 100644
--- a/dpnp/dpnp_algo/dpnp_elementwise_common.py
+++ b/dpnp/dpnp_algo/dpnp_elementwise_common.py
@@ -30,7 +30,6 @@
 from functools import wraps
 
 import dpctl.tensor as dpt
-import dpctl.tensor._copy_utils as dtc
 import dpctl.tensor._type_utils as dtu
 import dpctl.utils as dpu
 import numpy
@@ -48,6 +47,7 @@
 # TODO: revert to `import dpctl.tensor...`
 # when dpnp fully migrates dpctl/tensor
 import dpctl_ext.tensor as dpt_ext
+import dpctl_ext.tensor._copy_utils as dtc
 import dpctl_ext.tensor._tensor_impl as dti
 import dpnp
 import dpnp.backend.extensions.vm._vm_impl as vmi
diff --git a/dpnp/dpnp_array.py b/dpnp/dpnp_array.py
index 0b6d882c53db..b3ed3770396d 100644
--- a/dpnp/dpnp_array.py
+++ b/dpnp/dpnp_array.py
@@ -38,13 +38,13 @@
 import warnings
 
 import dpctl.tensor as dpt
-import dpctl.tensor._type_utils as dtu
-from dpctl.tensor._numpy_helper import AxisError
 
 # TODO: revert to `import dpctl.tensor...`
 # when dpnp fully migrates dpctl/tensor
 import dpctl_ext.tensor as dpt_ext
+import dpctl_ext.tensor._type_utils as dtu
 import dpnp
+from dpctl_ext.tensor._numpy_helper import AxisError
 
 from .
import memory as dpm diff --git a/dpnp/dpnp_iface_functional.py b/dpnp/dpnp_iface_functional.py index 1985eced2e71..797d8a736276 100644 --- a/dpnp/dpnp_iface_functional.py +++ b/dpnp/dpnp_iface_functional.py @@ -41,13 +41,15 @@ # pylint: disable=protected-access -from dpctl.tensor._numpy_helper import ( +import dpnp + +# TODO: revert to `from dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +from dpctl_ext.tensor._numpy_helper import ( normalize_axis_index, normalize_axis_tuple, ) -import dpnp - # pylint: disable=no-name-in-module from dpnp.dpnp_utils import get_usm_allocations diff --git a/dpnp/dpnp_iface_indexing.py b/dpnp/dpnp_iface_indexing.py index f305b106221f..bc190db70c4e 100644 --- a/dpnp/dpnp_iface_indexing.py +++ b/dpnp/dpnp_iface_indexing.py @@ -250,7 +250,7 @@ def choose(a, choices, out=None, mode="wrap"): res_usm_type, exec_q = get_usm_allocations(choices + [inds]) # apply type promotion to input choices - res_dt = dpt.result_type(*choices) + res_dt = dpt_ext.result_type(*choices) if len(choices) > 1: choices = tuple( map( diff --git a/dpnp/dpnp_iface_manipulation.py b/dpnp/dpnp_iface_manipulation.py index e988bbaa237b..d5e1e1aa5706 100644 --- a/dpnp/dpnp_iface_manipulation.py +++ b/dpnp/dpnp_iface_manipulation.py @@ -47,16 +47,16 @@ import dpctl import dpctl.tensor as dpt import numpy -from dpctl.tensor._numpy_helper import ( - AxisError, - normalize_axis_index, - normalize_axis_tuple, -) # TODO: revert to `import dpctl.tensor...` # when dpnp fully migrates dpctl/tensor import dpctl_ext.tensor as dpt_ext import dpnp +from dpctl_ext.tensor._numpy_helper import ( + AxisError, + normalize_axis_index, + normalize_axis_tuple, +) from .dpnp_array import dpnp_array @@ -1270,7 +1270,7 @@ def can_cast(from_, to, casting="safe"): if dpnp.is_supported_array_type(from_) else dpnp.dtype(from_) ) - return dpt.can_cast(dtype_from, to, casting=casting) + return dpt_ext.can_cast(dtype_from, to, casting=casting) def column_stack(tup): @@ -2837,7 +2837,7 @@ def repeat(a, repeats, axis=None): a = dpnp.ravel(a) usm_arr = dpnp.get_usm_ndarray(a) - usm_res = dpt.repeat(usm_arr, repeats, axis=axis) + usm_res = dpt_ext.repeat(usm_arr, repeats, axis=axis) return dpnp_array._create_from_usm_ndarray(usm_res) @@ -3195,7 +3195,7 @@ def result_type(*arrays_and_dtypes): ) for X in arrays_and_dtypes ] - return dpt.result_type(*usm_arrays_and_dtypes) + return dpt_ext.result_type(*usm_arrays_and_dtypes) def roll(x, shift, axis=None): diff --git a/dpnp/dpnp_iface_mathematical.py b/dpnp/dpnp_iface_mathematical.py index e339c24d384c..06f4fe936253 100644 --- a/dpnp/dpnp_iface_mathematical.py +++ b/dpnp/dpnp_iface_mathematical.py @@ -40,6 +40,7 @@ """ # pylint: disable=protected-access +# pylint: disable=duplicate-code # pylint: disable=no-name-in-module @@ -48,17 +49,19 @@ import dpctl.tensor as dpt import dpctl.tensor._tensor_elementwise_impl as ti -import dpctl.tensor._type_utils as dtu import dpctl.utils as dpu import numpy -from dpctl.tensor._numpy_helper import ( - normalize_axis_index, - normalize_axis_tuple, -) -from dpctl.tensor._type_utils import _acceptance_fn_divide +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt_ext +import dpctl_ext.tensor._type_utils as dtu import dpnp import dpnp.backend.extensions.ufunc._ufunc_impl as ufi +from dpctl_ext.tensor._numpy_helper import ( + normalize_axis_index, + normalize_axis_tuple, +) from .dpnp_algo.dpnp_elementwise_common import ( DPNPI0, @@ -727,7 +730,7 @@ def clip(a, /, min=None, 
max=None, *, out=None, order="K", **kwargs): usm_max = None if max is None else dpnp.get_usm_ndarray_or_scalar(max) usm_out = None if out is None else dpnp.get_usm_ndarray(out) - usm_res = dpt.clip(usm_arr, usm_min, usm_max, out=usm_out, order=order) + usm_res = dpt_ext.clip(usm_arr, usm_min, usm_max, out=usm_out, order=order) if out is not None and isinstance(out, dpnp_array): return out return dpnp_array._create_from_usm_ndarray(usm_res) @@ -1561,7 +1564,7 @@ def diff(a, n=1, axis=-1, prepend=None, append=None): mkl_fn_to_call="_mkl_div_to_call", mkl_impl_fn="_div", binary_inplace_fn=ti._divide_inplace, - acceptance_fn=_acceptance_fn_divide, + acceptance_fn=dtu._acceptance_fn_divide, ) diff --git a/dpnp/dpnp_iface_searching.py b/dpnp/dpnp_iface_searching.py index 16ab633d506b..a2389978d506 100644 --- a/dpnp/dpnp_iface_searching.py +++ b/dpnp/dpnp_iface_searching.py @@ -44,6 +44,7 @@ # pylint: disable=no-name-in-module # TODO: revert to `import dpctl.tensor...` # when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt_ext import dpctl_ext.tensor._tensor_impl as dti import dpnp @@ -473,5 +474,7 @@ def where(condition, x=None, y=None, /, *, order="K", out=None): usm_condition = dpnp.get_usm_ndarray(condition) usm_out = None if out is None else dpnp.get_usm_ndarray(out) - usm_res = dpt.where(usm_condition, usm_x, usm_y, order=order, out=usm_out) + usm_res = dpt_ext.where( + usm_condition, usm_x, usm_y, order=order, out=usm_out + ) return dpnp.get_result_array(usm_res, out) diff --git a/dpnp/dpnp_iface_sorting.py b/dpnp/dpnp_iface_sorting.py index be6c52ae9d80..5f7a3829b3c9 100644 --- a/dpnp/dpnp_iface_sorting.py +++ b/dpnp/dpnp_iface_sorting.py @@ -42,12 +42,12 @@ from collections.abc import Sequence import dpctl.tensor as dpt -from dpctl.tensor._numpy_helper import normalize_axis_index # TODO: revert to `import dpctl.tensor...` # when dpnp fully migrates dpctl/tensor import dpctl_ext.tensor as dpt_ext import dpnp +from dpctl_ext.tensor._numpy_helper import normalize_axis_index # pylint: disable=no-name-in-module from .dpnp_algo import ( diff --git a/dpnp/dpnp_iface_statistics.py b/dpnp/dpnp_iface_statistics.py index daff981d5cc4..9d3ccc40ecf5 100644 --- a/dpnp/dpnp_iface_statistics.py +++ b/dpnp/dpnp_iface_statistics.py @@ -45,7 +45,6 @@ import dpctl.tensor._tensor_elementwise_impl as ti import dpctl.utils as dpu import numpy -from dpctl.tensor._numpy_helper import normalize_axis_index # TODO: revert to `import dpctl.tensor...` # when dpnp fully migrates dpctl/tensor @@ -54,6 +53,7 @@ # pylint: disable=no-name-in-module import dpnp.backend.extensions.statistics._statistics_impl as statistics_ext +from dpctl_ext.tensor._numpy_helper import normalize_axis_index from dpnp.dpnp_utils.dpnp_utils_common import ( result_type_for_device, to_supported_dtypes, diff --git a/dpnp/dpnp_iface_trigonometric.py b/dpnp/dpnp_iface_trigonometric.py index a46f06c10e08..9894bd304701 100644 --- a/dpnp/dpnp_iface_trigonometric.py +++ b/dpnp/dpnp_iface_trigonometric.py @@ -45,8 +45,10 @@ import dpctl.tensor as dpt import dpctl.tensor._tensor_elementwise_impl as ti -import dpctl.tensor._type_utils as dtu +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor._type_utils as dtu import dpnp import dpnp.backend.extensions.ufunc._ufunc_impl as ufi diff --git a/dpnp/dpnp_iface_types.py b/dpnp/dpnp_iface_types.py index 8fdb9e1d3d38..f133333d6b83 100644 --- a/dpnp/dpnp_iface_types.py +++ b/dpnp/dpnp_iface_types.py @@ -40,6 +40,9 @@ import dpctl.tensor as 
dpt import numpy +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt_ext import dpnp from .dpnp_array import dpnp_array @@ -211,7 +214,7 @@ def finfo(dtype): """ if isinstance(dtype, dpnp_array): dtype = dtype.dtype - return dpt.finfo(dtype) + return dpt_ext.finfo(dtype) # pylint: disable=redefined-outer-name @@ -244,7 +247,7 @@ def iinfo(dtype): if isinstance(dtype, dpnp_array): dtype = dtype.dtype - return dpt.iinfo(dtype) + return dpt_ext.iinfo(dtype) def isdtype(dtype, kind): @@ -298,7 +301,7 @@ def isdtype(dtype, kind): elif isinstance(kind, tuple): kind = tuple(dpt.dtype(k) if isinstance(k, type) else k for k in kind) - return dpt.isdtype(dtype, kind) + return dpt_ext.isdtype(dtype, kind) def issubdtype(arg1, arg2): diff --git a/dpnp/dpnp_utils/dpnp_utils_common.py b/dpnp/dpnp_utils/dpnp_utils_common.py index e4bde2e1ec86..aa294fefe275 100644 --- a/dpnp/dpnp_utils/dpnp_utils_common.py +++ b/dpnp/dpnp_utils/dpnp_utils_common.py @@ -29,8 +29,9 @@ from collections.abc import Iterable -import dpctl.tensor._type_utils as dtu - +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor._type_utils as dtu import dpnp from dpnp.dpnp_utils import map_dtype_to_device diff --git a/dpnp/dpnp_utils/dpnp_utils_linearalgebra.py b/dpnp/dpnp_utils/dpnp_utils_linearalgebra.py index 9ad97742ee18..b01f57eaecdd 100644 --- a/dpnp/dpnp_utils/dpnp_utils_linearalgebra.py +++ b/dpnp/dpnp_utils/dpnp_utils_linearalgebra.py @@ -30,11 +30,6 @@ import dpctl.tensor as dpt import dpctl.utils as dpu import numpy -from dpctl.tensor._numpy_helper import ( - AxisError, - normalize_axis_index, - normalize_axis_tuple, -) from dpctl.utils import ExecutionPlacementError # pylint: disable=no-name-in-module @@ -43,6 +38,11 @@ import dpctl_ext.tensor._tensor_impl as ti import dpnp import dpnp.backend.extensions.blas._blas_impl as bi +from dpctl_ext.tensor._numpy_helper import ( + AxisError, + normalize_axis_index, + normalize_axis_tuple, +) from dpnp.dpnp_array import dpnp_array from dpnp.dpnp_utils import get_usm_allocations diff --git a/dpnp/dpnp_utils/dpnp_utils_statistics.py b/dpnp/dpnp_utils/dpnp_utils_statistics.py index 3a3bc04a31af..ec67b619a13f 100644 --- a/dpnp/dpnp_utils/dpnp_utils_statistics.py +++ b/dpnp/dpnp_utils/dpnp_utils_statistics.py @@ -30,10 +30,13 @@ import dpctl import dpctl.tensor as dpt -from dpctl.tensor._numpy_helper import normalize_axis_tuple from dpctl.utils import ExecutionPlacementError import dpnp + +# TODO: revert to `from dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +from dpctl_ext.tensor._numpy_helper import normalize_axis_tuple from dpnp.dpnp_array import dpnp_array __all__ = ["dpnp_cov", "dpnp_median"] diff --git a/dpnp/fft/dpnp_utils_fft.py b/dpnp/fft/dpnp_utils_fft.py index 534b9404254f..b959b78e1ad0 100644 --- a/dpnp/fft/dpnp_utils_fft.py +++ b/dpnp/fft/dpnp_utils_fft.py @@ -49,10 +49,6 @@ import dpctl.tensor._tensor_impl as ti import dpctl.utils as dpu import numpy -from dpctl.tensor._numpy_helper import ( - normalize_axis_index, - normalize_axis_tuple, -) from dpctl.utils import ExecutionPlacementError # pylint: disable=no-name-in-module @@ -61,6 +57,10 @@ import dpctl_ext.tensor._tensor_impl as ti_ext import dpnp import dpnp.backend.extensions.fft._fft_impl as fi +from dpctl_ext.tensor._numpy_helper import ( + normalize_axis_index, + normalize_axis_tuple, +) from ..dpnp_array import dpnp_array from ..dpnp_utils import map_dtype_to_device diff --git 
a/dpnp/linalg/dpnp_iface_linalg.py b/dpnp/linalg/dpnp_iface_linalg.py index 6959565ecf17..f4e0f96da5e6 100644 --- a/dpnp/linalg/dpnp_iface_linalg.py +++ b/dpnp/linalg/dpnp_iface_linalg.py @@ -45,9 +45,12 @@ from typing import NamedTuple import numpy -from dpctl.tensor._numpy_helper import normalize_axis_tuple import dpnp + +# TODO: revert to `from dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +from dpctl_ext.tensor._numpy_helper import normalize_axis_tuple from dpnp.backend.extensions.lapack._lapack_impl import LinAlgError from .dpnp_utils_linalg import ( diff --git a/dpnp/linalg/dpnp_utils_linalg.py b/dpnp/linalg/dpnp_utils_linalg.py index c6897e7b0614..28e11f6188c5 100644 --- a/dpnp/linalg/dpnp_utils_linalg.py +++ b/dpnp/linalg/dpnp_utils_linalg.py @@ -44,7 +44,6 @@ import dpctl.utils as dpu import numpy -from dpctl.tensor._numpy_helper import normalize_axis_index from numpy import prod # pylint: disable=no-name-in-module @@ -53,6 +52,7 @@ import dpctl_ext.tensor._tensor_impl as ti import dpnp import dpnp.backend.extensions.lapack._lapack_impl as li +from dpctl_ext.tensor._numpy_helper import normalize_axis_index from dpnp.dpnp_utils import get_usm_allocations diff --git a/dpnp/tests/test_arraymanipulation.py b/dpnp/tests/test_arraymanipulation.py index ba83ee94d8b0..17d2c07fd6cc 100644 --- a/dpnp/tests/test_arraymanipulation.py +++ b/dpnp/tests/test_arraymanipulation.py @@ -3,11 +3,14 @@ import dpctl.tensor as dpt import numpy import pytest -from dpctl.tensor._numpy_helper import AxisError from numpy.testing import assert_array_equal, assert_equal, assert_raises import dpnp +# TODO: revert to `from dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +from dpctl_ext.tensor._numpy_helper import AxisError + from .helper import get_all_dtypes, get_float_complex_dtypes from .third_party.cupy import testing diff --git a/dpnp/tests/test_counting.py b/dpnp/tests/test_counting.py index 762abd58b687..9210e7c1b3dd 100644 --- a/dpnp/tests/test_counting.py +++ b/dpnp/tests/test_counting.py @@ -1,6 +1,5 @@ import numpy import pytest -from dpctl.tensor._numpy_helper import AxisError from numpy.testing import ( assert_allclose, assert_equal, @@ -9,6 +8,10 @@ import dpnp +# TODO: revert to `from dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +from dpctl_ext.tensor._numpy_helper import AxisError + from .helper import ( get_all_dtypes, get_float_dtypes, diff --git a/dpnp/tests/test_flipping.py b/dpnp/tests/test_flipping.py index cc84242f4557..cd55846e3668 100644 --- a/dpnp/tests/test_flipping.py +++ b/dpnp/tests/test_flipping.py @@ -2,13 +2,16 @@ import numpy import pytest -from dpctl.tensor._numpy_helper import AxisError from numpy.testing import ( assert_equal, ) import dpnp +# TODO: revert to `from dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +from dpctl_ext.tensor._numpy_helper import AxisError + from .helper import ( get_all_dtypes, ) diff --git a/dpnp/tests/test_indexing.py b/dpnp/tests/test_indexing.py index 9a55efe138b7..79c41a2f45f7 100644 --- a/dpnp/tests/test_indexing.py +++ b/dpnp/tests/test_indexing.py @@ -4,8 +4,6 @@ import dpctl.tensor as dpt import numpy import pytest -from dpctl.tensor._numpy_helper import AxisError -from dpctl.tensor._type_utils import _to_device_supported_dtype from dpctl.utils import ExecutionPlacementError from numpy.testing import ( assert_, @@ -16,6 +14,11 @@ ) import dpnp + +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +from dpctl_ext.tensor._numpy_helper import AxisError +from 
dpctl_ext.tensor._type_utils import _to_device_supported_dtype from dpnp.dpnp_array import dpnp_array from .helper import ( diff --git a/dpnp/tests/test_linalg.py b/dpnp/tests/test_linalg.py index 31d99d71ce49..e1ad9af7d220 100644 --- a/dpnp/tests/test_linalg.py +++ b/dpnp/tests/test_linalg.py @@ -4,7 +4,6 @@ import dpctl.tensor as dpt import numpy import pytest -from dpctl.tensor._numpy_helper import AxisError from dpctl.utils import ExecutionPlacementError from numpy.testing import ( assert_allclose, @@ -16,6 +15,10 @@ import dpnp +# TODO: revert to `from dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +from dpctl_ext.tensor._numpy_helper import AxisError + from .helper import ( assert_dtype_allclose, generate_random_numpy_array, diff --git a/dpnp/tests/test_manipulation.py b/dpnp/tests/test_manipulation.py index 8ddba08dbb92..2512c0955da7 100644 --- a/dpnp/tests/test_manipulation.py +++ b/dpnp/tests/test_manipulation.py @@ -3,7 +3,6 @@ import dpctl.tensor as dpt import numpy import pytest -from dpctl.tensor._numpy_helper import AxisError from numpy.testing import ( assert_array_equal, assert_equal, @@ -12,6 +11,10 @@ import dpnp +# TODO: revert to `from dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +from dpctl_ext.tensor._numpy_helper import AxisError + from .helper import ( assert_dtype_allclose, get_all_dtypes, diff --git a/dpnp/tests/test_mathematical.py b/dpnp/tests/test_mathematical.py index 760c1a0ceb2e..ef8f6731ffd2 100644 --- a/dpnp/tests/test_mathematical.py +++ b/dpnp/tests/test_mathematical.py @@ -2,10 +2,6 @@ import dpctl.tensor as dpt import numpy import pytest -from dpctl.tensor._numpy_helper import ( - AxisError, - normalize_axis_index, -) from dpctl.utils import ExecutionPlacementError from numpy.testing import ( assert_allclose, @@ -16,6 +12,13 @@ ) import dpnp + +# TODO: revert to `from dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +from dpctl_ext.tensor._numpy_helper import ( + AxisError, + normalize_axis_index, +) from dpnp.dpnp_array import dpnp_array from dpnp.dpnp_utils import map_dtype_to_device diff --git a/dpnp/tests/test_product.py b/dpnp/tests/test_product.py index afe767a5e5d9..9c2bc54e30b5 100644 --- a/dpnp/tests/test_product.py +++ b/dpnp/tests/test_product.py @@ -1,11 +1,14 @@ import dpctl import numpy import pytest -from dpctl.tensor._numpy_helper import AxisError from dpctl.utils import ExecutionPlacementError from numpy.testing import assert_allclose, assert_array_equal, assert_raises import dpnp + +# TODO: revert to `from dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +from dpctl_ext.tensor._numpy_helper import AxisError from dpnp.dpnp_utils import map_dtype_to_device from .helper import ( diff --git a/dpnp/tests/test_sort.py b/dpnp/tests/test_sort.py index 5e883c575f85..73eac4064892 100644 --- a/dpnp/tests/test_sort.py +++ b/dpnp/tests/test_sort.py @@ -1,10 +1,13 @@ import numpy import pytest -from dpctl.tensor._numpy_helper import AxisError from numpy.testing import assert_array_equal, assert_equal, assert_raises import dpnp +# TODO: revert to `from dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +from dpctl_ext.tensor._numpy_helper import AxisError + from .helper import ( assert_dtype_allclose, generate_random_numpy_array, diff --git a/dpnp/tests/third_party/cupy/core_tests/test_ndarray.py b/dpnp/tests/third_party/cupy/core_tests/test_ndarray.py index 95d753c90473..085261317ead 100644 --- a/dpnp/tests/third_party/cupy/core_tests/test_ndarray.py +++ 
b/dpnp/tests/third_party/cupy/core_tests/test_ndarray.py @@ -6,13 +6,16 @@ import dpctl import numpy import pytest -from dpctl.tensor._numpy_helper import AxisError # from cupy_backends.cuda.api import driver # from cupy_backends.cuda.api import runtime # from cupy_backends.cuda import stream as stream_module import dpnp as cupy +# TODO: revert to `from dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +from dpctl_ext.tensor._numpy_helper import AxisError + # from cupy import _util # from cupy import _core # from cupy import cuda diff --git a/dpnp/tests/third_party/cupy/lib_tests/test_shape_base.py b/dpnp/tests/third_party/cupy/lib_tests/test_shape_base.py index c241824fa81d..a1309f3ed83d 100644 --- a/dpnp/tests/third_party/cupy/lib_tests/test_shape_base.py +++ b/dpnp/tests/third_party/cupy/lib_tests/test_shape_base.py @@ -2,9 +2,12 @@ import numpy import pytest -from dpctl.tensor._numpy_helper import AxisError import dpnp as cupy + +# TODO: revert to `from dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +from dpctl_ext.tensor._numpy_helper import AxisError from dpnp.tests.helper import has_support_aspect64 from dpnp.tests.third_party.cupy import testing diff --git a/dpnp/tests/third_party/cupy/manipulation_tests/test_dims.py b/dpnp/tests/third_party/cupy/manipulation_tests/test_dims.py index 7355d07e1d9b..8944a6b944c9 100644 --- a/dpnp/tests/third_party/cupy/manipulation_tests/test_dims.py +++ b/dpnp/tests/third_party/cupy/manipulation_tests/test_dims.py @@ -2,9 +2,12 @@ import numpy import pytest -from dpctl.tensor._numpy_helper import AxisError import dpnp as cupy + +# TODO: revert to `from dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +from dpctl_ext.tensor._numpy_helper import AxisError from dpnp.tests.third_party.cupy import testing diff --git a/dpnp/tests/third_party/cupy/manipulation_tests/test_transpose.py b/dpnp/tests/third_party/cupy/manipulation_tests/test_transpose.py index 7e7a62dce52a..0f6bed1c2ced 100644 --- a/dpnp/tests/third_party/cupy/manipulation_tests/test_transpose.py +++ b/dpnp/tests/third_party/cupy/manipulation_tests/test_transpose.py @@ -2,9 +2,12 @@ import numpy import pytest -from dpctl.tensor._numpy_helper import AxisError import dpnp as cupy + +# TODO: revert to `from dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +from dpctl_ext.tensor._numpy_helper import AxisError from dpnp.tests.third_party.cupy import testing diff --git a/dpnp/tests/third_party/cupy/math_tests/test_sumprod.py b/dpnp/tests/third_party/cupy/math_tests/test_sumprod.py index b8f98456a13a..cb7200c1b13b 100644 --- a/dpnp/tests/third_party/cupy/math_tests/test_sumprod.py +++ b/dpnp/tests/third_party/cupy/math_tests/test_sumprod.py @@ -2,9 +2,12 @@ import numpy import pytest -from dpctl.tensor._numpy_helper import AxisError import dpnp as cupy + +# TODO: revert to `from dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +from dpctl_ext.tensor._numpy_helper import AxisError from dpnp.tests.helper import ( has_support_aspect16, has_support_aspect64, diff --git a/dpnp/tests/third_party/cupy/sorting_tests/test_sort.py b/dpnp/tests/third_party/cupy/sorting_tests/test_sort.py index 7e0eade13254..8359ba580a25 100644 --- a/dpnp/tests/third_party/cupy/sorting_tests/test_sort.py +++ b/dpnp/tests/third_party/cupy/sorting_tests/test_sort.py @@ -4,9 +4,12 @@ import numpy import pytest -from dpctl.tensor._numpy_helper import AxisError import dpnp as cupy + +# TODO: revert to `from dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +from 
dpctl_ext.tensor._numpy_helper import AxisError from dpnp.tests.helper import has_support_aspect64 from dpnp.tests.third_party.cupy import testing diff --git a/dpnp/tests/third_party/cupy/statistics_tests/test_meanvar.py b/dpnp/tests/third_party/cupy/statistics_tests/test_meanvar.py index bf5d37df2fba..d355d18985f2 100644 --- a/dpnp/tests/third_party/cupy/statistics_tests/test_meanvar.py +++ b/dpnp/tests/third_party/cupy/statistics_tests/test_meanvar.py @@ -2,9 +2,12 @@ import numpy import pytest -from dpctl.tensor._numpy_helper import AxisError import dpnp as cupy + +# TODO: revert to `from dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +from dpctl_ext.tensor._numpy_helper import AxisError from dpnp.tests.helper import has_support_aspect16, has_support_aspect64 from dpnp.tests.third_party.cupy import testing diff --git a/dpnp/tests/third_party/cupy/testing/_loops.py b/dpnp/tests/third_party/cupy/testing/_loops.py index 63cd09147c4b..66c243a3d7f7 100644 --- a/dpnp/tests/third_party/cupy/testing/_loops.py +++ b/dpnp/tests/third_party/cupy/testing/_loops.py @@ -10,9 +10,12 @@ import numpy import pytest from dpctl import select_default_device -from dpctl.tensor._numpy_helper import AxisError import dpnp as cupy + +# TODO: revert to `from dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +from dpctl_ext.tensor._numpy_helper import AxisError from dpnp.tests import config from dpnp.tests.third_party.cupy.testing import _array, _parameterized from dpnp.tests.third_party.cupy.testing._pytest_impl import is_available From 585f2e5fc836841ae6a534621ee60e9b9790b73f Mon Sep 17 00:00:00 2001 From: vlad-perevezentsev Date: Thu, 5 Mar 2026 15:44:55 +0100 Subject: [PATCH 10/11] Extend `._tensor_impl` with linear sequence functions (#2782) This PR is the final one in the series extending the `_tensor_impl` extension. It extends `_tensor_impl` in `dpctl_ext.tensor` with the linear sequence functions (`_linspace_step` and `_linspace_affine`). This PR also significantly expands the Python API of `dpctl_ext.tensor` by adding all missing functions from `dpctl_ext.tensor._ctors` and `dpctl_ext.tensor._manipulation_functions` (an illustrative usage sketch of the new constructors follows the diffstat below). `_tensor_impl`: 45 / 45 functions; Python API of `dpctl_ext.tensor`: 70 / 233 functions --- dpctl_ext/tensor/CMakeLists.txt | 4 +- dpctl_ext/tensor/__init__.py | 46 + dpctl_ext/tensor/_clip.py | 74 +- dpctl_ext/tensor/_copy_utils.py | 58 +- dpctl_ext/tensor/_ctors.py | 1776 +++++++++++++++-- dpctl_ext/tensor/_indexing_functions.py | 14 +- dpctl_ext/tensor/_manipulation_functions.py | 727 ++++++- dpctl_ext/tensor/_reshape.py | 8 +- dpctl_ext/tensor/_scalar_utils.py | 6 +- dpctl_ext/tensor/_search_functions.py | 24 +- .../include/kernels/constructors.hpp | 178 ++ .../libtensor/source/linear_sequences.cpp | 306 +++ .../libtensor/source/linear_sequences.hpp | 66 + .../tensor/libtensor/source/tensor_ctors.cpp | 38 +- dpnp/dpnp_algo/dpnp_arraycreation.py | 14 +- dpnp/dpnp_algo/dpnp_elementwise_common.py | 24 +- dpnp/dpnp_algo/dpnp_fill.py | 7 +- dpnp/dpnp_array.py | 2 +- dpnp/dpnp_container.py | 13 +- dpnp/dpnp_iface.py | 2 +- dpnp/dpnp_iface_arraycreation.py | 8 +- dpnp/dpnp_iface_indexing.py | 16 +- dpnp/dpnp_iface_manipulation.py | 28 +- dpnp/dpnp_iface_searching.py | 2 +- dpnp/fft/dpnp_utils_fft.py | 12 +- dpnp/tests/test_arraycreation.py | 5 +- dpnp/tests/test_arraymanipulation.py | 6 +- dpnp/tests/test_fft.py | 4 +- dpnp/tests/test_indexing.py | 5 +- dpnp/tests/test_linalg.py | 4 +- dpnp/tests/test_manipulation.py | 4 +- dpnp/tests/test_mathematical.py | 13 +- dpnp/tests/test_memory.py | 5
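
A minimal usage sketch of the constructors this patch exposes through `dpctl_ext.tensor` (per the `__init__.py` diff below). It is not part of the diff itself; it assumes a default SYCL device is available, and the values in the comments are the expected results:

    import dpctl_ext.tensor as dpt_ext

    # arange() populates the freshly allocated usm_ndarray through the new
    # _linspace_step kernel (see the ti._linspace_step call in _ctors.py below)
    x = dpt_ext.arange(0, 10, 2, dtype="i4")   # 0, 2, 4, 6, 8

    # linspace() is expected to rely on the companion _linspace_affine kernel
    y = dpt_ext.linspace(0.0, 1.0, num=5)      # 0.0, 0.25, 0.5, 0.75, 1.0
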
+- dpnp/tests/test_nanfunctions.py | 4 +- dpnp/tests/test_ndarray.py | 5 +- dpnp/tests/test_search.py | 4 +- dpnp/tests/test_statistics.py | 4 +- dpnp/tests/test_sycl_queue.py | 4 +- dpnp/tests/test_usm_type.py | 4 +- dpnp/tests/test_utils.py | 4 +- 40 files changed, 3194 insertions(+), 334 deletions(-) create mode 100644 dpctl_ext/tensor/libtensor/source/linear_sequences.cpp create mode 100644 dpctl_ext/tensor/libtensor/source/linear_sequences.hpp diff --git a/dpctl_ext/tensor/CMakeLists.txt b/dpctl_ext/tensor/CMakeLists.txt index 6f823a818ce7..864e34ddaba4 100644 --- a/dpctl_ext/tensor/CMakeLists.txt +++ b/dpctl_ext/tensor/CMakeLists.txt @@ -51,7 +51,7 @@ set(_tensor_impl_sources ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/copy_numpy_ndarray_into_usm_ndarray.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/copy_for_reshape.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/copy_for_roll.cpp - # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/linear_sequences.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/linear_sequences.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/integer_advanced_indexing.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/boolean_advanced_indexing.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/eye_ctor.cpp @@ -93,7 +93,7 @@ endif() set(_no_fast_math_sources ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/copy_and_cast_usm_to_usm.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/full_ctor.cpp - # ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/linear_sequences.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/linear_sequences.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/clip.cpp ${CMAKE_CURRENT_SOURCE_DIR}/libtensor/source/where.cpp ) diff --git a/dpctl_ext/tensor/__init__.py b/dpctl_ext/tensor/__init__.py index 2c1e761beb3b..9d4013e146a7 100644 --- a/dpctl_ext/tensor/__init__.py +++ b/dpctl_ext/tensor/__init__.py @@ -36,10 +36,21 @@ to_numpy, ) from ._ctors import ( + arange, + asarray, + empty, + empty_like, eye, full, + full_like, + linspace, + meshgrid, + ones, + ones_like, tril, triu, + zeros, + zeros_like, ) from ._indexing_functions import ( extract, @@ -51,27 +62,55 @@ take_along_axis, ) from ._manipulation_functions import ( + broadcast_arrays, + broadcast_to, + concat, + expand_dims, + flip, + moveaxis, + permute_dims, repeat, roll, + squeeze, + stack, + swapaxes, + tile, + unstack, ) from ._reshape import reshape from ._search_functions import where from ._type_utils import can_cast, finfo, iinfo, isdtype, result_type __all__ = [ + "arange", + "asarray", "asnumpy", "astype", + "broadcast_arrays", + "broadcast_to", "can_cast", + "concat", "copy", "clip", + "empty", + "empty_like", "extract", + "expand_dims", "eye", "finfo", + "flip", "from_numpy", "full", + "full_like", "iinfo", "isdtype", + "linspace", + "meshgrid", + "moveaxis", + "permute_dims", "nonzero", + "ones", + "ones_like", "place", "put", "put_along_axis", @@ -79,10 +118,17 @@ "reshape", "result_type", "roll", + "squeeze", + "stack", + "swapaxes", "take", "take_along_axis", + "tile", "to_numpy", "tril", "triu", + "unstack", "where", + "zeros", + "zeros_like", ] diff --git a/dpctl_ext/tensor/_clip.py b/dpctl_ext/tensor/_clip.py index f145e9f2d98d..ef07269c4ea0 100644 --- a/dpctl_ext/tensor/_clip.py +++ b/dpctl_ext/tensor/_clip.py @@ -163,7 +163,7 @@ def _clip_none(x, val, out, order, _binary_fn): if ti._array_overlap(x, out): if not ti._same_logical_tensors(x, out): - out = dpt.empty_like(out) + out = dpt_ext.empty_like(out) if isinstance(val, dpt.usm_ndarray): if ( @@ -171,12 +171,12 @@ def 
_clip_none(x, val, out, order, _binary_fn): and not ti._same_logical_tensors(val, out) and val_dtype == res_dt ): - out = dpt.empty_like(out) + out = dpt_ext.empty_like(out) if isinstance(val, dpt.usm_ndarray): val_ary = val else: - val_ary = dpt.asarray(val, dtype=val_dtype, sycl_queue=exec_q) + val_ary = dpt_ext.asarray(val, dtype=val_dtype, sycl_queue=exec_q) if order == "A": order = ( @@ -197,7 +197,7 @@ def _clip_none(x, val, out, order, _binary_fn): x, val_ary, res_dt, res_shape, res_usm_type, exec_q ) else: - out = dpt.empty( + out = dpt_ext.empty( res_shape, dtype=res_dt, usm_type=res_usm_type, @@ -205,9 +205,9 @@ def _clip_none(x, val, out, order, _binary_fn): order=order, ) if x_shape != res_shape: - x = dpt.broadcast_to(x, res_shape) + x = dpt_ext.broadcast_to(x, res_shape) if val_ary.shape != res_shape: - val_ary = dpt.broadcast_to(val_ary, res_shape) + val_ary = dpt_ext.broadcast_to(val_ary, res_shape) _manager = SequentialOrderManager[exec_q] dep_evs = _manager.submitted_events ht_binary_ev, binary_ev = _binary_fn( @@ -229,7 +229,7 @@ def _clip_none(x, val, out, order, _binary_fn): if order == "K": buf = _empty_like_orderK(val_ary, res_dt) else: - buf = dpt.empty_like(val_ary, dtype=res_dt, order=order) + buf = dpt_ext.empty_like(val_ary, dtype=res_dt, order=order) _manager = SequentialOrderManager[exec_q] dep_evs = _manager.submitted_events ht_copy_ev, copy_ev = ti._copy_usm_ndarray_into_usm_ndarray( @@ -242,7 +242,7 @@ def _clip_none(x, val, out, order, _binary_fn): x, buf, res_dt, res_shape, res_usm_type, exec_q ) else: - out = dpt.empty( + out = dpt_ext.empty( res_shape, dtype=res_dt, usm_type=res_usm_type, @@ -251,8 +251,8 @@ def _clip_none(x, val, out, order, _binary_fn): ) if x_shape != res_shape: - x = dpt.broadcast_to(x, res_shape) - buf = dpt.broadcast_to(buf, res_shape) + x = dpt_ext.broadcast_to(x, res_shape) + buf = dpt_ext.broadcast_to(buf, res_shape) ht_binary_ev, binary_ev = _binary_fn( src1=x, src2=buf, @@ -353,14 +353,14 @@ def clip(x, /, min=None, max=None, out=None, order="K"): if ti._array_overlap(x, out): if not ti._same_logical_tensors(x, out): - out = dpt.empty_like(out) + out = dpt_ext.empty_like(out) else: return out else: if order == "K": out = _empty_like_orderK(x, x.dtype) else: - out = dpt.empty_like(x, order=order) + out = dpt_ext.empty_like(x, order=order) _manager = SequentialOrderManager[exec_q] dep_evs = _manager.submitted_events @@ -519,7 +519,7 @@ def clip(x, /, min=None, max=None, out=None, order="K"): if ti._array_overlap(x, out): if not ti._same_logical_tensors(x, out): - out = dpt.empty_like(out) + out = dpt_ext.empty_like(out) if isinstance(min, dpt.usm_ndarray): if ( @@ -527,7 +527,7 @@ def clip(x, /, min=None, max=None, out=None, order="K"): and not ti._same_logical_tensors(min, out) and buf1_dt is None ): - out = dpt.empty_like(out) + out = dpt_ext.empty_like(out) if isinstance(max, dpt.usm_ndarray): if ( @@ -535,16 +535,16 @@ def clip(x, /, min=None, max=None, out=None, order="K"): and not ti._same_logical_tensors(max, out) and buf2_dt is None ): - out = dpt.empty_like(out) + out = dpt_ext.empty_like(out) if isinstance(min, dpt.usm_ndarray): a_min = min else: - a_min = dpt.asarray(min, dtype=min_dtype, sycl_queue=exec_q) + a_min = dpt_ext.asarray(min, dtype=min_dtype, sycl_queue=exec_q) if isinstance(max, dpt.usm_ndarray): a_max = max else: - a_max = dpt.asarray(max, dtype=max_dtype, sycl_queue=exec_q) + a_max = dpt_ext.asarray(max, dtype=max_dtype, sycl_queue=exec_q) if order == "A": order = ( @@ -572,7 +572,7 @@ def clip(x, /, 
min=None, max=None, out=None, order="K"): exec_q, ) else: - out = dpt.empty( + out = dpt_ext.empty( res_shape, dtype=res_dt, usm_type=res_usm_type, @@ -580,11 +580,11 @@ def clip(x, /, min=None, max=None, out=None, order="K"): order=order, ) if x_shape != res_shape: - x = dpt.broadcast_to(x, res_shape) + x = dpt_ext.broadcast_to(x, res_shape) if a_min.shape != res_shape: - a_min = dpt.broadcast_to(a_min, res_shape) + a_min = dpt_ext.broadcast_to(a_min, res_shape) if a_max.shape != res_shape: - a_max = dpt.broadcast_to(a_max, res_shape) + a_max = dpt_ext.broadcast_to(a_max, res_shape) _manager = SequentialOrderManager[exec_q] dep_ev = _manager.submitted_events ht_binary_ev, binary_ev = ti._clip( @@ -612,7 +612,7 @@ def clip(x, /, min=None, max=None, out=None, order="K"): if order == "K": buf2 = _empty_like_orderK(a_max, buf2_dt) else: - buf2 = dpt.empty_like(a_max, dtype=buf2_dt, order=order) + buf2 = dpt_ext.empty_like(a_max, dtype=buf2_dt, order=order) _manager = SequentialOrderManager[exec_q] dep_ev = _manager.submitted_events ht_copy_ev, copy_ev = ti._copy_usm_ndarray_into_usm_ndarray( @@ -631,7 +631,7 @@ def clip(x, /, min=None, max=None, out=None, order="K"): exec_q, ) else: - out = dpt.empty( + out = dpt_ext.empty( res_shape, dtype=res_dt, usm_type=res_usm_type, @@ -639,10 +639,10 @@ def clip(x, /, min=None, max=None, out=None, order="K"): order=order, ) - x = dpt.broadcast_to(x, res_shape) + x = dpt_ext.broadcast_to(x, res_shape) if a_min.shape != res_shape: - a_min = dpt.broadcast_to(a_min, res_shape) - buf2 = dpt.broadcast_to(buf2, res_shape) + a_min = dpt_ext.broadcast_to(a_min, res_shape) + buf2 = dpt_ext.broadcast_to(buf2, res_shape) ht_binary_ev, binary_ev = ti._clip( src=x, min=a_min, @@ -668,7 +668,7 @@ def clip(x, /, min=None, max=None, out=None, order="K"): if order == "K": buf1 = _empty_like_orderK(a_min, buf1_dt) else: - buf1 = dpt.empty_like(a_min, dtype=buf1_dt, order=order) + buf1 = dpt_ext.empty_like(a_min, dtype=buf1_dt, order=order) _manager = SequentialOrderManager[exec_q] dep_ev = _manager.submitted_events ht_copy_ev, copy_ev = ti._copy_usm_ndarray_into_usm_ndarray( @@ -687,7 +687,7 @@ def clip(x, /, min=None, max=None, out=None, order="K"): exec_q, ) else: - out = dpt.empty( + out = dpt_ext.empty( res_shape, dtype=res_dt, usm_type=res_usm_type, @@ -695,10 +695,10 @@ def clip(x, /, min=None, max=None, out=None, order="K"): order=order, ) - x = dpt.broadcast_to(x, res_shape) - buf1 = dpt.broadcast_to(buf1, res_shape) + x = dpt_ext.broadcast_to(x, res_shape) + buf1 = dpt_ext.broadcast_to(buf1, res_shape) if a_max.shape != res_shape: - a_max = dpt.broadcast_to(a_max, res_shape) + a_max = dpt_ext.broadcast_to(a_max, res_shape) ht_binary_ev, binary_ev = ti._clip( src=x, min=buf1, @@ -736,7 +736,7 @@ def clip(x, /, min=None, max=None, out=None, order="K"): if order == "K": buf1 = _empty_like_orderK(a_min, buf1_dt) else: - buf1 = dpt.empty_like(a_min, dtype=buf1_dt, order=order) + buf1 = dpt_ext.empty_like(a_min, dtype=buf1_dt, order=order) _manager = SequentialOrderManager[exec_q] dep_evs = _manager.submitted_events @@ -747,7 +747,7 @@ def clip(x, /, min=None, max=None, out=None, order="K"): if order == "K": buf2 = _empty_like_orderK(a_max, buf2_dt) else: - buf2 = dpt.empty_like(a_max, dtype=buf2_dt, order=order) + buf2 = dpt_ext.empty_like(a_max, dtype=buf2_dt, order=order) ht_copy2_ev, copy2_ev = ti._copy_usm_ndarray_into_usm_ndarray( src=a_max, dst=buf2, sycl_queue=exec_q, depends=dep_evs ) @@ -758,7 +758,7 @@ def clip(x, /, min=None, max=None, out=None, order="K"): 
x, buf1, buf2, res_dt, res_shape, res_usm_type, exec_q ) else: - out = dpt.empty( + out = dpt_ext.empty( res_shape, dtype=res_dt, usm_type=res_usm_type, @@ -766,9 +766,9 @@ def clip(x, /, min=None, max=None, out=None, order="K"): order=order, ) - x = dpt.broadcast_to(x, res_shape) - buf1 = dpt.broadcast_to(buf1, res_shape) - buf2 = dpt.broadcast_to(buf2, res_shape) + x = dpt_ext.broadcast_to(x, res_shape) + buf1 = dpt_ext.broadcast_to(buf1, res_shape) + buf2 = dpt_ext.broadcast_to(buf2, res_shape) ht_, clip_ev = ti._clip( src=x, min=buf1, diff --git a/dpctl_ext/tensor/_copy_utils.py b/dpctl_ext/tensor/_copy_utils.py index 64689057eb84..37879997b788 100644 --- a/dpctl_ext/tensor/_copy_utils.py +++ b/dpctl_ext/tensor/_copy_utils.py @@ -91,7 +91,7 @@ def _copy_from_numpy(np_ary, usm_type="device", sycl_queue=None): ) else: Xusm_dtype = dt - Xusm = dpt.empty( + Xusm = dpt_ext.empty( Xnp.shape, dtype=Xusm_dtype, usm_type=usm_type, sycl_queue=sycl_queue ) _copy_from_numpy_into(Xusm, Xnp) @@ -159,7 +159,7 @@ def _extract_impl(ary, ary_mask, axis=0): elif isinstance(ary_mask, np.ndarray): dst_usm_type = ary.usm_type exec_q = ary.sycl_queue - ary_mask = dpt.asarray( + ary_mask = dpt_ext.asarray( ary_mask, usm_type=dst_usm_type, sycl_queue=exec_q ) else: @@ -176,7 +176,7 @@ def _extract_impl(ary, ary_mask, axis=0): ) mask_nelems = ary_mask.size cumsum_dt = dpt.int32 if mask_nelems < int32_t_max else dpt.int64 - cumsum = dpt.empty(mask_nelems, dtype=cumsum_dt, device=ary_mask.device) + cumsum = dpt_ext.empty(mask_nelems, dtype=cumsum_dt, device=ary_mask.device) exec_q = cumsum.sycl_queue _manager = dpctl.utils.SequentialOrderManager[exec_q] dep_evs = _manager.submitted_events @@ -184,7 +184,7 @@ def _extract_impl(ary, ary_mask, axis=0): ary_mask, cumsum, sycl_queue=exec_q, depends=dep_evs ) dst_shape = ary.shape[:pp] + (mask_count,) + ary.shape[pp + mask_nd :] - dst = dpt.empty( + dst = dpt_ext.empty( dst_shape, dtype=ary.dtype, usm_type=dst_usm_type, device=ary.device ) if dst.size == 0: @@ -247,7 +247,7 @@ def _nonzero_impl(ary): usm_type = ary.usm_type mask_nelems = ary.size cumsum_dt = dpt.int32 if mask_nelems < int32_t_max else dpt.int64 - cumsum = dpt.empty( + cumsum = dpt_ext.empty( mask_nelems, dtype=cumsum_dt, sycl_queue=exec_q, order="C" ) _manager = dpctl.utils.SequentialOrderManager[exec_q] @@ -256,7 +256,7 @@ def _nonzero_impl(ary): ary, cumsum, sycl_queue=exec_q, depends=dep_evs ) indexes_dt = ti.default_device_index_type(exec_q.sycl_device) - indexes = dpt.empty( + indexes = dpt_ext.empty( (ary.ndim, mask_count), dtype=indexes_dt, usm_type=usm_type, @@ -284,7 +284,7 @@ def _prepare_indices_arrays(inds, q, usm_type): lambda ind: ( ind if isinstance(ind, dpt.usm_ndarray) - else dpt.asarray(ind, usm_type=usm_type, sycl_queue=q) + else dpt_ext.asarray(ind, usm_type=usm_type, sycl_queue=q) ), inds, ) @@ -299,14 +299,14 @@ def _prepare_indices_arrays(inds, q, usm_type): inds = tuple( map( lambda ind: ( - ind if ind.dtype == ind_dt else dpt.astype(ind, ind_dt) + ind if ind.dtype == ind_dt else dpt_ext.astype(ind, ind_dt) ), inds, ) ) # broadcast - inds = dpt.broadcast_arrays(*inds) + inds = dpt_ext.broadcast_arrays(*inds) return inds @@ -332,7 +332,7 @@ def _put_multi_index(ary, inds, p, vals, mode=0): if exec_q is not None: if not isinstance(vals, dpt.usm_ndarray): - vals = dpt.asarray( + vals = dpt_ext.asarray( vals, dtype=ary.dtype, usm_type=coerced_usm_type, @@ -368,7 +368,7 @@ def _put_multi_index(ary, inds, p, vals, mode=0): rhs = vals else: rhs = dpt_ext.astype(vals, ary.dtype) - rhs = 
dpt.broadcast_to(rhs, expected_vals_shape) + rhs = dpt_ext.broadcast_to(rhs, expected_vals_shape) _manager = dpctl.utils.SequentialOrderManager[exec_q] dep_ev = _manager.submitted_events hev, put_ev = ti._put( @@ -418,7 +418,7 @@ def _take_multi_index(ary, inds, p, mode=0): if 0 in ary_sh[p:p_end] and ind0.size != 0: raise IndexError("cannot take non-empty indices from an empty axis") res_shape = ary_sh[:p] + ind0.shape + ary_sh[p_end:] - res = dpt.empty( + res = dpt_ext.empty( res_shape, dtype=ary.dtype, usm_type=res_usm_type, sycl_queue=exec_q ) _manager = dpctl.utils.SequentialOrderManager[exec_q] @@ -681,7 +681,9 @@ def _make_empty_like_orderK(x, dt, usm_type, dev): inv_perm = sorted(range(x.ndim), key=lambda i: perm[i]) sh = x.shape sh_sorted = tuple(sh[i] for i in perm) - R = dpt.empty(sh_sorted, dtype=dt, usm_type=usm_type, device=dev, order="C") + R = dpt_ext.empty( + sh_sorted, dtype=dt, usm_type=usm_type, device=dev, order="C" + ) if min(st) < 0: st_sorted = [st[i] for i in perm] sl = tuple( @@ -693,7 +695,7 @@ def _make_empty_like_orderK(x, dt, usm_type, dev): for i in range(x.ndim) ) R = R[sl] - return dpt.permute_dims(R, inv_perm) + return dpt_ext.permute_dims(R, inv_perm) def _empty_like_orderK(x, dt, usm_type=None, dev=None): @@ -712,11 +714,11 @@ def _empty_like_orderK(x, dt, usm_type=None, dev=None): dev = x.device fl = x.flags if fl["C"] or x.size <= 1: - return dpt.empty_like( + return dpt_ext.empty_like( x, dtype=dt, usm_type=usm_type, device=dev, order="C" ) elif fl["F"]: - return dpt.empty_like( + return dpt_ext.empty_like( x, dtype=dt, usm_type=usm_type, device=dev, order="F" ) return _make_empty_like_orderK(x, dt, usm_type, dev) @@ -734,11 +736,11 @@ def _from_numpy_empty_like_orderK(x, dt, usm_type, dev): raise TypeError(f"Expected numpy.ndarray, got {type(x)}") fl = x.flags if fl["C"] or x.size <= 1: - return dpt.empty( + return dpt_ext.empty( x.shape, dtype=dt, usm_type=usm_type, device=dev, order="C" ) elif fl["F"]: - return dpt.empty( + return dpt_ext.empty( x.shape, dtype=dt, usm_type=usm_type, device=dev, order="F" ) return _make_empty_like_orderK(x, dt, usm_type, dev) @@ -758,11 +760,11 @@ def _empty_like_pair_orderK(X1, X2, dt, res_shape, usm_type, dev): fl1 = X1.flags fl2 = X2.flags if fl1["C"] or fl2["C"]: - return dpt.empty( + return dpt_ext.empty( res_shape, dtype=dt, usm_type=usm_type, device=dev, order="C" ) if fl1["F"] and fl2["F"]: - return dpt.empty( + return dpt_ext.empty( res_shape, dtype=dt, usm_type=usm_type, device=dev, order="F" ) st1 = list(X1.strides) @@ -785,7 +787,9 @@ def _empty_like_pair_orderK(X1, X2, dt, res_shape, usm_type, dev): st2_sorted = [st2[i] for i in perm] sh = res_shape sh_sorted = tuple(sh[i] for i in perm) - R = dpt.empty(sh_sorted, dtype=dt, usm_type=usm_type, device=dev, order="C") + R = dpt_ext.empty( + sh_sorted, dtype=dt, usm_type=usm_type, device=dev, order="C" + ) if max(min(st1_sorted), min(st2_sorted)) < 0: sl = tuple( ( @@ -796,7 +800,7 @@ def _empty_like_pair_orderK(X1, X2, dt, res_shape, usm_type, dev): for i in range(nd1) ) R = R[sl] - return dpt.permute_dims(R, inv_perm) + return dpt_ext.permute_dims(R, inv_perm) def _empty_like_triple_orderK(X1, X2, X3, dt, res_shape, usm_type, dev): @@ -823,11 +827,11 @@ def _empty_like_triple_orderK(X1, X2, X3, dt, res_shape, usm_type, dev): fl2 = X2.flags fl3 = X3.flags if fl1["C"] or fl2["C"] or fl3["C"]: - return dpt.empty( + return dpt_ext.empty( res_shape, dtype=dt, usm_type=usm_type, device=dev, order="C" ) if fl1["F"] and fl2["F"] and fl3["F"]: - return dpt.empty( 
+ return dpt_ext.empty( res_shape, dtype=dt, usm_type=usm_type, device=dev, order="F" ) st1 = list(X1.strides) @@ -855,7 +859,9 @@ def _empty_like_triple_orderK(X1, X2, X3, dt, res_shape, usm_type, dev): st3_sorted = [st3[i] for i in perm] sh = res_shape sh_sorted = tuple(sh[i] for i in perm) - R = dpt.empty(sh_sorted, dtype=dt, usm_type=usm_type, device=dev, order="C") + R = dpt_ext.empty( + sh_sorted, dtype=dt, usm_type=usm_type, device=dev, order="C" + ) if max(min(st1_sorted), min(st2_sorted), min(st3_sorted)) < 0: sl = tuple( ( @@ -870,7 +876,7 @@ def _empty_like_triple_orderK(X1, X2, X3, dt, res_shape, usm_type, dev): for i in range(nd1) ) R = R[sl] - return dpt.permute_dims(R, inv_perm) + return dpt_ext.permute_dims(R, inv_perm) def copy(usm_ary, /, *, order="K"): diff --git a/dpctl_ext/tensor/_ctors.py b/dpctl_ext/tensor/_ctors.py index 5a9e07c73346..0b7650873fe3 100644 --- a/dpctl_ext/tensor/_ctors.py +++ b/dpctl_ext/tensor/_ctors.py @@ -30,17 +30,292 @@ from numbers import Number import dpctl +import dpctl.memory as dpm import dpctl.tensor as dpt import dpctl.utils import numpy as np from dpctl.tensor._data_types import _get_dtype from dpctl.tensor._device import normalize_queue_device +from dpctl.tensor._usmarray import _is_object_with_buffer_protocol # TODO: revert to `import dpctl.tensor...` # when dpnp fully migrates dpctl/tensor import dpctl_ext.tensor as dpt_ext import dpctl_ext.tensor._tensor_impl as ti +from ._copy_utils import ( + _empty_like_orderK, + _from_numpy_empty_like_orderK, +) + +__doc__ = "Implementation of creation functions in :module:`dpctl.tensor`" + +_empty_tuple = () +_host_set = frozenset([None]) + + +def _array_info_dispatch(obj): + if isinstance(obj, dpt.usm_ndarray): + return obj.shape, obj.dtype, frozenset([obj.sycl_queue]) + if isinstance(obj, np.ndarray): + return obj.shape, obj.dtype, _host_set + if isinstance(obj, range): + return (len(obj),), int, _host_set + if isinstance(obj, bool): + return _empty_tuple, bool, _host_set + if isinstance(obj, float): + return _empty_tuple, float, _host_set + if isinstance(obj, int): + return _empty_tuple, int, _host_set + if isinstance(obj, complex): + return _empty_tuple, complex, _host_set + if isinstance( + obj, + ( + list, + tuple, + ), + ): + return _array_info_sequence(obj) + if _is_object_with_buffer_protocol(obj): + np_obj = np.array(obj) + return np_obj.shape, np_obj.dtype, _host_set + if hasattr(obj, "__usm_ndarray__"): + usm_ar = obj.__usm_ndarray__ + if isinstance(usm_ar, dpt.usm_ndarray): + return usm_ar.shape, usm_ar.dtype, frozenset([usm_ar.sycl_queue]) + if hasattr(obj, "__sycl_usm_array_interface__"): + usm_ar = _usm_ndarray_from_suai(obj) + return usm_ar.shape, usm_ar.dtype, frozenset([usm_ar.sycl_queue]) + + +def _array_info_sequence(li): + if not isinstance(li, (list, tuple, range)): + raise TypeError(f"Expected list, tuple, or range, got {type(li)}") + n = len(li) + dim = None + dt = None + device = frozenset() + for el in li: + el_dim, el_dt, el_dev = _array_info_dispatch(el) + if dim is None: + dim = el_dim + dt = np.promote_types(el_dt, el_dt) + device = device.union(el_dev) + elif el_dim == dim: + dt = np.promote_types(dt, el_dt) + device = device.union(el_dev) + else: + raise ValueError(f"Inconsistent dimensions, {dim} and {el_dim}") + if dim is None: + dim = () + dt = float + device = _host_set + return (n,) + dim, dt, device + + +def _asarray_from_numpy_ndarray( + ary, dtype=None, usm_type=None, sycl_queue=None, order="K" +): + if not isinstance(ary, np.ndarray): + raise 
TypeError(f"Expected numpy.ndarray, got {type(ary)}") + if usm_type is None: + usm_type = "device" + copy_q = normalize_queue_device(sycl_queue=None, device=sycl_queue) + if ary.dtype.char not in "?bBhHiIlLqQefdFD": + raise TypeError( + f"Numpy array of data type {ary.dtype} is not supported. " + "Please convert the input to an array with numeric data type." + ) + if dtype is None: + # deduce device-representable output data type + dtype = _map_to_device_dtype(ary.dtype, copy_q) + _ensure_native_dtype_device_support(dtype, copy_q.sycl_device) + f_contig = ary.flags["F"] + c_contig = ary.flags["C"] + fc_contig = f_contig or c_contig + if order == "A": + order = "F" if f_contig and not c_contig else "C" + if order == "K" and fc_contig: + order = "C" if c_contig else "F" + if order == "K": + # new USM allocation + res = _from_numpy_empty_like_orderK(ary, dtype, usm_type, copy_q) + else: + res = dpt.usm_ndarray( + ary.shape, + dtype=dtype, + buffer=usm_type, + order=order, + buffer_ctor_kwargs={"queue": copy_q}, + ) + res[...] = ary + return res + + +def _asarray_from_seq( + seq_obj, + seq_shape, + seq_dt, + alloc_q, + exec_q, + dtype=None, + usm_type=None, + order="C", +): + """`seq_obj` is a sequence""" + if usm_type is None: + usm_types_in_seq = [] + _usm_types_walker(seq_obj, usm_types_in_seq) + usm_type = dpctl.utils.get_coerced_usm_type(usm_types_in_seq) + dpctl.utils.validate_usm_type(usm_type) + if dtype is None: + dtype = _map_to_device_dtype(seq_dt, alloc_q) + else: + _mapped_dt = _map_to_device_dtype(dtype, alloc_q) + if _mapped_dt != dtype: + raise ValueError( + f"Device {alloc_q.sycl_device} " + f"does not support {dtype} natively." + ) + dtype = _mapped_dt + if order in "KA": + order = "C" + if isinstance(exec_q, dpctl.SyclQueue): + res = dpt_ext.empty( + seq_shape, + dtype=dtype, + usm_type=usm_type, + sycl_queue=alloc_q, + order=order, + ) + _manager = dpctl.utils.SequentialOrderManager[exec_q] + _device_copy_walker(seq_obj, res, _manager) + return res + else: + res = dpt_ext.empty( + seq_shape, + dtype=dtype, + usm_type=usm_type, + sycl_queue=alloc_q, + order=order, + ) + _copy_through_host_walker(seq_obj, res) + return res + + +def _asarray_from_seq_single_device( + obj, + seq_shape, + seq_dt, + seq_dev, + dtype=None, + usm_type=None, + sycl_queue=None, + order="C", +): + if sycl_queue is None: + exec_q = seq_dev + alloc_q = seq_dev + else: + exec_q = dpctl.utils.get_execution_queue( + ( + sycl_queue, + seq_dev, + ) + ) + alloc_q = sycl_queue + return _asarray_from_seq( + obj, + seq_shape, + seq_dt, + alloc_q, + exec_q, + dtype=dtype, + usm_type=usm_type, + order=order, + ) + + +def _asarray_from_usm_ndarray( + usm_ndary, + dtype=None, + copy=None, + usm_type=None, + sycl_queue=None, + order="K", +): + if not isinstance(usm_ndary, dpt.usm_ndarray): + raise TypeError( + f"Expected dpctl.tensor.usm_ndarray, got {type(usm_ndary)}" + ) + if usm_type is None: + usm_type = usm_ndary.usm_type + if sycl_queue is not None: + exec_q = dpctl.utils.get_execution_queue( + [usm_ndary.sycl_queue, sycl_queue] + ) + copy_q = normalize_queue_device(sycl_queue=sycl_queue, device=exec_q) + else: + copy_q = usm_ndary.sycl_queue + if dtype is None: + dtype = _map_to_device_dtype(usm_ndary.dtype, copy_q) + # Conditions for zero copy: + can_zero_copy = copy is not True + # dtype is unchanged + can_zero_copy = can_zero_copy and dtype == usm_ndary.dtype + # USM allocation type is unchanged + can_zero_copy = can_zero_copy and usm_type == usm_ndary.usm_type + # sycl_queue is unchanged + can_zero_copy = 
can_zero_copy and copy_q is usm_ndary.sycl_queue + # order is unchanged + c_contig = usm_ndary.flags.c_contiguous + f_contig = usm_ndary.flags.f_contiguous + fc_contig = usm_ndary.flags.forc + if can_zero_copy: + if order == "C" and c_contig: + pass + elif order == "F" and f_contig: + pass + elif order == "A" and fc_contig: + pass + elif order == "K": + pass + else: + can_zero_copy = False + if copy is False and can_zero_copy is False: + raise ValueError("asarray(..., copy=False) is not possible") + if can_zero_copy: + return usm_ndary + if order == "A": + order = "F" if f_contig and not c_contig else "C" + if order == "K" and fc_contig: + order = "C" if c_contig else "F" + if order == "K": + _ensure_native_dtype_device_support(dtype, copy_q.sycl_device) + res = _empty_like_orderK(usm_ndary, dtype, usm_type, copy_q) + else: + _ensure_native_dtype_device_support(dtype, copy_q.sycl_device) + res = dpt.usm_ndarray( + usm_ndary.shape, + dtype=dtype, + buffer=usm_type, + order=order, + buffer_ctor_kwargs={"queue": copy_q}, + ) + eq = dpctl.utils.get_execution_queue([usm_ndary.sycl_queue, copy_q]) + if eq is not None: + _manager = dpctl.utils.SequentialOrderManager[eq] + dep_evs = _manager.submitted_events + hev, cpy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=usm_ndary, dst=res, sycl_queue=eq, depends=dep_evs + ) + _manager.add_event_pair(hev, cpy_ev) + else: + tmp = dpt_ext.asnumpy(usm_ndary) + res[...] = tmp + return res + def _cast_fill_val(fill_val, dt): """ @@ -58,6 +333,99 @@ def _cast_fill_val(fill_val, dt): return fill_val +def _coerce_and_infer_dt(*args, dt, sycl_queue, err_msg, allow_bool=False): + """Deduce arange type from sequence spec""" + nd, seq_dt, d = _array_info_sequence(args) + if d != _host_set or nd != (len(args),): + raise ValueError(err_msg) + dt = _get_dtype(dt, sycl_queue, ref_type=seq_dt) + if np.issubdtype(dt, np.integer): + return tuple(int(v) for v in args), dt + if np.issubdtype(dt, np.floating): + return tuple(float(v) for v in args), dt + if np.issubdtype(dt, np.complexfloating): + return tuple(complex(v) for v in args), dt + if allow_bool and dt.char == "?": + return tuple(bool(v) for v in args), dt + raise ValueError(f"Data type {dt} is not supported") + + +def _copy_through_host_walker(seq_o, usm_res): + if isinstance(seq_o, dpt.usm_ndarray): + if ( + dpctl.utils.get_execution_queue( + ( + usm_res.sycl_queue, + seq_o.sycl_queue, + ) + ) + is None + ): + usm_res[...] = dpt.asnumpy(seq_o).copy() + return + else: + usm_res[...] = seq_o + if hasattr(seq_o, "__usm_ndarray__"): + usm_arr = seq_o.__usm_ndarray__ + if isinstance(usm_arr, dpt.usm_ndarray): + _copy_through_host_walker(usm_arr, usm_res) + return + if hasattr(seq_o, "__sycl_usm_array_interface__"): + usm_ar = _usm_ndarray_from_suai(seq_o) + if ( + dpctl.utils.get_execution_queue( + ( + usm_res.sycl_queue, + usm_ar.sycl_queue, + ) + ) + is None + ): + usm_res[...] = dpt_ext.asnumpy(usm_ar).copy() + else: + usm_res[...] = usm_ar + return + if _is_object_with_buffer_protocol(seq_o): + np_ar = np.asarray(seq_o) + usm_res[...] = np_ar + return + if isinstance(seq_o, (list, tuple)): + for i, el in enumerate(seq_o): + _copy_through_host_walker(el, usm_res[i]) + return + usm_res[...] 
= np.asarray(seq_o) + + +def _device_copy_walker(seq_o, res, _manager): + if isinstance(seq_o, dpt.usm_ndarray): + exec_q = res.sycl_queue + deps = _manager.submitted_events + ht_ev, cpy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=seq_o, dst=res, sycl_queue=exec_q, depends=deps + ) + _manager.add_event_pair(ht_ev, cpy_ev) + return + if hasattr(seq_o, "__usm_ndarray__"): + usm_arr = seq_o.__usm_ndarray__ + if isinstance(usm_arr, dpt.usm_ndarray): + _device_copy_walker(usm_arr, res, _manager) + return + if hasattr(seq_o, "__sycl_usm_array_interface__"): + usm_ar = _usm_ndarray_from_suai(seq_o) + exec_q = res.sycl_queue + deps = _manager.submitted_events + ht_ev, cpy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=usm_ar, dst=res, sycl_queue=exec_q, depends=deps + ) + _manager.add_event_pair(ht_ev, cpy_ev) + return + if isinstance(seq_o, (list, tuple)): + for i, el in enumerate(seq_o): + _device_copy_walker(el, res[i], _manager) + return + raise TypeError + + def _ensure_native_dtype_device_support(dtype, dev) -> None: """Check that dtype is natively supported by device. @@ -90,54 +458,1091 @@ def _ensure_native_dtype_device_support(dtype, dev) -> None: ) -def _to_scalar(obj, sc_ty): - """A way to convert object to NumPy scalar type. - Raises OverflowError if obj can not be represented - using the requested scalar type. - """ - zd_arr = np.asarray(obj, dtype=sc_ty) - return zd_arr[()] +def _get_arange_length(start, stop, step): + """Compute length of arange sequence""" + span = stop - start + if hasattr(step, "__float__") and hasattr(span, "__float__"): + return _round_for_arange(span / step) + tmp = span / step + if hasattr(tmp, "__complex__"): + tmp = complex(tmp) + tmp = tmp.real + else: + tmp = float(tmp) + return _round_for_arange(tmp) + + +def _map_to_device_dtype(dt, q): + dtc = dt.char + if dtc == "?" or np.issubdtype(dt, np.integer): + return dt + d = q.sycl_device + if np.issubdtype(dt, np.floating): + if dtc == "f": + return dt + if dtc == "d" and d.has_aspect_fp64: + return dt + if dtc == "e" and d.has_aspect_fp16: + return dt + return dpt.dtype("f4") + if np.issubdtype(dt, np.complexfloating): + if dtc == "F": + return dt + if dtc == "D" and d.has_aspect_fp64: + return dt + return dpt.dtype("c8") + raise RuntimeError(f"Unrecognized data type '{dt}' encountered.") + + +def _normalize_order(order, arr): + """ + Utility function for processing the `order` keyword of array-like + constructors, which support `"K"` and `"A"` orders. + """ + arr_flags = arr.flags + f_contig = arr_flags["F"] + c_contig = arr_flags["C"] + if order == "A": + order = "F" if f_contig and not c_contig else "C" + if order == "K" and (f_contig or c_contig): + order = "C" if c_contig else "F" + return order + + +def _round_for_arange(tmp): + k = int(tmp) + if k >= 0 and float(k) < tmp: + tmp = tmp + 1 + return tmp + + +def _to_scalar(obj, sc_ty): + """A way to convert object to NumPy scalar type. + Raises OverflowError if obj can not be represented + using the requested scalar type. 
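
A quick illustration of the overflow behavior documented above (a sketch, not part of the diff; the exact exception message comes from NumPy and may vary across NumPy versions):

    import numpy as np

    _to_scalar(7, np.int32)    # -> np.int32(7), extracted from a 0-d array
    _to_scalar(300, np.int8)   # -> OverflowError with recent NumPy:
                               #    300 is out of bounds for int8
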
+ """ + zd_arr = np.asarray(obj, dtype=sc_ty) + return zd_arr[()] + + +def _usm_ndarray_from_suai(obj): + sua_iface = obj.__sycl_usm_array_interface__ + membuf = dpm.as_usm_memory(obj) + ary = dpt.usm_ndarray( + sua_iface["shape"], + dtype=sua_iface["typestr"], + buffer=membuf, + strides=sua_iface.get("strides", None), + ) + _data_field = sua_iface["data"] + if isinstance(_data_field, tuple) and len(_data_field) > 1: + ro_field = _data_field[1] + else: + ro_field = False + if ro_field: + ary.flags["W"] = False + return ary + + +def _usm_types_walker(o, usm_types_list): + if isinstance(o, dpt.usm_ndarray): + usm_types_list.append(o.usm_type) + return + if hasattr(o, "__usm_ndarray__"): + usm_arr = o.__usm_ndarray__ + if isinstance(usm_arr, dpt.usm_ndarray): + usm_types_list.append(usm_arr.usm_type) + return + if hasattr(o, "__sycl_usm_array_interface__"): + usm_ar = _usm_ndarray_from_suai(o) + usm_types_list.append(usm_ar.usm_type) + return + if _is_object_with_buffer_protocol(o): + return + if isinstance(o, (int, bool, float, complex)): + return + if isinstance(o, (list, tuple, range)): + for el in o: + _usm_types_walker(el, usm_types_list) + return + raise TypeError + + +def arange( + start, + /, + stop=None, + step=1, + *, + dtype=None, + device=None, + usm_type="device", + sycl_queue=None, +): + """ + Returns evenly spaced values within the half-open interval [start, stop) + as a one-dimensional array. + + Args: + start: + Starting point of the interval + stop: + Ending point of the interval. Default: ``None`` + step: Increment of the returned sequence. Default: ``1`` + dtype: Output array data type. Default: ``None`` + device (optional): array API concept of device where the output array + is created. ``device`` can be ``None``, a oneAPI filter selector + string, an instance of :class:`dpctl.SyclDevice` corresponding to + a non-partitioned SYCL device, an instance of + :class:`dpctl.SyclQueue`, or a :class:`dpctl.tensor.Device` object + returned by :attr:`dpctl.tensor.usm_ndarray.device`. + Default: ``None`` + usm_type (``"device"``, ``"shared"``, ``"host"``, optional): + The type of SYCL USM allocation for the output array. + Default: ``"device"`` + sycl_queue (:class:`dpctl.SyclQueue`, optional): + The SYCL queue to use + for output array allocation and copying. ``sycl_queue`` and + ``device`` are complementary arguments, i.e. use one or another. + If both are specified, a :exc:`TypeError` is raised unless both + imply the same underlying SYCL queue to be used. If both are + ``None``, a cached queue targeting default-selected device is + used for allocation and population. Default: ``None`` + + Returns: + usm_ndarray: + Array populated with evenly spaced values. 
+ """ + if stop is None: + stop = start + start = 0 + if step is None: + step = 1 + dpctl.utils.validate_usm_type(usm_type, allow_none=False) + sycl_queue = normalize_queue_device(sycl_queue=sycl_queue, device=device) + is_bool = False + if dtype: + is_bool = (dtype is bool) or (dpt.dtype(dtype) == dpt.bool) + _, dt = _coerce_and_infer_dt( + start, + stop, + step, + dt=dpt.int8 if is_bool else dtype, + sycl_queue=sycl_queue, + err_msg="start, stop, and step must be Python scalars", + allow_bool=False, + ) + try: + tmp = _get_arange_length(start, stop, step) + sh = max(int(tmp), 0) + except TypeError: + sh = 0 + if is_bool and sh > 2: + raise ValueError("no fill-function for boolean data type") + res = dpt.usm_ndarray( + (sh,), + dtype=dt, + buffer=usm_type, + order="C", + buffer_ctor_kwargs={"queue": sycl_queue}, + ) + sc_ty = dt.type + _first = _to_scalar(start, sc_ty) + if sh > 1: + _second = _to_scalar(start + step, sc_ty) + if dt in [dpt.uint8, dpt.uint16, dpt.uint32, dpt.uint64]: + int64_ty = dpt.int64.type + _step = int64_ty(_second) - int64_ty(_first) + else: + _step = _second - _first + _step = sc_ty(_step) + else: + _step = sc_ty(1) + _start = _first + _manager = dpctl.utils.SequentialOrderManager[sycl_queue] + # populating newly allocated array, no task dependencies + hev, lin_ev = ti._linspace_step(_start, _step, res, sycl_queue) + _manager.add_event_pair(hev, lin_ev) + if is_bool: + res_out = dpt.usm_ndarray( + (sh,), + dtype=dpt.bool, + buffer=usm_type, + order="C", + buffer_ctor_kwargs={"queue": sycl_queue}, + ) + hev_cpy, cpy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=res, dst=res_out, sycl_queue=sycl_queue, depends=[lin_ev] + ) + _manager.add_event_pair(hev_cpy, cpy_ev) + return res_out + return res + + +def asarray( + obj, + /, + *, + dtype=None, + device=None, + copy=None, + usm_type=None, + sycl_queue=None, + order="K", +): + """ + Converts input object to :class:`dpctl.tensor.usm_ndarray`. + + Args: + obj: Python object to convert. Can be an instance of + :class:`dpctl.tensor.usm_ndarray`, + an object representing SYCL USM allocation and implementing + ``__sycl_usm_array_interface__`` protocol, an instance + of :class:`numpy.ndarray`, an object supporting Python buffer + protocol, a Python scalar, or a (possibly nested) sequence of + Python scalars. + dtype (data type, optional): + output array data type. If ``dtype`` is + ``None``, the output array data type is inferred from data types in + ``obj``. Default: ``None`` + copy (`bool`, optional): + boolean indicating whether or not to copy the + input. If ``True``, always creates a copy. If ``False``, the + need to copy raises :exc:`ValueError`. If ``None``, tries to reuse + existing memory allocations if possible, but allows to perform + a copy otherwise. Default: ``None`` + order (``"C"``, ``"F"``, ``"A"``, ``"K"``, optional): + memory layout of the output array. Default: ``"K"`` + device (optional): array API concept of device where the output array + is created. ``device`` can be ``None``, a oneAPI filter selector + string, an instance of :class:`dpctl.SyclDevice` corresponding to + a non-partitioned SYCL device, an instance of + :class:`dpctl.SyclQueue`, or a :class:`dpctl.tensor.Device` object + returned by :attr:`dpctl.tensor.usm_ndarray.device`. + Default: ``None`` + usm_type (``"device"``, ``"shared"``, ``"host"``, optional): + The type of SYCL USM allocation for the output array. 
+ Default: ``"device"`` + sycl_queue (:class:`dpctl.SyclQueue`, optional): + The SYCL queue to use + for output array allocation and copying. ``sycl_queue`` and + ``device`` are complementary arguments, i.e. use one or another. + If both are specified, a :exc:`TypeError` is raised unless both + imply the same underlying SYCL queue to be used. If both are + ``None``, a cached queue targeting default-selected device is + used for allocation and population. Default: ``None`` + + Returns: + usm_ndarray: + Array created from input object. + """ + # 1. Check that copy is a valid keyword + if copy not in [None, True, False]: + raise TypeError( + "Recognized copy keyword values should be True, False, or None" + ) + # 2. Check that dtype is None, or a valid dtype + if dtype is not None: + dtype = dpt.dtype(dtype) + # 3. Validate order + if not isinstance(order, str): + raise TypeError( + f"Expected order keyword to be of type str, got {type(order)}" + ) + if len(order) == 0 or order[0] not in "KkAaCcFf": + raise ValueError( + "Unrecognized order keyword value, expecting 'K', 'A', 'F', or 'C'." + ) + order = order[0].upper() + # 4. Check that usm_type is None, or a valid value + dpctl.utils.validate_usm_type(usm_type, allow_none=True) + # 5. Normalize device/sycl_queue [keep it None if was None] + if device is not None or sycl_queue is not None: + sycl_queue = normalize_queue_device( + sycl_queue=sycl_queue, device=device + ) + + # handle instance(obj, usm_ndarray) + if isinstance(obj, dpt.usm_ndarray): + return _asarray_from_usm_ndarray( + obj, + dtype=dtype, + copy=copy, + usm_type=usm_type, + sycl_queue=sycl_queue, + order=order, + ) + if hasattr(obj, "__usm_ndarray__"): + usm_arr = obj.__usm_ndarray__ + if isinstance(usm_arr, dpt.usm_ndarray): + return _asarray_from_usm_ndarray( + usm_arr, + dtype=dtype, + copy=copy, + usm_type=usm_type, + sycl_queue=sycl_queue, + order=order, + ) + if hasattr(obj, "__sycl_usm_array_interface__"): + ary = _usm_ndarray_from_suai(obj) + return _asarray_from_usm_ndarray( + ary, + dtype=dtype, + copy=copy, + usm_type=usm_type, + sycl_queue=sycl_queue, + order=order, + ) + if isinstance(obj, np.ndarray): + if copy is False: + raise ValueError( + "Converting numpy.ndarray to usm_ndarray requires a copy" + ) + return _asarray_from_numpy_ndarray( + obj, + dtype=dtype, + usm_type=usm_type, + sycl_queue=sycl_queue, + order=order, + ) + if _is_object_with_buffer_protocol(obj): + if copy is False: + raise ValueError( + f"Converting {type(obj)} to usm_ndarray requires a copy" + ) + return _asarray_from_numpy_ndarray( + np.array(obj), + dtype=dtype, + usm_type=usm_type, + sycl_queue=sycl_queue, + order=order, + ) + if isinstance(obj, (list, tuple, range)): + if copy is False: + raise ValueError( + "Converting Python sequence to usm_ndarray requires a copy" + ) + seq_shape, seq_dt, devs = _array_info_sequence(obj) + if devs == _host_set: + return _asarray_from_numpy_ndarray( + np.asarray(obj, dtype=dtype, order=order), + dtype=dtype, + usm_type=usm_type, + sycl_queue=sycl_queue, + order=order, + ) + elif len(devs) == 1: + seq_dev = list(devs)[0] + return _asarray_from_seq_single_device( + obj, + seq_shape, + seq_dt, + seq_dev, + dtype=dtype, + usm_type=usm_type, + sycl_queue=sycl_queue, + order=order, + ) + elif len(devs) > 1: + devs = [dev for dev in devs if dev is not None] + if sycl_queue is None: + if len(devs) == 1: + alloc_q = devs[0] + else: + raise dpctl.utils.ExecutionPlacementError( + "Please specify `device` or `sycl_queue` keyword " + "argument to determine where 
to allocate the " + "resulting array." + ) + else: + alloc_q = sycl_queue + return _asarray_from_seq( + obj, + seq_shape, + seq_dt, + alloc_q, + # force copying via host + None, + dtype=dtype, + usm_type=usm_type, + order=order, + ) + if copy is False: + raise ValueError( + f"Converting {type(obj)} to usm_ndarray requires a copy" + ) + # obj is a scalar, create 0d array + return _asarray_from_numpy_ndarray( + np.asarray(obj, dtype=dtype), + dtype=dtype, + usm_type=usm_type, + sycl_queue=sycl_queue, + order="C", + ) + + +def empty( + shape, + *, + dtype=None, + order="C", + device=None, + usm_type="device", + sycl_queue=None, +): + """ + Creates :class:`dpctl.tensor.usm_ndarray` from uninitialized + USM allocation. + + Args: + shape (Tuple[int], int): + Dimensions of the array to be created. + dtype (optional): + data type of the array. Can be typestring, + a :class:`numpy.dtype` object, :mod:`numpy` char string, + or a NumPy scalar type. The ``None`` value creates an + array of floating point data type. Default: ``None`` + order (``"C"``, or ``F"``): + memory layout for the array. Default: ``"C"`` + device (optional): array API concept of device where the output array + is created. ``device`` can be ``None``, a oneAPI filter selector + string, an instance of :class:`dpctl.SyclDevice` corresponding to + a non-partitioned SYCL device, an instance of + :class:`dpctl.SyclQueue`, or a :class:`dpctl.tensor.Device` object + returned by :attr:`dpctl.tensor.usm_ndarray.device`. + Default: ``None`` + usm_type (``"device"``, ``"shared"``, ``"host"``, optional): + The type of SYCL USM allocation for the output array. + Default: ``"device"`` + sycl_queue (:class:`dpctl.SyclQueue`, optional): + The SYCL queue to use + for output array allocation and copying. ``sycl_queue`` and + ``device`` are complementary arguments, i.e. use one or another. + If both are specified, a :exc:`TypeError` is raised unless both + imply the same underlying SYCL queue to be used. If both are + ``None``, a cached queue targeting default-selected device is + used for allocation and population. Default: ``None`` + + Returns: + usm_ndarray: + Created empty array. + """ + if not isinstance(order, str) or len(order) == 0 or order[0] not in "CcFf": + raise ValueError( + "Unrecognized order keyword value, expecting 'F' or 'C'." + ) + order = order[0].upper() + dpctl.utils.validate_usm_type(usm_type, allow_none=False) + sycl_queue = normalize_queue_device(sycl_queue=sycl_queue, device=device) + dtype = _get_dtype(dtype, sycl_queue) + _ensure_native_dtype_device_support(dtype, sycl_queue.sycl_device) + res = dpt.usm_ndarray( + shape, + dtype=dtype, + buffer=usm_type, + order=order, + buffer_ctor_kwargs={"queue": sycl_queue}, + ) + return res + + +def empty_like( + x, /, *, dtype=None, order="K", device=None, usm_type=None, sycl_queue=None +): + """ + Returns an uninitialized :class:`dpctl.tensor.usm_ndarray` with the + same `shape` as the input array `x`. + + Args: + x (usm_ndarray): + Input array from which to derive the output array shape. + dtype (optional): + data type of the array. Can be a typestring, + a :class:`numpy.dtype` object, NumPy char string, + or a NumPy scalar type. Default: ``None`` + order ("C", "F", "A", or "K"): + memory layout for the array. Default: ``"K"`` + device (optional): array API concept of device where the output array + is created. 
``device`` can be ``None``, a oneAPI filter selector + string, an instance of :class:`dpctl.SyclDevice` corresponding to + a non-partitioned SYCL device, an instance of + :class:`dpctl.SyclQueue`, or a :class:`dpctl.tensor.Device` object + returned by :attr:`dpctl.tensor.usm_ndarray.device`. + Default: ``None`` + usm_type (``"device"``, ``"shared"``, ``"host"``, optional): + The type of SYCL USM allocation for the output array. + Default: ``"device"`` + sycl_queue (:class:`dpctl.SyclQueue`, optional): + The SYCL queue to use + for output array allocation and copying. ``sycl_queue`` and + ``device`` are complementary arguments, i.e. use one or another. + If both are specified, a :exc:`TypeError` is raised unless both + imply the same underlying SYCL queue to be used. If both are + ``None``, a cached queue targeting default-selected device is + used for allocation. Default: ``None`` + + Returns: + usm_ndarray: + Created empty array with uninitialized memory. + """ + if not isinstance(x, dpt.usm_ndarray): + raise TypeError(f"Expected instance of dpt.usm_ndarray, got {type(x)}.") + if ( + not isinstance(order, str) + or len(order) == 0 + or order[0] not in "CcFfAaKk" + ): + raise ValueError( + "Unrecognized order keyword value, expecting 'C', 'F', 'A', or 'K'." + ) + order = order[0].upper() + if dtype is None: + dtype = x.dtype + if usm_type is None: + usm_type = x.usm_type + dpctl.utils.validate_usm_type(usm_type, allow_none=False) + if device is None and sycl_queue is None: + device = x.device + sycl_queue = normalize_queue_device(sycl_queue=sycl_queue, device=device) + dtype = dpt.dtype(dtype) + order = _normalize_order(order, x) + if order == "K": + _ensure_native_dtype_device_support(dtype, sycl_queue.sycl_device) + return _empty_like_orderK(x, dtype, usm_type, sycl_queue) + else: + shape = x.shape + _ensure_native_dtype_device_support(dtype, sycl_queue.sycl_device) + res = dpt.usm_ndarray( + shape, + dtype=dtype, + buffer=usm_type, + order=order, + buffer_ctor_kwargs={"queue": sycl_queue}, + ) + return res + + +def eye( + n_rows, + n_cols=None, + /, + *, + k=0, + dtype=None, + order="C", + device=None, + usm_type="device", + sycl_queue=None, +): + """ + eye(n_rows, n_cols=None, /, *, k=0, dtype=None, \ + device=None, usm_type="device", sycl_queue=None) + + Creates :class:`dpctl.tensor.usm_ndarray` with ones on the `k`-th + diagonal. + + Args: + n_rows (int): + number of rows in the output array. + n_cols (int, optional): + number of columns in the output array. If ``None``, + ``n_cols = n_rows``. Default: ``None`` + k (int): + index of the diagonal, with ``0`` as the main diagonal. + A positive value of ``k`` is a superdiagonal, a negative value + is a subdiagonal. + Raises :exc:`TypeError` if ``k`` is not an integer. + Default: ``0`` + dtype (optional): + data type of the array. Can be typestring, + a :class:`numpy.dtype` object, :mod:`numpy` char string, or + a NumPy scalar type. Default: ``None`` + order ("C" or "F"): + memory layout for the array. Default: ``"C"`` + device (optional): + array API concept of device where the output array + is created. ``device`` can be ``None``, a oneAPI filter selector + string, an instance of :class:`dpctl.SyclDevice` corresponding to + a non-partitioned SYCL device, an instance of + :class:`dpctl.SyclQueue`, or a :class:`dpctl.tensor.Device` object + returned by :attr:`dpctl.tensor.usm_ndarray.device`. + Default: ``None`` + usm_type (``"device"``, ``"shared"``, ``"host"``, optional): + The type of SYCL USM allocation for the output array. 
+ Default: ``"device"`` + sycl_queue (:class:`dpctl.SyclQueue`, optional): + The SYCL queue to use + for output array allocation and copying. ``sycl_queue`` and + ``device`` are complementary arguments, i.e. use one or another. + If both are specified, a :exc:`TypeError` is raised unless both + imply the same underlying SYCL queue to be used. If both are + ``None``, a cached queue targeting default-selected device is + used for allocation and population. Default: ``None`` + + Returns: + usm_ndarray: + A diagonal matrix. + """ + if not isinstance(order, str) or len(order) == 0 or order[0] not in "CcFf": + raise ValueError( + "Unrecognized order keyword value, expecting 'F' or 'C'." + ) + order = order[0].upper() + n_rows = operator.index(n_rows) + n_cols = n_rows if n_cols is None else operator.index(n_cols) + k = operator.index(k) + if k >= n_cols or -k >= n_rows: + return dpt_ext.zeros( + (n_rows, n_cols), + dtype=dtype, + order=order, + device=device, + usm_type=usm_type, + sycl_queue=sycl_queue, + ) + dpctl.utils.validate_usm_type(usm_type, allow_none=False) + sycl_queue = normalize_queue_device(sycl_queue=sycl_queue, device=device) + dtype = _get_dtype(dtype, sycl_queue) + _ensure_native_dtype_device_support(dtype, sycl_queue.sycl_device) + res = dpt.usm_ndarray( + (n_rows, n_cols), + dtype=dtype, + buffer=usm_type, + order=order, + buffer_ctor_kwargs={"queue": sycl_queue}, + ) + if n_rows != 0 and n_cols != 0: + _manager = dpctl.utils.SequentialOrderManager[sycl_queue] + hev, eye_ev = ti._eye(k, dst=res, sycl_queue=sycl_queue) + _manager.add_event_pair(hev, eye_ev) + return res + + +def _validate_fill_value(fill_val): + """Validates that `fill_val` is a numeric or boolean scalar.""" + # TODO: verify if `np.True_` and `np.False_` should be instances of + # Number in NumPy, like other NumPy scalars and like Python bools + # check for `np.bool_` separately as NumPy<2 has no `np.bool` + if not isinstance(fill_val, Number) and not isinstance(fill_val, np.bool_): + raise TypeError( + f"array cannot be filled with scalar of type {type(fill_val)}" + ) + + +def full( + shape, + fill_value, + *, + dtype=None, + order="C", + device=None, + usm_type=None, + sycl_queue=None, +): + """ + Returns a new :class:`dpctl.tensor.usm_ndarray` having a specified + shape and filled with `fill_value`. + + Args: + shape (tuple): + Dimensions of the array to be created. + fill_value (int,float,complex,usm_ndarray): + fill value + dtype (optional): data type of the array. Can be typestring, + a :class:`numpy.dtype` object, :mod:`numpy` char string, + or a NumPy scalar type. Default: ``None`` + order ("C", or "F"): + memory layout for the array. Default: ``"C"`` + device (optional): array API concept of device where the output array + is created. ``device`` can be ``None``, a oneAPI filter selector + string, an instance of :class:`dpctl.SyclDevice` corresponding to + a non-partitioned SYCL device, an instance of + :class:`dpctl.SyclQueue`, or a :class:`dpctl.tensor.Device` object + returned by :attr:`dpctl.tensor.usm_ndarray.device`. + Default: ``None`` + usm_type (``"device"``, ``"shared"``, ``"host"``, optional): + The type of SYCL USM allocation for the output array. + Default: ``"device"`` + sycl_queue (:class:`dpctl.SyclQueue`, optional): + The SYCL queue to use + for output array allocation and copying. ``sycl_queue`` and + ``device`` are complementary arguments, i.e. use one or another. + If both are specified, a :exc:`TypeError` is raised unless both + imply the same underlying SYCL queue to be used. 
If both are + ``None``, a cached queue targeting default-selected device is + used for allocation and population. Default: ``None`` + + Returns: + usm_ndarray: + New array initialized with given value. + """ + if not isinstance(order, str) or len(order) == 0 or order[0] not in "CcFf": + raise ValueError( + "Unrecognized order keyword value, expecting 'F' or 'C'." + ) + order = order[0].upper() + dpctl.utils.validate_usm_type(usm_type, allow_none=True) + + if isinstance(fill_value, (dpt.usm_ndarray, np.ndarray, tuple, list)): + if ( + isinstance(fill_value, dpt.usm_ndarray) + and sycl_queue is None + and device is None + ): + sycl_queue = fill_value.sycl_queue + else: + sycl_queue = normalize_queue_device( + sycl_queue=sycl_queue, device=device + ) + X = dpt_ext.asarray( + fill_value, + dtype=dtype, + order=order, + usm_type=usm_type, + sycl_queue=sycl_queue, + ) + return dpt_ext.copy(dpt_ext.broadcast_to(X, shape), order=order) + else: + _validate_fill_value(fill_value) + + sycl_queue = normalize_queue_device(sycl_queue=sycl_queue, device=device) + usm_type = usm_type if usm_type is not None else "device" + dtype = _get_dtype(dtype, sycl_queue, ref_type=type(fill_value)) + res = dpt.usm_ndarray( + shape, + dtype=dtype, + buffer=usm_type, + order=order, + buffer_ctor_kwargs={"queue": sycl_queue}, + ) + fill_value = _cast_fill_val(fill_value, dtype) + + _manager = dpctl.utils.SequentialOrderManager[sycl_queue] + # populating new allocation, no dependent events + hev, full_ev = ti._full_usm_ndarray(fill_value, res, sycl_queue) + _manager.add_event_pair(hev, full_ev) + return res + + +def full_like( + x, + /, + fill_value, + *, + dtype=None, + order="K", + device=None, + usm_type=None, + sycl_queue=None, +): + """full_like(x, fill_value, dtype=None, order="K", \ + device=None, usm_type=None, sycl_queue=None) + + Returns a new :class:`dpctl.tensor.usm_ndarray` filled with `fill_value` + and having the same `shape` as the input array `x`. + + Args: + x (usm_ndarray): Input array from which to derive the output array + shape. + fill_value: the value to fill output array with + dtype (optional): + data type of the array. Can be typestring, + a :class:`numpy.dtype` object, :mod:`numpy` char string, or a + NumPy scalar type. If ``dtype`` is ``None``, the output array data + type is inferred from ``x``. Default: ``None`` + order ("C", "F", "A", or "K"): + memory layout for the array. Default: ``"K"`` + device (optional): + array API concept of device where the output array + is created. ``device`` can be ``None``, a oneAPI filter selector + string, an instance of :class:`dpctl.SyclDevice` corresponding to + a non-partitioned SYCL device, an instance of + :class:`dpctl.SyclQueue`, or a :class:`dpctl.tensor.Device` object + returned by :attr:`dpctl.tensor.usm_ndarray.device`. + Default: ``None`` + usm_type (``"device"``, ``"shared"``, ``"host"``, optional): + The type of SYCL USM allocation for the output array. + Default: ``"device"`` + sycl_queue (:class:`dpctl.SyclQueue`, optional): + The SYCL queue to use + for output array allocation and copying. ``sycl_queue`` and + ``device`` are complementary arguments, i.e. use one or another. + If both are specified, a :exc:`TypeError` is raised unless both + imply the same underlying SYCL queue to be used. If both are + ``None``, a cached queue targeting default-selected device is + used for allocation and population. Default: ``None`` + + Returns: + usm_ndarray: + New array initialized with given value. 
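+
+    A minimal usage sketch (illustrative only; it assumes ``zeros`` and
+    ``full_like`` are re-exported from ``dpctl_ext.tensor``, imported here
+    as ``dpt_ext``)::
+
+        x = dpt_ext.zeros((2, 3), dtype="i4")
+        y = dpt_ext.full_like(x, 7)
+        # y has shape (2, 3), dtype int32, is filled with 7, and inherits
+        # the device and usm_type of x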
+ """ + if not isinstance(x, dpt.usm_ndarray): + raise TypeError(f"Expected instance of dpt.usm_ndarray, got {type(x)}.") + if ( + not isinstance(order, str) + or len(order) == 0 + or order[0] not in "CcFfAaKk" + ): + raise ValueError( + "Unrecognized order keyword value, expecting 'C', 'F', 'A', or 'K'." + ) + order = order[0].upper() + if dtype is None: + dtype = x.dtype + if usm_type is None: + usm_type = x.usm_type + dpctl.utils.validate_usm_type(usm_type, allow_none=False) + if device is None and sycl_queue is None: + device = x.device + sycl_queue = normalize_queue_device(sycl_queue=sycl_queue, device=device) + sh = x.shape + dtype = dpt.dtype(dtype) + order = _normalize_order(order, x) + if order == "K": + _ensure_native_dtype_device_support(dtype, sycl_queue.sycl_device) + if isinstance(fill_value, (dpt.usm_ndarray, np.ndarray, tuple, list)): + X = dpt_ext.asarray( + fill_value, + dtype=dtype, + order=order, + usm_type=usm_type, + sycl_queue=sycl_queue, + ) + X = dpt_ext.broadcast_to(X, sh) + res = _empty_like_orderK(x, dtype, usm_type, sycl_queue) + _manager = dpctl.utils.SequentialOrderManager[sycl_queue] + # order copy after tasks populating X + dep_evs = _manager.submitted_events + hev, copy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=X, dst=res, sycl_queue=sycl_queue, depends=dep_evs + ) + _manager.add_event_pair(hev, copy_ev) + return res + else: + _validate_fill_value(fill_value) + + dtype = _get_dtype(dtype, sycl_queue, ref_type=type(fill_value)) + res = _empty_like_orderK(x, dtype, usm_type, sycl_queue) + fill_value = _cast_fill_val(fill_value, dtype) + _manager = dpctl.utils.SequentialOrderManager[sycl_queue] + # populating new allocation, no dependent events + hev, full_ev = ti._full_usm_ndarray(fill_value, res, sycl_queue) + _manager.add_event_pair(hev, full_ev) + return res + else: + return full( + sh, + fill_value, + dtype=dtype, + order=order, + device=device, + usm_type=usm_type, + sycl_queue=sycl_queue, + ) + + +def linspace( + start, + stop, + /, + num, + *, + dtype=None, + device=None, + endpoint=True, + sycl_queue=None, + usm_type="device", +): + """ + linspace(start, stop, num, dtype=None, device=None, endpoint=True, \ + sycl_queue=None, usm_type="device") + + Returns :class:`dpctl.tensor.usm_ndarray` array populated with + evenly spaced numbers of specified interval. + + Args: + start: + the start of the interval. + stop: + the end of the interval. If the ``endpoint`` is ``False``, the + function generates ``num+1`` evenly spaced points starting + with ``start`` and ending with ``stop`` and exclude the + ``stop`` from the returned array such that the returned array + consists of evenly spaced numbers over the half-open interval + ``[start, stop)``. If ``endpoint`` is ``True``, the output + array consists of evenly spaced numbers over the closed + interval ``[start, stop]``. Default: ``True`` + num (int): + number of samples. Must be a non-negative integer; otherwise, + the function raises ``ValueError`` exception. + dtype: + output array data type. Should be a floating data type. + If ``dtype`` is ``None``, the output array must be the default + floating point data type for target device. + Default: ``None`` + device (optional): + array API concept of device where the output array + is created. 
``device`` can be ``None``, a oneAPI filter selector + string, an instance of :class:`dpctl.SyclDevice` corresponding to + a non-partitioned SYCL device, an instance of + :class:`dpctl.SyclQueue`, or a :class:`dpctl.tensor.Device` object + returned by :attr:`dpctl.tensor.usm_ndarray.device`. + Default: ``None`` + usm_type (``"device"``, ``"shared"``, ``"host"``, optional): + The type of SYCL USM allocation for the output array. + Default: ``"device"`` + sycl_queue (:class:`dpctl.SyclQueue`, optional): + The SYCL queue to use + for output array allocation and copying. ``sycl_queue`` and + ``device`` are complementary arguments, i.e. use one or another. + If both are specified, a :exc:`TypeError` is raised unless both + imply the same underlying SYCL queue to be used. If both are + ``None``, a cached queue targeting default-selected device is + used for allocation and population. Default: ``None`` + endpoint: boolean indicating whether to include ``stop`` in the + interval. Default: ``True`` + + Returns: + usm_ndarray: + Array populated with evenly spaced numbers in the requested + interval. + """ + sycl_queue = normalize_queue_device(sycl_queue=sycl_queue, device=device) + dpctl.utils.validate_usm_type(usm_type, allow_none=False) + if endpoint not in [True, False]: + raise TypeError("endpoint keyword argument must be of boolean type") + + num = operator.index(num) + if num < 0: + raise ValueError("Number of points must be non-negative") + + _, dt = _coerce_and_infer_dt( + start, + stop, + dt=dtype, + sycl_queue=sycl_queue, + err_msg="start and stop must be Python scalars.", + allow_bool=True, + ) + + int_dt = None + if np.issubdtype(dt, np.integer): + if dtype is not None: + int_dt = dt + dt = ti.default_device_fp_type(sycl_queue) + dt = dpt.dtype(dt) + start = float(start) + stop = float(stop) + + res = dpt_ext.empty(num, dtype=dt, usm_type=usm_type, sycl_queue=sycl_queue) + _manager = dpctl.utils.SequentialOrderManager[sycl_queue] + hev, la_ev = ti._linspace_affine( + start, stop, dst=res, include_endpoint=endpoint, sycl_queue=sycl_queue + ) + _manager.add_event_pair(hev, la_ev) + + return res if int_dt is None else dpt_ext.astype(res, int_dt) + + +def meshgrid(*arrays, indexing="xy"): + """ + Creates list of :class:`dpctl.tensor.usm_ndarray` coordinate matrices + from vectors. + + Args: + arrays (usm_ndarray): + an arbitrary number of one-dimensional arrays + representing grid coordinates. Each array should have the same + numeric data type. + indexing (``"xy"``, or ``"ij"``): + Cartesian (``"xy"``) or matrix (``"ij"``) indexing of output. + If provided zero or one one-dimensional vector(s) (i.e., the + zero- and one-dimensional cases, respectively), the ``indexing`` + keyword has no effect and should be ignored. Default: ``"xy"`` + + Returns: + List[array]: + list of ``N`` arrays, where ``N`` is the number of + provided one-dimensional input arrays. Each returned array must + have rank ``N``. + For a set of ``n`` vectors with lengths ``N0``, ``N1``, ``N2``, ... + The cartesian indexing results in arrays of shape + ``(N1, N0, N2, ...)``, while the + matrix indexing results in arrays of shape + ``(N0, N1, N2, ...)``. + Default: ``"xy"``. + + Raises: + ValueError: If vectors are not of the same data type, or are not + one-dimensional. + + """ + ref_dt = None + ref_unset = True + for array in arrays: + if not isinstance(array, dpt.usm_ndarray): + raise TypeError( + f"Expected instance of dpt.usm_ndarray, got {type(array)}." 
+ ) + if array.ndim != 1: + raise ValueError("All arrays must be one-dimensional.") + if ref_unset: + ref_unset = False + ref_dt = array.dtype + else: + if not ref_dt == array.dtype: + raise ValueError( + "All arrays must be of the same numeric data type." + ) + if indexing not in ["xy", "ij"]: + raise ValueError( + "Unrecognized indexing keyword value, expecting 'xy' or 'ij.'" + ) + n = len(arrays) + if n == 0: + return [] + + sh = (-1,) + (1,) * (n - 1) + + res = [] + if n > 1 and indexing == "xy": + res.append(dpt_ext.reshape(arrays[0], (1, -1) + sh[2:], copy=True)) + res.append(dpt_ext.reshape(arrays[1], sh, copy=True)) + arrays, sh = arrays[2:], sh[-2:] + sh[:-2] + + for array in arrays: + res.append(dpt_ext.reshape(array, sh, copy=True)) + sh = sh[-1:] + sh[:-1] + + output = dpt_ext.broadcast_arrays(*res) + return output -def eye( - n_rows, - n_cols=None, - /, + +def ones( + shape, *, - k=0, dtype=None, order="C", device=None, usm_type="device", sycl_queue=None, ): - """ - eye(n_rows, n_cols=None, /, *, k=0, dtype=None, \ - device=None, usm_type="device", sycl_queue=None) + """ones(shape, dtype=None, order="C", \ + device=None, usm_type="device", sycl_queue=None) - Creates :class:`dpctl.tensor.usm_ndarray` with ones on the `k`-th - diagonal. + Returns a new :class:`dpctl.tensor.usm_ndarray` having a specified + shape and filled with ones. Args: - n_rows (int): - number of rows in the output array. - n_cols (int, optional): - number of columns in the output array. If ``None``, - ``n_cols = n_rows``. Default: ``None`` - k (int): - index of the diagonal, with ``0`` as the main diagonal. - A positive value of ``k`` is a superdiagonal, a negative value - is a subdiagonal. - Raises :exc:`TypeError` if ``k`` is not an integer. - Default: ``0`` + shape (Tuple[int], int): + Dimensions of the array to be created. dtype (optional): data type of the array. Can be typestring, - a :class:`numpy.dtype` object, :mod:`numpy` char string, or - a NumPy scalar type. Default: ``None`` - order ("C" or "F"): - memory layout for the array. Default: ``"C"`` - device (optional): - array API concept of device where the output array + a :class:`numpy.dtype` object, :mod:`numpy` char string, + or a NumPy scalar type. Default: ``None`` + order ("C", or "F"): memory layout for the array. Default: ``"C"`` + device (optional): array API concept of device where the output array is created. ``device`` can be ``None``, a oneAPI filter selector string, an instance of :class:`dpctl.SyclDevice` corresponding to a non-partitioned SYCL device, an instance of @@ -158,79 +1563,48 @@ def eye( Returns: usm_ndarray: - A diagonal matrix. + Created array initialized with ones. """ if not isinstance(order, str) or len(order) == 0 or order[0] not in "CcFf": raise ValueError( "Unrecognized order keyword value, expecting 'F' or 'C'." 
) order = order[0].upper() - n_rows = operator.index(n_rows) - n_cols = n_rows if n_cols is None else operator.index(n_cols) - k = operator.index(k) - if k >= n_cols or -k >= n_rows: - return dpt.zeros( - (n_rows, n_cols), - dtype=dtype, - order=order, - device=device, - usm_type=usm_type, - sycl_queue=sycl_queue, - ) dpctl.utils.validate_usm_type(usm_type, allow_none=False) sycl_queue = normalize_queue_device(sycl_queue=sycl_queue, device=device) dtype = _get_dtype(dtype, sycl_queue) - _ensure_native_dtype_device_support(dtype, sycl_queue.sycl_device) res = dpt.usm_ndarray( - (n_rows, n_cols), + shape, dtype=dtype, buffer=usm_type, order=order, buffer_ctor_kwargs={"queue": sycl_queue}, ) - if n_rows != 0 and n_cols != 0: - _manager = dpctl.utils.SequentialOrderManager[sycl_queue] - hev, eye_ev = ti._eye(k, dst=res, sycl_queue=sycl_queue) - _manager.add_event_pair(hev, eye_ev) + _manager = dpctl.utils.SequentialOrderManager[sycl_queue] + # populating new allocation, no dependent events + hev, full_ev = ti._full_usm_ndarray(1, res, sycl_queue) + _manager.add_event_pair(hev, full_ev) return res -def _validate_fill_value(fill_val): - """Validates that `fill_val` is a numeric or boolean scalar.""" - # TODO: verify if `np.True_` and `np.False_` should be instances of - # Number in NumPy, like other NumPy scalars and like Python bools - # check for `np.bool_` separately as NumPy<2 has no `np.bool` - if not isinstance(fill_val, Number) and not isinstance(fill_val, np.bool_): - raise TypeError( - f"array cannot be filled with scalar of type {type(fill_val)}" - ) - - -def full( - shape, - fill_value, - *, - dtype=None, - order="C", - device=None, - usm_type=None, - sycl_queue=None, +def ones_like( + x, /, *, dtype=None, order="K", device=None, usm_type=None, sycl_queue=None ): """ - Returns a new :class:`dpctl.tensor.usm_ndarray` having a specified - shape and filled with `fill_value`. + Returns a new :class:`dpctl.tensor.usm_ndarray` filled with ones and + having the same `shape` as the input array `x`. Args: - shape (tuple): - Dimensions of the array to be created. - fill_value (int,float,complex,usm_ndarray): - fill value - dtype (optional): data type of the array. Can be typestring, + x (usm_ndarray): + Input array from which to derive the output array shape + dtype (optional): + data type of the array. Can be typestring, a :class:`numpy.dtype` object, :mod:`numpy` char string, - or a NumPy scalar type. Default: ``None`` - order ("C", or "F"): + or a NumPy scalar type. Default: `None` + order ("C", "F", "A", or "K"): memory layout for the array. Default: ``"C"`` - device (optional): array API concept of device where the output array + device (optional): + array API concept of device where the output array is created. ``device`` can be ``None``, a oneAPI filter selector string, an instance of :class:`dpctl.SyclDevice` corresponding to a non-partitioned SYCL device, an instance of @@ -251,54 +1625,47 @@ def full( Returns: usm_ndarray: - New array initialized with given value. + New array initialized with ones. """ - if not isinstance(order, str) or len(order) == 0 or order[0] not in "CcFf": + if not isinstance(x, dpt.usm_ndarray): + raise TypeError(f"Expected instance of dpt.usm_ndarray, got {type(x)}.") + if ( + not isinstance(order, str) + or len(order) == 0 + or order[0] not in "CcFfAaKk" + ): raise ValueError( - "Unrecognized order keyword value, expecting 'F' or 'C'." + "Unrecognized order keyword value, expecting 'C', 'F', 'A', or 'K'." 
) order = order[0].upper() - dpctl.utils.validate_usm_type(usm_type, allow_none=True) - - if isinstance(fill_value, (dpt.usm_ndarray, np.ndarray, tuple, list)): - if ( - isinstance(fill_value, dpt.usm_ndarray) - and sycl_queue is None - and device is None - ): - sycl_queue = fill_value.sycl_queue - else: - sycl_queue = normalize_queue_device( - sycl_queue=sycl_queue, device=device - ) - X = dpt.asarray( - fill_value, + if dtype is None: + dtype = x.dtype + if usm_type is None: + usm_type = x.usm_type + dpctl.utils.validate_usm_type(usm_type, allow_none=False) + if device is None and sycl_queue is None: + device = x.device + sycl_queue = normalize_queue_device(sycl_queue=sycl_queue, device=device) + dtype = dpt.dtype(dtype) + order = _normalize_order(order, x) + if order == "K": + _ensure_native_dtype_device_support(dtype, sycl_queue.sycl_device) + res = _empty_like_orderK(x, dtype, usm_type, sycl_queue) + _manager = dpctl.utils.SequentialOrderManager[sycl_queue] + # populating new allocation, no dependent events + hev, full_ev = ti._full_usm_ndarray(1, res, sycl_queue) + _manager.add_event_pair(hev, full_ev) + return res + else: + sh = x.shape + return ones( + sh, dtype=dtype, order=order, + device=device, usm_type=usm_type, sycl_queue=sycl_queue, ) - return dpt_ext.copy(dpt.broadcast_to(X, shape), order=order) - else: - _validate_fill_value(fill_value) - - sycl_queue = normalize_queue_device(sycl_queue=sycl_queue, device=device) - usm_type = usm_type if usm_type is not None else "device" - dtype = _get_dtype(dtype, sycl_queue, ref_type=type(fill_value)) - res = dpt.usm_ndarray( - shape, - dtype=dtype, - buffer=usm_type, - order=order, - buffer_ctor_kwargs={"queue": sycl_queue}, - ) - fill_value = _cast_fill_val(fill_value, dtype) - - _manager = dpctl.utils.SequentialOrderManager[sycl_queue] - # populating new allocation, no dependent events - hev, full_ev = ti._full_usm_ndarray(fill_value, res, sycl_queue) - _manager.add_event_pair(hev, full_ev) - return res def tril(x, /, *, k=0): @@ -340,7 +1707,7 @@ def tril(x, /, *, k=0): q = x.sycl_queue if k >= shape[nd - 1] - 1: - res = dpt.empty( + res = dpt_ext.empty( x.shape, dtype=x.dtype, order=order, @@ -354,7 +1721,7 @@ def tril(x, /, *, k=0): ) _manager.add_event_pair(hev, cpy_ev) elif k < -shape[nd - 2]: - res = dpt.zeros( + res = dpt_ext.zeros( x.shape, dtype=x.dtype, order=order, @@ -362,7 +1729,7 @@ def tril(x, /, *, k=0): sycl_queue=q, ) else: - res = dpt.empty( + res = dpt_ext.empty( x.shape, dtype=x.dtype, order=order, @@ -418,7 +1785,7 @@ def triu(x, /, *, k=0): q = x.sycl_queue if k > shape[nd - 1]: - res = dpt.zeros( + res = dpt_ext.zeros( x.shape, dtype=x.dtype, order=order, @@ -426,7 +1793,7 @@ def triu(x, /, *, k=0): sycl_queue=q, ) elif k <= -shape[nd - 2] + 1: - res = dpt.empty( + res = dpt_ext.empty( x.shape, dtype=x.dtype, order=order, @@ -440,7 +1807,7 @@ def triu(x, /, *, k=0): ) _manager.add_event_pair(hev, cpy_ev) else: - res = dpt.empty( + res = dpt_ext.empty( x.shape, dtype=x.dtype, order=order, @@ -455,3 +1822,156 @@ def triu(x, /, *, k=0): _manager.add_event_pair(hev, triu_ev) return res + + +def zeros( + shape, + *, + dtype=None, + order="C", + device=None, + usm_type="device", + sycl_queue=None, +): + """ + Returns a new :class:`dpctl.tensor.usm_ndarray` having a specified + shape and filled with zeros. + + Args: + shape (Tuple[int], int): + Dimensions of the array to be created. + dtype (optional): + data type of the array. 
Can be typestring, + a :class:`numpy.dtype` object, :mod:`numpy` char string, + or a NumPy scalar type. Default: ``None`` + order ("C", or "F"): + memory layout for the array. Default: ``"C"`` + device (optional): array API concept of device where the output array + is created. ``device`` can be ``None``, a oneAPI filter selector + string, an instance of :class:`dpctl.SyclDevice` corresponding to + a non-partitioned SYCL device, an instance of + :class:`dpctl.SyclQueue`, or a :class:`dpctl.tensor.Device` object + returned by :attr:`dpctl.tensor.usm_ndarray.device`. + Default: ``None`` + usm_type (``"device"``, ``"shared"``, ``"host"``, optional): + The type of SYCL USM allocation for the output array. + Default: ``"device"`` + sycl_queue (:class:`dpctl.SyclQueue`, optional): + The SYCL queue to use + for output array allocation and copying. ``sycl_queue`` and + ``device`` are complementary arguments, i.e. use one or another. + If both are specified, a :exc:`TypeError` is raised unless both + imply the same underlying SYCL queue to be used. If both are + ``None``, a cached queue targeting default-selected device is + used for allocation and population. Default: ``None`` + + Returns: + usm_ndarray: + Constructed array initialized with zeros. + """ + if not isinstance(order, str) or len(order) == 0 or order[0] not in "CcFf": + raise ValueError( + "Unrecognized order keyword value, expecting 'F' or 'C'." + ) + order = order[0].upper() + dpctl.utils.validate_usm_type(usm_type, allow_none=False) + sycl_queue = normalize_queue_device(sycl_queue=sycl_queue, device=device) + dtype = _get_dtype(dtype, sycl_queue) + _ensure_native_dtype_device_support(dtype, sycl_queue.sycl_device) + res = dpt.usm_ndarray( + shape, + dtype=dtype, + buffer=usm_type, + order=order, + buffer_ctor_kwargs={"queue": sycl_queue}, + ) + _manager = dpctl.utils.SequentialOrderManager[sycl_queue] + # populating new allocation, no dependent events + hev, zeros_ev = ti._zeros_usm_ndarray(res, sycl_queue) + _manager.add_event_pair(hev, zeros_ev) + + return res + + +def zeros_like( + x, /, *, dtype=None, order="K", device=None, usm_type=None, sycl_queue=None +): + """ + Creates :class:`dpctl.tensor.usm_ndarray` from USM allocation + initialized with zeros. + + Args: + x (usm_ndarray): + Input array from which to derive the shape of the + output array. + dtype (optional): + data type of the array. Can be typestring, + a :class:`numpy.dtype` object, :mod:`numpy` char string, or a + NumPy scalar type. If `None`, output array has the same data + type as the input array. Default: ``None`` + order ("C", or "F"): + memory layout for the array. Default: ``"C"`` + device (optional): + array API concept of device where the output array + is created. ``device`` can be ``None``, a oneAPI filter selector + string, an instance of :class:`dpctl.SyclDevice` corresponding to + a non-partitioned SYCL device, an instance of + :class:`dpctl.SyclQueue`, or a :class:`dpctl.tensor.Device` object + returned by :attr:`dpctl.tensor.usm_ndarray.device`. + Default: ``None`` + usm_type (``"device"``, ``"shared"``, ``"host"``, optional): + The type of SYCL USM allocation for the output array. + Default: ``"device"`` + sycl_queue (:class:`dpctl.SyclQueue`, optional): + The SYCL queue to use + for output array allocation and copying. ``sycl_queue`` and + ``device`` are complementary arguments, i.e. use one or another. + If both are specified, a :exc:`TypeError` is raised unless both + imply the same underlying SYCL queue to be used. 
If both are + ``None``, a cached queue targeting default-selected device is + used for allocation and population. Default: ``None`` + + Returns: + usm_ndarray: + New array initialized with zeros. + """ + if not isinstance(x, dpt.usm_ndarray): + raise TypeError(f"Expected instance of dpt.usm_ndarray, got {type(x)}.") + if ( + not isinstance(order, str) + or len(order) == 0 + or order[0] not in "CcFfAaKk" + ): + raise ValueError( + "Unrecognized order keyword value, expecting 'C', 'F', 'A', or 'K'." + ) + order = order[0].upper() + if dtype is None: + dtype = x.dtype + if usm_type is None: + usm_type = x.usm_type + dpctl.utils.validate_usm_type(usm_type, allow_none=False) + if device is None and sycl_queue is None: + device = x.device + sycl_queue = normalize_queue_device(sycl_queue=sycl_queue, device=device) + dtype = dpt.dtype(dtype) + order = _normalize_order(order, x) + if order == "K": + _ensure_native_dtype_device_support(dtype, sycl_queue.sycl_device) + res = _empty_like_orderK(x, dtype, usm_type, sycl_queue) + _manager = dpctl.utils.SequentialOrderManager[sycl_queue] + # populating new allocation, no dependent events + hev, full_ev = ti._full_usm_ndarray(0, res, sycl_queue) + _manager.add_event_pair(hev, full_ev) + return res + else: + _ensure_native_dtype_device_support(dtype, sycl_queue.sycl_device) + sh = x.shape + return zeros( + sh, + dtype=dtype, + order=order, + device=device, + usm_type=usm_type, + sycl_queue=sycl_queue, + ) diff --git a/dpctl_ext/tensor/_indexing_functions.py b/dpctl_ext/tensor/_indexing_functions.py index 6ca327192f73..5b4eb1aaf7a2 100644 --- a/dpctl_ext/tensor/_indexing_functions.py +++ b/dpctl_ext/tensor/_indexing_functions.py @@ -57,7 +57,7 @@ def _get_indexing_mode(name): def _range(sh_i, i, nd, q, usm_t, dt): - ind = dpt.arange(sh_i, dtype=dt, usm_type=usm_t, sycl_queue=q) + ind = dpt_ext.arange(sh_i, dtype=dt, usm_type=usm_t, sycl_queue=q) ind.shape = tuple(sh_i if i == j else 1 for j in range(nd)) return ind @@ -177,7 +177,7 @@ def place(arr, mask, vals): raise dpctl.utils.ExecutionPlacementError if arr.shape != mask.shape or vals.ndim != 1: raise ValueError("Array sizes are not as required") - cumsum = dpt.empty(mask.size, dtype="i8", sycl_queue=exec_q) + cumsum = dpt_ext.empty(mask.size, dtype="i8", sycl_queue=exec_q) _manager = dpctl.utils.SequentialOrderManager[exec_q] deps_ev = _manager.submitted_events nz_count = ti.mask_positions( @@ -190,7 +190,7 @@ def place(arr, mask, vals): if vals.dtype == arr.dtype: rhs = vals else: - rhs = dpt.astype(vals, arr.dtype) + rhs = dpt_ext.astype(vals, arr.dtype) hev, pl_ev = ti._place( dst=arr, cumsum=cumsum, @@ -329,7 +329,7 @@ def put_vec_duplicates(vec, ind, vals): val_shape = indices.shape if not isinstance(vals, dpt.usm_ndarray): - vals = dpt.asarray( + vals = dpt_ext.asarray( vals, dtype=x.dtype, usm_type=vals_usm_type, sycl_queue=exec_q ) # choose to throw here for consistency with `place` @@ -341,7 +341,7 @@ def put_vec_duplicates(vec, ind, vals): rhs = vals else: rhs = dpt_ext.astype(vals, x.dtype) - rhs = dpt.broadcast_to(rhs, val_shape) + rhs = dpt_ext.broadcast_to(rhs, val_shape) _manager = dpctl.utils.SequentialOrderManager[exec_q] deps_ev = _manager.submitted_events @@ -540,9 +540,9 @@ def take(x, indices, /, *, axis=None, out=None, mode="wrap"): "Input and output allocation queues are not compatible" ) if ti._array_overlap(x, out): - out = dpt.empty_like(out) + out = dpt_ext.empty_like(out) else: - out = dpt.empty( + out = dpt_ext.empty( res_shape, dtype=dt, usm_type=res_usm_type, 
                sycl_queue=exec_q
            )
diff --git a/dpctl_ext/tensor/_manipulation_functions.py b/dpctl_ext/tensor/_manipulation_functions.py
index f1b8b46dbcbc..08459dcaea76 100644
--- a/dpctl_ext/tensor/_manipulation_functions.py
+++ b/dpctl_ext/tensor/_manipulation_functions.py
@@ -40,6 +40,7 @@
 import dpctl_ext.tensor._tensor_impl as ti
 
 from ._numpy_helper import normalize_axis_index, normalize_axis_tuple
+from ._type_utils import _supported_dtype, _to_device_supported_dtype
 
 __doc__ = (
     "Implementation module for array manipulation "
@@ -47,6 +48,55 @@
 )
 
 
+def _arrays_validation(arrays, check_ndim=True):
+    n = len(arrays)
+    if n == 0:
+        raise TypeError("Missing 1 required positional argument: 'arrays'.")
+
+    if not isinstance(arrays, (list, tuple)):
+        raise TypeError(f"Expected tuple or list type, got {type(arrays)}.")
+
+    for X in arrays:
+        if not isinstance(X, dpt.usm_ndarray):
+            raise TypeError(f"Expected usm_ndarray type, got {type(X)}.")
+
+    exec_q = dputils.get_execution_queue([X.sycl_queue for X in arrays])
+    if exec_q is None:
+        raise ValueError("All the input arrays must have the same sycl queue.")
+
+    res_usm_type = dputils.get_coerced_usm_type([X.usm_type for X in arrays])
+    if res_usm_type is None:
+        raise ValueError("All the input arrays must have a valid usm_type.")
+
+    X0 = arrays[0]
+    _supported_dtype(Xi.dtype for Xi in arrays)
+
+    res_dtype = X0.dtype
+    dev = exec_q.sycl_device
+    for i in range(1, n):
+        res_dtype = np.promote_types(res_dtype, arrays[i].dtype)
+        res_dtype = _to_device_supported_dtype(res_dtype, dev)
+
+    if check_ndim:
+        for i in range(1, n):
+            if X0.ndim != arrays[i].ndim:
+                raise ValueError(
+                    "All the input arrays must have same number of dimensions, "
+                    f"but the array at index 0 has {X0.ndim} dimension(s) and "
+                    f"the array at index {i} has {arrays[i].ndim} dimension(s)."
+                )
+    return res_dtype, res_usm_type, exec_q
+
+
+def _broadcast_shapes(*args):
+    """
+    Broadcast the input shapes into a single shape;
+    returns tuple broadcasted shape.
+    """
+    array_shapes = [array.shape for array in args]
+    return _broadcast_shape_impl(array_shapes)
+
+
 def _broadcast_shape_impl(shapes):
     if len(set(shapes)) == 1:
         return shapes[0]
@@ -86,6 +136,395 @@ def _broadcast_shape_impl(shapes):
     return tuple(common_shape)
 
 
+def _broadcast_strides(X_shape, X_strides, res_ndim):
+    """
+    Broadcasts strides to match the given dimensions;
+    returns tuple type strides.
+    """
+    out_strides = [0] * res_ndim
+    X_shape_len = len(X_shape)
+    str_dim = -X_shape_len
+    for i in range(X_shape_len):
+        shape_value = X_shape[i]
+        if not shape_value == 1:
+            out_strides[str_dim] = X_strides[i]
+        str_dim += 1
+
+    return tuple(out_strides)
+
+
+def _check_same_shapes(X0_shape, axis, n, arrays):
+    for i in range(1, n):
+        Xi_shape = arrays[i].shape
+        for j, X0j in enumerate(X0_shape):
+            if X0j != Xi_shape[j] and j != axis:
+                raise ValueError(
+                    "All the input array dimensions for the concatenation "
+                    f"axis must match exactly, but along dimension {j}, the "
+                    f"array at index 0 has size {X0j} and the array "
+                    f"at index {i} has size {Xi_shape[j]}."
+ ) + + +def _concat_axis_None(arrays): + """Implementation of concat(arrays, axis=None).""" + res_dtype, res_usm_type, exec_q = _arrays_validation( + arrays, check_ndim=False + ) + res_shape = 0 + for array in arrays: + res_shape += array.size + res = dpt_ext.empty( + res_shape, dtype=res_dtype, usm_type=res_usm_type, sycl_queue=exec_q + ) + + fill_start = 0 + _manager = dputils.SequentialOrderManager[exec_q] + deps = _manager.submitted_events + for array in arrays: + fill_end = fill_start + array.size + if array.flags.c_contiguous: + hev, cpy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=dpt_ext.reshape(array, -1), + dst=res[fill_start:fill_end], + sycl_queue=exec_q, + depends=deps, + ) + _manager.add_event_pair(hev, cpy_ev) + else: + src_ = array + # _copy_usm_ndarray_for_reshape requires src and dst to have + # the same data type + if not array.dtype == res_dtype: + src2_ = dpt_ext.empty_like(src_, dtype=res_dtype) + ht_copy_ev, cpy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=src_, dst=src2_, sycl_queue=exec_q, depends=deps + ) + _manager.add_event_pair(ht_copy_ev, cpy_ev) + hev, reshape_copy_ev = ti._copy_usm_ndarray_for_reshape( + src=src2_, + dst=res[fill_start:fill_end], + sycl_queue=exec_q, + depends=[cpy_ev], + ) + _manager.add_event_pair(hev, reshape_copy_ev) + else: + hev, cpy_ev = ti._copy_usm_ndarray_for_reshape( + src=src_, + dst=res[fill_start:fill_end], + sycl_queue=exec_q, + depends=deps, + ) + _manager.add_event_pair(hev, cpy_ev) + fill_start = fill_end + + return res + + +def broadcast_arrays(*args): + """broadcast_arrays(*arrays) + + Broadcasts one or more :class:`dpctl.tensor.usm_ndarrays` against + one another. + + Args: + arrays (usm_ndarray): an arbitrary number of arrays to be + broadcasted. + + Returns: + List[usm_ndarray]: + A list of broadcasted arrays. Each array + must have the same shape. Each array must have the same `dtype`, + `device` and `usm_type` attributes as its corresponding input + array. + """ + if len(args) == 0: + raise ValueError("`broadcast_arrays` requires at least one argument") + for X in args: + if not isinstance(X, dpt.usm_ndarray): + raise TypeError(f"Expected usm_ndarray type, got {type(X)}.") + + shape = _broadcast_shapes(*args) + + if all(X.shape == shape for X in args): + return args + + return [broadcast_to(X, shape) for X in args] + + +def broadcast_to(X, /, shape): + """broadcast_to(x, shape) + + Broadcast an array to a new `shape`; returns the broadcasted + :class:`dpctl.tensor.usm_ndarray` as a view. + + Args: + x (usm_ndarray): input array + shape (Tuple[int,...]): array shape. The `shape` must be + compatible with `x` according to broadcasting rules. + + Returns: + usm_ndarray: + An array with the specified `shape`. + The output array is a view of the input array, and + hence has the same data type, USM allocation type and + device attributes. + """ + if not isinstance(X, dpt.usm_ndarray): + raise TypeError(f"Expected usm_ndarray type, got {type(X)}.") + + # Use numpy.broadcast_to to check the validity of the input + # parameter 'shape'. Raise ValueError if 'X' is not compatible + # with 'shape' according to NumPy's broadcasting rules. 
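+    # Implementation note: ``np.empty(tuple(), dtype="u1")`` is a 0-d,
+    # one-byte host array, so the nested ``np.broadcast_to`` calls below
+    # only exercise NumPy's shape/stride compatibility checks; no copy of
+    # the data of 'X' is ever made on the host.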
+ new_array = np.broadcast_to( + np.broadcast_to(np.empty(tuple(), dtype="u1"), X.shape), shape + ) + new_sts = _broadcast_strides(X.shape, X.strides, new_array.ndim) + return dpt.usm_ndarray( + shape=new_array.shape, + dtype=X.dtype, + buffer=X, + strides=new_sts, + offset=X._element_offset, + ) + + +def concat(arrays, /, *, axis=0): + """concat(arrays, axis) + + Joins a sequence of arrays along an existing axis. + + Args: + arrays (Union[List[usm_ndarray, Tuple[usm_ndarray,...]]]): + input arrays to join. The arrays must have the same shape, + except in the dimension specified by `axis`. + axis (Optional[int]): axis along which the arrays will be joined. + If `axis` is `None`, arrays must be flattened before + concatenation. If `axis` is negative, it is understood as + being counted from the last dimension. Default: `0`. + + Returns: + usm_ndarray: + An output array containing the concatenated + values. The output array data type is determined by Type + Promotion Rules of array API. + + All input arrays must have the same device attribute. The output array + is allocated on that same device, and data movement operations are + scheduled on a queue underlying the device. The USM allocation type + of the output array is determined by USM allocation type promotion + rules. + """ + if axis is None: + return _concat_axis_None(arrays) + + res_dtype, res_usm_type, exec_q = _arrays_validation(arrays) + n = len(arrays) + X0 = arrays[0] + + axis = normalize_axis_index(axis, X0.ndim) + X0_shape = X0.shape + _check_same_shapes(X0_shape, axis, n, arrays) + + res_shape_axis = 0 + for X in arrays: + res_shape_axis = res_shape_axis + X.shape[axis] + + res_shape = tuple( + X0_shape[i] if i != axis else res_shape_axis for i in range(X0.ndim) + ) + + res = dpt_ext.empty( + res_shape, dtype=res_dtype, usm_type=res_usm_type, sycl_queue=exec_q + ) + + _manager = dputils.SequentialOrderManager[exec_q] + deps = _manager.submitted_events + fill_start = 0 + for i in range(n): + fill_end = fill_start + arrays[i].shape[axis] + c_shapes_copy = tuple( + np.s_[fill_start:fill_end] if j == axis else np.s_[:] + for j in range(X0.ndim) + ) + hev, cpy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=arrays[i], + dst=res[c_shapes_copy], + sycl_queue=exec_q, + depends=deps, + ) + _manager.add_event_pair(hev, cpy_ev) + fill_start = fill_end + + return res + + +def expand_dims(X, /, *, axis=0): + """expand_dims(x, axis) + + Expands the shape of an array by inserting a new axis (dimension) + of size one at the position specified by axis. + + Args: + x (usm_ndarray): + input array + axis (Union[int, Tuple[int]]): + axis position in the expanded axes (zero-based). If `x` has rank + (i.e, number of dimensions) `N`, a valid `axis` must reside + in the closed-interval `[-N-1, N]`. If provided a negative + `axis`, the `axis` position at which to insert a singleton + dimension is computed as `N + axis + 1`. Hence, if + provided `-1`, the resolved axis position is `N` (i.e., + a singleton dimension must be appended to the input array `x`). + If provided `-N-1`, the resolved axis position is `0` (i.e., a + singleton dimension is prepended to the input array `x`). + + Returns: + usm_ndarray: + Returns a view, if possible, and a copy otherwise with the number + of dimensions increased. + The expanded array has the same data type as the input array `x`. + The expanded array is located on the same device as the input + array, and has the same USM allocation type. + + Raises: + IndexError: if `axis` value is invalid. 
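+
+    A shape-only sketch (illustrative; assumes the module is imported as
+    ``dpt_ext``)::
+
+        x = dpt_ext.zeros((3, 4))
+        dpt_ext.expand_dims(x, axis=0).shape       # (1, 3, 4)
+        dpt_ext.expand_dims(x, axis=-1).shape      # (3, 4, 1)
+        dpt_ext.expand_dims(x, axis=(0, 2)).shape  # (1, 3, 1, 4)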
+ """ + if not isinstance(X, dpt.usm_ndarray): + raise TypeError(f"Expected usm_ndarray type, got {type(X)}.") + + if type(axis) not in (tuple, list): + axis = (axis,) + + out_ndim = len(axis) + X.ndim + axis = normalize_axis_tuple(axis, out_ndim) + + shape_it = iter(X.shape) + shape = tuple(1 if ax in axis else next(shape_it) for ax in range(out_ndim)) + + return dpt_ext.reshape(X, shape) + + +def flip(X, /, *, axis=None): + """flip(x, axis) + + Reverses the order of elements in an array `x` along the given `axis`. + The shape of the array is preserved, but the elements are reordered. + + Args: + x (usm_ndarray): input array. + axis (Optional[Union[int, Tuple[int,...]]]): axis (or axes) along + which to flip. + If `axis` is `None`, all input array axes are flipped. + If `axis` is negative, the flipped axis is counted from the + last dimension. If provided more than one axis, only the specified + axes are flipped. Default: `None`. + + Returns: + usm_ndarray: + A view of `x` with the entries of `axis` reversed. + """ + if not isinstance(X, dpt.usm_ndarray): + raise TypeError(f"Expected usm_ndarray type, got {type(X)}.") + X_ndim = X.ndim + if axis is None: + indexer = (np.s_[::-1],) * X_ndim + else: + axis = normalize_axis_tuple(axis, X_ndim) + indexer = tuple( + np.s_[::-1] if i in axis else np.s_[:] for i in range(X.ndim) + ) + return X[indexer] + + +def moveaxis(X, source, destination, /): + """moveaxis(x, source, destination) + + Moves axes of an array to new positions. + + Args: + x (usm_ndarray): input array + + source (int or a sequence of int): + Original positions of the axes to move. + These must be unique. If `x` has rank (i.e., number of + dimensions) `N`, a valid `axis` must be in the + half-open interval `[-N, N)`. + + destination (int or a sequence of int): + Destination positions for each of the original axes. + These must also be unique. If `x` has rank + (i.e., number of dimensions) `N`, a valid `axis` must be + in the half-open interval `[-N, N)`. + + Returns: + usm_ndarray: + Array with moved axes. + The returned array must has the same data type as `x`, + is created on the same device as `x` and has the same + USM allocation type as `x`. + + Raises: + AxisError: if `axis` value is invalid. + ValueError: if `src` and `dst` have not equal number of elements. + """ + if not isinstance(X, dpt.usm_ndarray): + raise TypeError(f"Expected usm_ndarray type, got {type(X)}.") + + source = normalize_axis_tuple(source, X.ndim, "source") + destination = normalize_axis_tuple(destination, X.ndim, "destination") + + if len(source) != len(destination): + raise ValueError( + "`source` and `destination` arguments must have " + "the same number of elements" + ) + + ind = [n for n in range(X.ndim) if n not in source] + + for src, dst in sorted(zip(destination, source)): + ind.insert(src, dst) + + return dpt_ext.permute_dims(X, tuple(ind)) + + +def permute_dims(X, /, axes): + """permute_dims(x, axes) + + Permute the axes (dimensions) of an array; returns the permuted + array as a view. + + Args: + x (usm_ndarray): input array. + axes (Tuple[int, ...]): tuple containing permutation of + `(0,1,...,N-1)` where `N` is the number of axes (dimensions) + of `x`. + Returns: + usm_ndarray: + An array with permuted axes. + The returned array must has the same data type as `x`, + is created on the same device as `x` and has the same USM allocation + type as `x`. 
+ """ + if not isinstance(X, dpt.usm_ndarray): + raise TypeError(f"Expected usm_ndarray type, got {type(X)}.") + axes = normalize_axis_tuple(axes, X.ndim, "axes") + if not X.ndim == len(axes): + raise ValueError( + "The length of the passed axes does not match " + "to the number of usm_ndarray dimensions." + ) + newstrides = tuple(X.strides[i] for i in axes) + newshape = tuple(X.shape[i] for i in axes) + return dpt.usm_ndarray( + shape=newshape, + dtype=X.dtype, + buffer=X, + strides=newstrides, + offset=X._element_offset, + ) + + def repeat(x, repeats, /, *, axis=None): """repeat(x, repeats, axis=None) @@ -204,7 +643,7 @@ def repeat(x, repeats, /, *, axis=None): "`repeats` sequence must have the same length as the " "repeated axis" ) - repeats = dpt.asarray( + repeats = dpt_ext.asarray( repeats, dtype=dpt.int64, usm_type=usm_type, sycl_queue=exec_q ) if not dpt.all(repeats >= 0): @@ -223,7 +662,7 @@ def repeat(x, repeats, /, *, axis=None): res_shape = x_shape[:axis] + (res_axis_size,) + x_shape[axis + 1 :] else: res_shape = (res_axis_size,) - res = dpt.empty( + res = dpt_ext.empty( res_shape, dtype=x.dtype, usm_type=usm_type, sycl_queue=exec_q ) if res_axis_size > 0: @@ -238,7 +677,7 @@ def repeat(x, repeats, /, *, axis=None): _manager.add_event_pair(ht_rep_ev, rep_ev) else: if repeats.dtype != dpt.int64: - rep_buf = dpt.empty( + rep_buf = dpt_ext.empty( repeats.shape, dtype=dpt.int64, usm_type=usm_type, @@ -248,7 +687,7 @@ def repeat(x, repeats, /, *, axis=None): src=repeats, dst=rep_buf, sycl_queue=exec_q, depends=dep_evs ) _manager.add_event_pair(ht_copy_ev, copy_ev) - cumsum = dpt.empty( + cumsum = dpt_ext.empty( (axis_size,), dtype=dpt.int64, usm_type=usm_type, @@ -264,7 +703,7 @@ def repeat(x, repeats, /, *, axis=None): ) else: res_shape = (res_axis_size,) - res = dpt.empty( + res = dpt_ext.empty( res_shape, dtype=x.dtype, usm_type=usm_type, @@ -281,7 +720,7 @@ def repeat(x, repeats, /, *, axis=None): ) _manager.add_event_pair(ht_rep_ev, rep_ev) else: - cumsum = dpt.empty( + cumsum = dpt_ext.empty( (axis_size,), dtype=dpt.int64, usm_type=usm_type, @@ -296,7 +735,7 @@ def repeat(x, repeats, /, *, axis=None): ) else: res_shape = (res_axis_size,) - res = dpt.empty( + res = dpt_ext.empty( res_shape, dtype=x.dtype, usm_type=usm_type, @@ -353,7 +792,7 @@ def roll(x, /, shift, *, axis=None): _manager = dputils.SequentialOrderManager[exec_q] if axis is None: shift = operator.index(shift) - res = dpt.empty( + res = dpt_ext.empty( x.shape, dtype=x.dtype, usm_type=x.usm_type, sycl_queue=exec_q ) sz = operator.index(x.size) @@ -380,7 +819,7 @@ def roll(x, /, shift, *, axis=None): n_i = operator.index(shape[ax]) shifted = shifts[ax] + operator.index(sh) shifts[ax] = (shifted % n_i) if n_i > 0 else 0 - res = dpt.empty( + res = dpt_ext.empty( x.shape, dtype=x.dtype, usm_type=x.usm_type, sycl_queue=exec_q ) dep_evs = _manager.submitted_events @@ -389,3 +828,273 @@ def roll(x, /, shift, *, axis=None): ) _manager.add_event_pair(ht_e, roll_ev) return res + + +def squeeze(X, /, axis=None): + """squeeze(x, axis) + + Removes singleton dimensions (axes) from array `x`. + + Args: + x (usm_ndarray): input array + axis (Union[int, Tuple[int,...]]): axis (or axes) to squeeze. + + Returns: + usm_ndarray: + Output array is a view, if possible, + and a copy otherwise, but with all or a subset of the + dimensions of length 1 removed. Output has the same data + type as the input, is allocated on the same device as the + input and has the same USM allocation type as the input + array `x`. 
+ + Raises: + ValueError: if the specified axis has a size greater than one. + """ + if not isinstance(X, dpt.usm_ndarray): + raise TypeError(f"Expected usm_ndarray type, got {type(X)}.") + X_shape = X.shape + if axis is not None: + axis = normalize_axis_tuple(axis, X.ndim if X.ndim != 0 else X.ndim + 1) + new_shape = [] + for i, x in enumerate(X_shape): + if i not in axis: + new_shape.append(x) + else: + if x != 1: + raise ValueError( + "Cannot select an axis to squeeze out " + "which has size not equal to one." + ) + new_shape = tuple(new_shape) + else: + new_shape = tuple(axis for axis in X_shape if axis != 1) + if new_shape == X.shape: + return X + else: + return dpt_ext.reshape(X, new_shape) + + +def stack(arrays, /, *, axis=0): + """ + stack(arrays, axis) + + Joins a sequence of arrays along a new axis. + + Args: + arrays (Union[List[usm_ndarray], Tuple[usm_ndarray,...]]): + input arrays to join. Each array must have the same shape. + axis (int): axis along which the arrays will be joined. Providing + an `axis` specified the index of the new axis in the dimensions + of the output array. A valid axis must be on the interval + `[-N, N)`, where `N` is the rank (number of dimensions) of `x`. + Default: `0`. + + Returns: + usm_ndarray: + An output array having rank `N+1`, where `N` is + the rank (number of dimensions) of `x`. If the input arrays have + different data types, array API Type Promotion Rules apply. + + Raises: + ValueError: if not all input arrays have the same shape + IndexError: if provided an `axis` outside of the required interval. + """ + res_dtype, res_usm_type, exec_q = _arrays_validation(arrays) + + n = len(arrays) + X0 = arrays[0] + res_ndim = X0.ndim + 1 + axis = normalize_axis_index(axis, res_ndim) + X0_shape = X0.shape + + for i in range(1, n): + if X0_shape != arrays[i].shape: + raise ValueError("All input arrays must have the same shape") + + res_shape = tuple( + X0_shape[i - 1 * (i >= axis)] if i != axis else n + for i in range(res_ndim) + ) + + res = dpt_ext.empty( + res_shape, dtype=res_dtype, usm_type=res_usm_type, sycl_queue=exec_q + ) + + _manager = dputils.SequentialOrderManager[exec_q] + dep_evs = _manager.submitted_events + for i in range(n): + c_shapes_copy = tuple( + i if j == axis else np.s_[:] for j in range(res_ndim) + ) + _dst = res[c_shapes_copy] + hev, cpy_ev = ti._copy_usm_ndarray_into_usm_ndarray( + src=arrays[i], dst=_dst, sycl_queue=exec_q, depends=dep_evs + ) + _manager.add_event_pair(hev, cpy_ev) + + return res + + +def swapaxes(X, axis1, axis2): + """swapaxes(x, axis1, axis2) + + Interchanges two axes of an array. + + Args: + x (usm_ndarray): input array + + axis1 (int): First axis. + If `x` has rank (i.e., number of dimensions) `N`, + a valid `axis` must be in the half-open interval `[-N, N)`. + + axis2 (int): Second axis. + If `x` has rank (i.e., number of dimensions) `N`, + a valid `axis` must be in the half-open interval `[-N, N)`. + + Returns: + usm_ndarray: + Array with swapped axes. + The returned array must has the same data type as `x`, + is created on the same device as `x` and has the same USM + allocation type as `x`. + + Raises: + AxisError: if `axis` value is invalid. 
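+
+    A shape-only sketch (illustrative; assumes the module is imported as
+    ``dpt_ext``)::
+
+        x = dpt_ext.zeros((2, 3, 4))
+        dpt_ext.swapaxes(x, 0, 2).shape  # (4, 3, 2)
+        # equivalent to dpt_ext.permute_dims(x, (2, 1, 0))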
+ """ + if not isinstance(X, dpt.usm_ndarray): + raise TypeError(f"Expected usm_ndarray type, got {type(X)}.") + + axis1 = normalize_axis_index(axis1, X.ndim, "axis1") + axis2 = normalize_axis_index(axis2, X.ndim, "axis2") + + ind = list(range(0, X.ndim)) + ind[axis1] = axis2 + ind[axis2] = axis1 + return dpt_ext.permute_dims(X, tuple(ind)) + + +def unstack(X, /, *, axis=0): + """unstack(x, axis=0) + + Splits an array in a sequence of arrays along the given axis. + + Args: + x (usm_ndarray): input array + + axis (int, optional): axis along which `x` is unstacked. + If `x` has rank (i.e, number of dimensions) `N`, + a valid `axis` must reside in the half-open interval `[-N, N)`. + Default: `0`. + + Returns: + Tuple[usm_ndarray,...]: + Output sequence of arrays which are views into the input array. + + Raises: + AxisError: if the `axis` value is invalid. + """ + if not isinstance(X, dpt.usm_ndarray): + raise TypeError(f"Expected usm_ndarray type, got {type(X)}.") + + axis = normalize_axis_index(axis, X.ndim) + Y = dpt_ext.moveaxis(X, axis, 0) + + return tuple(Y[i] for i in range(Y.shape[0])) + + +def tile(x, repetitions, /): + """tile(x, repetitions) + + Repeat an input array `x` along each axis a number of times given by + `repetitions`. + + For `N` = len(`repetitions`) and `M` = len(`x.shape`): + + * If `M < N`, `x` will have `N - M` new axes prepended to its shape + * If `M > N`, `repetitions` will have `M - N` ones prepended to it + + Args: + x (usm_ndarray): input array + + repetitions (Union[int, Tuple[int, ...]]): + The number of repetitions along each dimension of `x`. + + Returns: + usm_ndarray: + tiled output array. + + The returned array will have rank `max(M, N)`. If `S` is the + shape of `x` after prepending dimensions and `R` is + `repetitions` after prepending ones, then the shape of the + result will be `S[i] * R[i]` for each dimension `i`. + + The returned array will have the same data type as `x`. + The returned array will be located on the same device as `x` and + have the same USM allocation type as `x`. + """ + if not isinstance(x, dpt.usm_ndarray): + raise TypeError(f"Expected usm_ndarray type, got {type(x)}.") + + if not isinstance(repetitions, tuple): + if isinstance(repetitions, int): + repetitions = (repetitions,) + else: + raise TypeError( + f"Expected tuple or integer type, got {type(repetitions)}." 
+ ) + + rep_dims = len(repetitions) + x_dims = x.ndim + if rep_dims < x_dims: + repetitions = (x_dims - rep_dims) * (1,) + repetitions + elif x_dims < rep_dims: + x = dpt_ext.reshape(x, (rep_dims - x_dims) * (1,) + x.shape) + res_shape = tuple(map(lambda sh, rep: sh * rep, x.shape, repetitions)) + # case of empty input + if x.size == 0: + return dpt_ext.empty( + res_shape, + dtype=x.dtype, + usm_type=x.usm_type, + sycl_queue=x.sycl_queue, + ) + in_sh = x.shape + if res_shape == in_sh: + return dpt_ext.copy(x) + expanded_sh = [] + broadcast_sh = [] + out_sz = 1 + for i in range(len(res_shape)): + out_sz *= res_shape[i] + reps, sh = repetitions[i], in_sh[i] + if reps == 1: + # dimension will be unchanged + broadcast_sh.append(sh) + expanded_sh.append(sh) + elif sh == 1: + # dimension will be broadcast + broadcast_sh.append(reps) + expanded_sh.append(sh) + else: + broadcast_sh.extend([reps, sh]) + expanded_sh.extend([1, sh]) + exec_q = x.sycl_queue + xdt = x.dtype + xut = x.usm_type + res = dpt_ext.empty((out_sz,), dtype=xdt, usm_type=xut, sycl_queue=exec_q) + # no need to copy data for empty output + if out_sz > 0: + x = dpt_ext.broadcast_to( + # this reshape should never copy + dpt_ext.reshape(x, expanded_sh), + broadcast_sh, + ) + # copy broadcast input into flat array + _manager = dputils.SequentialOrderManager[exec_q] + dep_evs = _manager.submitted_events + hev, cp_ev = ti._copy_usm_ndarray_for_reshape( + src=x, dst=res, sycl_queue=exec_q, depends=dep_evs + ) + _manager.add_event_pair(hev, cp_ev) + return dpt_ext.reshape(res, res_shape) diff --git a/dpctl_ext/tensor/_reshape.py b/dpctl_ext/tensor/_reshape.py index 61aa6c9c754f..23cf47a83568 100644 --- a/dpctl_ext/tensor/_reshape.py +++ b/dpctl_ext/tensor/_reshape.py @@ -32,9 +32,11 @@ import dpctl.utils import numpy as np -# TODO: revert to `from dpctl.tensor._tensor_impl...` +# TODO: revert to `import dpctl.tensor...` # when dpnp fully migrates dpctl/tensor -from dpctl_ext.tensor._tensor_impl import ( +import dpctl_ext.tensor as dpt_ext + +from ._tensor_impl import ( _copy_usm_ndarray_for_reshape, _ravel_multi_index, _unravel_index, @@ -187,7 +189,7 @@ def reshape(X, /, shape, *, order="C", copy=None): src=X, dst=flat_res, sycl_queue=copy_q, depends=dep_evs ) else: - X_t = dpt.permute_dims(X, range(X.ndim - 1, -1, -1)) + X_t = dpt_ext.permute_dims(X, range(X.ndim - 1, -1, -1)) hev, r_e = _copy_usm_ndarray_for_reshape( src=X_t, dst=flat_res, sycl_queue=copy_q, depends=dep_evs ) diff --git a/dpctl_ext/tensor/_scalar_utils.py b/dpctl_ext/tensor/_scalar_utils.py index 86787baea8cc..3ab92b42ad00 100644 --- a/dpctl_ext/tensor/_scalar_utils.py +++ b/dpctl_ext/tensor/_scalar_utils.py @@ -33,6 +33,10 @@ import numpy as np from dpctl.tensor._usmarray import _is_object_with_buffer_protocol as _is_buffer +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt_ext + from ._type_utils import ( WeakBooleanType, WeakComplexType, @@ -59,7 +63,7 @@ def _get_dtype(o, dev): if isinstance(o, dpt.usm_ndarray): return o.dtype if hasattr(o, "__sycl_usm_array_interface__"): - return dpt.asarray(o).dtype + return dpt_ext.asarray(o).dtype if _is_buffer(o): host_dt = np.array(o).dtype dev_dt = _to_device_supported_dtype(host_dt, dev) diff --git a/dpctl_ext/tensor/_search_functions.py b/dpctl_ext/tensor/_search_functions.py index a82845e3520c..285a02b42bb8 100644 --- a/dpctl_ext/tensor/_search_functions.py +++ b/dpctl_ext/tensor/_search_functions.py @@ -291,7 +291,7 @@ def where(condition, x1, x2, /, *, 
order="K", out=None): if ti._array_overlap(condition, out) and not ti._same_logical_tensors( condition, out ): - out = dpt.empty_like(out) + out = dpt_ext.empty_like(out) if isinstance(x1, dpt.usm_ndarray): if ( @@ -299,7 +299,7 @@ def where(condition, x1, x2, /, *, order="K", out=None): and not ti._same_logical_tensors(x1, out) and x1_dtype == out_dtype ): - out = dpt.empty_like(out) + out = dpt_ext.empty_like(out) if isinstance(x2, dpt.usm_ndarray): if ( @@ -307,7 +307,7 @@ def where(condition, x1, x2, /, *, order="K", out=None): and not ti._same_logical_tensors(x2, out) and x2_dtype == out_dtype ): - out = dpt.empty_like(out) + out = dpt_ext.empty_like(out) if order == "A": order = ( @@ -323,9 +323,9 @@ def where(condition, x1, x2, /, *, order="K", out=None): else "C" ) if not isinstance(x1, dpt.usm_ndarray): - x1 = dpt.asarray(x1, dtype=x1_dtype, sycl_queue=exec_q) + x1 = dpt_ext.asarray(x1, dtype=x1_dtype, sycl_queue=exec_q) if not isinstance(x2, dpt.usm_ndarray): - x2 = dpt.asarray(x2, dtype=x2_dtype, sycl_queue=exec_q) + x2 = dpt_ext.asarray(x2, dtype=x2_dtype, sycl_queue=exec_q) if condition.size == 0: if out is not None: @@ -342,7 +342,7 @@ def where(condition, x1, x2, /, *, order="K", out=None): exec_q, ) else: - return dpt.empty( + return dpt_ext.empty( res_shape, dtype=out_dtype, order=order, @@ -356,7 +356,7 @@ def where(condition, x1, x2, /, *, order="K", out=None): if order == "K": _x1 = _empty_like_orderK(x1, out_dtype) else: - _x1 = dpt.empty_like(x1, dtype=out_dtype, order=order) + _x1 = dpt_ext.empty_like(x1, dtype=out_dtype, order=order) ht_copy1_ev, copy1_ev = ti._copy_usm_ndarray_into_usm_ndarray( src=x1, dst=_x1, sycl_queue=exec_q, depends=dep_evs ) @@ -367,7 +367,7 @@ def where(condition, x1, x2, /, *, order="K", out=None): if order == "K": _x2 = _empty_like_orderK(x2, out_dtype) else: - _x2 = dpt.empty_like(x2, dtype=out_dtype, order=order) + _x2 = dpt_ext.empty_like(x2, dtype=out_dtype, order=order) ht_copy2_ev, copy2_ev = ti._copy_usm_ndarray_into_usm_ndarray( src=x2, dst=_x2, sycl_queue=exec_q, depends=dep_evs ) @@ -380,7 +380,7 @@ def where(condition, x1, x2, /, *, order="K", out=None): condition, x1, x2, out_dtype, res_shape, out_usm_type, exec_q ) else: - out = dpt.empty( + out = dpt_ext.empty( res_shape, dtype=out_dtype, order=order, @@ -389,11 +389,11 @@ def where(condition, x1, x2, /, *, order="K", out=None): ) if condition_shape != res_shape: - condition = dpt.broadcast_to(condition, res_shape) + condition = dpt_ext.broadcast_to(condition, res_shape) if x1_shape != res_shape: - x1 = dpt.broadcast_to(x1, res_shape) + x1 = dpt_ext.broadcast_to(x1, res_shape) if x2_shape != res_shape: - x2 = dpt.broadcast_to(x2, res_shape) + x2 = dpt_ext.broadcast_to(x2, res_shape) dep_evs = _manager.submitted_events hev, where_ev = ti._where( diff --git a/dpctl_ext/tensor/libtensor/include/kernels/constructors.hpp b/dpctl_ext/tensor/libtensor/include/kernels/constructors.hpp index f48dfa4d4077..67f2502067ca 100644 --- a/dpctl_ext/tensor/libtensor/include/kernels/constructors.hpp +++ b/dpctl_ext/tensor/libtensor/include/kernels/constructors.hpp @@ -36,6 +36,7 @@ #include #include +#include #include #include @@ -54,6 +55,10 @@ using dpctl::tensor::ssize_t; @defgroup CtorKernels */ +template +class linear_sequence_step_kernel; +template +class linear_sequence_affine_kernel; template class full_strided_kernel; template @@ -61,6 +66,179 @@ class eye_kernel; using namespace dpctl::tensor::offset_utils; +template +class LinearSequenceStepFunctor +{ +private: + Ty *p = nullptr; + 
+    Ty start_v;
+    Ty step_v;
+
+public:
+    LinearSequenceStepFunctor(char *dst_p, Ty v0, Ty dv)
+        : p(reinterpret_cast<Ty *>(dst_p)), start_v(v0), step_v(dv)
+    {
+    }
+
+    void operator()(sycl::id<1> wiid) const
+    {
+        auto i = wiid.get(0);
+        using dpctl::tensor::type_utils::is_complex;
+        if constexpr (is_complex<Ty>::value) {
+            p[i] = Ty{start_v.real() + i * step_v.real(),
+                      start_v.imag() + i * step_v.imag()};
+        }
+        else {
+            p[i] = start_v + i * step_v;
+        }
+    }
+};
+
+/*!
+ * @brief Function to submit kernel to populate given contiguous memory
+ * allocation with linear sequence specified by typed starting value and
+ * increment.
+ *
+ * @param exec_q Sycl queue to which the kernel is submitted
+ * @param nelems Length of the sequence
+ * @param start_v Typed starting value of the sequence
+ * @param step_v Typed increment of the sequence
+ * @param array_data Kernel accessible USM pointer to the start of array to be
+ * populated.
+ * @param depends List of events to wait for before starting computations, if
+ * any.
+ *
+ * @return Event to wait on to ensure that computation completes.
+ * @defgroup CtorKernels
+ */
+template <typename Ty>
+sycl::event lin_space_step_impl(sycl::queue &exec_q,
+                                std::size_t nelems,
+                                Ty start_v,
+                                Ty step_v,
+                                char *array_data,
+                                const std::vector<sycl::event> &depends)
+{
+    dpctl::tensor::type_utils::validate_type_for_device<Ty>(exec_q);
+    sycl::event lin_space_step_event = exec_q.submit([&](sycl::handler &cgh) {
+        cgh.depends_on(depends);
+        cgh.parallel_for<linear_sequence_step_kernel<Ty>>(
+            sycl::range<1>{nelems},
+            LinearSequenceStepFunctor<Ty>(array_data, start_v, step_v));
+    });
+
+    return lin_space_step_event;
+}
+
+// Constructor to populate tensor with linear sequence defined by
+// start and end data
+
+template <typename Ty, typename wTy>
+class LinearSequenceAffineFunctor
+{
+private:
+    Ty *p = nullptr;
+    Ty start_v;
+    Ty end_v;
+    std::size_t n;
+
+public:
+    LinearSequenceAffineFunctor(char *dst_p, Ty v0, Ty v1, std::size_t den)
+        : p(reinterpret_cast<Ty *>(dst_p)), start_v(v0), end_v(v1),
+          n((den == 0) ? 1 : den)
+    {
+    }
+
+    void operator()(sycl::id<1> wiid) const
+    {
+        auto i = wiid.get(0);
+        wTy wc = wTy(i) / n;
+        wTy w = wTy(n - i) / n;
+        using dpctl::tensor::type_utils::is_complex;
+        if constexpr (is_complex<Ty>::value) {
+            using reT = typename Ty::value_type;
+            auto _w = static_cast<reT>(w);
+            auto _wc = static_cast<reT>(wc);
+            auto re_comb = sycl::fma(start_v.real(), _w, reT(0));
+            re_comb =
+                sycl::fma(end_v.real(), _wc,
+                          re_comb); // start_v.real() * _w + end_v.real() * _wc;
+            auto im_comb =
+                sycl::fma(start_v.imag(), _w,
+                          reT(0)); // start_v.imag() * _w + end_v.imag() * _wc;
+            im_comb = sycl::fma(end_v.imag(), _wc, im_comb);
+            Ty affine_comb = Ty{re_comb, im_comb};
+            p[i] = affine_comb;
+        }
+        else if constexpr (std::is_floating_point<Ty>::value) {
+            Ty _w = static_cast<Ty>(w);
+            Ty _wc = static_cast<Ty>(wc);
+            auto affine_comb =
+                sycl::fma(start_v, _w, Ty(0)); // start_v * w + end_v * wc;
+            affine_comb = sycl::fma(end_v, _wc, affine_comb);
+            p[i] = affine_comb;
+        }
+        else {
+            using dpctl::tensor::type_utils::convert_impl;
+            auto affine_comb = start_v * w + end_v * wc;
+            p[i] = convert_impl<Ty, decltype(affine_comb)>(affine_comb);
+        }
+    }
+};
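+
+// Note: p[i] is computed as the convex combination
+//     w * start_v + wc * end_v,  where  w = (n - i) / n  and  wc = i / n,
+// rather than as start_v + i * (end_v - start_v) / n; this hits both
+// endpoints exactly and, mathematically, keeps every intermediate value
+// inside [min(start_v, end_v), max(start_v, end_v)]. sycl::fma performs
+// each multiply-add with a single rounding.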
+/*!
+ * @brief Function to submit kernel to populate given contiguous memory
+ * allocation with linear sequence specified by typed starting and end values.
+ *
+ * @param exec_q Sycl queue to which kernel is submitted for execution.
+ * @param nelems Length of the sequence.
+ * @param start_v Starting value of the sequence.
+ * @param end_v End-value of the sequence.
+ * @param include_endpoint Whether the end-value is included in the sequence.
+ * @param array_data Kernel accessible USM pointer to the start of array to be
+ * populated.
+ * @param depends List of events to wait for before starting computations, if
+ * any.
+ *
+ * @return Event to wait on to ensure that computation completes.
+ * @defgroup CtorKernels
+ */
+template <typename Ty>
+sycl::event lin_space_affine_impl(sycl::queue &exec_q,
+                                  std::size_t nelems,
+                                  Ty start_v,
+                                  Ty end_v,
+                                  bool include_endpoint,
+                                  char *array_data,
+                                  const std::vector<sycl::event> &depends)
+{
+    dpctl::tensor::type_utils::validate_type_for_device<Ty>(exec_q);
+
+    const bool device_supports_doubles =
+        exec_q.get_device().has(sycl::aspect::fp64);
+    const std::size_t den = (include_endpoint) ? nelems - 1 : nelems;
+
+    sycl::event lin_space_affine_event = exec_q.submit([&](sycl::handler &cgh) {
+        cgh.depends_on(depends);
+        if (device_supports_doubles) {
+            using KernelName = linear_sequence_affine_kernel<Ty, double>;
+            using Impl = LinearSequenceAffineFunctor<Ty, double>;
+
+            cgh.parallel_for<KernelName>(
+                sycl::range<1>{nelems}, Impl(array_data, start_v, end_v, den));
+        }
+        else {
+            using KernelName = linear_sequence_affine_kernel<Ty, float>;
+            using Impl = LinearSequenceAffineFunctor<Ty, float>;
+
+            cgh.parallel_for<KernelName>(
+                sycl::range<1>{nelems}, Impl(array_data, start_v, end_v, den));
+        }
+    });
+
+    return lin_space_affine_event;
+}
+
 /* ================ Full ================== */
 
 /*!
diff --git a/dpctl_ext/tensor/libtensor/source/linear_sequences.cpp b/dpctl_ext/tensor/libtensor/source/linear_sequences.cpp
new file mode 100644
index 000000000000..9a7bf2dbcc0f
--- /dev/null
+++ b/dpctl_ext/tensor/libtensor/source/linear_sequences.cpp
@@ -0,0 +1,306 @@
+//*****************************************************************************
+// Copyright (c) 2026, Intel Corporation
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// - Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// - Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// - Neither the name of the copyright holder nor the names of its contributors
+//   may be used to endorse or promote products derived from this software
+//   without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+// THE POSSIBILITY OF SUCH DAMAGE.
+//*****************************************************************************
+//
+//===---------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines functions of dpctl.tensor._tensor_impl extensions
+//===---------------------------------------------------------------------===//
+
+#include <cstddef>
+#include <utility>
+#include <vector>
+
+#include <sycl/sycl.hpp>
+
+#include "dpnp4pybind11.hpp"
+#include <pybind11/complex.h> // py::cast<std::complex<T>>
+#include <pybind11/pybind11.h>
+
+#include "kernels/constructors.hpp"
+#include "utils/output_validation.hpp"
+#include "utils/type_dispatch.hpp"
+
+#include "linear_sequences.hpp"
+
+namespace dpctl::tensor::py_internal
+{
+
+namespace py = pybind11;
+namespace td_ns = dpctl::tensor::type_dispatch;
+
+// Constructor to populate tensor with linear sequence defined by
+// start and step data
+
+typedef sycl::event (*lin_space_step_fn_ptr_t)(
+    sycl::queue &,
+    std::size_t, // num_elements
+    const py::object &start,
+    const py::object &step,
+    char *, // dst_data_ptr
+    const std::vector<sycl::event> &);
+
+/*!
+ * @brief Function to submit kernel to populate given contiguous memory
+ * allocation with linear sequence specified by starting value and increment
+ * given as Python objects.
+ *
+ * @param exec_q Sycl queue to which the kernel is submitted
+ * @param nelems Length of the sequence
+ * @param start Starting value of the sequence as Python object. Must be
+ * convertible to array element data type `Ty`.
+ * @param step Increment of the sequence as Python object. Must be convertible
+ * to array element data type `Ty`.
+ * @param array_data Kernel accessible USM pointer to the start of array to be
+ * populated.
+ * @param depends List of events to wait for before starting computations, if
+ * any.
+ *
+ * @return Event to wait on to ensure that computation completes.
+ * @defgroup CtorKernels
+ */
+template <typename Ty>
+sycl::event lin_space_step_impl(sycl::queue &exec_q,
+                                std::size_t nelems,
+                                const py::object &start,
+                                const py::object &step,
+                                char *array_data,
+                                const std::vector<sycl::event> &depends)
+{
+    Ty start_v = py::cast<Ty>(start);
+    Ty step_v = py::cast<Ty>(step);
+
+    using dpctl::tensor::kernels::constructors::lin_space_step_impl;
+
+    auto lin_space_step_event = lin_space_step_impl<Ty>(
+        exec_q, nelems, start_v, step_v, array_data, depends);
+
+    return lin_space_step_event;
+}
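
The typed wrapper above is instantiated once per supported element type, and the instantiations are collected into a dispatch vector indexed by dpctl's dtype lookup id (see `init_linear_sequences_dispatch_vectors` further down). Conceptually, in Python terms (a sketch of the mechanism, not the actual C++ machinery; dtype keys are illustrative):

    # One implementation per dtype id; usm_ndarray_linear_sequence_step
    # looks up the entry matching dst's typenum at call time.
    def make_impl(py_type):
        def impl(start, step, nelems):
            s, d = py_type(start), py_type(step)  # like py::cast<Ty>
            return [s + i * d for i in range(nelems)]
        return impl

    dispatch_vector = {"i8": make_impl(int), "f8": make_impl(float),
                       "c16": make_impl(complex)}

    assert dispatch_vector["f8"](0.0, 0.5, 3) == [0.0, 0.5, 1.0]
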
+typedef sycl::event (*lin_space_affine_fn_ptr_t)(
+    sycl::queue &,
+    std::size_t, // num_elements
+    const py::object &start,
+    const py::object &end,
+    bool include_endpoint,
+    char *, // dst_data_ptr
+    const std::vector<sycl::event> &);
+
+/*!
+ * @brief Function to submit kernel to populate given contiguous memory
+ * allocation with linear sequence specified by starting and end values given
+ * as Python objects.
+ *
+ * @param exec_q Sycl queue to which kernel is submitted for execution.
+ * @param nelems Length of the sequence
+ * @param start Starting value of the sequence as Python object. Must be
+ * convertible to array data element type `Ty`.
+ * @param end End-value of the sequence as Python object. Must be convertible
+ * to array data element type `Ty`.
+ * @param include_endpoint Whether the end-value is included in the sequence
+ * @param array_data Kernel accessible USM pointer to the start of array to be
+ * populated.
+ * @param depends List of events to wait for before starting computations, if
+ * any.
+ *
+ * @return Event to wait on to ensure that computation completes.
+ * @defgroup CtorKernels
+ */
+template <typename Ty>
+sycl::event lin_space_affine_impl(sycl::queue &exec_q,
+                                  std::size_t nelems,
+                                  const py::object &start,
+                                  const py::object &end,
+                                  bool include_endpoint,
+                                  char *array_data,
+                                  const std::vector<sycl::event> &depends)
+{
+    Ty start_v = py::cast<Ty>(start);
+    Ty end_v = py::cast<Ty>(end);
+
+    using dpctl::tensor::kernels::constructors::lin_space_affine_impl;
+
+    auto lin_space_affine_event = lin_space_affine_impl<Ty>(
+        exec_q, nelems, start_v, end_v, include_endpoint, array_data, depends);
+
+    return lin_space_affine_event;
+}
+
+using dpctl::utils::keep_args_alive;
+
+static lin_space_step_fn_ptr_t
+    lin_space_step_dispatch_vector[td_ns::num_types];
+
+static lin_space_affine_fn_ptr_t
+    lin_space_affine_dispatch_vector[td_ns::num_types];
+
+std::pair<sycl::event, sycl::event>
+    usm_ndarray_linear_sequence_step(const py::object &start,
+                                     const py::object &dt,
+                                     const dpctl::tensor::usm_ndarray &dst,
+                                     sycl::queue &exec_q,
+                                     const std::vector<sycl::event> &depends)
+{
+    // dst must be 1D and C-contiguous
+    // start, dt should be coercible into data type of dst
+
+    if (dst.get_ndim() != 1) {
+        throw py::value_error(
+            "usm_ndarray_linspace: Expecting 1D array to populate");
+    }
+
+    if (!dst.is_c_contiguous()) {
+        throw py::value_error(
+            "usm_ndarray_linspace: Non-contiguous arrays are not supported");
+    }
+
+    if (!dpctl::utils::queues_are_compatible(exec_q, {dst})) {
+        throw py::value_error(
+            "Execution queue is not compatible with the allocation queue");
+    }
+
+    dpctl::tensor::validation::CheckWritable::throw_if_not_writable(dst);
+
+    auto array_types = td_ns::usm_ndarray_types();
+    int dst_typenum = dst.get_typenum();
+    int dst_typeid = array_types.typenum_to_lookup_id(dst_typenum);
+
+    py::ssize_t len = dst.get_shape(0);
+    if (len == 0) {
+        // nothing to do
+        return std::make_pair(sycl::event{}, sycl::event{});
+    }
+
+    char *dst_data = dst.get_data();
+    sycl::event linspace_step_event;
+
+    auto fn = lin_space_step_dispatch_vector[dst_typeid];
+
+    linspace_step_event = fn(exec_q, static_cast<std::size_t>(len), start, dt,
+                             dst_data, depends);
+
+    return std::make_pair(
+        keep_args_alive(exec_q, {dst}, {linspace_step_event}),
+        linspace_step_event);
+}
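
Each binding returns a (host-task, compute) event pair; dpnp-side callers are expected to feed these through the usual sequential-order-manager protocol once `_linspace_step`/`_linspace_affine` are exported by the `m.def` hunk in `tensor_ctors.cpp` below. A hypothetical end-to-end call, using only APIs that appear elsewhere in this patch (queue and array names are illustrative):

    import dpctl
    import dpctl.utils as dpu

    import dpctl_ext.tensor as dpt_ext
    import dpctl_ext.tensor._tensor_impl as ti

    q = dpctl.SyclQueue()
    dst = dpt_ext.empty(5, dtype="f4", sycl_queue=q)

    _manager = dpu.SequentialOrderManager[q]
    dep_evs = _manager.submitted_events
    # populate dst with 5 evenly spaced values from 0 to 1, endpoint included
    ht_ev, comp_ev = ti._linspace_affine(0.0, 1.0, dst, True, q, depends=dep_evs)
    _manager.add_event_pair(ht_ev, comp_ev)
    comp_ev.wait()  # dst now holds [0.0, 0.25, 0.5, 0.75, 1.0]
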
+std::pair<sycl::event, sycl::event>
+    usm_ndarray_linear_sequence_affine(const py::object &start,
+                                       const py::object &end,
+                                       const dpctl::tensor::usm_ndarray &dst,
+                                       bool include_endpoint,
+                                       sycl::queue &exec_q,
+                                       const std::vector<sycl::event> &depends)
+{
+    // dst must be 1D and C-contiguous
+    // start, end should be coercible into data type of dst
+
+    if (dst.get_ndim() != 1) {
+        throw py::value_error(
+            "usm_ndarray_linspace: Expecting 1D array to populate");
+    }
+
+    if (!dst.is_c_contiguous()) {
+        throw py::value_error(
+            "usm_ndarray_linspace: Non-contiguous arrays are not supported");
+    }
+
+    if (!dpctl::utils::queues_are_compatible(exec_q, {dst})) {
+        throw py::value_error(
+            "Execution queue context is not the same as allocation context");
+    }
+
+    dpctl::tensor::validation::CheckWritable::throw_if_not_writable(dst);
+
+    auto array_types = td_ns::usm_ndarray_types();
+    int dst_typenum = dst.get_typenum();
+    int dst_typeid = array_types.typenum_to_lookup_id(dst_typenum);
+
+    py::ssize_t len = dst.get_shape(0);
+    if (len == 0) {
+        // nothing to do
+        return std::make_pair(sycl::event{}, sycl::event{});
+    }
+
+    char *dst_data = dst.get_data();
+    sycl::event linspace_affine_event;
+
+    auto fn = lin_space_affine_dispatch_vector[dst_typeid];
+
+    linspace_affine_event = fn(exec_q, static_cast<std::size_t>(len), start,
+                               end, include_endpoint, dst_data, depends);
+
+    return std::make_pair(
+        keep_args_alive(exec_q, {dst}, {linspace_affine_event}),
+        linspace_affine_event);
+}
+
+/*!
+ * @brief Factory to get function pointer of type `fnT` for array with
+ * elements of type `Ty`.
+ * @defgroup CtorKernels
+ */
+template <typename fnT, typename Ty>
+struct LinSpaceStepFactory
+{
+    fnT get()
+    {
+        fnT f = lin_space_step_impl<Ty>;
+        return f;
+    }
+};
+
+/*!
+ * @brief Factory to get function pointer of type `fnT` for array data type
+ * `Ty`.
+ */
+template <typename fnT, typename Ty>
+struct LinSpaceAffineFactory
+{
+    fnT get()
+    {
+        fnT f = lin_space_affine_impl<Ty>;
+        return f;
+    }
+};
+
+void init_linear_sequences_dispatch_vectors(void)
+{
+    using namespace td_ns;
+
+    DispatchVectorBuilder<lin_space_step_fn_ptr_t, LinSpaceStepFactory,
+                          num_types>
+        dvb1;
+    dvb1.populate_dispatch_vector(lin_space_step_dispatch_vector);
+
+    DispatchVectorBuilder<lin_space_affine_fn_ptr_t, LinSpaceAffineFactory,
+                          num_types>
+        dvb2;
+    dvb2.populate_dispatch_vector(lin_space_affine_dispatch_vector);
+}
+
+} // namespace dpctl::tensor::py_internal
diff --git a/dpctl_ext/tensor/libtensor/source/linear_sequences.hpp b/dpctl_ext/tensor/libtensor/source/linear_sequences.hpp
new file mode 100644
index 000000000000..45cf45153462
--- /dev/null
+++ b/dpctl_ext/tensor/libtensor/source/linear_sequences.hpp
@@ -0,0 +1,66 @@
+//*****************************************************************************
+// Copyright (c) 2026, Intel Corporation
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// - Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// - Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// - Neither the name of the copyright holder nor the names of its contributors
+//   may be used to endorse or promote products derived from this software
+//   without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+// THE POSSIBILITY OF SUCH DAMAGE.
+//*****************************************************************************
+//
+//===---------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines functions of dpctl.tensor._tensor_impl extensions
+//===---------------------------------------------------------------------===//
+
+#pragma once
+#include <utility>
+#include <vector>
+
+#include <sycl/sycl.hpp>
+
+#include "dpnp4pybind11.hpp"
+#include <pybind11/pybind11.h>
+
+namespace py = pybind11;
+
+namespace dpctl::tensor::py_internal
+{
+
+extern std::pair<sycl::event, sycl::event> usm_ndarray_linear_sequence_step(
+    const py::object &start,
+    const py::object &dt,
+    const dpctl::tensor::usm_ndarray &dst,
+    sycl::queue &exec_q,
+    const std::vector<sycl::event> &depends = {});
+
+extern std::pair<sycl::event, sycl::event> usm_ndarray_linear_sequence_affine(
+    const py::object &start,
+    const py::object &end,
+    const dpctl::tensor::usm_ndarray &dst,
+    bool include_endpoint,
+    sycl::queue &exec_q,
+    const std::vector<sycl::event> &depends = {});
+
+extern void init_linear_sequences_dispatch_vectors(void);
+
+} // namespace dpctl::tensor::py_internal
diff --git a/dpctl_ext/tensor/libtensor/source/tensor_ctors.cpp b/dpctl_ext/tensor/libtensor/source/tensor_ctors.cpp
index 5e5b07c087f8..cdd6e43ed9c5 100644
--- a/dpctl_ext/tensor/libtensor/source/tensor_ctors.cpp
+++ b/dpctl_ext/tensor/libtensor/source/tensor_ctors.cpp
@@ -56,7 +56,7 @@
 #include "full_ctor.hpp"
 #include "integer_advanced_indexing.hpp"
 #include "kernels/dpctl_tensor_types.hpp"
-// #include "linear_sequences.hpp"
+#include "linear_sequences.hpp"
 #include "repeat.hpp"
 #include "simplify_iteration_space.hpp"
 #include "triul_ctor.hpp"
@@ -94,8 +94,8 @@ using dpctl::tensor::py_internal::copy_numpy_ndarray_into_usm_ndarray;
 
 /* ============= linear-sequence ==================== */
 
-// using dpctl::tensor::py_internal::usm_ndarray_linear_sequence_affine;
-// using dpctl::tensor::py_internal::usm_ndarray_linear_sequence_step;
+using dpctl::tensor::py_internal::usm_ndarray_linear_sequence_affine;
+using dpctl::tensor::py_internal::usm_ndarray_linear_sequence_step;
 
 /* ================ Full ================== */
 
@@ -154,7 +154,7 @@ void init_dispatch_vectors(void)
     init_copy_as_contig_dispatch_vectors();
     init_copy_for_reshape_dispatch_vectors();
     init_copy_for_roll_dispatch_vectors();
-    // init_linear_sequences_dispatch_vectors();
+    init_linear_sequences_dispatch_vectors();
     init_full_ctor_dispatch_vectors();
     init_zeros_ctor_dispatch_vectors();
     init_eye_ctor_dispatch_vectors();
@@ -297,20 +297,22 @@ PYBIND11_MODULE(_tensor_impl, m)
           py::arg("src"), py::arg("dst"), py::arg("shifts"),
           py::arg("sycl_queue"), py::arg("depends") = py::list());
 
-    // m.def("_linspace_step", &usm_ndarray_linear_sequence_step,
-    //       "Fills input 1D contiguous usm_ndarray `dst` with linear
-    //       sequence " "specified by " "starting point `start` and step
-    //       `dt`. " "Returns a tuple of events: (ht_event, comp_event)",
-    //       py::arg("start"), py::arg("dt"), py::arg("dst"),
-    //       py::arg("sycl_queue"), py::arg("depends") = py::list());
-
-    // m.def("_linspace_affine", &usm_ndarray_linear_sequence_affine,
-    //       "Fills input 1D contiguous usm_ndarray `dst` with linear
-    //       sequence " "specified by " "starting point `start` and end
-    //       point `end`. " "Returns a tuple of events: (ht_event,
-    //       comp_event)", py::arg("start"), py::arg("end"), py::arg("dst"),
-    //       py::arg("include_endpoint"), py::arg("sycl_queue"),
-    //       py::arg("depends") = py::list());
+    m.def("_linspace_step", &usm_ndarray_linear_sequence_step,
+          "Fills input 1D contiguous usm_ndarray `dst` with linear sequence "
+          "specified by "
+          "starting point `start` and step `dt`. "
+          "Returns a tuple of events: (ht_event, comp_event)",
+          py::arg("start"), py::arg("dt"), py::arg("dst"),
+          py::arg("sycl_queue"), py::arg("depends") = py::list());
+
+    m.def("_linspace_affine", &usm_ndarray_linear_sequence_affine,
+          "Fills input 1D contiguous usm_ndarray `dst` with linear sequence "
+          "specified by "
+          "starting point `start` and end point `end`. "
+          "Returns a tuple of events: (ht_event, comp_event)",
+          py::arg("start"), py::arg("end"), py::arg("dst"),
+          py::arg("include_endpoint"), py::arg("sycl_queue"),
+          py::arg("depends") = py::list());
 
     m.def("_copy_numpy_ndarray_into_usm_ndarray",
           &copy_numpy_ndarray_into_usm_ndarray,
diff --git a/dpnp/dpnp_algo/dpnp_arraycreation.py b/dpnp/dpnp_algo/dpnp_arraycreation.py
index f3dd18153563..4e2ee8531a18 100644
--- a/dpnp/dpnp_algo/dpnp_arraycreation.py
+++ b/dpnp/dpnp_algo/dpnp_arraycreation.py
@@ -53,7 +53,7 @@ def _as_usm_ndarray(a, usm_type, sycl_queue):
     if isinstance(a, dpnp_array):
         a = a.get_array()
 
-    return dpt.asarray(a, usm_type=usm_type, sycl_queue=sycl_queue)
+    return dpt_ext.asarray(a, usm_type=usm_type, sycl_queue=sycl_queue)
 
 
 def _check_has_zero_val(a):
@@ -196,7 +196,7 @@ def dpnp_linspace(
 
     if dpnp.isscalar(start) and dpnp.isscalar(stop):
         # Call linspace() function for scalars.
-        usm_res = dpt.linspace(
+        usm_res = dpt_ext.linspace(
             start,
             stop,
             num,
@@ -213,19 +213,19 @@ def dpnp_linspace(
             else:
                 step = dpnp.nan
     else:
-        usm_start = dpt.asarray(
+        usm_start = dpt_ext.asarray(
             start,
             dtype=dt,
             usm_type=_usm_type,
             sycl_queue=sycl_queue_normalized,
         )
-        usm_stop = dpt.asarray(
+        usm_stop = dpt_ext.asarray(
             stop, dtype=dt, usm_type=_usm_type, sycl_queue=sycl_queue_normalized
         )
 
         delta = usm_stop - usm_start
 
-        usm_res = dpt.arange(
+        usm_res = dpt_ext.arange(
             0,
             stop=num,
             step=1,
@@ -256,7 +256,7 @@ def dpnp_linspace(
             usm_res[-1, ...] = usm_stop
 
     if axis != 0:
-        usm_res = dpt.moveaxis(usm_res, 0, axis)
+        usm_res = dpt_ext.moveaxis(usm_res, 0, axis)
 
     if dpnp.issubdtype(dtype, dpnp.integer):
         dpt.floor(usm_res, out=usm_res)
@@ -266,7 +266,7 @@ def dpnp_linspace(
 
     if retstep is True:
         if dpnp.isscalar(step):
-            step = dpt.asarray(
+            step = dpt_ext.asarray(
                 step, usm_type=res.usm_type, sycl_queue=res.sycl_queue
             )
         return res, dpnp_array._create_from_usm_ndarray(step)
diff --git a/dpnp/dpnp_algo/dpnp_elementwise_common.py b/dpnp/dpnp_algo/dpnp_elementwise_common.py
index 722b8cb3b3f0..d7eeccf78489 100644
--- a/dpnp/dpnp_algo/dpnp_elementwise_common.py
+++ b/dpnp/dpnp_algo/dpnp_elementwise_common.py
@@ -467,7 +467,7 @@ def __call__(
             )
 
             # Allocate a temporary buffer with the required dtype
-            out[i] = dpt.empty_like(res, dtype=res_dt)
+            out[i] = dpt_ext.empty_like(res, dtype=res_dt)
         elif (
             buf_dt is None
             and dti._array_overlap(x, res)
@@ -476,7 +476,7 @@ def __call__(
             # Allocate a temporary buffer to avoid memory overlapping.
            # Note if `buf_dt` is not None, a temporary copy of `x` will be
            # created, so the array overlap check isn't needed.
- out[i] = dpt.empty_like(res) + out[i] = dpt_ext.empty_like(res) _manager = dpu.SequentialOrderManager[exec_q] dep_evs = _manager.submitted_events @@ -486,7 +486,7 @@ def __call__( if order == "K": buf = dtc._empty_like_orderK(x, buf_dt) else: - buf = dpt.empty_like(x, dtype=buf_dt, order=order) + buf = dpt_ext.empty_like(x, dtype=buf_dt, order=order) ht_copy_ev, copy_ev = dti._copy_usm_ndarray_into_usm_ndarray( src=x, dst=buf, sycl_queue=exec_q, depends=dep_evs @@ -503,7 +503,7 @@ def __call__( if order == "K": out[i] = dtc._empty_like_orderK(x, res_dt) else: - out[i] = dpt.empty_like(x, dtype=res_dt, order=order) + out[i] = dpt_ext.empty_like(x, dtype=res_dt, order=order) # Call the unary function with input and output arrays ht_unary_ev, unary_ev = self.get_implementation_function()( @@ -713,7 +713,7 @@ def __call__( if dtype is not None: if dpnp.isscalar(x1): - x1_usm = dpt.asarray( + x1_usm = dpt_ext.asarray( x1, dtype=dtype, sycl_queue=x2.sycl_queue, @@ -722,7 +722,7 @@ def __call__( x2_usm = dpt_ext.astype(x2_usm, dtype, copy=False) elif dpnp.isscalar(x2): x1_usm = dpt_ext.astype(x1_usm, dtype, copy=False) - x2_usm = dpt.asarray( + x2_usm = dpt_ext.asarray( x2, dtype=dtype, sycl_queue=x1.sycl_queue, @@ -1078,7 +1078,7 @@ def __call__( ) # Allocate a temporary buffer with the required dtype - out[i] = dpt.empty_like(res, dtype=res_dt) + out[i] = dpt_ext.empty_like(res, dtype=res_dt) else: # If `dt` is not None, a temporary copy of `x` will be created, # so the array overlap check isn't needed. @@ -1094,7 +1094,7 @@ def __call__( for x in x_to_check ): # allocate a temporary buffer to avoid memory overlapping - out[i] = dpt.empty_like(res) + out[i] = dpt_ext.empty_like(res) x1 = dpnp.as_usm_ndarray(x1, dtype=x1_dt, sycl_queue=exec_q) x2 = dpnp.as_usm_ndarray(x2, dtype=x2_dt, sycl_queue=exec_q) @@ -1127,7 +1127,7 @@ def __call__( if order == "K": buf = dtc._empty_like_orderK(x, buf_dt) else: - buf = dpt.empty_like(x, dtype=buf_dt, order=order) + buf = dpt_ext.empty_like(x, dtype=buf_dt, order=order) ht_copy_ev, copy_ev = dti._copy_usm_ndarray_into_usm_ndarray( src=x, dst=buf, sycl_queue=exec_q, depends=dep_evs @@ -1146,7 +1146,7 @@ def __call__( x1, x2, res_dt, res_shape, res_usm_type, exec_q ) else: - out[i] = dpt.empty( + out[i] = dpt_ext.empty( res_shape, dtype=res_dt, order=order, @@ -1156,9 +1156,9 @@ def __call__( # Broadcast shapes of input arrays if x1.shape != res_shape: - x1 = dpt.broadcast_to(x1, res_shape) + x1 = dpt_ext.broadcast_to(x1, res_shape) if x2.shape != res_shape: - x2 = dpt.broadcast_to(x2, res_shape) + x2 = dpt_ext.broadcast_to(x2, res_shape) # Call the binary function with input and output arrays ht_binary_ev, binary_ev = self.get_implementation_function()( diff --git a/dpnp/dpnp_algo/dpnp_fill.py b/dpnp/dpnp_algo/dpnp_fill.py index ddba9f634cb1..c9ae58a114a9 100644 --- a/dpnp/dpnp_algo/dpnp_fill.py +++ b/dpnp/dpnp_algo/dpnp_fill.py @@ -28,14 +28,13 @@ from numbers import Number -import dpctl.tensor as dpt import dpctl.utils as dpu -from dpctl.tensor._ctors import _cast_fill_val # TODO: revert to `from dpctl.tensor...` # when dpnp fully migrates dpctl/tensor -import dpctl_ext.tensor as dpt_ext +import dpctl_ext.tensor as dpt import dpnp +from dpctl_ext.tensor._ctors import _cast_fill_val from dpctl_ext.tensor._tensor_impl import ( _copy_usm_ndarray_into_usm_ndarray, _full_usm_ndarray, @@ -56,7 +55,7 @@ def dpnp_fill(arr, val): raise dpu.ExecutionPlacementError( "Input arrays have incompatible queues." 
) - a_val = dpt_ext.astype(val, arr.dtype) + a_val = dpt.astype(val, arr.dtype) a_val = dpt.broadcast_to(a_val, arr.shape) _manager = dpu.SequentialOrderManager[exec_q] dep_evs = _manager.submitted_events diff --git a/dpnp/dpnp_array.py b/dpnp/dpnp_array.py index b3ed3770396d..6418302d6e7b 100644 --- a/dpnp/dpnp_array.py +++ b/dpnp/dpnp_array.py @@ -2283,7 +2283,7 @@ def transpose(self, *axes): # self.transpose(None).shape == self.shape[::-1] axes = tuple((ndim - x - 1) for x in range(ndim)) - usm_res = dpt.permute_dims(self._array_obj, axes) + usm_res = dpt_ext.permute_dims(self._array_obj, axes) return dpnp_array._create_from_usm_ndarray(usm_res) def var( diff --git a/dpnp/dpnp_container.py b/dpnp/dpnp_container.py index 0727b9bfd775..9fe955746593 100644 --- a/dpnp/dpnp_container.py +++ b/dpnp/dpnp_container.py @@ -35,12 +35,11 @@ """ -import dpctl.tensor as dpt import dpctl.utils as dpu # TODO: revert to `import dpctl.tensor...` # when dpnp fully migrates dpctl/tensor -import dpctl_ext.tensor as dpt_ext +import dpctl_ext.tensor as dpt import dpnp from dpnp.dpnp_array import dpnp_array @@ -143,7 +142,7 @@ def copy(x1, /, *, order="K"): if order is None: order = "K" - array_obj = dpt_ext.copy(dpnp.get_usm_ndarray(x1), order=order) + array_obj = dpt.copy(dpnp.get_usm_ndarray(x1), order=order) return dpnp_array._create_from_usm_ndarray(array_obj) @@ -196,7 +195,7 @@ def eye( order = "C" """Creates `dpnp_array` with ones on the `k`th diagonal.""" - array_obj = dpt_ext.eye( + array_obj = dpt.eye( N, M, k=k, @@ -231,7 +230,7 @@ def full( fill_value = fill_value.get_array() """Creates `dpnp_array` having a specified shape, filled with fill_value.""" - array_obj = dpt_ext.full( + array_obj = dpt.full( shape, fill_value, dtype=dtype, @@ -272,13 +271,13 @@ def ones( def tril(x1, /, *, k=0): """Creates `dpnp_array` as lower triangular part of an input array.""" - array_obj = dpt_ext.tril(dpnp.get_usm_ndarray(x1), k=k) + array_obj = dpt.tril(dpnp.get_usm_ndarray(x1), k=k) return dpnp_array._create_from_usm_ndarray(array_obj) def triu(x1, /, *, k=0): """Creates `dpnp_array` as upper triangular part of an input array.""" - array_obj = dpt_ext.triu(dpnp.get_usm_ndarray(x1), k=k) + array_obj = dpt.triu(dpnp.get_usm_ndarray(x1), k=k) return dpnp_array._create_from_usm_ndarray(array_obj) diff --git a/dpnp/dpnp_iface.py b/dpnp/dpnp_iface.py index 6c050a208981..9fca083a6413 100644 --- a/dpnp/dpnp_iface.py +++ b/dpnp/dpnp_iface.py @@ -191,7 +191,7 @@ def as_usm_ndarray(a, dtype=None, device=None, usm_type=None, sycl_queue=None): if is_supported_array_type(a): return get_usm_ndarray(a) - return dpt.asarray( + return dpt_ext.asarray( a, dtype=dtype, device=device, usm_type=usm_type, sycl_queue=sycl_queue ) diff --git a/dpnp/dpnp_iface_arraycreation.py b/dpnp/dpnp_iface_arraycreation.py index 52fc4b7f6448..d09cc17bde79 100644 --- a/dpnp/dpnp_iface_arraycreation.py +++ b/dpnp/dpnp_iface_arraycreation.py @@ -3131,7 +3131,7 @@ def meshgrid(*xi, copy=True, sparse=False, indexing="xy"): output[1] = dpt_ext.reshape(output[1], (-1, 1) + s0[2:]) if not sparse: - output = dpt.broadcast_arrays(*output) + output = dpt_ext.broadcast_arrays(*output) if copy: output = [dpt_ext.copy(x) for x in output] @@ -3696,7 +3696,7 @@ def tri( if usm_type is None: usm_type = "device" - m = dpt.ones( + m = dpt_ext.ones( (N, M), dtype=_dtype, device=device, @@ -3912,7 +3912,7 @@ def vander( if dpnp.is_supported_array_type(x): x = dpnp.get_usm_ndarray(x) - usm_x = dpt.asarray( + usm_x = dpt_ext.asarray( x, device=device, usm_type=usm_type, 
sycl_queue=sycl_queue ) @@ -3935,7 +3935,7 @@ def vander( tmp = m[:, ::-1] if not increasing else m dpnp.power( dpt_ext.reshape(usm_x, (-1, 1)), - dpt.arange( + dpt_ext.arange( N, dtype=_dtype, usm_type=x_usm_type, sycl_queue=x_sycl_queue ), out=tmp, diff --git a/dpnp/dpnp_iface_indexing.py b/dpnp/dpnp_iface_indexing.py index bc190db70c4e..a52196e9e4db 100644 --- a/dpnp/dpnp_iface_indexing.py +++ b/dpnp/dpnp_iface_indexing.py @@ -141,9 +141,9 @@ def _choose_run(inds, chcs, q, usm_type, out=None, mode=0): ti._array_overlap(out, chc) for chc in chcs ): # Allocate a temporary buffer to avoid memory overlapping. - out = dpt.empty_like(out) + out = dpt_ext.empty_like(out) else: - out = dpt.empty( + out = dpt_ext.empty( inds.shape, dtype=chcs[0].dtype, usm_type=usm_type, sycl_queue=q ) @@ -260,7 +260,7 @@ def choose(a, choices, out=None, mode="wrap"): choices, ) ) - arrs_broadcast = dpt.broadcast_arrays(inds, *choices) + arrs_broadcast = dpt_ext.broadcast_arrays(inds, *choices) inds = arrs_broadcast[0] choices = tuple(arrs_broadcast[1:]) @@ -301,9 +301,11 @@ def _take_index(x, inds, axis, q, usm_type, out=None, mode=0): if ti._array_overlap(x, out): # Allocate a temporary buffer to avoid memory overlapping. - out = dpt.empty_like(out) + out = dpt_ext.empty_like(out) else: - out = dpt.empty(res_sh, dtype=x.dtype, usm_type=usm_type, sycl_queue=q) + out = dpt_ext.empty( + res_sh, dtype=x.dtype, usm_type=usm_type, sycl_queue=q + ) _manager = dpu.SequentialOrderManager[q] dep_evs = _manager.submitted_events @@ -1803,7 +1805,7 @@ def put_along_axis(a, ind, values, axis, mode="wrap"): if dpnp.is_supported_array_type(values): usm_vals = dpnp.get_usm_ndarray(values) else: - usm_vals = dpt.asarray( + usm_vals = dpt_ext.asarray( values, usm_type=a.usm_type, sycl_queue=a.sycl_queue ) @@ -2151,7 +2153,7 @@ def take(a, indices, /, *, axis=None, out=None, mode="wrap"): usm_a = dpnp.get_usm_ndarray(a) if not dpnp.is_supported_array_type(indices): - usm_ind = dpt.asarray( + usm_ind = dpt_ext.asarray( indices, usm_type=a.usm_type, sycl_queue=a.sycl_queue ) else: diff --git a/dpnp/dpnp_iface_manipulation.py b/dpnp/dpnp_iface_manipulation.py index d5e1e1aa5706..d188ae098cd9 100644 --- a/dpnp/dpnp_iface_manipulation.py +++ b/dpnp/dpnp_iface_manipulation.py @@ -375,7 +375,7 @@ def _get_first_nan_index(usm_a): ): if dpnp.issubdtype(usm_a.dtype, dpnp.complexfloating): # for complex all NaNs are considered equivalent - true_val = dpt.asarray( + true_val = dpt_ext.asarray( True, sycl_queue=usm_a.sycl_queue, usm_type=usm_a.usm_type ) return dpt.searchsorted(dpt.isnan(usm_a), true_val, side="left") @@ -1093,7 +1093,9 @@ def broadcast_arrays(*args, subok=False): if len(args) == 0: return [] - usm_arrays = dpt.broadcast_arrays(*[dpnp.get_usm_ndarray(a) for a in args]) + usm_arrays = dpt_ext.broadcast_arrays( + *[dpnp.get_usm_ndarray(a) for a in args] + ) return [dpnp_array._create_from_usm_ndarray(a) for a in usm_arrays] @@ -1178,7 +1180,7 @@ def broadcast_to(array, /, shape, subok=False): raise NotImplementedError(f"subok={subok} is currently not supported") usm_array = dpnp.get_usm_ndarray(array) - new_array = dpt.broadcast_to(usm_array, shape) + new_array = dpt_ext.broadcast_to(usm_array, shape) return dpnp_array._create_from_usm_ndarray(new_array) @@ -1416,7 +1418,7 @@ def concatenate( ) usm_arrays = [dpnp.get_usm_ndarray(x) for x in arrays] - usm_res = dpt.concat(usm_arrays, axis=axis) + usm_res = dpt_ext.concat(usm_arrays, axis=axis) res = dpnp_array._create_from_usm_ndarray(usm_res) if dtype is not None: @@ -1521,7 
+1523,7 @@ def copyto(dst, src, casting="same_kind", where=True): f"but got {where.dtype}" ) - dst_usm, src_usm, mask_usm = dpt.broadcast_arrays( + dst_usm, src_usm, mask_usm = dpt_ext.broadcast_arrays( dpnp.get_usm_ndarray(dst), dpnp.get_usm_ndarray(src), dpnp.get_usm_ndarray(where), @@ -1849,7 +1851,7 @@ def expand_dims(a, axis): """ usm_a = dpnp.get_usm_ndarray(a) - usm_res = dpt.expand_dims(usm_a, axis=axis) + usm_res = dpt_ext.expand_dims(usm_a, axis=axis) return dpnp_array._create_from_usm_ndarray(usm_res) @@ -1920,7 +1922,7 @@ def flip(m, axis=None): """ m_usm = dpnp.get_usm_ndarray(m) - return dpnp_array._create_from_usm_ndarray(dpt.flip(m_usm, axis=axis)) + return dpnp_array._create_from_usm_ndarray(dpt_ext.flip(m_usm, axis=axis)) def fliplr(m): @@ -2408,7 +2410,7 @@ def moveaxis(a, source, destination): usm_array = dpnp.get_usm_ndarray(a) return dpnp_array._create_from_usm_ndarray( - dpt.moveaxis(usm_array, source, destination) + dpt_ext.moveaxis(usm_array, source, destination) ) @@ -3663,7 +3665,7 @@ def squeeze(a, /, axis=None): """ usm_a = dpnp.get_usm_ndarray(a) - usm_res = dpt.squeeze(usm_a, axis=axis) + usm_res = dpt_ext.squeeze(usm_a, axis=axis) return dpnp_array._create_from_usm_ndarray(usm_res) @@ -3751,7 +3753,7 @@ def stack(arrays, /, *, axis=0, out=None, dtype=None, casting="same_kind"): ) usm_arrays = [dpnp.get_usm_ndarray(x) for x in arrays] - usm_res = dpt.stack(usm_arrays, axis=axis) + usm_res = dpt_ext.stack(usm_arrays, axis=axis) res = dpnp_array._create_from_usm_ndarray(usm_res) if dtype is not None: @@ -3812,7 +3814,7 @@ def swapaxes(a, axis1, axis2): """ usm_a = dpnp.get_usm_ndarray(a) - usm_res = dpt.swapaxes(usm_a, axis1=axis1, axis2=axis2) + usm_res = dpt_ext.swapaxes(usm_a, axis1=axis1, axis2=axis2) return dpnp_array._create_from_usm_ndarray(usm_res) @@ -3892,7 +3894,7 @@ def tile(A, reps): """ usm_a = dpnp.get_usm_ndarray(A) - usm_res = dpt.tile(usm_a, reps) + usm_res = dpt_ext.tile(usm_a, reps) return dpnp_array._create_from_usm_ndarray(usm_res) @@ -4522,7 +4524,7 @@ def unstack(x, /, *, axis=0): if usm_x.ndim == 0: raise ValueError("Input array must be at least 1-d.") - res = dpt.unstack(usm_x, axis=axis) + res = dpt_ext.unstack(usm_x, axis=axis) return tuple(dpnp_array._create_from_usm_ndarray(a) for a in res) diff --git a/dpnp/dpnp_iface_searching.py b/dpnp/dpnp_iface_searching.py index a2389978d506..15f52338ec7e 100644 --- a/dpnp/dpnp_iface_searching.py +++ b/dpnp/dpnp_iface_searching.py @@ -376,7 +376,7 @@ def searchsorted(a, v, side="left", sorter=None): usm_a = dpnp.get_usm_ndarray(a) if dpnp.isscalar(v): - usm_v = dpt.asarray(v, sycl_queue=a.sycl_queue, usm_type=a.usm_type) + usm_v = dpt_ext.asarray(v, sycl_queue=a.sycl_queue, usm_type=a.usm_type) else: usm_v = dpnp.get_usm_ndarray(v) diff --git a/dpnp/fft/dpnp_utils_fft.py b/dpnp/fft/dpnp_utils_fft.py index b959b78e1ad0..20d0dcd0cff2 100644 --- a/dpnp/fft/dpnp_utils_fft.py +++ b/dpnp/fft/dpnp_utils_fft.py @@ -42,19 +42,13 @@ from collections.abc import Sequence import dpctl - -# pylint: disable=no-name-in-module -# TODO: remove it when ti.__linspace_step -# is migrated to dpctl_ext/tensor -import dpctl.tensor._tensor_impl as ti import dpctl.utils as dpu import numpy from dpctl.utils import ExecutionPlacementError -# pylint: disable=no-name-in-module # TODO: revert to `import dpctl.tensor...` # when dpnp fully migrates dpctl/tensor -import dpctl_ext.tensor._tensor_impl as ti_ext +import dpctl_ext.tensor._tensor_impl as ti import dpnp import dpnp.backend.extensions.fft._fft_impl as fi from 
dpctl_ext.tensor._numpy_helper import ( @@ -205,7 +199,7 @@ def _compute_result(dsc, a, out, forward, c2c, out_strides): if ( out is not None and out.strides == tuple(out_strides) - and not ti_ext._array_overlap(a_usm, dpnp.get_usm_ndarray(out)) + and not ti._array_overlap(a_usm, dpnp.get_usm_ndarray(out)) ): res_usm = out_usm result = out @@ -538,7 +532,7 @@ def _truncate_or_pad(a, shape, axes): ) _manager = dpu.SequentialOrderManager[exec_q] dep_evs = _manager.submitted_events - ht_copy_ev, copy_ev = ti_ext._copy_usm_ndarray_into_usm_ndarray( + ht_copy_ev, copy_ev = ti._copy_usm_ndarray_into_usm_ndarray( src=dpnp.get_usm_ndarray(a), dst=z.get_array()[tuple(index)], sycl_queue=exec_q, diff --git a/dpnp/tests/test_arraycreation.py b/dpnp/tests/test_arraycreation.py index 88e6aacb997d..8d89f2a42ca8 100644 --- a/dpnp/tests/test_arraycreation.py +++ b/dpnp/tests/test_arraycreation.py @@ -2,7 +2,6 @@ from math import prod import dpctl -import dpctl.tensor as dpt import numpy import pytest from numpy.testing import ( @@ -15,7 +14,7 @@ # TODO: revert to `import dpctl.tensor...` # when dpnp fully migrates dpctl/tensor -import dpctl_ext.tensor as dpt_ext +import dpctl_ext.tensor as dpt import dpnp from .helper import ( @@ -972,7 +971,7 @@ def test_ones_like(array, dtype, order): ], ) def test_dpctl_tensor_input(func, args): - x0 = dpt_ext.reshape(dpt.arange(9), (3, 3)) + x0 = dpt.reshape(dpt.arange(9), (3, 3)) new_args = [eval(val, {"x0": x0}) for val in args] X = getattr(dpt, func)(*new_args) Y = getattr(dpnp, func)(*new_args) diff --git a/dpnp/tests/test_arraymanipulation.py b/dpnp/tests/test_arraymanipulation.py index 17d2c07fd6cc..f7df6387caf6 100644 --- a/dpnp/tests/test_arraymanipulation.py +++ b/dpnp/tests/test_arraymanipulation.py @@ -1,10 +1,10 @@ -import warnings - -import dpctl.tensor as dpt import numpy import pytest from numpy.testing import assert_array_equal, assert_equal, assert_raises +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt import dpnp # TODO: revert to `from dpctl.tensor...` diff --git a/dpnp/tests/test_fft.py b/dpnp/tests/test_fft.py index 226420057748..3a19a2cf3668 100644 --- a/dpnp/tests/test_fft.py +++ b/dpnp/tests/test_fft.py @@ -1,10 +1,12 @@ import dpctl -import dpctl.tensor as dpt import numpy import pytest from dpctl.utils import ExecutionPlacementError from numpy.testing import assert_raises +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt import dpnp from dpnp.dpnp_utils import map_dtype_to_device diff --git a/dpnp/tests/test_indexing.py b/dpnp/tests/test_indexing.py index 79c41a2f45f7..d8822d77080b 100644 --- a/dpnp/tests/test_indexing.py +++ b/dpnp/tests/test_indexing.py @@ -1,7 +1,6 @@ import functools import dpctl -import dpctl.tensor as dpt import numpy import pytest from dpctl.utils import ExecutionPlacementError @@ -13,10 +12,10 @@ assert_raises_regex, ) -import dpnp - # TODO: revert to `import dpctl.tensor...` # when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt +import dpnp from dpctl_ext.tensor._numpy_helper import AxisError from dpctl_ext.tensor._type_utils import _to_device_supported_dtype from dpnp.dpnp_array import dpnp_array diff --git a/dpnp/tests/test_linalg.py b/dpnp/tests/test_linalg.py index e1ad9af7d220..dfd6e21c2a95 100644 --- a/dpnp/tests/test_linalg.py +++ b/dpnp/tests/test_linalg.py @@ -1,7 +1,6 @@ import warnings import dpctl -import dpctl.tensor as dpt import numpy import pytest 
from dpctl.utils import ExecutionPlacementError @@ -13,6 +12,9 @@ assert_raises_regex, ) +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt import dpnp # TODO: revert to `from dpctl.tensor...` diff --git a/dpnp/tests/test_manipulation.py b/dpnp/tests/test_manipulation.py index 2512c0955da7..d30c08a65f1e 100644 --- a/dpnp/tests/test_manipulation.py +++ b/dpnp/tests/test_manipulation.py @@ -1,6 +1,5 @@ import itertools -import dpctl.tensor as dpt import numpy import pytest from numpy.testing import ( @@ -9,6 +8,9 @@ assert_raises, ) +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt import dpnp # TODO: revert to `from dpctl.tensor...` diff --git a/dpnp/tests/test_mathematical.py b/dpnp/tests/test_mathematical.py index ef8f6731ffd2..c03787790280 100644 --- a/dpnp/tests/test_mathematical.py +++ b/dpnp/tests/test_mathematical.py @@ -11,6 +11,9 @@ assert_raises_regex, ) +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt_ext import dpnp # TODO: revert to `from dpctl.tensor...` @@ -669,15 +672,15 @@ def test_to_begin_to_end(self, to_begin, to_end): "to_begin, to_end", [ (-20, 20), - (dpt.asarray([-20, -30]), dpt.asarray([20, 15])), - (dpt.asarray([[-20, -30]]), dpt.asarray([[20, 15]])), + (dpt_ext.asarray([-20, -30]), dpt_ext.asarray([20, 15])), + (dpt_ext.asarray([[-20, -30]]), dpt_ext.asarray([[20, 15]])), ([1, 2], [3, 4]), ((1, 2), (3, 4)), ], ) def test_usm_ndarray(self, to_begin, to_end): a = numpy.array([[1, 2, 0]]) - dpt_a = dpt.asarray(a) + dpt_a = dpt_ext.asarray(a) if isinstance(to_begin, dpt.usm_ndarray): np_to_begin = dpt.asnumpy(to_begin) @@ -1578,7 +1581,7 @@ def test_out(self): assert_allclose(result, expected) # output is usm_ndarray - dpt_out = dpt.empty(expected.shape, dtype=expected.dtype) + dpt_out = dpt_ext.empty(expected.shape, dtype=expected.dtype) result = dpnp.prod(ia, axis=0, out=dpt_out) assert dpt_out is result.get_array() assert_allclose(result, expected) @@ -2631,7 +2634,7 @@ def test_out_float16(self, func): def test_out_usm_ndarray(self, func, dt): a = generate_random_numpy_array(10, dt) out = numpy.empty(a.shape, dtype=dt) - ia, usm_out = dpnp.array(a), dpt.asarray(out) + ia, usm_out = dpnp.array(a), dpt_ext.asarray(out) expected = getattr(numpy, func)(a, out=out) result = getattr(dpnp, func)(ia, out=usm_out) diff --git a/dpnp/tests/test_memory.py b/dpnp/tests/test_memory.py index 1bc0da8c1535..94aeda33f505 100644 --- a/dpnp/tests/test_memory.py +++ b/dpnp/tests/test_memory.py @@ -2,6 +2,9 @@ import numpy import pytest +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt_ext import dpnp import dpnp.memory as dpm @@ -21,7 +24,7 @@ def test_wrong_input_type(self, x): dpm.create_data(x) def test_wrong_usm_data(self): - a = dpt.ones(10) + a = dpt_ext.ones(10) d = IntUsmData(a.shape, buffer=a) with pytest.raises(TypeError): diff --git a/dpnp/tests/test_nanfunctions.py b/dpnp/tests/test_nanfunctions.py index d92cee045a72..2cb70df5954a 100644 --- a/dpnp/tests/test_nanfunctions.py +++ b/dpnp/tests/test_nanfunctions.py @@ -1,5 +1,4 @@ import dpctl -import dpctl.tensor as dpt import numpy import pytest from dpctl.utils import ExecutionPlacementError @@ -12,6 +11,9 @@ assert_raises_regex, ) +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt import dpnp 
from .helper import ( diff --git a/dpnp/tests/test_ndarray.py b/dpnp/tests/test_ndarray.py index 4e4e42bbc85e..a27f0fe6aa14 100644 --- a/dpnp/tests/test_ndarray.py +++ b/dpnp/tests/test_ndarray.py @@ -9,6 +9,9 @@ assert_raises_regex, ) +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt_ext import dpnp from .helper import ( @@ -407,7 +410,7 @@ def test_error(self): class TestUsmNdarrayProtocol: def test_basic(self): a = dpnp.arange(256, dtype=dpnp.int64) - usm_a = dpt.asarray(a) + usm_a = dpt_ext.asarray(a) assert a.sycl_queue == usm_a.sycl_queue assert a.usm_type == usm_a.usm_type diff --git a/dpnp/tests/test_search.py b/dpnp/tests/test_search.py index 64c4eb75f906..36e0032ccff1 100644 --- a/dpnp/tests/test_search.py +++ b/dpnp/tests/test_search.py @@ -1,8 +1,10 @@ -import dpctl.tensor as dpt import numpy import pytest from numpy.testing import assert_array_equal, assert_equal, assert_raises +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt import dpnp from .helper import ( diff --git a/dpnp/tests/test_statistics.py b/dpnp/tests/test_statistics.py index cf436087b607..fe8848b6c858 100644 --- a/dpnp/tests/test_statistics.py +++ b/dpnp/tests/test_statistics.py @@ -1,5 +1,4 @@ import dpctl -import dpctl.tensor as dpt import numpy import pytest from numpy.testing import ( @@ -9,6 +8,9 @@ assert_raises_regex, ) +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt import dpnp from .helper import ( diff --git a/dpnp/tests/test_sycl_queue.py b/dpnp/tests/test_sycl_queue.py index d1853579036a..a9c076a7c476 100644 --- a/dpnp/tests/test_sycl_queue.py +++ b/dpnp/tests/test_sycl_queue.py @@ -2,12 +2,14 @@ import tempfile import dpctl -import dpctl.tensor as dpt import numpy import pytest from dpctl.utils import ExecutionPlacementError from numpy.testing import assert_array_equal, assert_raises +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt import dpnp import dpnp.linalg from dpnp.dpnp_array import dpnp_array diff --git a/dpnp/tests/test_usm_type.py b/dpnp/tests/test_usm_type.py index 4fc0f2b958fa..8f8efd1cdd10 100644 --- a/dpnp/tests/test_usm_type.py +++ b/dpnp/tests/test_usm_type.py @@ -2,11 +2,13 @@ import tempfile from math import prod -import dpctl.tensor as dpt import dpctl.utils as du import numpy import pytest +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt import dpnp from dpnp.dpnp_utils import get_usm_allocations diff --git a/dpnp/tests/test_utils.py b/dpnp/tests/test_utils.py index eef9132e5b55..ddbd267c2108 100644 --- a/dpnp/tests/test_utils.py +++ b/dpnp/tests/test_utils.py @@ -1,7 +1,9 @@ -import dpctl.tensor as dpt import numpy import pytest +# TODO: revert to `import dpctl.tensor...` +# when dpnp fully migrates dpctl/tensor +import dpctl_ext.tensor as dpt import dpnp From 85ef3e0ef0c3248fe39d55c5c428e9a884297afa Mon Sep 17 00:00:00 2001 From: Vladislav Perevezentsev Date: Thu, 5 Mar 2026 10:45:57 -0800 Subject: [PATCH 11/11] Apply remarks --- dpctl_ext/tensor/_accumulation.py | 6 +++--- .../libtensor/source/accumulators/accumulate_over_axis.hpp | 5 ++++- .../libtensor/source/accumulators/cumulative_logsumexp.cpp | 3 --- .../libtensor/source/accumulators/cumulative_prod.cpp | 3 --- .../tensor/libtensor/source/accumulators/cumulative_sum.cpp | 3 --- 5 files 
changed, 7 insertions(+), 13 deletions(-)

diff --git a/dpctl_ext/tensor/_accumulation.py b/dpctl_ext/tensor/_accumulation.py
index 17596e5647fc..2dfe9656e198 100644
--- a/dpctl_ext/tensor/_accumulation.py
+++ b/dpctl_ext/tensor/_accumulation.py
@@ -35,14 +35,14 @@
 import dpctl_ext.tensor as dpt_ext
 import dpctl_ext.tensor._tensor_accumulation_impl as tai
 import dpctl_ext.tensor._tensor_impl as ti
-from dpctl_ext.tensor._type_utils import (
+
+from ._numpy_helper import normalize_axis_index
+from ._type_utils import (
     _default_accumulation_dtype,
     _default_accumulation_dtype_fp_types,
     _to_device_supported_dtype,
 )
 
-from ._numpy_helper import normalize_axis_index
-
 
 def _accumulate_common(
     x,
diff --git a/dpctl_ext/tensor/libtensor/source/accumulators/accumulate_over_axis.hpp b/dpctl_ext/tensor/libtensor/source/accumulators/accumulate_over_axis.hpp
index 712ab180ef37..4dd00620a260 100644
--- a/dpctl_ext/tensor/libtensor/source/accumulators/accumulate_over_axis.hpp
+++ b/dpctl_ext/tensor/libtensor/source/accumulators/accumulate_over_axis.hpp
@@ -35,9 +35,12 @@
 
 #pragma once
 
+#include <cstddef>
 #include <cstdint>
-#include <exception>
+#include <stdexcept>
+#include <utility>
 #include <vector>
 #include <sycl/sycl.hpp>
 #include <pybind11/pybind11.h>
 #include <pybind11/stl.h>
diff --git a/dpctl_ext/tensor/libtensor/source/accumulators/cumulative_logsumexp.cpp b/dpctl_ext/tensor/libtensor/source/accumulators/cumulative_logsumexp.cpp
index 572c93c0066e..f1ad170caa58 100644
--- a/dpctl_ext/tensor/libtensor/source/accumulators/cumulative_logsumexp.cpp
+++ b/dpctl_ext/tensor/libtensor/source/accumulators/cumulative_logsumexp.cpp
@@ -311,7 +311,6 @@ void init_cumulative_logsumexp(py::module_ m)
             int trailing_dims_to_accumulate, const arrayT &dst,
             sycl::queue &exec_q, const event_vecT &depends = {}) {
-            using dpctl::tensor::py_internal::py_accumulate_over_axis;
             return py_accumulate_over_axis(src, trailing_dims_to_accumulate,
                                            dst, exec_q, depends,
                                            cumlogsumexp_strided_dispatch_table,
@@ -326,8 +325,6 @@ void init_cumulative_logsumexp(py::module_ m)
     auto cumlogsumexp_include_initial_pyapi =
         [&](const arrayT &src, const arrayT &dst, sycl::queue &exec_q,
             const event_vecT &depends = {}) {
-            using dpctl::tensor::py_internal::
-                py_accumulate_final_axis_include_initial;
             return py_accumulate_final_axis_include_initial(
                 src, dst, exec_q, depends,
                 cumlogsumexp_include_initial_strided_dispatch_table,
diff --git a/dpctl_ext/tensor/libtensor/source/accumulators/cumulative_prod.cpp b/dpctl_ext/tensor/libtensor/source/accumulators/cumulative_prod.cpp
index 07c802c8cffa..9a9961441d35 100644
--- a/dpctl_ext/tensor/libtensor/source/accumulators/cumulative_prod.cpp
+++ b/dpctl_ext/tensor/libtensor/source/accumulators/cumulative_prod.cpp
@@ -322,7 +322,6 @@ void init_cumulative_prod(py::module_ m)
     auto cumprod_pyapi = [&](const arrayT &src, int trailing_dims_to_accumulate,
                              const arrayT &dst, sycl::queue &exec_q,
                              const event_vecT &depends = {}) {
-        using dpctl::tensor::py_internal::py_accumulate_over_axis;
         return py_accumulate_over_axis(
             src, trailing_dims_to_accumulate, dst, exec_q, depends,
             cumprod_strided_dispatch_table, cumprod_1d_contig_dispatch_table);
@@ -336,8 +335,6 @@ void init_cumulative_prod(py::module_ m)
     auto cumprod_include_initial_pyapi = [&](const arrayT &src,
                                              const arrayT &dst,
                                              sycl::queue &exec_q,
                                              const event_vecT &depends = {}) {
-        using dpctl::tensor::py_internal::
-            py_accumulate_final_axis_include_initial;
         return py_accumulate_final_axis_include_initial(
             src, dst, exec_q, depends,
             cumprod_include_initial_strided_dispatch_table,
diff --git a/dpctl_ext/tensor/libtensor/source/accumulators/cumulative_sum.cpp
b/dpctl_ext/tensor/libtensor/source/accumulators/cumulative_sum.cpp index 3c422bfd0998..3a0ed6cf3ab5 100644 --- a/dpctl_ext/tensor/libtensor/source/accumulators/cumulative_sum.cpp +++ b/dpctl_ext/tensor/libtensor/source/accumulators/cumulative_sum.cpp @@ -320,7 +320,6 @@ void init_cumulative_sum(py::module_ m) auto cumsum_pyapi = [&](const arrayT &src, int trailing_dims_to_accumulate, const arrayT &dst, sycl::queue &exec_q, const event_vecT &depends = {}) { - using dpctl::tensor::py_internal::py_accumulate_over_axis; return py_accumulate_over_axis( src, trailing_dims_to_accumulate, dst, exec_q, depends, cumsum_strided_dispatch_table, cumsum_1d_contig_dispatch_table); @@ -334,8 +333,6 @@ void init_cumulative_sum(py::module_ m) auto cumsum_include_initial_pyapi = [&](const arrayT &src, const arrayT &dst, sycl::queue &exec_q, const event_vecT &depends = {}) { - using dpctl::tensor::py_internal:: - py_accumulate_final_axis_include_initial; return py_accumulate_final_axis_include_initial( src, dst, exec_q, depends, cumsum_include_initial_strided_dispatch_table,