diff --git a/src/mindspore2022/mindspore/python/mindspore/_extends/parse/resources.py b/src/mindspore2022/mindspore/python/mindspore/_extends/parse/resources.py index b381ec3e..5f35e5d9 100644 --- a/src/mindspore2022/mindspore/python/mindspore/_extends/parse/resources.py +++ b/src/mindspore2022/mindspore/python/mindspore/_extends/parse/resources.py @@ -17,7 +17,7 @@ """Resources for ast tree parse.""" import ast import math - + from mindspore import RowTensor, SparseTensor, COOTensor, CSRTensor from mindspore.ops import functional as F, composite as C from mindspore.ops.composite import multitype_ops @@ -25,16 +25,16 @@ from mindspore._c_expression import security from . import standard_method as M from . import trope as T from .namespace import CellNamespace - + # namespace define functional_ns = CellNamespace('mindspore.ops.functional') composite_ns = CellNamespace('mindspore.ops.composite') trope_ns = CellNamespace('mindspore._extends.parse.trope') - + NO_IMPLEMENT = None # not implemented SYMBOL_UNDEFINE = 0xFF # Undefined var and function - -# Some space set aside for readability of code + +# Some space set aside to improve code readability parse_object_map = { # ast grammar ast.Add: (trope_ns, 'add'), @@ -64,17 +64,17 @@ parse_object_map = { ast.IsNot: (trope_ns, 'is_not'), ast.In: (trope_ns, 'contains'), ast.NotIn: (trope_ns, 'not_contains'), - + # operation symbol type 'getitem': (composite_ns, 'getitem'), 'ms_iter': (composite_ns, 'ms_iter'), 'ms_next': (composite_ns, 'ms_next'), 'hasnext': (composite_ns, 'hasnext'), - + # undefined type SYMBOL_UNDEFINE: (None, 'undefine'), } - + # Operation symbols corresponding to ast grammar ops_symbol_map = { # ast grammar @@ -88,13 +88,13 @@ ops_symbol_map = { ast.Add: '+', ast.Sub: '-', ast.Mult: '*', ast.Div: '/', ast.FloorDiv: '//', ast.Mod: '%', ast.Pow: '**', ast.LShift: '<<', ast.RShift: '>>', ast.BitXor: '^', - + # undefined type SYMBOL_UNDEFINE: '', } - -# Escape an object to another object, eg: system function(len,xxx) -# Some space set aside for readability of code + +# Convert one object into another object, e.g. system functions (len, xxx) +# Some space set aside to improve code readability convert_object_map = { T.add: multitype_ops.add, T.sub: multitype_ops.sub, @@ -124,7 +124,7 @@ convert_object_map = { T.is_not: F.is_not, T.contains: multitype_ops.in_, T.not_contains: multitype_ops.not_in_, - + # system function T.len: M.ms_len, T.bool_: M.bool_, @@ -134,7 +134,7 @@ convert_object_map = { T.zip: C.zip_operation, T.enumerate: M.enumerate_, T.isinstance: M.isinstance_, - + # custom define operation T.iter: M.ms_iter, T.next: M.ms_next, @@ -145,7 +145,7 @@ convert_object_map = { T.make_slice: F.make_slice, T.range: F.make_range, T.while_cond: M.while_cond, - + # lib function math.floor: NO_IMPLEMENT, math.trunc: NO_IMPLEMENT, @@ -154,13 +154,14 @@ convert_object_map = { math.sin: NO_IMPLEMENT, math.cos: NO_IMPLEMENT, math.tan: NO_IMPLEMENT, - + # user defined RowTensor: F.make_row_tensor, SparseTensor: F.make_sparse_tensor, COOTensor: F.make_coo_tensor, CSRTensor: F.make_csr_tensor } - + +# If security is not enabled, map T.print to F.print_ if not security.enable_security(): - convert_object_map[T.print] = F.print_ + convert_object_map[T.print] = F.print_ \ No newline at end of file diff --git a/src/mindspore2022/mindspore/python/mindspore/_extends/parse/standard_method.py b/src/mindspore2022/mindspore/python/mindspore/_extends/parse/standard_method.py index fb6cbbb8..34731c01 100644 --- a/src/mindspore2022/mindspore/python/mindspore/_extends/parse/standard_method.py +++ b/src/mindspore2022/mindspore/python/mindspore/_extends/parse/standard_method.py @@ -17,10 +17,10 @@ """standard_method""" from dataclasses import dataclass - + from mindspore import Tensor,
Parameter, CSRTensor, COOTensor from mindspore import dtype as mstype - + from ..._checkparam import Validator as validator from ...ops import functional as F from ...ops import operations as P @@ -32,10 +32,10 @@ from ...ops.composite.multitype_ops import _constexpr_utils as const_utils from ...ops.composite.multitype_ops import _compile_utils as compile_utils from ...ops.operations._inner_ops import Format from ...ops.primitive import constexpr - - + + __all__ = ['MultitypeFuncGraph', 'env_get', 'hyper_add', 'zeros_like', 'ones_like'] - + shape_ = P.Shape() dtype_ = P.DType() abs_ = P.Abs() @@ -46,30 +46,30 @@ _format = Format() _reduce_sum_default = P.ReduceSum() _reduce_sum_keepdims = P.ReduceSum(True) _mean_keepdims = P.ReduceMean(True) - + itemsize_map = {mstype.bool_: 1, mstype.int8: 1, mstype.uint8: 1, mstype.float16: 2, mstype.int16: 2, mstype.uint16: 2, mstype.float32: 4, mstype.int32: 4, mstype.uint32: 4, mstype.float64: 8, mstype.int64: 8, mstype.uint64: 8} - + nan_tensor = Tensor(float('nan'), dtype=mstype.float32) - - + + def mean(x, axis=(), keep_dims=False): """ Reduces a dimension of a tensor by averaging all elements in the dimension. - + Args: axis (Union[None, int, tuple(int), list(int)]): Dimensions of reduction, - when axis is None or empty tuple, reduce all dimensions. Default: (). + when axis is None or empty tuple, reduce all dimensions. Default: (). keep_dims (bool): Whether to keep the reduced dimensions. Default: False. - + Returns: Tensor, has the same data type as input tensor. - + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Examples: >>> import numpy as np >>> from mindspore import Tensor @@ -82,36 +82,36 @@ def mean(x, axis=(), keep_dims=False): axis = () reduce_mean = P.ReduceMean(keep_dims) return reduce_mean(x, axis) - - + + def all_(x, axis=(), keep_dims=False): """ Check all array elements along a given axis evaluate to True. - + Args: x (Tensor): A Tensor to be reduced. axis (Union[None, int, tuple(int)): Dimensions of reduction. keep_dims (bool): Whether to keep the reduced dimensions. - + Returns: Tensor, has the same data type as x. """ - + if axis is None: axis = () reduce_all = P.ReduceAll(keep_dims) return reduce_all(x, axis) - - + + def any_(x, axis=(), keep_dims=False): """ Check any array element along a given axis evaluate to True. - + Args: x (Tensor): A Tensor to be reduced. axis (Union[None, int, tuple(int)): Dimensions of reduction. keep_dims (bool): Whether to keep the reduced dimensions. - + Returns: Tensor, has the same data type as x. """ @@ -119,59 +119,59 @@ def any_(x, axis=(), keep_dims=False): axis = () reduce_any = P.ReduceAny(keep_dims) return reduce_any(x, axis) - - + + def size_(x): """ Return the number of elements in tensor `x`. - + Note: To strictly follow Numpy's behaviour, return 1 for tensor scalar. - + Args: x (Tensor): Input tensor. - + Returns: size(int). """ if not shape_(x): return size_op_(x) + 1 return size_op_(x) - - + + def itemsize_(x): """ Return length of one tensor element in bytes. - + Args: x (Tensor): Input tensor. - + Returns: itemsize(int). """ return get_itemsize(x.dtype) - - + + def nbytes_(x): """ Return total number of bytes taken by the tensor. - + Args: x (Tensor): Input tensor. - + Returns: nbytes(int). """ return itemsize_(x) * F.shape_mul(shape_(x)) - - + + def strides_(x): """ Return the tuple of bytes to step in each dimension when traversing a tensor. - + Args: x (Tensor): Input tensor. - + Returns: strides (tuple[int]). 
""" @@ -184,12 +184,12 @@ def strides_(x): stride *= tensor_shape[j] strides += (stride,) return strides - - + + def astype(x, dtype, copy=True): # pylint: disable=redefined-outer-name """ Return a copy of the tensor, casted to a specified type. - + Args: dtype (Union[:class:`mindspore.dtype`, str]): Designated tensor dtype, can be in format of :class:`mindspore.dtype.float32` or `float32`. @@ -197,16 +197,16 @@ def astype(x, dtype, copy=True): # pylint: disable=redefined-outer-name copy (bool, optional): By default, astype always returns a newly allocated tensor. If this is set to false, the input tensor is returned instead of a copy if possible. Default: True. - + Returns: Tensor, with the designated dtype. - + Raises: TypeError: If `dtype` has types not specified above, or values cannot be understood. - + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Examples: >>> import numpy as np >>> from mindspore import Tensor @@ -219,35 +219,35 @@ def astype(x, dtype, copy=True): # pylint: disable=redefined-outer-name if not copy and dtype == x.dtype: return x return F.cast(x, dtype) - - + + def transpose(x, *axis): r""" Return a view of the tensor with axes transposed. - + For a 1-D tensor this has no effect, as a transposed vector is simply the same vector. For a 2-D tensor, this is a standard matrix transpose. For a n-D tensor, if axes are given, their order indicates how the axes are permuted. If axes are not provided and tensor.shape = (i[0], i[1],...i[n-2], i[n-1]), then tensor.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0]). - + Args: axes(Union[None, tuple(int), list(int), int], optional): If axes is None or blank, tensor.transpose() will reverse the order of the axes. If axes is tuple(int) or list(int), tensor.transpose() will transpose the tensor to the new axes order. If axes is int, this form is simply intended as a convenience alternative to the tuple/list form. - + Returns: Tensor, has the same dimension as input tensor, with axes suitably permuted. - + Raises: TypeError: If input arguments have types not specified above. ValueError: If the number of `axes` is not euqal to a.ndim. - + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Examples: >>> import numpy as np >>> from mindspore import Tensor @@ -259,32 +259,32 @@ def transpose(x, *axis): ndim = F.rank(x) perm = check_transpose_axis_const(axis, ndim) return F.transpose(x, perm) - - + + # `tensor.T` is used as a property in graph mode T_ = transpose - - + + def reshape(x, *shape): """ Give a new shape to a tensor without changing its data. - + Args: shape(Union[int, tuple(int), list(int)]): The new shape should be compatible with the original shape. If an integer, then the result will be a 1-D array of that length. One shape dimension can be -1. In this case, the value is inferred from the length of the array and remaining dimensions. - + Returns: Tensor, with new specified shape. - + Raises: TypeError: If new_shape is not integer, list or tuple, or `x` is not tensor. ValueError: If new_shape is not compatible with the original shape. - + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Examples: >>> from mindspore import Tensor >>> from mindspore import dtype as mstype @@ -297,18 +297,18 @@ def reshape(x, *shape): """ new_shape = check_reshape_shp_const(shape) return F.reshape(x, new_shape) - - + + def ravel(x): """ Return a contiguous flattened tensor. - + Returns: Tensor, a 1-D tensor, containing the same elements of the input. 
- + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Examples: >>> import numpy as np >>> from mindspore import Tensor @@ -318,27 +318,27 @@ def ravel(x): (24,) """ return reshape(x, (-1,)) - - + + def flatten(x, order='C'): r""" Return a copy of the tensor collapsed into one dimension. - + Args: order (str, optional): Can choose between 'C' and 'F'. 'C' means to flatten in row-major (C-style) order. 'F' means to flatten in column-major (Fortran-style) order. Only 'C' and 'F' are supported. Default: 'C'. - + Returns: Tensor, has the same data type as input. - + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Raises: TypeError: If `order` is not string type. ValueError: If `order` is string type, but not 'C' or 'F'. - + Examples: >>> import numpy as np >>> from mindspore import Tensor @@ -350,30 +350,30 @@ def flatten(x, order='C'): order = check_flatten_order_const(order) if order == 'C': return F.reshape(x, (-1,)) - + perm = F.make_range(0, F.rank(x)) new_order = F.tuple_reversed(perm) return F.reshape(F.transpose(x, new_order), (-1,)) - - + + def swapaxes(x, axis1, axis2): """ Interchange two axes of a tensor. - + Args: axis1 (int): First axis. axis2 (int): Second axis. - + Returns: Transposed tensor, has the same data type as the input. - + Raises: TypeError: If `axis1` or `axis2` is not integer. ValueError: If `axis1` or `axis2` is not in the range of :math:`[-ndim, ndim-1]`. - + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Examples: >>> import numpy as np >>> from mindspore import Tensor @@ -383,12 +383,12 @@ def swapaxes(x, axis1, axis2): (4,3,2) """ axis1, axis2 = check_swapaxes_axis_const((axis1, axis2), x.ndim) - + if axis1 == axis2: return x if axis1 > axis2: axis1, axis2 = axis2, axis1 - + perm = F.make_range(0, x.ndim) new_perm = None if axis2 + 1 < x.ndim: @@ -397,27 +397,27 @@ def swapaxes(x, axis1, axis2): else: new_perm = perm[0:axis1] + perm[axis2:axis2 + 1] + \ perm[axis1 + 1:axis2] + perm[axis1:axis1 + 1] - + return F.transpose(x, new_perm) - - + + def squeeze(x, axis=None): """ Remove single-dimensional entries from the shape of a tensor. - + Args: axis (Union[None, int, list(int), tuple(int)], optional): Default is None. - + Returns: Tensor, with all or a subset of the dimensions of length 1 removed. - + Raises: TypeError: If input arguments have types not specified above. - ValueError: If specified axis has shape entry :math:`> 1`. - + ValueError: If specified axis has shape entry :math:`> 1`. + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Examples: >>> import numpy as np >>> from mindspore import Tensor @@ -432,27 +432,27 @@ def squeeze(x, axis=None): # yield squeezed shape based on the axes new_shape = prepare_shape_for_squeeze_const(shape, axis) return F.reshape(x, new_shape) - - + + def argmax(x, axis=None): """ Returns the indices of the maximum values along an axis. - + Args: axis (int, optional): By default, the index is into the flattened array, otherwise along the specified axis. Defaults to None. - + Returns: Tensor, array of indices into the array. It has the same shape as a.shape with the dimension along axis removed. - + Raises: ValueError: if axis is out of range. - + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Examples: >>> import numpy as np >>> from mindspore import Tensor @@ -468,28 +468,28 @@ def argmax(x, axis=None): else: axis = check_axis_in_range_const(axis, F.rank(x)) return P.Argmax(axis)(x) - - + + def argmin(x, axis=None): """ Returns the indices of the minimum values along an axis. 
- + Args: a (Union[int, float, bool, list, tuple, Tensor]): Input array. axis (int, optional): By default, the index is into the flattened array, otherwise along the specified axis. Defaults to None. - + Returns: Tensor, array of indices into the array. It has the same shape as a.shape with the dimension along axis removed. - + Raises: ValueError: if axis is out of range. - + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Examples: >>> import numpy as np >>> from mindspore import Tensor @@ -506,16 +506,16 @@ def argmin(x, axis=None): axis = check_axis_in_range_const(axis, F.rank(x)) # P.Argmin is currently not supported return P.Argmax(axis)(F.neg_tensor(x)) - - + + def cumsum(x, axis=None, dtype=None): """ Returns the cumulative sum of the elements along a given axis. - + Note: If ``x.dtype`` is :class:`int8`, :class:`int16` or :class:`bool`, the result `dtype` will be elevated to :class:`int32`, :class:`int64` is not supported. - + Args: x (Tensor): Input tensor. axis (int, optional): Axis along which the cumulative sum is computed. The @@ -523,13 +523,13 @@ def cumsum(x, axis=None, dtype=None): dtype (:class:`mindspore.dtype`, optional): If not specified, stay the same as original, tensor, unless it has an integer dtype with a precision less than :class:`float32`. In that case, :class:`float32` is used. - + Returns: Tensor. - + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Examples: >>> import numpy as np >>> from mindspore import Tensor @@ -552,24 +552,24 @@ def cumsum(x, axis=None, dtype=None): if dtype is not None and original_dtype != dtype: return cumsum_(x, axis).astype(dtype, copy=False) return cumsum_(x, axis) - - + + def copy(x): """ Returns a copy of the tensor. - + Note: The current implementation does not support `order` argument. - + Args: x (Tensor): Input tensor. - + Returns: Copied tensor. - + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Examples: >>> import numpy as np >>> from mindspore import Tensor @@ -590,12 +590,12 @@ def copy(x): x = x / 1.0 x = x.astype(origin_dtype) return x - - + + def max(x, axis=None, keepdims=False, initial=None, where=True): # pylint: disable=redefined-builtin """ Returns the maximum of a tensor or maximum along an axis. - + Args: x (Tensor): Input Tensor. axis (None or int or tuple of ints, optional): defaults to None. Axis or @@ -613,17 +613,17 @@ def max(x, axis=None, keepdims=False, initial=None, where=True): # pylint: disab A boolean array which is broadcasted to match the dimensions of array, and selects elements to include in the reduction. If non-default value is passed, initial must also be provided. - + Returns: Tensor or scalar, maximum of input tensor. If `axis` is None, the result is a scalar value. If `axis` is given, the result is an array of dimension ``a.ndim - 1``. - + Raises: TypeError: if the input is not a tensor. - + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Examples: >>> import numpy as np >>> from mindspore import Tensor @@ -635,12 +635,12 @@ def max(x, axis=None, keepdims=False, initial=None, where=True): # pylint: disab """ return compile_utils.reduce_(x, P.ReduceMax(keepdims), cmp_fn=F.maximum, axis=axis, keepdims=keepdims, initial=initial, where=where) - - + + def min(x, axis=None, keepdims=False, initial=None, where=True): # pylint: disable=redefined-builtin """ Returns the minimum of a tensor or minimum along an axis. - + Args: a (Tensor): Input data. axis (None or int or tuple of ints, optional): defaults to None. 
Axis or @@ -658,17 +658,17 @@ def min(x, axis=None, keepdims=False, initial=None, where=True): # pylint: disab A boolean array which is broadcasted to match the dimensions of array, and selects elements to include in the reduction. If non-default value is passed, initial must also be provided. - + Returns: Tensor or scalar, minimum of `a`. If axis is None, the result is a scalar value. If `axis` is given, the result is an array of dimension ``a.ndim - 1``. - + Raises: TypeError: if the input is not a tensor. - + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Examples: >>> import numpy as np >>> from mindspore import Tensor @@ -680,26 +680,26 @@ def min(x, axis=None, keepdims=False, initial=None, where=True): # pylint: disab """ return compile_utils.reduce_(x, P.ReduceMin(keepdims), cmp_fn=F.minimum, axis=axis, keepdims=keepdims, initial=initial, where=where) - - + + def resize(x, *new_shape): """ Changes shape and size of array in-place. - + Note: Instead of changing the size of the input array and returns nothing as in numpy, this method returns a new Tensor with the input size. Numpy argument `refcheck` is not supported. - + Args: new_shape (Union[ints, tuple of ints]): Shape of resized array. - + Returns: Tensor. - + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Examples: >>> from mindspore import numpy as np >>> x = np.array([[0, 1], [2, 3]]) @@ -723,12 +723,12 @@ def resize(x, *new_shape): else: res = flattened[:new_size] return res.reshape(new_shape) - - + + def diagonal(x, offset=0, axis1=0, axis2=1): """ Returns specified diagonals. - + Args: offset (int, optional): Offset of the diagonal from the main diagonal. Can be positive or negative. Defaults to main diagonal. @@ -738,16 +738,16 @@ def diagonal(x, offset=0, axis1=0, axis2=1): axis2 (int, optional): Axis to be used as the second axis of the 2-D sub-arrays from which the diagonals should be taken. Defaults to second axis. - + Returns: Tensor, if `a` is 2-D, then `a` 1-D array containing the diagonal. - + Raises: ValueError: if the input tensor has less than two dimensions. - + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Examples: >>> import mindspore.numpy as np >>> a = np.arange(4).reshape(2,2) @@ -762,7 +762,7 @@ def diagonal(x, offset=0, axis1=0, axis2=1): if ndim < 2: const_utils.raise_value_error('diagonal requires an array of at least two dimensions') dtype = x.dtype - + axes = check_axis_valid((axis1, axis2), ndim) perm = () for i in range(ndim): @@ -770,10 +770,10 @@ def diagonal(x, offset=0, axis1=0, axis2=1): perm += (i,) perm += axes x = x.transpose(perm) - + shape = x.shape n, m = shape[-2:] - + e = F.eye(n, m, dtype) if offset >= m or offset <= -n: e = F.fill(dtype, (n, m), 0) @@ -788,10 +788,10 @@ def diagonal(x, offset=0, axis1=0, axis2=1): e_lower = e[0:n+offset:1, ...] e = P.Concat(0)((e_upper, e_lower)).astype(dtype) e = P.BroadcastTo(shape)(e) - + prod = F.tensor_mul(x, e) res = F.reduce_sum(prod.astype(mstype.float32), -1) - + begin = () for i in range(ndim-2): begin += (0,) @@ -805,12 +805,12 @@ def diagonal(x, offset=0, axis1=0, axis2=1): size += (last_dim_end,) res = F.tensor_slice(res, begin, size) return res.astype(dtype) - - + + def trace(x, offset=0, axis1=0, axis2=1, dtype=None): """ Returns the sum along diagonals of the array. - + Args: offset (int, optional): Offset of the diagonal from the main diagonal. Can be positive or negative. Defaults to main diagonal. @@ -822,16 +822,16 @@ def trace(x, offset=0, axis1=0, axis2=1, dtype=None): second axis. 
dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the output Tensor. - + Returns: Tensor, sum_along_diagonals. - + Raises: ValueError: if the input tensor has less than two dimensions. - + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Examples: >>> import mindspore.numpy as np >>> x = np.eye(3) @@ -846,36 +846,36 @@ def trace(x, offset=0, axis1=0, axis2=1, dtype=None): return F.fill(dtype, shape[:-1], 0) res = F.reduce_sum(d.astype(mstype.float32), -1) return res.astype(dtype) - - + + def take(x, indices, axis=None, mode='clip'): """ Takes elements from an array along an axis. - + Args: a (Tensor): Source array with shape `(Ni…, M, Nk…)`. indices (Tensor): The indices with shape `(Nj...)` of the values to extract. axis (int, optional): The axis over which to select values. By default, the flattened input array is used. Defaults to None. mode ('raise', 'wrap', 'clip', optional): Defaults to "clip". - + - edge: Pads with the edge values of `arr`. - raise: Raises an error; - wrap: Wraps around; - clip: Clips to the range. 'clip' mode means that all indices that are too large are replaced by the index that addresses the last element along that axis. Note that this disables indexing with negative numbers. - + Returns: Tensor, the indexed result. - + Raises: ValueError: if axis is out of range. TypeError: if the input is not a Tensor. - + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Examples: >>> import mindspore.numpy as np >>> a = np.array([4, 3, 5, 7, 6, 8]) @@ -893,12 +893,12 @@ def take(x, indices, axis=None, mode='clip'): a = x ndim = a.ndim axis = check_axis_in_range_const(axis, ndim) - + shape_a = a.shape shape_indices = indices.shape size_indices = indices.size indices = compile_utils.check_indices(shape_a[axis], indices, mode) - + # reshapes indices to shape (Ni..., Nj..., Nk) shape_ni = tuple_slice(shape_a, None, axis) shape_nk = tuple_slice(shape_a, axis + 1, None) @@ -907,15 +907,15 @@ def take(x, indices, axis=None, mode='clip'): indices = indices.reshape(shape_indices) shape_indices = shape_ni + (indices.size,) + shape_nk indices = P.BroadcastTo(shape_indices)(indices) - + res = F.gather_d(a, axis, indices) return res.reshape(shape_out) - - + + def choose(x, choices, mode='clip'): """ Construct an array from an index array and a list of arrays to choose from. - + Args: choices (sequence of arrays): Choice arrays. `a` and all of the `choices` must be broadcastable to the same shape. If `choices` is itself an array, then @@ -923,24 +923,24 @@ def choose(x, choices, mode='clip'): is taken as defining the "sequence". mode ('raise', 'wrap', 'clip', optional): Specifies how indices outside ``[0, n-1]`` will be treated: - + 'raise' – raise an error (default); - + 'wrap' – wrap around; - + 'clip' – clip to the range. 'clip' mode means that all indices that are too large are replaced by the index that addresses the last element along that axis. Note that this disables indexing with negative numbers. - + Returns: Tensor, the merged result. - + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Raises: ValueError: if ``len(condlist) != len(choicelist)``. 
- + Examples: >>> import mindspore.numpy as np >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33]] @@ -965,7 +965,7 @@ def choose(x, choices, mode='clip'): for choice in choicelist: tmp.append(P.BroadcastTo(shape_choice)(choice)) choices = F.stack(tmp) - + if x.ndim == 0 or choices.ndim == 0: const_utils.raise_value_error('input cannot be scalars') a = P.BroadcastTo(shape_choice)(x) @@ -974,7 +974,7 @@ def choose(x, choices, mode='clip'): a = a.astype(mstype.int32) choices = choices.astype(mstype.int32) a = compile_utils.check_indices(choices.shape[0], a, mode, allow_negative_index=False) - + grids = [] ndim = len(a.shape) for i in range(ndim): @@ -985,12 +985,12 @@ def choose(x, choices, mode='clip'): grid = P.Stack(-1)(grids) indices = P.Concat(-1)((a.reshape(a.shape + (1,)), grid)) return F.gather_nd(choices, indices).astype(dtype) - - + + def searchsorted(x, v, side='left', sorter=None): """ Finds indices where elements should be inserted to maintain order. - + Args: v (Union[int, float, bool, list, tuple, Tensor]): Values to insert into `a`. side ('left', 'right', optional): If 'left', the index of the first suitable @@ -999,16 +999,16 @@ def searchsorted(x, v, side='left', sorter=None): sorter (Union[int, float, bool, list, tuple, Tensor]): 1-D optional array of integer indices that sort array `a` into ascending order. They are typically the result of argsort. - + Returns: Tensor, array of insertion points with the same shape as `v`. - + Raises: ValueError: if argument for `side` or `sorter` is invalid. - + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Examples: >>> from mindspore import numpy as np >>> x = np.array([1,2,3,4,5]) @@ -1030,7 +1030,7 @@ def searchsorted(x, v, side='left', sorter=None): less_op = F.tensor_le if side == 'left' else F.tensor_lt i = F.fill(mstype.int32, shape, 0) j = F.fill(mstype.int32, shape, a.size) - + sort_range = F.make_range(get_log2_size(F.shape_mul(a.shape) + 1)) for _ in sort_range: mid = (i - F.neg_tensor(j))//2 @@ -1038,29 +1038,29 @@ def searchsorted(x, v, side='left', sorter=None): i = F.select(mask, i, mid) j = F.select(mask, mid, j) return j - - + + def fill(x, value): """ Fills the array with a scalar value. - + Note: Unlike Numpy, tensor.fill() will always returns a new tensor, instead of filling the original tensor. - + Args: value (Union[None, int, float, bool]): All elements of a will be assigned this value. - + Returns: Tensor, with the original dtype and shape as input tensor. - + Raises: TypeError: If input arguments have types not specified above. ValueError: If `shape` has entries < 0. - + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Examples: >>> import numpy as np >>> from mindspore import Tensor @@ -1077,30 +1077,30 @@ def fill(x, value): if not isinstance(value, (int, float, bool)): const_utils.raise_type_error("input value must be a scalar.") return F.fill(x.dtype, x.shape, value) - - + + def ptp(x, axis=None, keepdims=False): """ The name of the function comes from the acronym for "peak to peak". - + Note: Numpy arguments `dtype` and `out` are not supported. - + Args: x (Tensor): Input tensor. axis (Union[None, int, tuple(int)]): Axis or axes along which the range is computed. The default is to compute the variance of the flattened array. Default: None. keepdims (bool): Default is False. - + Returns: Tensor. - + Raises: TypeError: if the input is not a tensor. 
- + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Examples: >>> from mindspore import Tensor >>> x = Tensor([[4.0, 9.0, 2.0, 10.0], [6.0, 9.0, 7.0, 12.0]]).astype("float32") @@ -1116,21 +1116,21 @@ def ptp(x, axis=None, keepdims=False): else: check_axis_type(axis, True, True, False) axis = check_axis_valid(axis, x.ndim) - + return x.max(axis, keepdims) - x.min(axis, keepdims) - - + + def clip(x, xmin, xmax, dtype=None): """ Clips (limits) the values in an array. - + Given an interval, values outside the interval are clipped to the interval edges. For example, if an interval of :math:`[0, 1]` is specified, values smaller than 0 become 0, and values larger than 1 become 1. - + Note: Currently, clip with `nan` is not supported. - + Args: x (Tensor): Tensor containing elements to clip. xmin (Tensor, scalar, None): Minimum value. If None, clipping is not performed @@ -1141,14 +1141,14 @@ def clip(x, xmin, xmax, dtype=None): to match their shapes. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the output Tensor. - + Returns: Tensor, a tensor with the elements of `x`, but where values < `xmin` are replaced with `xmin`, and those > `xmax` with `xmax`. - + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Examples: >>> from mindspore import Tensor >>> x = Tensor([1, 2, 3, -4, 0, 3, 2, 0]).astype("float32") @@ -1176,20 +1176,20 @@ def clip(x, xmin, xmax, dtype=None): if dtype is not None and dtype != x.dtype: return x.astype(dtype) return x - - + + def var(x, axis=None, ddof=0, keepdims=False): """ Compute the variance along the specified axis. The variance is the average of the squared deviations from the mean, i.e., :math:`var = mean(abs(x - x.mean())**2)`. - + Return the variance, which is computed for the flattened array by default, otherwise over the specified axis. - + Note: Numpy arguments `dtype`, `out` and `where` are not supported. - + Args: x (Tensor): A Tensor to be calculated. axis (Union[None, int, tuple(int)]): Axis or axes along which the variance is computed. @@ -1197,13 +1197,13 @@ def var(x, axis=None, ddof=0, keepdims=False): ddof (int): Means Delta Degrees of Freedom. Default: 0. The divisor used in calculations is :math:`N - ddof`, where :math:`N` represents the number of elements. keepdims (bool): Default: `False`. - + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Returns: Standard deviation tensor. - + Examples: >>> import mindspore.numpy as np >>> input_x = np.array([1., 2., 3., 4.]) @@ -1214,7 +1214,7 @@ def var(x, axis=None, ddof=0, keepdims=False): return nan_tensor.astype(x.dtype) if not isinstance(ddof, int) or not isinstance(keepdims, int): const_utils.raise_type_error("integer argument expected") - + if axis is None: axis = () else: @@ -1226,43 +1226,43 @@ def var(x, axis=None, ddof=0, keepdims=False): x_sum = _reduce_sum_keepdims(x_pow, axis) else: x_sum = _reduce_sum_default(x_pow, axis) - + if axis == (): axis = F.make_range(x.ndim) nums = 1 for ax in axis: nums *= x.shape[ax] return F.tensor_div(x_sum, nums - ddof) - - + + def std(x, axis=None, ddof=0, keepdims=False): """ Compute the standard deviation along the specified axis. The standard deviation is the square root of the average of the squared deviations from the mean, i.e., :math:`std = sqrt(mean(abs(x - x.mean())**2))`. - + Return the standard deviation, which is computed for the flattened array by default, otherwise over the specified axis. - + Note: Numpy arguments `dtype`, `out` and `where` are not supported. 
- + Args: x (Tensor): A Tensor to be calculated. axis (Union[None, int, tuple(int)]): Axis or axes along which the standard deviation is computed. Default: `None`. - + If `None`, compute the standard deviation of the flattened array. ddof (int): Means Delta Degrees of Freedom. The divisor used in calculations is :math:`N - ddof`, where :math:`N` represents the number of elements. Default: 0. keepdims: Default: `False`. - + Returns: Standard deviation tensor. - + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Examples: >>> import mindspore.numpy as np >>> input_x = np.array([1., 2., 3., 4.]) @@ -1271,16 +1271,16 @@ def std(x, axis=None, ddof=0, keepdims=False): """ x_var = var(x, axis, ddof, keepdims) return F.tensor_pow(x_var, 0.5) - - + + def sum(x, axis=None, dtype=None, keepdims=False, initial=None): # pylint: disable=redefined-builtin """ Return sum of array elements over a given axis. - + Note: Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are not supported. - + Args: x (Union[int, float, bool, list, tuple, Tensor]): Elements to sum. axis (Union[None, int, tuple(int)]): Axis or axes along which a sum is performed. Default: None. @@ -1296,19 +1296,19 @@ def sum(x, axis=None, dtype=None, keepdims=False, initial=None): # pylint: disab sub-classes of ndarray, however any non-default value will be. If the sub-class method does not implement keepdims any exceptions will be raised. initial (scalar): Starting value for the sum. - + Returns: Tensor. A tensor with the same shape as input, with the specified axis removed. If input tensor is a 0-d array, or if axis is None, a scalar is returned. - + Raises: TypeError: If input is not array_like or `axis` is not int or tuple of ints or `keepdims` is not integer or `initial` is not scalar. ValueError: If any axis is out of range or duplicate axes exist. - + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Examples: >>> import mindspore.numpy as np >>> input_x = np.array([-1, 0, 1]).astype('int32') @@ -1328,7 +1328,7 @@ def sum(x, axis=None, dtype=None, keepdims=False, initial=None): # pylint: disab axis = () else: axis = check_and_canonicalize_axes(axis, x.ndim) - + if not check_type_support(input_x.dtype, 'GPU', (mstype.float64, mstype.float32, mstype.float16)): input_x = input_x.astype(mstype.float32) if 0 in x.shape: @@ -1340,29 +1340,29 @@ def sum(x, axis=None, dtype=None, keepdims=False, initial=None): # pylint: disab if initial is not None: res += initial return res.astype(dtype) - - + + def repeat(x, repeats, axis=None): """ Repeat elements of an array. - + Args: x (Tensor): Input tensor. repeats (Union[int, tuple, list]): The number of repetitions for each element. `repeats` is broadcasted to fit the shape of the given axis. axis (int, optional): The axis along which to repeat values. By default, use the flattened input tensor, and return a flat output tensor. - + Returns: Tensor, has the same shape as input tensor except along the given axis. - + Raises: ValueError: if axis is out of range. TypeError: if input is not a Tensor. 
- + Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` - + Examples: >>> import mindspore.numpy as np >>> x = np.array(3) @@ -1391,7 +1391,7 @@ def repeat(x, repeats, axis=None): const_utils.raise_type_error('axes should be integers') check_axis_in_range_const(axis, x.ndim) axis = axis + x.ndim if axis < 0 else axis - + if len(repeats) == 1: repeats = repeats[0] if repeats == 0: @@ -1406,83 +1406,83 @@ def repeat(x, repeats, axis=None): if rep != 0: repeated_subs.append(repeat_elements(sub, rep, axis)) return P.Concat(axis)(repeated_subs) - - + + def getitem(data, index): """Implementation of `getitem`.""" return data.__getitem__(index) - - + + def setitem(data, index, value): """Implementation of `setitem`.""" return data.__setitem__(index, value) - - + + def item(data, *args): """Implementation of `item`.""" return compile_utils.tensor_item(data, *args) - - + + def itemset(data, *args): """Implementation of `itemset`.""" return compile_utils.tensor_itemset(data, *args) - - + + def ms_iter(xs): """Implementation of `iter`.""" return xs.__ms_iter__() - - + + def ms_next(it): """Implementation of `next`.""" return it.__ms_next__() - - + + def hasnext(it): """Implementation of `hasnext`.""" return it.__ms_hasnext__() - - + + def ms_len(data): """Implementation of `len`.""" return data.__len__() - - + + def floor(x): """Implementation of `floor`.""" return x.__floor__() - - + + def trunc(x): """Implementation of `trunc`.""" return x.__trunc__() - - + + def uadd(x): """Implementation of `uadd`.""" return x.__pos__() - - + + def usub(x): """Implementation of `usub`.""" return x.__neg__() - - + + def scalar_truediv(x, y): """Implementation of `scalar_truediv`.""" return x.__truediv__(y) - - + + def scalar_floordiv(x, y): """Implementation of `scalar_floordiv`.""" return x.__floordiv__(y) - - + + def bool_(x): """Implementation of `bool`.""" return x.__bool__() - - + + def enumerate_(x, start=0): """Enumerate list or tuple or tensor.""" x_type = F.typeof(x) @@ -1496,22 +1496,22 @@ def enumerate_(x, start=0): else: ret = zip(range(start, start + len(x)), x) return ret - - + + def expand_tensor_as(x, y): """Expand tensor""" broadcast_to = P.BroadcastTo(shape_(y)) return broadcast_to(x) - - + + def expand_dims(x, axis): """ Insert a dimension of shape 1 at the specified axis of Tensor """ check_is_int(axis, 'axis') return P.ExpandDims()(x, axis) - - + + def masked_fill(x, mask, value): """ Fills elements of self tensor with value where mask is True. @@ -1523,34 +1523,34 @@ def masked_fill(x, mask, value): mask = P.BroadcastTo(mask_shape)(mask) check_value_type('value', value, [int, float], "Tensor") return C.array_ops.masked_fill(x, mask, value) - - + + def narrow(x, axis, start, length): """ Returns a narrowed tensor from input tensor. The dimension axis is input from start to start + length. 
""" return F.narrow(x, axis, start, length) - - + + def view(x, *shape): """Reshape tensor, if shape is -1, reshape tensor into one dimension""" shape = check_view_shape(shape) return F.reshape(x, shape) - - + + @constexpr def check_is_tuple(x): """check whether x is tuple.""" return isinstance(x, mstype.Tuple) - - + + @constexpr def check_is_func(x): """check whether x is function.""" return isinstance(x, mstype.function_type) - - + + def isinstance_(x, base_type): """Determine whether x is an instance of base.""" x_type = F.typeof(x) @@ -1565,8 +1565,8 @@ def isinstance_(x, base_type): if check_is_func(F.typeof(base_type)) and base_type.__is_csr_func__(): cmp_type = mstype.csr_tensor_type return check_type_same(x_type, cmp_type) - - + + def while_cond(x): """For while condition, if the condition is a tensor, the loop will not be unrolled""" if F.issubclass_(F.typeof(x), F.typeof(mstype.tensor)): @@ -1574,8 +1574,8 @@ def while_cond(x): if is_cond: return F.cast(x, mstype.bool_) return x - - + + def coo_to_csr(x): """convert coo to csr.""" row_indices = x.indices[:, 0] @@ -1587,33 +1587,33 @@ def coo_to_csr(x): values = x.values[sort_idx] indptr = F.coo2csr(row_indices, x.shape[0]) return CSRTensor(indptr, col_indices, values, x.shape) - - + + def coo_to_dense(x): """convert coo to dense.""" zeros_tensor = F.zeros(x.shape, x.values.dtype) return F.tensor_scatter_update(zeros_tensor, x.indices, x.values) - - + + def csr_to_coo(x): """convert csr to coo.""" row_indices = F.csr2coo(x.indptr, x.values.shape[0]) coo_indices = P.Stack(1)((row_indices, x.indices)) return COOTensor(coo_indices, x.values, x.shape) - - + + def csr_to_dense(x): """convert csr to dense.""" coo_tensor = x.to_coo() return coo_tensor.to_dense() - - + + @constexpr def empty_tensor(dtype): """Return empty tensor""" return Tensor([], dtype) - - + + @constexpr def check_type_same(x_type, base_type): """Check x_type is same as base_type.""" @@ -1630,10 +1630,10 @@ def check_type_same(x_type, base_type): slice: mstype.Slice, } sparse_mstype_set = (mstype.csr_tensor_type,) - + has_int = False has_tensor = False - + def to_target_type(origin_type): try: if isinstance(origin_type, type): @@ -1642,7 +1642,7 @@ def check_type_same(x_type, base_type): ret_type = pytype_to_mstype[origin_type] elif origin_type in sparse_mstype_set: ret_type = origin_type - + if ret_type == mstype.Int: nonlocal has_int has_int = True @@ -1661,30 +1661,30 @@ def check_type_same(x_type, base_type): if (isinstance(x_type, mstype.Bool) and has_int) or (isinstance(x_type, mstype.ref_type) and has_tensor): return True return isinstance(x_type, target_type) - - + + @constexpr def get_itemsize(x_type): """get itemsize from tensor's dtype.""" return itemsize_map[x_type] - - + + @constexpr def check_is_tensor(x): """check whether x is tensor.""" if isinstance(x, mstype.tensor_type): return True return False - - + + @constexpr def check_is_tuple_or_list_or_tensor(x, op_name, arg_name): """check whether x is list or tuple or tensor.""" if isinstance(x, (mstype.List, mstype.Tuple, mstype.tensor_type)): return True raise TypeError(f"For '{op_name}', the '{arg_name}' should be tuple or list or tensor, but got {x}.") - - + + @constexpr def check_is_const_int(x, op_name, arg_name): """check whether x is const int.""" @@ -1693,16 +1693,16 @@ def check_is_const_int(x, op_name, arg_name): if not isinstance(x, int): raise TypeError(f"For '{op_name}', the '{arg_name}' should be a const int number, but got {x}.") return True - - + + @constexpr def 
check_is_tensor_bool_cond(shp): """check if tensor is a bool condition""" if shp in ((), (1,)): return True raise ValueError(f"Only tensor which shape is () or (1,) can be converted to bool, but got tensor shape is {shp}") - - + + @constexpr def const_tensor_to_bool(x): """convert bool tensor to bool condition""" @@ -1715,8 +1715,8 @@ def const_tensor_to_bool(x): return bool(x[0]) raise ValueError( f"Only tensor which shape is () or (1,) can be converted to bool, but got tensor shape is {x.shape}") - - + + @constexpr def check_view_shape(x): """Check view function input shape""" @@ -1727,8 +1727,8 @@ def check_view_shape(x): raise ValueError(f"Only one tuple is needed, but got {x}") x = x[0] return x - - + + # convert normal param_check functions to constexpr functions check_astype_dtype_const = constexpr(validator.check_astype_dtype) check_transpose_axis_const = constexpr(validator.check_transpose_axis) @@ -1751,154 +1751,154 @@ check_type_support = constexpr(validator.check_type_support) check_is_int = constexpr(validator.check_is_int) check_type_name = constexpr(validator.check_type_name) check_value_type = constexpr(validator.check_value_type) - - + + def tensor_bool(x): """tensor as condition, if is constant, return immediate bool value""" is_cond = check_is_tensor_bool_cond(F.shape(x)) if is_cond and F.isconstant(x): return const_tensor_to_bool(x) return F.cast(x, mstype.bool_) - - + + def and_(x, y): """Implementation of `and` (`&`).""" return x.__and__(y) - - + + def or_(x, y): """Implementation of `or` (`|`).""" return x.__or__(y) - - + + def matmul(x, y): """Implementation of `matmul` (`@`).""" return x.__matmul__(y) - - + + def float_bool(x): """Implementation of `float_bool`.""" return x != 0.0 - - + + def int_bool(x): """Implementation of `int_bool`.""" return x != 0 - - + + def str_bool(x): """Implementation of `str_bool`.""" if x == "": return False return True - - + + def list_bool(x): """Implementation of `tuple_bool`.""" return len(x) != 0 - - + + def tuple_bool(x): """Implementation of `tuple_bool`.""" return len(x) != 0 - - + + def dict_bool(x): """Implementation of `dict_bool`.""" return len(x) != 0 - - + + def none_bool(x): """Implementation of `none_bool`.""" return False - - + + def func_bool(x): """Implementation of `func_bool`.""" return True - - + + def float_floordiv(x, y): """Implementation of `float_floordiv`.""" return floor(x / y) - - + + ############# # Iteration # ############# - - + + @dataclass(frozen=True) class SequenceIterator: """ SequenceIterator is a util dataclass for iterating sequence object. - + Iterator to use for sequences like List, Array. 
""" - + idx: int seq: list - + @core(ignore_values=True) def __ms_hasnext__(self): """Whether the index is past the length of the sequence.""" return self.idx < ms_len(self.seq) - + @core(ignore_values=True) def __ms_next__(self): """Return the next element and a new iterator.""" return self.seq[self.idx], SequenceIterator(self.idx + 1, self.seq) - - + + def list_iter(xs): """Iterator for List.""" return SequenceIterator(0, xs) - - + + def array_iter(xs): """Iterator for Array.""" return SequenceIterator(0, xs) - - + + def tuple_next(xs): """Next tuple.""" return xs[0], tail(xs) - - + + def tuple_hasnext(xs): """Whether the tuple is empty or not.""" return len(xs) > 0 - - + + def list_next(xs): """Next list.""" return xs[0], tail(xs) - - + + def list_hasnext(xs): """Whether the list is empty or not.""" return len(xs) > 0 - - + + # pylint: disable=redefined-outer-name def list_append(self_, item): return _append(self_, item) - - + + def list_insert(self_, index, obj): """Insert into list""" return _insert(self_, index, obj) - + ################# # Array methods # ################# - - + + def to_array(x): """Implementation of `to_array`.""" return x.__ms_to_array__() - - + + def filter_(fun, iter_): """Support the use of built-in function filter.""" result = [] @@ -1906,71 +1906,71 @@ def filter_(fun, iter_): if fun(elem): result.append(elem) return result - + ################## # Sparse methods # ################## - - + + def csr_astype(x, dtype): """Implementation of `astype` for CSRTensor.""" data = x.values.astype(dtype) return F.make_csr_tensor(x.indptr, x.indices, data, x.shape) - - + + def csr_sum(x, axis): """Implementation of `sum` for CSRTensor.""" return F.csr_reduce_sum(x, axis) - - + + def csr_abs(x): """Implementation of `abs` for CSRTensor.""" data = F.absolute(x.values) return F.make_csr_tensor(x.indptr, x.indices, data, x.shape) - - + + def csr_mv(x, dense_vector): """Implementation of `abs` for CSRTensor.""" check_value_type('dense_vector', dense_vector, (Tensor,), 'CSRTensor.mv') return F.csr_mv(x, dense_vector) - - + + def csr_to_tuple(x): """Implementation of `to_tuple` for CSRTensor.""" res = (x.indptr, x.indices, x.values, x.shape) return res - - + + def coo_astype(x, dtype): """Implementation of `astype` for COOTensor.""" data = x.values.astype(dtype) return F.make_coo_tensor(x.indices, data, x.shape) - - + + def coo_to_tuple(x): """Implementation of `to_tuple` for COOTensor.""" return x.indices, x.values, x.shape - - + + def coo_abs(x): """Implementation of `abs` for COOTensor.""" data = F.absolute(x.values) return F.make_coo_tensor(x.indices, data, x.shape) - + ################ # Sparse Attrs # ################ - - + + def sparse_size_(x): """ Return the size of SparseTensor.values. That is the number of non-zero values in SparseTensor. """ return size_(x.values) - - + + def sparse_ndim_(x): """ Return the ndim of SparseTensor, according to its dense shape. 
""" - return F.tuple_len(x.shape) + return F.tuple_len(x.shape) \ No newline at end of file diff --git a/src/mindspore2022/mindspore/python/mindspore/_extends/parse/trope.py b/src/mindspore2022/mindspore/python/mindspore/_extends/parse/trope.py index 84ec9562..f5d06128 100644 --- a/src/mindspore2022/mindspore/python/mindspore/_extends/parse/trope.py +++ b/src/mindspore2022/mindspore/python/mindspore/_extends/parse/trope.py @@ -50,55 +50,45 @@ __all__ = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod', 'eq', 'ne', 'lt', def MakeTuple(*elts): # pragma: no cover - """Tuple builder.""" + """Tuple builder.""" # 创建元组的构造函数 raise RuntimeError('This operation is not meant to be called directly.') - def make_dict(key, value): # pragma: no cover - """Dict builder.""" + """Dict builder.""" # 创建字典的构造函数 raise RuntimeError('This operation is not meant to be called directly.') - def make_list(*elts): # pragma: no cover - """List builder.""" + """List builder.""" # 创建列表的构造函数 raise RuntimeError('This operation is not meant to be called directly.') - def make_slice(*elts): # pragma: no cover - """Slice builder.""" + """Slice builder.""" # 创建切片的构造函数 raise RuntimeError('This operation is not meant to be called directly.') - def make_range(*elts): # pragma: no cover - """Range tuple builder.""" + """Range tuple builder.""" # 创建范围元组的构造函数 raise RuntimeError('This operation is not meant to be called directly.') - def switch(cond, tb, fb): # pragma: no cover - """Switch statement, returns one of the two values.""" + """Switch statement, returns one of the two values.""" # 返回两个值中的一个的开关语句 raise RuntimeError('This operation is not meant to be called directly.') - def hasnext(it): # pragma: no cover - """Hasnext function.""" + """Hasnext function.""" # 判断是否有下一个元素的函数 raise RuntimeError('This operation is not meant to be called directly.') - def to_array(x): - """The to_array function.""" + """The to_array function.""" # 将输入转换为数组的函数 raise RuntimeError('This operation is not meant to be called directly.') - def not_contains(x): # pragma: no cover - """Not in function.""" + """Not in function.""" # 判断元素是否不在集合中的函数 raise RuntimeError('This operation is not meant to be called directly.') - def while_cond(x): # pragma: no cover - """Not in function.""" + """Not in function.""" # 判断条件是否成立的函数 raise RuntimeError('This operation is not meant to be called directly.') - def bool_(x): # pragma: no cover - """judge true function.""" + """judge true function.""" # 判断一个值是否为真值的函数 raise RuntimeError('This operation is not meant to be called directly.') diff --git a/src/mindspore2022/mindspore/python/mindspore/_extends/remote/kernel_build_server.py b/src/mindspore2022/mindspore/python/mindspore/_extends/remote/kernel_build_server.py index 72f589f3..02678768 100644 --- a/src/mindspore2022/mindspore/python/mindspore/_extends/remote/kernel_build_server.py +++ b/src/mindspore2022/mindspore/python/mindspore/_extends/remote/kernel_build_server.py @@ -16,27 +16,37 @@ import os from mindspore import log as logger from mindspore._extends.parallel_compile.akg_compiler.akg_process import create_akg_parallel_process - - + + class Messager: - + '''Messager''' - + def __init__(self, fdin, fdout): + """ + 初始化 Messager 类 + + Args: + fdin: 输入文件描述符 + fdout: 输出文件描述符 + """ self.fdin = fdin self.fdout = fdout self.fin = os.fdopen(fdin, "r") self.fout = os.fdopen(fdout, "w") self.message = '' - + def __del__(self): + """ + 删除 Messager 实例时关闭文件描述符 + """ os.close(self.fdin) os.close(self.fdout) - + def get_message(self): """ - Get message from remote - + 从远程获取消息 
+ Returns: message """ @@ -58,13 +68,13 @@ class Messager: self.send_ack() self.exit() return self.message - + def send_res(self, res, keep_format=True): """ - Send result to remote - + Send the result to the remote client + Args: - keep_format: True or False + keep_format: True or False """ logger.debug(f"[OUT] {str(res)}") if keep_format: @@ -72,7 +82,7 @@ class Messager: else: res_str = str(res).replace('\n', '').replace('\r', '').replace(' ', '') tag = '[~]' # The same as client kTAG - + # Not write by print(tag + res_str, flush=True) any more try: self.fout.write(tag + res_str + "\n") @@ -82,69 +92,76 @@ class Messager: self.exit() finally: pass - + def send_ack(self, success=True): """ - Send ack to remote - + Send an acknowledgement to the remote client + Args: - success: True or False + success: True or False """ if success: self.send_res('ACK') else: self.send_res('ERR') - + def loop(self): """ - Messaging loop + Message loop """ while True: self.handle() - + def run(self): + """Run the message loop.""" self.loop() - + def handle(self): """ - A interface communicates with remote. - + Interface for communicating with the remote client. + Note: - All subclasses should override this interface. + All subclasses should override this interface. """ raise NotImplementedError - + def exit(self): """ - A interface handles the procedure before exit. - + Handle the procedure before exiting. + Note: - All subclasses should override this interface. + All subclasses should override this interface. """ raise NotImplementedError - - + + class AkgBuilder(): """Akg building wrapper""" - + def __init__(self, platform): + """ + Initialize the AkgBuilder. + + Args: + platform: platform identifier + """ self.platform = platform self.attrs = None - + def create(self, process_num, waitime): - """ Create akg processor""" + """ Create the akg processor""" self.akg_processor = create_akg_parallel_process(process_num, waitime, self.platform) - + def accept_json(self, json): - """ Accept json""" + """ Accept JSON data""" return self.akg_processor.accept_json(json) - + def compile(self): - """Compile""" + """Compile""" return self.akg_processor.compile(self.attrs) - + def handle(self, messager, arg): - """Handle message about akg""" + """Handle messages about akg""" if arg == 'AKG/START': messager.send_ack() process_num_str = messager.get_message() @@ -172,7 +189,8 @@ class AkgBuilder(): break else: raise RuntimeError("Unknown message type: %s" % arg) - - + + def get_logger(): - return logger + """Get the logger.""" + return logger \ No newline at end of file diff --git a/src/mindspore2022/mindspore/python/mindspore/_extends/remote/kernel_build_server_akg.py b/src/mindspore2022/mindspore/python/mindspore/_extends/remote/kernel_build_server_akg.py index bd1ee1fd..a81c1100 100644 --- a/src/mindspore2022/mindspore/python/mindspore/_extends/remote/kernel_build_server_akg.py +++ b/src/mindspore2022/mindspore/python/mindspore/_extends/remote/kernel_build_server_akg.py @@ -20,19 +20,24 @@ from mindspore._extends.remote.kernel_build_server import Messager, get_logger, class AkgMessager(Messager): ''' - Default Messager for akg kernels. - It works as a server, communicating with c++ client. + Default Messager for akg kernels. + It works as a server, communicating with the C++ client. ''' def __init__(self, fdin, fdout): + """ + Initialize the AkgMessager instance. + :param fdin: input file descriptor + :param fdout: output file descriptor + """ super().__init__(fdin, fdout) get_logger().info("[TRACE] Akg Messager init...") self.akg_builder = AkgBuilder("default") def handle(self): """ - Communicate with remote client.
- Reference protocol between them at PR#4063 + Communicate with the remote client. + See PR#4063 for the reference protocol between them. """ arg = self.get_message() if "AKG" in arg: @@ -42,11 +47,18 @@ class AkgMessager(Messager): self.exit() def exit(self): + """ + Exit the AkgMessager. + """ get_logger().info("[TRACE] Akg Messager Exit...") exit() if __name__ == '__main__': + """ + Program entry point. + Check the command-line arguments and initialize an AkgMessager instance. + """ warnings.simplefilter("ignore") if len(sys.argv) != 3: raise Exception('Incorrect argv: {}'.format(sys.argv)) diff --git a/src/mindspore2022/mindspore/python/mindspore/_extends/remote/kernel_build_server_ascend.py b/src/mindspore2022/mindspore/python/mindspore/_extends/remote/kernel_build_server_ascend.py index dc276dca..65469320 100644 --- a/src/mindspore2022/mindspore/python/mindspore/_extends/remote/kernel_build_server_ascend.py +++ b/src/mindspore2022/mindspore/python/mindspore/_extends/remote/kernel_build_server_ascend.py @@ -16,23 +16,24 @@ import sys import warnings import json - + from mindspore._extends.parallel_compile.tbe_compiler.tbe_job_manager import TbeJobManager from mindspore._extends.remote.kernel_build_server import Messager, get_logger, AkgBuilder - - + + class AscendMessager(Messager): """ Ascend Messager It works as a server, communicating with c++ client. """ - + # Initialization method def __init__(self, fdin, fdout): super().__init__(fdin, fdout) get_logger().info("[TRACE] Ascend Messager init...") self.tbe_builder = TbeJobManager() self.akg_builder = AkgBuilder("ASCEND") - + + # Handle communication with the remote client def handle(self): """ Communicate with remote client. @@ -51,7 +52,7 @@ class AscendMessager(Messager): self.exit() finally: pass - + if "job_type" in job_json: res = self.tbe_builder.job_handler(arg) self.send_res(res) @@ -59,17 +60,18 @@ get_logger().error("[TRACE] Request is not a TBE Job message: {}".format(arg)) self.send_ack(False) self.exit() - + + # Exit method def exit(self): self.tbe_builder.reset() get_logger().info("[TRACE] Ascend Messager Exit...") exit() - - + + if __name__ == '__main__': warnings.simplefilter("ignore") if len(sys.argv) != 3: raise Exception('Incorrect argv: {}'.format(sys.argv)) get_logger().debug(f"[TRACE] argv: {str(sys.argv)}") messager = AscendMessager(int(sys.argv[1]), int(sys.argv[2])) - messager.run() + messager.run() \ No newline at end of file
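The resources.py hunks above center on two lookup tables: parse_object_map sends each ast operator node to a (namespace, symbol) pair, and convert_object_map later swaps the resolved trope symbols for executable ops. A minimal sketch of that two-stage dispatch using only the standard library; the resolver and the table values below are hypothetical, since the real resolution happens inside MindSpore's parser:

    import ast
    import operator

    # Stage 1: ast node class -> (namespace, symbol), like parse_object_map.
    trope_ns = {'add': 'T.add', 'sub': 'T.sub'}
    parse_map = {ast.Add: (trope_ns, 'add'), ast.Sub: (trope_ns, 'sub')}

    # Stage 2: trope symbol -> executable op, like convert_object_map.
    convert_map = {'T.add': operator.add, 'T.sub': operator.sub}

    def resolve(op_node):
        ns, name = parse_map[type(op_node)]   # stage 1: node -> symbol
        return convert_map[ns[name]]          # stage 2: symbol -> op

    expr = ast.parse('7 - 3', mode='eval').body       # an ast.BinOp
    fn = resolve(expr.op)                             # operator.sub
    print(fn(expr.left.value, expr.right.value))      # 4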
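The swapaxes hunk builds its transpose permutation purely from tuple slices of range(ndim). The same construction in plain Python, under the function's own precondition that axis1 < axis2 after normalization:

    def swap_perm(ndim, axis1, axis2):
        # Mirrors the tuple arithmetic in standard_method.swapaxes.
        perm = tuple(range(ndim))
        new_perm = (perm[0:axis1] + perm[axis2:axis2 + 1] +
                    perm[axis1 + 1:axis2] + perm[axis1:axis1 + 1])
        if axis2 + 1 < ndim:
            new_perm += perm[axis2 + 1:]
        return new_perm

    print(swap_perm(3, 0, 2))   # (2, 1, 0): shape (2,3,4) becomes (4,3,2)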
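searchsorted above runs a branch-free binary search for a fixed ceil(log2(n + 1)) rounds instead of looping until convergence, which suits graph compilation. The same scheme with plain scalars; this is a sketch, since the real code keeps i and j as filled tensors and selects between them with a comparison mask:

    import math

    def searchsorted_left(a, v):
        i, j = 0, len(a)
        # Fixed iteration count, matching get_log2_size(size + 1) above.
        for _ in range(math.ceil(math.log2(len(a) + 1))):
            mid = (i + j) // 2
            if v <= a[mid]:      # 'left': first suitable insertion point
                j = mid
            else:
                i = mid
        return j

    print(searchsorted_left([1, 2, 3, 4, 5], 3))   # 2, as in the docstring example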
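The diagonal hunks avoid gather-style indexing by multiplying the input with a broadcast identity matrix and reduce-summing the last axis. A NumPy sketch of that mask trick for the 2-D, main-diagonal case; the offset and batch handling from the original are omitted here:

    import numpy as np

    def diag_via_mask(x):
        n, m = x.shape
        e = np.eye(n, m, dtype=x.dtype)        # mask selecting the diagonal
        return (x * e).sum(-1)[:min(n, m)]     # row i collapses to x[i, i]

    a = np.arange(4).reshape(2, 2)
    print(diag_via_mask(a))                    # [0 3], matching a.diagonal()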
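The var and std hunks reduce to var = sum((x - mean)^2) / (N - ddof) and std = var ** 0.5. The docstring example checks out by hand:

    xs = [1.0, 2.0, 3.0, 4.0]
    m = sum(xs) / len(xs)                                  # 2.5
    var = sum((v - m) ** 2 for v in xs) / (len(xs) - 0)    # ddof = 0
    print(var, var ** 0.5)                                 # 1.25 1.118033988749895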
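Finally, the kernel-build servers all speak a newline-delimited protocol over a pair of inherited file descriptors: requests arrive on fdin, and replies go out on fdout prefixed with the '[~]' tag, with 'ACK'/'ERR' as status payloads. A self-contained sketch of that wire format using a forked child as the server; it is Unix-only, and the pipe plumbing here is illustrative rather than the fd numbers MindSpore actually passes in sys.argv:

    import os

    srv_r, cli_w = os.pipe()    # client -> server
    cli_r, srv_w = os.pipe()    # server -> client

    if os.fork() == 0:                         # child acts as a one-shot server
        fin, fout = os.fdopen(srv_r, 'r'), os.fdopen(srv_w, 'w')
        msg = fin.readline().strip()           # read one request line
        fout.write('[~]' + ('ACK' if msg == 'PING' else 'ERR') + '\n')
        fout.flush()
        os._exit(0)

    fout, fin = os.fdopen(cli_w, 'w'), os.fdopen(cli_r, 'r')
    fout.write('PING\n')                       # send one request
    fout.flush()
    print(fin.readline().strip())              # [~]ACK
    os.wait()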