- #!/usr/bin/env python
- # -*- coding: utf-8 -*-
- import collections
- import warnings
- import copy
- import numpy as np
- from scipy import signal
- import mindspore as ms
- import mindspore.numpy
- from mindspore import ops
- from mindspore.common import dtype as mstype
- from mindspore.ops import constexpr
- from ms_adapter.pytorch.tensor import tensor, cast_tensor, cast_to_ms_tensor, cast_to_adapter_tensor
- from ms_adapter.utils import unsupported_attr, get_backend, pynative_mode_condition
- from ms_adapter.pytorch.tensor import Tensor as adapter_tensor
- from ms_adapter.pytorch.common._inner import _out_inplace_assign, _out_limit_pynative
- from ms_adapter.pytorch.common.dtype import _TypeDict
-
-
- def empty(*size, out=None, dtype=None, layout=None,
-           device=None, requires_grad=False, pin_memory=False,
-           memory_format=None):
- unsupported_attr(layout)
- unsupported_attr(device)
- unsupported_attr(requires_grad)
- unsupported_attr(pin_memory)
- unsupported_attr(memory_format)
- if dtype is None:
- dtype = ms.float32
-
- _size = size
- if isinstance(size[0], (tuple, list)):
- _size = size[0]
- output = ms.numpy.empty(_size, dtype)
- return _out_inplace_assign(out, output, "empty")
-
-
- def eye(n, m=None, *, out=None, dtype=None, layout=None,
-         device=None, requires_grad=False):
- unsupported_attr(layout)
- unsupported_attr(device)
- unsupported_attr(requires_grad)
-
- if m is None:
- m = n
- if dtype is None:
- dtype = ms.float32
-
- output = ms.ops.eye(n, m, dtype)
- return _out_inplace_assign(out, output, "eye")
-
-
- def cat(tensors, dim=0, *, out=None):
- if tensors is None:
- raise ValueError('`tensors` in `{}` should not be None'.format(cat.__name__))
-
- if not isinstance(tensors, (tuple, list)):
- raise TypeError('`tensors` in `{}` should be tuple or list'.format(cat.__name__))
-
- inputs = cast_to_ms_tensor(tensors)
- output = ops.concat(inputs, dim)
- return _out_inplace_assign(out, output, "cat")
-
- def concat(tensors, dim=0, *, out=None):
- if tensors is None:
- raise ValueError('`tensors` in `{}` should not be None'.format(concat.__name__))
-
- if not isinstance(tensors, (tuple, list)):
- raise TypeError('`tensors` in `{}` should be tuple or list'.format(concat.__name__))
-
- inputs = cast_to_ms_tensor(tensors)
- output = ops.concat(inputs, dim)
- return _out_inplace_assign(out, output, "concat")
-
- def ones(*size, out=None, dtype=None, layout=None,
- device=None, requires_grad=False):
- unsupported_attr(layout)
- unsupported_attr(device)
- unsupported_attr(requires_grad)
-
- if dtype is None:
- dtype = ms.float32
-
-     if isinstance(size[0], (tuple, list)):
-         size = tuple(size[0])
-     output = ms.ops.ones(size, type=dtype)
- return _out_inplace_assign(out, output, "ones")
-
-
- def stack(tensors, dim=0, *, out=None):
- tensors = cast_to_ms_tensor(tensors)
- output = ops.stack(tensors, dim)
- return _out_inplace_assign(out, output, "stack")
-
-
- def meshgrid(*tensors, indexing='ij'):
- if isinstance(tensors[0], (list, tuple)):
- input_tensor = tuple(*tensors)
- else:
- input_tensor = tensors
-
- @cast_tensor
- def _call_ms_api(input_tensor):
- return mindspore.ops.meshgrid(input_tensor, indexing=indexing)
- return _call_ms_api(input_tensor)
-
-
- def log(input, *, out=None):
- input = cast_to_ms_tensor(input)
- output = ops.log(input)
- return _out_inplace_assign(out, output, "log")
-
-
- def chunk(input, chunks, dim=0):
- input_shape = input.shape
- dim_size = input_shape[dim]
- if dim_size % chunks != 0:
- raise ValueError("Until now, For 'ms_adapter.pytorch.chunk', the value of `input.shape[dim]` "
- "should be divisible by `chunk`, but got input.shape[{}]:{}, chunks:{}."
- .format(dim ,dim_size, chunks))
-
- @cast_tensor
- def _call_ms_api(input):
- return mindspore.ops.split(input, dim, chunks)
- return _call_ms_api(input)
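-
- # Rough usage sketch for `chunk` (hypothetical values; assumes the divisibility
- # check above passes):
- #   x = tensor([1., 2., 3., 4.])
- #   chunk(x, 2)   # -> (Tensor([1., 2.]), Tensor([3., 4.]))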
-
-
- def diag(input, diagonal=0, *, out=None):
-     # TODO
-     # Maybe use mindspore.ops.diag instead. Currently, that operator does not support CPU.
- input = cast_to_ms_tensor(input)
- output = ms.numpy.diag(input, diagonal)
- return _out_inplace_assign(out, output, "diag")
-
-
- def sqrt(input, *, out=None):
- if input.dtype == mstype.int32 or input.dtype == mstype.int64:
- input = input.astype(mstype.float32)
-
- input = cast_to_ms_tensor(input)
- output = ops.sqrt(input)
- return _out_inplace_assign(out, output, "sqrt")
-
-
- def mm(input, mat2, *, out=None):
- output_type = input.dtype
- if input.dtype == mstype.int32 or input.dtype == mstype.int64:
- input = input.astype(mstype.float32)
-
- input1 = cast_to_ms_tensor(input)
- input2 = cast_to_ms_tensor(mat2)
- output = ops.matmul(input1, input2)
- output = ops.cast(output, output_type)
- return _out_inplace_assign(out, output, "mm")
-
-
- def zeros(*size, out=None, dtype=None, device=None, requires_grad=False):
- unsupported_attr(device)
- unsupported_attr(requires_grad)
-
- if isinstance(size[0], (tuple, list)):
- size = tuple(size[0])
-
- if len(size) < 2:
- raise ValueError("Until now, For 'ms_adapter.pytorch.zeros', the size of `size` sholud bigger than 1, "
- "but got {}.".format(len(size)))
-
- if dtype is None:
- dtype = mstype.float32
-
- output = ms.ops.zeros(size, dtype)
- return _out_inplace_assign(out, output, "zeros")
-
-
- def div(input, other, *, rounding_mode=None, out=None):
- input = cast_to_ms_tensor(input)
- other = cast_to_ms_tensor(other)
- if rounding_mode is None:
- if input.dtype == mstype.int64 or input.dtype == mstype.int32:
- input = ops.cast(input, mstype.float32)
- output = ms.ops.div(input, other)
-
- if rounding_mode == "trunc":
- output = ms.ops.div(input, other)
- if input.dtype == ms.int64:
- dtype_ = output.dtype
- output = ms.numpy.trunc(output, dtype=dtype_)
- else:
- output = ms.ops.trunc(output)
-
- if rounding_mode == "floor":
- input_dtype = input.dtype
- output = ms.ops.floor_div(input, other)
- output = ms.ops.cast(output, input_dtype)
-
- return _out_inplace_assign(out, output, "div")
-
-
- def flatten(input, start_dim=0, end_dim=-1):
- @constexpr
- def get_dst_shape():
- input_shape = input.shape
- rank = len(input_shape)
- start = start_dim
- end = end_dim
-
- if start < 0:
- start += rank
-
- if end < 0:
- end += rank
-
- dst_shape = []
- i = 0
- while i != start:
- dst_shape.append(input_shape[i])
- i = i + 1
-
- flatten_shape = 1
- while i <= end:
- flatten_shape = flatten_shape * input_shape[i]
- i = i + 1
- dst_shape.append(flatten_shape)
-
- while i < rank:
- dst_shape.append(input_shape[i])
- i = i + 1
-
- return tuple(dst_shape)
-
- shape = get_dst_shape()
-
- @cast_tensor
- def _call_ms_api(input):
- return ms.ops.reshape(input, shape)
- return _call_ms_api(input)
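-
- # Shape sketch for `flatten`: dims [start_dim, end_dim] collapse into one, e.g.
- #   x = ones(2, 3, 4)
- #   flatten(x).shape         # -> (24,)
- #   flatten(x, 1).shape      # -> (2, 12)
- #   flatten(x, 0, 1).shape   # -> (6, 4)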
-
-
- def unflatten(input, dim, sizes):
- @constexpr
- def get_unflatten_size():
- input_shape = input.shape
- input_rank = len(input_shape)
- if not isinstance(sizes, (tuple, list)):
- raise TypeError(f"Type of `sizes` should be `Tuple` or `List`, but got {type(sizes)}")
-
- if len(sizes) == 0:
- raise ValueError("`sizes` must be non-empty")
-
- if isinstance(dim, str):
- raise TypeError("Until Now, `dim` not support type of str in `unflatten`")
-
- _dim = dim
- if _dim < 0:
- _dim += input_rank
-
- if _dim < 0 or _dim >= input_rank:
- raise ValueError("`dim` should be in range [{}, {}), but got {}".format(
- -input_rank, input_rank, dim))
-
- input_shape_list = list(input_shape)
- sizes_list = list(sizes)
- _sizes_mul = 1
- for s in sizes_list:
- _sizes_mul *= s
- if _sizes_mul != input_shape_list[_dim]:
- raise ValueError(f"unflatten: Provided `sizes` {sizes_list} don't multiply up to the"
- f"size of dim {dim} ({input_shape_list[_dim]}) in the input tensor")
-
- out_shape = input_shape[:_dim] + tuple(sizes) + input_shape[_dim + 1:]
- return out_shape
-
- out_shape = get_unflatten_size()
- input = cast_to_ms_tensor(input)
- out = ms.ops.reshape(input, out_shape)
- return cast_to_adapter_tensor(out)
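-
- # Shape sketch for `unflatten` (the inverse of `flatten` along one dim):
- #   x = ones(2, 12)
- #   unflatten(x, 1, (3, 4)).shape    # -> (2, 3, 4)
- #   unflatten(x, -1, (2, 6)).shape   # -> (2, 2, 6)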
-
-
- def transpose(input, dim0, dim1):
- @constexpr
- def _check_dim(dim, rank):
- if dim >= rank or dim < -rank:
- raise ValueError("dim is out of bound, should be in range [{}, {})"
- .format(-rank, rank))
-
- @constexpr
- def _get_perm():
- rank = len(input.shape)
- _check_dim(dim0, rank)
- _check_dim(dim1, rank)
- _perm = list(range(rank))
- _perm[dim0] = dim1
- _perm[dim1] = dim0
- return tuple(_perm)
-
- @cast_tensor
- def _call_ms_api(input):
- return ms.ops.transpose(input, _get_perm())
- return _call_ms_api(input)
-
-
- def multinomial(input, num_samples, replacement=False, *, generator=None, out=None):
- unsupported_attr(generator)
- if generator is not None:
- warnings.warn("torch.multinomal don't support generator now.")
- input_tensor = cast_to_ms_tensor(input).astype(mstype.float32)
- output = ms.ops.multinomial(input_tensor, num_samples, replacement)
- return _out_inplace_assign(out, output, "multinomial")
-
-
- def randperm(n, *, generator=None, out=None, dtype=mstype.int64, layout=None, device=None,
- requires_grad=False, pin_memory=False):
- unsupported_attr(generator)
- unsupported_attr(layout)
- unsupported_attr(device)
- unsupported_attr(requires_grad)
- unsupported_attr(pin_memory)
-
- if generator is not None:
- warnings.warn("torch.randperm don't support generator now.")
- if layout is not None:
- warnings.warn("torch.randperm don't support layout now.")
-
- output = np.random.permutation(n)
- output = tensor(output, dtype=dtype)
- return _out_inplace_assign(out, output, "randperm")
-
-
- def randint(low, high, size, *, generator=None, out=None, dtype=None, layout=None,
- device=None, requires_grad=False):
- unsupported_attr(generator)
- unsupported_attr(layout)
- unsupported_attr(device)
- unsupported_attr(requires_grad)
-
- if generator is not None:
- warnings.warn("torch.randperm don't support generator now.")
- if layout is not None:
- warnings.warn("torch.randperm don't support layout now.")
-
- output = np.random.randint(low, high, size)
- output = tensor(output, dtype=dtype)
- return _out_inplace_assign(out, output, "randint")
-
-
- def as_tensor(data, dtype=None, device=None):
- unsupported_attr(device)
-
- if isinstance(data, (tuple, list)):
-         data = [i.data.item() if isinstance(i, adapter_tensor) else i for i in data]
-
- output = ms.Tensor(data, dtype=dtype)
- return cast_to_adapter_tensor(output)
-
-
- def zeros_like(input, dtype=None, layout=None, device=None, requires_grad=False, memory_format=None):
- unsupported_attr(layout)
- unsupported_attr(device)
- unsupported_attr(requires_grad)
- unsupported_attr(memory_format)
- input_x = ms.Tensor(input, dtype=dtype)
- output = ms.ops.ZerosLike()(input_x)
- return cast_to_adapter_tensor(output)
-
-
- def ones_like(input, dtype=None, layout=None, device=None, requires_grad=False, memory_format=None):
- unsupported_attr(layout)
- unsupported_attr(device)
- unsupported_attr(requires_grad)
- unsupported_attr(memory_format)
- input_x = ms.Tensor(input, dtype=dtype)
- output = ms.ops.OnesLike()(input_x)
- return cast_to_adapter_tensor(output)
-
-
- def empty_like(input, dtype=None, layout=None, device=None, requires_grad=False, memory_format=None):
- unsupported_attr(layout)
- unsupported_attr(device)
- unsupported_attr(requires_grad)
- unsupported_attr(memory_format)
- if dtype is None:
- dtype = ms.float32
-     output = ms.numpy.empty_like(input, dtype=dtype)
-     return cast_to_adapter_tensor(output)
-
-
- def full(size, fill_value, out=None, dtype=None, layout=None, device=None, requires_grad=False):
- unsupported_attr(layout)
- unsupported_attr(device)
- unsupported_attr(requires_grad)
- output = ms.numpy.full(size, fill_value, dtype)
- return _out_inplace_assign(out, output, "full")
-
-
- def full_like(input, fill_value, dtype=None, layout=None, device=None, requires_grad=False, memory_format=None):
- unsupported_attr(layout)
- unsupported_attr(device)
- unsupported_attr(requires_grad)
- unsupported_attr(memory_format)
- output = ms.numpy.full_like(input, fill_value=fill_value, dtype=dtype)
- return cast_to_adapter_tensor(output)
-
-
- def where(condition, x, y):
- output = ms.numpy.where(condition, x, y)
- return cast_to_adapter_tensor(output)
-
-
- def seed():
-     value = np.floor(np.random.random(1) * (2**32 - 1))
- return ms.set_seed(int(value))
-
-
- def manual_seed(seed):
- return ms.set_seed(seed)
-
-
- def initial_seed():
- return ms.get_seed()
-
-
- def rand(*size, out=None, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False):
- unsupported_attr(layout)
- unsupported_attr(device)
- unsupported_attr(requires_grad)
- unsupported_attr(pin_memory)
- if dtype is None:
- dtype = ms.float32
- output = ms.numpy.rand(*size, dtype=dtype)
- return _out_inplace_assign(out, output, "rand")
-
-
- def linspace(start, end, steps, out=None, dtype=None, device=None, requires_grad=False):
- unsupported_attr(device)
- unsupported_attr(requires_grad)
- if dtype is None:
- dtype = ms.float32
- start = ms.Tensor(start, dtype)
- end = ms.Tensor(end, dtype)
- output = ms.ops.linspace(start, end, steps)
- return _out_inplace_assign(out, output, "linspace")
-
-
- def take(input, index):
- input = cast_to_ms_tensor(input)
- input = ms.numpy.array(input)
- index = ms.numpy.array(index)
- output = ms.numpy.take(input, index)
- return cast_to_adapter_tensor(output)
-
-
- def abs(input, out=None):
- input = cast_to_ms_tensor(input)
- output = ms.numpy.abs(input)
- return _out_inplace_assign(out, output, "abs")
-
-
- def atan2(input, other, out=None):
- input = cast_to_ms_tensor(input)
- other = cast_to_ms_tensor(other)
- output = ms.ops.atan2(input, other)
- return _out_inplace_assign(out, output, "atan2")
-
-
- def clamp(input, min=None, max=None, out=None):
- input_ms = cast_to_ms_tensor(input)
- type = input_ms.dtype
- if min is not None and max is not None and min > max:
- output = ms.ops.ones_like(input_ms).astype(type)*max
- else:
- if min is not None:
- min = ms.Tensor(min, type)
- if max is not None:
- max = ms.Tensor(max, type)
- output = ms.ops.clip_by_value(input_ms, min, max)
- return _out_inplace_assign(out, output, "clamp")
-
-
- def cos(input, out=None):
- input = cast_to_ms_tensor(input)
- output = ms.ops.cos(input)
- return _out_inplace_assign(out, output, "cos")
-
-
- class Device():
- def __init__(self, target, index):
- self.type = target
- self.index = index
- def __repr__(self):
- if self.index is None:
- return f"device(type='{self.type}')"
- return f"device(type='{self.type}', index={self.index})"
-
- @constexpr
- def device(type=None, index=None):
- if type is not None:
- if isinstance(type, str):
- if ':' in type:
- if index is not None:
- raise ValueError(f"`type` must not include an index because index was passed explicitly: {type}")
- _target, _id = type.split(':')
- _id = int(_id)
- else:
- _target = type
- _id = index
- return Device(_target, _id)
-
- if isinstance(type, int):
- return Device(get_backend(), type)
-
- if isinstance(type, Device):
- if index is not None:
- raise ValueError("torch.device(): When input is torch.device, `index` can not be set.")
- return Device(type.type, type.index)
-
- raise TypeError("torch.device(): `type` must be type of 'str' or 'torch.device'.")
-
- raise ValueError("torch.device(): `type` can not be None")
-
-
- def fmod(input, other, out=None):
- input = cast_to_ms_tensor(input)
- other = cast_to_ms_tensor(other)
- input = ms.numpy.array(input)
- other = ms.numpy.array(other)
- output = ms.numpy.fmod(input, other)
- return _out_inplace_assign(out, output, "fmod")
-
-
- def frac(input, out=None):
-     # TODO: output = input - floor(abs(input)) * sign(input)
- input = cast_to_ms_tensor(input)
- input = ms.numpy.array(input)
- output = input - ms.numpy.floor(ms.numpy.abs(input)) * ms.numpy.sign(input)
- return _out_inplace_assign(out, output, "frac")
-
-
- def log10(input, out=None):
- input = cast_to_ms_tensor(input)
- input = ms.numpy.array(input)
- output = ms.numpy.log10(input)
- return _out_inplace_assign(out, output, "log10")
-
-
- def log1p(input, out=None):
- input = cast_to_ms_tensor(input)
- input = ms.numpy.array(input)
- output = ms.numpy.log1p(input)
- return _out_inplace_assign(out, output, "log1p")
-
-
- def log2(input, out=None):
- input = cast_to_ms_tensor(input)
- input = ms.numpy.array(input)
- output = ms.numpy.log2(input)
- return _out_inplace_assign(out, output, "log2")
-
-
- def sin(input, out=None):
- input = cast_to_ms_tensor(input)
- output = ms.ops.sin(input)
- return _out_inplace_assign(out, output, "sin")
-
-
- def max(input, dim=None, keepdim=False, *, out=None):
-     # TODO: not support GRAPH_MODE; the max(input, other) overload is not supported
- input = cast_to_ms_tensor(input)
- type = input.dtype
- input = input.astype(ms.float32)
- if dim is None:
- output = input.max(axis=dim, keepdims=keepdim).astype(type)
- if out is not None:
- ops.assign(out, output)
- return out
- return cast_to_adapter_tensor(output)
- output = list(ms.ops.max(input, axis=dim, keep_dims=keepdim))
- value = output[1].astype(type)
- indice = output[0]
- point = collections.namedtuple('max', 'values,indices')
- rlt = point(cast_to_adapter_tensor(value), cast_to_adapter_tensor(indice))
- if out is not None:
- if pynative_mode_condition():
- if len(out) != 2 or not isinstance(out[0], adapter_tensor) or not isinstance(out[1], adapter_tensor):
- raise TypeError("In ms_adapter.torch.max(), `out` should be tuple of Tensors.")
- out[0].assign_value(value)
- out[1].assign_value(indice)
- return out
- else:
-             raise ValueError('In MindSpore static graph mode, `out` in `max` should be None, '
- 'please set out=None and use return value instead of `out`.')
- return rlt
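-
- # Return-value sketch for `max` (hypothetical values): with `dim` given it
- # returns a (values, indices) namedtuple like torch.max; the `out=` form is
- # PyNative-only, as enforced above.
- #   x = tensor([[1., 9.], [8., 2.]])
- #   max(x)                            # -> Tensor(9.)
- #   values, indices = max(x, dim=1)   # values -> [9., 8.], indices -> [1, 0]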
-
-
- def min(input, dim=None, keepdim=False, *, out=None):
-     # TODO: the 'min(input, other, *, out=None)' overload is not supported yet
- input = cast_to_ms_tensor(input)
- if dim is None:
- return cast_to_adapter_tensor(input.min())
-
- indices, result = ms.ops.min(input, axis=dim, keep_dims=keepdim)
- if out is not None:
- if pynative_mode_condition():
- if len(out) != 2 or not isinstance(out[0], adapter_tensor) or not isinstance(out[1], adapter_tensor):
- raise TypeError("In ms_adapter.torch.min(), `out` should be tuple of Tensors.")
- out[0].assign_value(result)
- out[1].assign_value(indices)
- return out
- else:
-             raise ValueError('In MindSpore static graph mode, `out` in `min` should be None, '
- 'please set out=None and use return value instead of `out`.')
- return cast_to_adapter_tensor(result), cast_to_adapter_tensor(indices)
-
-
- def mean(input, dim=None, keepdim=False, *, dtype=None, out=None):
-     # TODO: not support GRAPH_MODE
- input = cast_to_ms_tensor(input)
- if dtype is not None:
- input = input.astype(dtype)
- if dim is not None:
- output = ms.ops.mean(input, axis=dim, keep_dims=keepdim)
- else:
- output = ms.ops.mean(input, keep_dims=keepdim)
- return _out_inplace_assign(out, output, "mean")
-
-
- def round(input, *, decimals=0, out=None):
- input = cast_to_ms_tensor(input)
- if decimals == 0:
- output = ms.ops.round(input)
- else:
- p = 10**decimals
- input = input*p
- output = ms.ops.round(input)/p
- return _out_inplace_assign(out, output, "round")
-
-
- def floor(input, *, out=None):
- input = cast_to_ms_tensor(input)
- output = ms.ops.floor(input)
- return _out_inplace_assign(out, output, "floor")
-
-
- def ceil(input, *, out=None):
- input = cast_to_ms_tensor(input)
- output = ms.ops.ceil(input)
- return _out_inplace_assign(out, output, "ceil")
-
-
- def sign(input, *, out=None):
- input = cast_to_ms_tensor(input)
- output = ms.ops.Sign()(input)
- return _out_inplace_assign(out, output, "sign")
-
-
- def pow(input, exponent, *, out=None):
- if isinstance(input, adapter_tensor):
- input = cast_to_ms_tensor(input)
- if isinstance(exponent, adapter_tensor):
- exponent = cast_to_ms_tensor(exponent)
- output = ms.ops.pow(input, exponent)
-
- return _out_inplace_assign(out, output, "pow")
-
-
- def exp(input, *, out=None):
- input = cast_to_ms_tensor(input)
- shape = input.shape
- if len(shape) > 7:
- input = input.flatten()
- if input.dtype != ms.float64:
- input = input.astype(ms.float32)
- output = ms.ops.exp(input)
- if len(shape) > 7:
- output = output.reshape(shape)
- return _out_inplace_assign(out, output, "exp")
-
-
- def ge(input, other, *, out=None):
- input = cast_to_ms_tensor(input)
- if isinstance(other, adapter_tensor):
- other = cast_to_ms_tensor(other)
- output = ms.ops.ge(input, other)
- return _out_inplace_assign(out, output, "ge")
-
-
- def gt(input, other, *, out=None):
- input = cast_to_ms_tensor(input)
- if isinstance(other, adapter_tensor):
- other = cast_to_ms_tensor(other)
- output = ms.ops.gt(input, other)
- return _out_inplace_assign(out, output, "gt")
-
-
- def le(input, other, *, out=None):
- input = cast_to_ms_tensor(input)
- if isinstance(other, adapter_tensor):
- other = cast_to_ms_tensor(other)
- output = ms.ops.le(input, other)
- return _out_inplace_assign(out, output, "le")
-
-
- def lt(input, other, *, out=None):
- input = cast_to_ms_tensor(input)
- if isinstance(other, adapter_tensor):
- other = cast_to_ms_tensor(other)
- output = 1 - ms.ops.ge(input, other)
- output = output.astype(ms.bool_)
- return _out_inplace_assign(out, output, "lt")
-
-
- def sum(input, dim=None, keepdim=False, *, dtype=None, out=None):
- input = cast_to_ms_tensor(input)
- if dtype is not None:
- input = input.astype(dtype)
- output = input.sum(axis=dim, keepdims=keepdim)
- return _out_inplace_assign(out, output, "sum")
-
-
- def median(input, dim=None, keepdim=False, *, out=None):
- input = cast_to_ms_tensor(input)
- if dim is None:
- output, _ = ms.ops.median(input, global_median=True, keep_dims=keepdim)
- return cast_to_adapter_tensor(output)
- else:
- output = list(ms.ops.median(input, axis=dim, keep_dims=keepdim))
- value = output[0]
- indice = output[1]
- point = collections.namedtuple('median', 'values,indices')
- rlt = point(cast_to_adapter_tensor(value), cast_to_adapter_tensor(indice))
- if out is not None:
- if pynative_mode_condition():
- if len(out) != 2 or not isinstance(out[0], adapter_tensor) or not isinstance(out[1], adapter_tensor):
- raise TypeError("In ms_adapter.torch.median(), `out` should be tuple of Tensors.")
- out[0].assign_value(value)
- out[1].assign_value(indice)
- return out
- else:
-                 raise ValueError('In MindSpore static graph mode, `out` in `median` should be None, '
- 'please set out=None and use return value instead of `out`.')
- return rlt
-
-
- def matmul(input, other, *, out=None):
- input = cast_to_ms_tensor(input)
- other = cast_to_ms_tensor(other)
- output = ms.ops.matmul(input, other)
- return _out_inplace_assign(out, output, "matmul")
-
-
- def norm(input, p='fro', dim=None, keepdim=False, out=None, dtype=None):
- input = cast_to_ms_tensor(input)
- if dtype is None:
- dtype = ms.float32
- input = ms.numpy.array(input, dtype=dtype)
- output = ms.numpy.norm(input, ord=p, axis=dim, keepdims=keepdim)
- return _out_inplace_assign(out, output, "norm")
-
-
- def stft(input, n_fft, hop_length=None, win_length=None, window=None, center=True,
- pad_mode='reflect', normalized=False, onesided=None, return_complex=None):
- unsupported_attr(normalized)
- unsupported_attr(onesided)
- unsupported_attr(return_complex)
- input = cast_to_ms_tensor(input)
- input = input.asnumpy()
- if pad_mode == 'reflect':
- pad_mode = 'even'
- if window is None:
- window = 'hann'
- if hop_length is None:
-         hop_length = n_fft // 4
- if win_length is None:
- win_length = n_fft
-     # scipy takes the per-segment overlap rather than the hop size, and returns
-     # the raw (f, t, Zxx) numpy triple instead of an adapter tensor
-     output = signal.stft(input, window=window, nperseg=win_length, noverlap=win_length - hop_length,
-                          padded=center, boundary=pad_mode)
-     return output
-
-
- def istft():
- raise NotImplementedError
-
-
- def bartlett_window(window_length, periodic=True, dtype=None, layout=None, device=None, requires_grad=False):
- unsupported_attr(layout)
- unsupported_attr(device)
- unsupported_attr(requires_grad)
- input = tensor(window_length)
- output = ms.ops.bartlett_window(input, periodic=periodic, dtype=dtype)
- return cast_to_adapter_tensor(output)
-
-
- def hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, dtype=None,
- layout=None, device=None, requires_grad=False):
- unsupported_attr(periodic)
- unsupported_attr(alpha)
- unsupported_attr(beta)
- unsupported_attr(dtype)
- unsupported_attr(layout)
- unsupported_attr(device)
- unsupported_attr(requires_grad)
- output = ms.numpy.hamming(window_length)
- return cast_to_adapter_tensor(output)
-
-
- def hann_window(window_length, periodic=False, dtype=None, layout=None, device=None, requires_grad=False):
- unsupported_attr(periodic)
- unsupported_attr(dtype)
- unsupported_attr(layout)
- unsupported_attr(device)
- unsupported_attr(requires_grad)
- if periodic is True:
- raise NotImplementedError("periodic is not supported to True.")
- output = ms.numpy.hanning(window_length)
- return cast_to_adapter_tensor(output)
-
-
- def cumsum(input, dim, dtype=None, out=None):
- input = cast_to_ms_tensor(input)
- output = ms.ops.cumsum(input, axis=dim, dtype=dtype)
- return _out_inplace_assign(out, output, "cumsum")
-
-
- def einsum(equation, *operands):
-     operands = cast_to_ms_tensor(operands)
-     output = ms.ops.Einsum(equation=equation)(operands)
- return cast_to_adapter_tensor(output)
-
-
- def histc(input, bins=100, min=0, max=0, out=None):
- input = cast_to_ms_tensor(input)
- nbins = bins
- hist = ms.ops.HistogramFixedWidth(nbins)
-     range_op = ms.Tensor([min, max], ms.float32)
-     output = hist(input, range_op)
- return _out_inplace_assign(out, output, "histc")
-
-
- def triu(input, diagonal=0, out=None):
- input = cast_to_ms_tensor(input)
- input = ms.numpy.array(input)
-     output = ms.numpy.triu(input, diagonal)
-     return _out_inplace_assign(out, output, "triu")
-
- def unbind(input, dim=0):
- input = cast_to_ms_tensor(input)
- output = ms.ops.unbind(input, dim)
- return cast_to_adapter_tensor(output)
-
-
- def unsqueeze(input, dim):
- input = cast_to_ms_tensor(input)
- output = ms.ops.unsqueeze(input, dim)
- return cast_to_adapter_tensor(output)
-
- def reshape(input, shape):
- input = cast_to_ms_tensor(input)
- shape = tuple(shape)
- output = ms.ops.reshape(input, shape)
- return cast_to_adapter_tensor(output)
-
- def isfinite(input):
- input_ms = cast_to_ms_tensor(input)
- output = ms.ops.isfinite(input_ms)
- return cast_to_adapter_tensor(output)
-
-
- def isnan(input):
- input_ms = cast_to_ms_tensor(input)
- return cast_to_adapter_tensor(input_ms.isnan())
-
-
- def view_as_real(input):
-     # TODO: returns a copy, not a view
-     warnings.warn("output as a view is not supported; a copy is returned.")
- input = cast_to_ms_tensor(input)
- input = input.asnumpy()
- real = np.expand_dims(np.real(input), axis=-1)
- imag = np.expand_dims(np.imag(input), axis=-1)
- output_np = np.concatenate((real, imag), axis=-1)
- output = ms.Tensor(output_np)
- return cast_to_adapter_tensor(output)
-
-
- def bincount(input, weights=None, minlength=0):
- input = cast_to_ms_tensor(input)
- type = 'int64'
- if input.dtype == ms.uint8:
- input = input.astype(ms.int16)
- if weights is not None:
- weights = cast_to_ms_tensor(weights)
- type = weights.dtype
- output = ms.numpy.bincount(input, weights, minlength).astype(type)
- return cast_to_adapter_tensor(output)
-
- def mul(input, other, *, out=None):
- if not isinstance(input, (int, adapter_tensor)):
- raise TypeError(f"mul(): argument 'input' (position 1) must be Tensor, not {type(input)}")
- if not isinstance(other, (int, adapter_tensor)):
- raise TypeError(f"mul(): argument 'other' (position 2) must be Tensor, not {type(other)}")
-
- input = cast_to_ms_tensor(input)
- other = cast_to_ms_tensor(other)
- output = ms.ops.mul(input, other)
- return _out_inplace_assign(out, output, "mul")
-
-
- def index_select(input, dim, index, *, out=None):
- _input_params = cast_to_ms_tensor(input)
- _axis = dim
- _input_indices = cast_to_ms_tensor(index)
-
- output = ms.ops.gather(_input_params, _input_indices, _axis)
- return _out_inplace_assign(out, output, "index_select")
-
- def sort(input, dim=-1, descending=False, stable=False, *, out=None):
- unsupported_attr(stable)
- input = cast_to_ms_tensor(input)
- # TODO: ops.sort() should be replaced.
- output = ms.ops.Sort(dim, descending)(input)
- return _out_inplace_assign(out, output, "sort")
-
-
- def msort(input, *, out=None):
- input = cast_to_ms_tensor(input)
- # TODO: ops.sort() should be replaced.
- output, _ = ms.ops.Sort(axis=0)(input)
- return _out_inplace_assign(out, output, "msort")
-
-
- def argsort(input, dim=-1, descending=False, stable=False):
- unsupported_attr(stable)
- input = cast_to_ms_tensor(input)
- # TODO: ops.sort() should be replaced.
- _, output= ms.ops.Sort(dim, descending)(input)
- return cast_to_adapter_tensor(output)
-
- def t(input):
- input_ms = cast_to_ms_tensor(input)
- if input_ms.ndim > 2:
- raise ValueError("t() expects a tensor with <= 2 dimensions, but self is {}D".format(input_ms.ndim))
-     # transpose() with no axes reverses the dimension order
-     output = input_ms.transpose()
- return cast_to_adapter_tensor(output)
-
- def squeeze(input, dim=None):
- input_ms = cast_to_ms_tensor(input)
- if dim is not None:
- if input_ms.shape[dim] != 1:
- output = input
- else:
- output = ms.ops.squeeze(input_ms, dim)
- else:
- output = ms.ops.squeeze(input_ms)
- return cast_to_adapter_tensor(output)
-
-
- def from_numpy(np_data):
- return cast_to_adapter_tensor(ms.Tensor.from_numpy(np_data))
-
-
- def absolute(input, *, out=None):
- input = cast_to_ms_tensor(input)
- output = ms.ops.absolute(input)
- return _out_inplace_assign(out, output, "absolute")
-
-
- def acos(input, *, out=None):
- input = cast_to_ms_tensor(input)
- output = ms.ops.acos(input)
- return _out_inplace_assign(out, output, "acos")
-
-
- def arccos(input, *, out=None):
- input = cast_to_ms_tensor(input)
- output = ms.ops.acos(input)
- return _out_inplace_assign(out, output, "arccos")
-
-
- def acosh(input, *, out=None):
- input = cast_to_ms_tensor(input)
- shape = input.shape
- if len(shape) > 7:
- input = input.flatten()
- output = ms.ops.acosh(input)
- if len(shape) > 7:
- output = output.reshape(shape)
- return _out_inplace_assign(out, output, "acosh")
-
- def arccosh(input, *, out=None):
- input = cast_to_ms_tensor(input)
- shape = input.shape
- if len(shape) > 7:
- input = input.flatten()
- output = ms.ops.acosh(input)
- if len(shape) > 7:
- output = output.reshape(shape)
- return _out_inplace_assign(out, output, "arccosh")
-
-
- def add(input, other, *, alpha=1, out=None):
- input = cast_to_ms_tensor(input)
- other = cast_to_ms_tensor(other)
- output = ms.ops.add(input, other*alpha)
- return _out_inplace_assign(out, output, "add")
-
-
- def addcdiv(input, tensor1, tensor2, *, value=1, out=None):
- input = cast_to_ms_tensor(input)
- tensor1 = cast_to_ms_tensor(tensor1)
- tensor2 = cast_to_ms_tensor(tensor2)
- value = ms.Tensor(value)
- output = ms.ops.addcdiv(input, tensor1, tensor2, value)
- return _out_inplace_assign(out, output, "addcdiv")
-
-
- def addcmul(input, tensor1, tensor2, *, value=1, out=None):
-     # TODO: use ms.ops.addcmul after it has been fixed
- input = cast_to_ms_tensor(input)
- tensor1 = cast_to_ms_tensor(tensor1)
- tensor2 = cast_to_ms_tensor(tensor2)
- value = ms.Tensor(value)
- mul = ms.ops.mul(tensor1, tensor2) * value
- output = ms.ops.add(input, mul)
- return _out_inplace_assign(out, output, "addcmul")
-
-
- def angle(input, *, out=None):
- input = cast_to_ms_tensor(input)
- shape = input.shape
-     if len(shape) > 7:
- input = input.flatten()
-
- real = ms.ops.Real()(input)
- imag = ms.ops.Imag()(input)
-     # TODO: ms.ops.copysign is not the same as torch.copysign when input is -0.0;
-     # replace with ms.ops.copysign after it has been fixed
- imag_np = imag.asnumpy()
- sign_imag_np = np.copysign(np.ones_like(imag_np), imag_np)
- sign_imag = ms.Tensor(sign_imag_np)
-
- denom = ms.ops.sqrt(ms.ops.square(real) + ms.ops.square(imag))
- div = ms.ops.div(real, denom)
- mask = ms.ops.equal(denom, ms.Tensor(0))
- mask_array = ms.ops.ones_like(denom)*sign_imag
- div = ms.ops.select(mask, mask_array, div)
- output = ms.ops.mul(ms.ops.acos(div), sign_imag)
-
-     if len(shape) > 7:
- output = output.reshape(shape)
- return _out_inplace_assign(out, output, "angle")
-
-
- def asin(input, *, out=None):
- input = cast_to_ms_tensor(input)
- output = ms.ops.asin(input)
- return _out_inplace_assign(out, output, "asin")
-
-
- def arcsin(input, *, out=None):
- input = cast_to_ms_tensor(input)
- output = ms.ops.asin(input)
- return _out_inplace_assign(out, output, "arcsin")
-
-
- def asinh(input, *, out=None):
- input = cast_to_ms_tensor(input)
- shape = input.shape
- if len(shape) > 7:
- input = input.flatten()
- output = ms.ops.asinh(input)
- if len(shape) > 7:
- output = output.reshape(shape)
- return _out_inplace_assign(out, output, "asinh")
-
-
- def arcsinh(input, *, out=None):
- input = cast_to_ms_tensor(input)
- shape = input.shape
- if len(shape) > 7:
- input = input.flatten()
- output = ms.ops.asinh(input)
- if len(shape) > 7:
- output = output.reshape(shape)
- return _out_inplace_assign(out, output, "arcsinh")
-
-
- def atan(input, *, out=None):
- shape = input.shape
- if len(shape) > 7:
- input = input.flatten()
- input = cast_to_ms_tensor(input)
- output = ms.ops.atan(input)
- if len(shape) > 7:
- output = output.reshape(shape)
- return _out_inplace_assign(out, output, "atan")
-
-
- def arctan(input, *, out=None):
- shape = input.shape
- if len(shape) > 7:
- input = input.flatten()
- input = cast_to_ms_tensor(input)
- output = ms.ops.atan(input)
- if len(shape) > 7:
- output = output.reshape(shape)
- return _out_inplace_assign(out, output, "arctan")
-
-
- def atanh(input, *, out=None):
- input = cast_to_ms_tensor(input)
- output = ms.ops.atanh(input)
- return _out_inplace_assign(out, output, "atanh")
-
-
- def arctanh(input, *, out=None):
- input = cast_to_ms_tensor(input)
- output = ms.ops.atanh(input)
- return _out_inplace_assign(out, output, "arctanh")
-
-
- def arctan2(input, other, *, out=None):
- input = cast_to_ms_tensor(input)
- other = cast_to_ms_tensor(other)
- output = ms.ops.atan2(input, other)
- return _out_inplace_assign(out, output, "arctan2")
-
-
- def bitwise_not(input, *, out=None):
- input = cast_to_ms_tensor(input)
- type = input.dtype
- if str(type) != 'Bool':
- output = 0 - input - 1
- else:
- output = 1 - input
- output = output.astype(ms.bool_)
- return _out_inplace_assign(out, output, "bitwise_not")
-
-
- def bitwise_and(input, other, *, out=None):
- if isinstance(input, adapter_tensor):
- input = cast_to_ms_tensor(input)
- input_is_bool = str(input.dtype) == 'Bool'
- else:
- input_is_bool = isinstance(input, bool)
- if isinstance(other, adapter_tensor):
- other = cast_to_ms_tensor(other)
- other_is_bool = str(other.dtype) == 'Bool'
- else:
- other_is_bool = isinstance(other, bool)
-     if input_is_bool and other_is_bool:
-         # after cast_to_ms_tensor the operands are ms.Tensor, not adapter tensors
-         if isinstance(input, ms.Tensor):
-             input = input.astype(ms.int8)
-         if isinstance(other, ms.Tensor):
-             other = other.astype(ms.int8)
- output = ms.ops.bitwise_and(input, other)
- if input_is_bool and other_is_bool:
- output = output.astype(ms.bool_)
- return _out_inplace_assign(out, output, "bitwise_and")
-
-
- def bitwise_or(input, other, *, out=None):
- if isinstance(input, adapter_tensor):
- input = cast_to_ms_tensor(input)
- input_is_bool = str(input.dtype) == 'Bool'
- else:
- input_is_bool = isinstance(input, bool)
- if isinstance(other, adapter_tensor):
- other = cast_to_ms_tensor(other)
- other_is_bool = str(other.dtype) == 'Bool'
- else:
- other_is_bool = isinstance(other, bool)
-     if input_is_bool and other_is_bool:
-         # after cast_to_ms_tensor the operands are ms.Tensor, not adapter tensors
-         if isinstance(input, ms.Tensor):
-             input = input.astype(ms.int8)
-         if isinstance(other, ms.Tensor):
-             other = other.astype(ms.int8)
- output = ms.ops.bitwise_or(input, other)
- if input_is_bool and other_is_bool:
- output = output.astype(ms.bool_)
- return _out_inplace_assign(out, output, "bitwise_or")
-
-
- def bitwise_xor(input, other, *, out=None):
- if isinstance(input, adapter_tensor):
- input = cast_to_ms_tensor(input)
- input_is_bool = str(input.dtype) == 'Bool'
- else:
- input_is_bool = isinstance(input, bool)
- if isinstance(other, adapter_tensor):
- other = cast_to_ms_tensor(other)
- other_is_bool = str(other.dtype) == 'Bool'
- else:
- other_is_bool = isinstance(other, bool)
-     if input_is_bool and other_is_bool:
-         # after cast_to_ms_tensor the operands are ms.Tensor, not adapter tensors
-         if isinstance(input, ms.Tensor):
-             input = input.astype(ms.int8)
-         if isinstance(other, ms.Tensor):
-             other = other.astype(ms.int8)
- output = ms.ops.bitwise_xor(input, other)
- if input_is_bool and other_is_bool:
- output = output.astype(ms.bool_)
- return _out_inplace_assign(out, output, "bitwise_xor")
-
-
- def bitwise_left_shift(input, other, *, out=None):
- if isinstance(input, adapter_tensor):
- input = cast_to_ms_tensor(input).asnumpy()
- if isinstance(other, adapter_tensor):
- other = cast_to_ms_tensor(other).asnumpy()
- output = ms.Tensor(np.left_shift(input, other))
- return _out_inplace_assign(out, output, "bitwise_left_shift")
-
-
- def bitwise_right_shift(input, other, *, out=None):
- if isinstance(input, adapter_tensor):
- input = cast_to_ms_tensor(input).asnumpy()
- if isinstance(other, adapter_tensor):
- other = cast_to_ms_tensor(other).asnumpy()
- output = ms.Tensor(np.right_shift(input, other))
- return _out_inplace_assign(out, output, "bitwise_right_shift")
-
-
- def split(tensor, split_size_or_sections, dim=0):
- tensor = cast_to_ms_tensor(tensor)
- tensor_shape = list(tensor.shape)
- length_along_dim = tensor_shape[dim]
- dims = tensor.ndim
- if isinstance(split_size_or_sections, int):
-         if split_size_or_sections > length_along_dim:
-             return [cast_to_adapter_tensor(tensor)]
-         if length_along_dim % split_size_or_sections == 0:
-             output_num = int(length_along_dim / split_size_or_sections)
- output = ms.ops.split(tensor, axis=dim, output_num=output_num)
- else:
- num_short_tensor = int(length_along_dim % split_size_or_sections)
- length1 = split_size_or_sections * (length_along_dim // split_size_or_sections)
- length2 = num_short_tensor
- start1 = [0, ] * dims
- size1 = copy.deepcopy(tensor_shape)
- size1[dim] = length1
- start2 = [0,] * dims
- start2[dim] = length1
- size2 = copy.deepcopy(tensor_shape)
- size2[dim] = length2
- tensor1 = ms.ops.slice(tensor, begin=start1, size=size1)
- tensor2 = ms.ops.slice(tensor, begin=start2, size=size2)
- output_num = int(length_along_dim / split_size_or_sections)
- output = list(ms.ops.split(tensor1, axis=dim, output_num=output_num))
- output.append(tensor2)
- elif isinstance(split_size_or_sections, (list, tuple)):
- sum = 0
- for i in split_size_or_sections:
- sum += i
- if sum != tensor_shape[dim]:
- raise ValueError("split_with_sizes expects split_sizes to sum exactly to {} "
- "(input tensor's size at dimension {}), "
- "but got split_sizes={}".format(tensor_shape[dim], dim, split_size_or_sections))
- output = []
- cur = 0
- for i in split_size_or_sections:
- start = [0,] * dims
- start[dim] = cur
-             size = copy.deepcopy(tensor_shape)
-             size[dim] = i
-             res = ms.ops.slice(tensor, begin=start, size=size)
- cur += i
- output.append(res)
- else:
- raise ValueError("Argument `split_size_or_sections` should be be integer, "
- "tuple(int) or list(int), but got {}.".format(split_size_or_sections))
-
- res = []
- for i in output:
- res.append(cast_to_adapter_tensor(i))
- return res
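-
- # Splitting sketch for `split` (mirrors torch.split): an int gives equal chunks
- # plus a shorter remainder; a list gives exact section sizes.
- #   x = ones(5, 2)
- #   [t.shape for t in split(x, 2)]        # -> [(2, 2), (2, 2), (1, 2)]
- #   [t.shape for t in split(x, [1, 4])]   # -> [(1, 2), (4, 2)]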
-
- def nonzero(input, *, out=None, as_tuple=False):
- input = cast_to_ms_tensor(input)
- if as_tuple:
- if input.ndim == 1:
- res = ms.ops.nonzero(input)
- output = (cast_to_adapter_tensor(res.flatten()), )
- elif input.ndim > 1:
- output = []
- res = ms.ops.nonzero(input)
- res = res.transpose(1,0)
- res = ms.ops.split(res, axis=0, output_num=input.ndim)
- for cur in res:
- output.append(cast_to_adapter_tensor(cur))
- output = tuple(output)
- elif input.ndim == 0:
- raise ValueError("Do not support input ndim == 0.")
- return output # TODO: out is not assigned
- output = ms.ops.nonzero(input)
- return _out_inplace_assign(out, output, "nonzero")
-
- def clip(input, min=None, max=None, *, out=None):
- input = cast_to_ms_tensor(input)
- output = input.clip(min, max)
- return _out_inplace_assign(out, output, "clip")
-
-
- def conj_physical(input, *, out=None):
- input = cast_to_ms_tensor(input)
- output = ms.ops.conj(input)
- return _out_inplace_assign(out, output, "conj_physical")
-
- def copysign(input, other, *, out=None):
- input = cast_to_ms_tensor(input)
- input_type = input.dtype
- input = input.asnumpy()
- is_num = True
- if isinstance(other, adapter_tensor):
- is_num = False
- other = cast_to_ms_tensor(other)
- other_type = other.dtype
- other = other.asnumpy()
- output = ms.Tensor(np.copysign(input, other))
-
- if 'Int' in str(input_type):
- if is_num or 'Int' in str(other_type):
- output = output.astype(ms.float32)
- else:
- output = output.astype(other_type)
- elif is_num or 'Int' in str(other_type):
- output = output.astype(input_type)
- return _out_inplace_assign(out, output, "copysign")
-
-
- def cosh(input, *, out=None):
- input = cast_to_ms_tensor(input)
- output = ms.ops.cosh(input)
- return _out_inplace_assign(out, output, "cosh")
-
-
- def deg2rad(input, *, out=None):
- input = cast_to_ms_tensor(input)
- if input.dtype not in (ms.float16, ms.float32, ms.float64):
- input = input.astype(ms.float32)
- output = ms.ops.deg2rad(input)
- return _out_inplace_assign(out, output, "cosh")
-
-
- def divide(input, other, *, rounding_mode=None, out=None):
-     _out_limit_pynative(out, "divide")
-     return div(input, other, rounding_mode=rounding_mode, out=out)
-
- # TODO: Digamma operator is not available in MindSpore yet
- # def digamma(input, *, out=None):
-
-
- def erf(input, *, out=None):
- input = cast_to_ms_tensor(input)
- output = ms.ops.erf(input)
- return _out_inplace_assign(out, output, "erf")
-
-
- def erfc(input, *, out=None):
- input = cast_to_ms_tensor(input)
- output = ms.ops.erfc(input)
- return _out_inplace_assign(out, output, "erfc")
-
-
- def erfinv(input, *, out=None):
- input = cast_to_ms_tensor(input)
- output = ms.ops.erfinv(input)
- return _out_inplace_assign(out, output, "erfinv")
-
-
- def exp2(input, *, out=None):
- input = cast_to_ms_tensor(input)
- output = ms.ops.exp2(input)
- return _out_inplace_assign(out, output, "exp2")
-
-
- def expm1(input, *, out=None):
- input = cast_to_ms_tensor(input)
- output = ms.ops.expm1(input)
- return _out_inplace_assign(out, output, "expm1")
-
-
- def fake_quantize_per_channel_affine(input, scale, zero_point, axis, quant_min, quant_max):
- input = cast_to_ms_tensor(input)
- scale = cast_to_ms_tensor(scale)
- zero_point = cast_to_ms_tensor(zero_point)
- if axis not in range(0, input.ndim):
- raise IndexError("`axis` must be between 0 and number of dimensions of input")
- if input.shape[axis] != scale.shape[0] or input.shape[axis] != zero_point.shape[0]:
- raise RuntimeError("dimensions of scale or zero-point are not consistent with input tensor")
- i = axis + 1
- while i < input.ndim:
- scale = scale.expand_dims(-1)
- zero_point = zero_point.expand_dims(-1)
- i += 1
- output = ms.ops.round(input/scale + zero_point)
- output = ms.ops.clip_by_value(output, quant_min, quant_max) - zero_point
- output = output * scale
- return cast_to_adapter_tensor(output)
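-
- # Quantization formula sketch shared by both fake_quantize functions:
- #   q   = clip(round(x / scale + zero_point), quant_min, quant_max)
- #   out = (q - zero_point) * scale
- # with scale/zero_point broadcast along `axis` in the per-channel variant.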
-
-
- def fake_quantize_per_tensor_affine(input, scale, zero_point, quant_min, quant_max):
- input = cast_to_ms_tensor(input)
- scale = cast_to_ms_tensor(scale)
- zero_point = cast_to_ms_tensor(zero_point)
-
- output = ms.ops.round(input/scale + zero_point)
- output = ms.ops.clip_by_value(output, quant_min, quant_max) - zero_point
- output = output * scale
- return cast_to_adapter_tensor(output)
-
-
- def fix(input, *, out=None):
- input = cast_to_ms_tensor(input)
- output = ms.ops.trunc(input)
- return _out_inplace_assign(out, output, "fix")
-
-
- def float_power(input, exponent, *, out=None):
- if isinstance(input, adapter_tensor):
- input = cast_to_ms_tensor(input).asnumpy()
- if isinstance(exponent, adapter_tensor):
- exponent = cast_to_ms_tensor(exponent).asnumpy()
- output = ms.Tensor(np.float_power(input, exponent))
- return _out_inplace_assign(out, output, "float_power")
-
-
- def floor_divide(input, other, *, out=None):
- input = cast_to_ms_tensor(input)
- other = cast_to_ms_tensor(other)
- output = ms.ops.floor_div(input, other)
- return _out_inplace_assign(out, output, "floor_divide")
-
-
- def frexp(input, *, out=None):
- _out_limit_pynative(out, "frexp")
- input = cast_to_ms_tensor(input).asnumpy()
- mantissa, exponent = np.frexp(input)
- out1 = ms.Tensor(mantissa)
- out2 = ms.Tensor(exponent)
-     if out is not None and len(out) == 2:
- out[0].assign_value(out1)
- out[1].assign_value(out2)
- return out
- return cast_to_adapter_tensor(out1), cast_to_adapter_tensor(out2)
-
-
- def gradient(input, *, spacing=1, dim=None, edge_order=1):
- input = cast_to_ms_tensor(input)
- if isinstance(spacing, adapter_tensor):
- spacing = cast_to_ms_tensor(spacing)
- elif isinstance(spacing, tuple) and isinstance(spacing[0], adapter_tensor):
- spacing = cast_to_ms_tensor(spacing)
- output = ms.numpy.gradient(input, spacing, axis=dim, edge_order=edge_order)
- output = cast_to_adapter_tensor(output)
- if not isinstance(output, tuple):
- return (output,)
- else:
- return output
-
-
- def imag(input):
- input = cast_to_ms_tensor(input)
- output = ms.ops.imag(input)
- return cast_to_adapter_tensor(output)
-
-
- def ldexp(input, other, *, out=None):
- input = cast_to_ms_tensor(input)
- other = cast_to_ms_tensor(other)
- output = ms.ops.ldexp(input, other)
- return _out_inplace_assign(out, output, "ldexp")
-
-
- def lerp(input, end, weight, *, out=None):
- input = cast_to_ms_tensor(input)
- end = cast_to_ms_tensor(end)
- if isinstance(weight, adapter_tensor):
- weight = cast_to_ms_tensor(weight)
- elif not isinstance(weight, float):
- weight = float(weight)
- output = ms.ops.lerp(input, end, weight)
- return _out_inplace_assign(out, output, "lerp")
-
-
- # TODO: lgamma operator is not available yet
- # def lgamma(input, *, out=None):
-
-
- def logaddexp(input, other, *, out=None):
- input = cast_to_ms_tensor(input)
- other = cast_to_ms_tensor(other)
- output = ms.ops.logaddexp(input, other)
- return _out_inplace_assign(out, output, "logaddexp")
-
-
- def logaddexp2(input, other, *, out=None):
- input = cast_to_ms_tensor(input)
- other = cast_to_ms_tensor(other)
- output = ms.ops.logaddexp2(input, other)
- return _out_inplace_assign(out, output, "logaddexp2")
-
-
- def logical_and(input, other, *, out=None):
- if isinstance(input, adapter_tensor):
- input = cast_to_ms_tensor(input).astype(ms.bool_)
- if isinstance(other, adapter_tensor):
- other = cast_to_ms_tensor(other).astype(ms.bool_)
- output = ms.ops.logical_and(input, other)
- return _out_inplace_assign(out, output, "logical_and")
-
-
- def logical_not(input, *, out=None):
- if isinstance(input, adapter_tensor):
- input = cast_to_ms_tensor(input).astype(ms.bool_)
- output = ms.ops.logical_not(input)
- return _out_inplace_assign(out, output, "logical_not")
-
-
- def logical_or(input, other, *, out=None):
- if isinstance(input, adapter_tensor):
- input = cast_to_ms_tensor(input).astype(ms.bool_)
- if isinstance(other, adapter_tensor):
- other = cast_to_ms_tensor(other).astype(ms.bool_)
- output = ms.ops.logical_or(input, other)
- return _out_inplace_assign(out, output, "logical_or")
-
-
- def logical_xor(input, other, *, out=None):
- if isinstance(input, adapter_tensor):
- input = cast_to_ms_tensor(input).astype(ms.bool_)
- if isinstance(other, adapter_tensor):
- other = cast_to_ms_tensor(other).astype(ms.bool_)
- output = ms.ops.logical_xor(input, other)
- return _out_inplace_assign(out, output, "logical_xor")
-
-
- def logit(input, eps=None, *, out=None):
-     # TODO: ms.ops.logit does not support CPU
- input = cast_to_ms_tensor(input)
- if eps is not None:
- input = ms.ops.clip_by_value(input, eps, 1.0-eps)
- output = ms.ops.log(input/(1.0-input))
- return _out_inplace_assign(out, output, "logit")
-
- def frombuffer(buffer, *, dtype=None, count=-1, offset=0, requires_grad=False):
- unsupported_attr(requires_grad)
- np_dtype = _TypeDict[dtype]
- output = np.frombuffer(buffer=buffer, dtype=np_dtype, count=count, offset=offset)
- return adapter_tensor(output, dtype=dtype)
-
- def as_strided(input, size, stride, storage_offset=None):
- warnings.warn("not support output as a view.")
- input_ms = cast_to_ms_tensor(input)
- if len(size) != len(stride):
- raise RuntimeError("mismatch in length of strides and shape.")
- index = np.arange(0, size[0] * stride[0], stride[0])
- for i in range(1, len(size)):
- tmp = np.arange(0, size[i] * stride[i], stride[i])
- index = np.expand_dims(index, -1)
- index = index + tmp
- if storage_offset is not None:
- index = index + storage_offset
- input_indices = ms.Tensor(index)
- out = ms.ops.gather(input_ms.reshape(-1), input_indices, 0)
- return cast_to_adapter_tensor(out)
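-
- # Index-construction sketch for `as_strided`: element (i, j, ...) of the result
- # reads the flattened input at storage_offset + i*stride[0] + j*stride[1] + ...
- #   x = tensor([1., 2., 3., 4., 5., 6.])
- #   as_strided(x, (2, 2), (3, 1))   # -> [[1., 2.], [4., 5.]]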
-
- def ne(input, other, *, out=None):
- input = cast_to_ms_tensor(input)
- other = cast_to_ms_tensor(other)
- output = ms.ops.ne(input, other)
- return _out_inplace_assign(out, output, "ne")
-
-
- def tanh(input, *, out=None):
- input = cast_to_ms_tensor(input)
- output = ms.ops.tanh(input)
- return _out_inplace_assign(out, output, "tanh")
-
-
- def maximum(input, other, *, out=None):
- input = cast_to_ms_tensor(input)
- other = cast_to_ms_tensor(other)
- output = ms.ops.maximum(input, other)
- return _out_inplace_assign(out, output, "maximum")
-
-
- def minimum(input, other, *, out=None):
- input = cast_to_ms_tensor(input)
- other = cast_to_ms_tensor(other)
- output = ms.ops.minimum(input, other)
- return _out_inplace_assign(out, output, "minimum")
-
-
- def sigmoid(input, *, out=None):
-     # TODO: ms.ops.sigmoid() does not support float64, so compute via exp
- input = cast_to_ms_tensor(input)
- output = 1 / (ms.ops.exp(0 - input) + 1)
- #output = ms.ops.sigmoid(input)
- return _out_inplace_assign(out, output, "sigmoid")
-
-
- def softmax(input, dim, dtype=None, *, out=None):
- input = cast_to_ms_tensor(input)
- if dtype is not None:
- input = input.astype(dtype)
- output = ms.ops.softmax(input, dim)
- return _out_inplace_assign(out, output, "softmax")
-
-
- def prod(input, dim=None, keepdim=False, *, dtype=None, out=None):
- input = cast_to_ms_tensor(input)
- if dtype is not None:
- input = input.astype(dtype)
- if dim is None:
- output = ms.ops.prod(input)
- else:
- output = ms.ops.prod(input, axis=dim, keep_dims=keepdim)
- return _out_inplace_assign(out, output, "prod")
-
-
- def eq(input, other, *, out=None):
- input = cast_to_ms_tensor(input)
- other = cast_to_ms_tensor(other)
- output = ms.ops.equal(input, other)
- return _out_inplace_assign(out, output, "eq")
-
-
- def hypot(input, other, *, out=None):
-     # TODO: ms.ops.hypot() is not available; compute via sqrt(x^2 + y^2)
- input = cast_to_ms_tensor(input)
- other = cast_to_ms_tensor(other)
- output = ms.ops.sqrt(input.square() + other.square())
- #output = ms.ops.hypot(input, other)
- return _out_inplace_assign(out, output, "hypot")
-
-
- def i0(input, *, out=None):
- input = cast_to_ms_tensor(input)
- float_type = [ms.float16, ms.half, ms.float32, ms.single, ms.float64, ms.double]
- if input.dtype not in float_type:
- input = input.astype(ms.float32)
- output = ms.ops.bessel_i0(input)
- return _out_inplace_assign(out, output, "i0")
-
- def _set_type_gamma(input, other):
- float_type = [ms.float16, ms.half, ms.float32, ms.single, ms.float64, ms.double]
- is_float16 = False
- input_type_index = -1
- other_type_index = -1
- if input.dtype in float_type:
- input_type_index = float_type.index(input.dtype)
- if other.dtype in float_type:
- other_type_index = float_type.index(other.dtype)
- if input_type_index < other_type_index:
- input = input.astype(other.dtype)
- elif input_type_index > other_type_index:
- other = other.astype(input.dtype)
- if input.dtype == ms.float16:
- input = input.astype(ms.float32)
- other = other.astype(ms.float32)
- is_float16 = True
- return input, other, is_float16
-
- def igamma(input, other, *, out=None):
- input = cast_to_ms_tensor(input)
- other = cast_to_ms_tensor(other)
-     # TODO: remove this cast once ms.ops.igamma supports the same types as pytorch
- input, other, is_float16 = _set_type_gamma(input, other)
- output = ms.ops.igamma(input, other)
- if is_float16:
- output = output.astype(ms.float16)
- return _out_inplace_assign(out, output, "igamma")
-
-
- def igammac(input, other, *, out=None):
- input = cast_to_ms_tensor(input)
- other = cast_to_ms_tensor(other)
-     # TODO: remove this cast once ms.ops.igammac supports the same types as pytorch
- input, other, is_float16 = _set_type_gamma(input, other)
- output = ms.ops.igammac(input, other)
- if is_float16:
- output = output.astype(ms.float16)
- return _out_inplace_assign(out, output, "igammac")
-
-
- def multiply(input, other, *, out=None):
- if not isinstance(input, (int, adapter_tensor)):
- raise TypeError(f"multiply(): argument 'input' (position 1) must be Tensor, not {type(input)}")
- if not isinstance(other, (int, adapter_tensor)):
- raise TypeError(f"multiply(): argument 'other' (position 2) must be Tensor, not {type(other)}")
-
- input = cast_to_ms_tensor(input)
- other = cast_to_ms_tensor(other)
- output = ms.ops.mul(input, other)
- return _out_inplace_assign(out, output, "multiply")
-
-
- def mvlgamma(input, p, *, out=None):
- input = cast_to_ms_tensor(input)
- output = ms.ops.mvlgamma(input, p)
- return _out_inplace_assign(out, output, "mvlgamma")
|