@@ -11,10 +11,12 @@ from mindspore import ops
from mindspore.common import dtype as mstype
from mindspore.ops import constexpr
from ms_adapter.pytorch.tensor import tensor, cast_tensor, cast_to_ms_tensor, cast_to_adapter_tensor
from ms_adapter.utils import unsupported_attr, get_backend
from ms_adapter.utils import unsupported_attr, get_backend, pynative_mode_condition
from ms_adapter.pytorch.tensor import Tensor as adapter_tensor
from ms_adapter.pytorch.common._inner import _out_inplace_assign, _out_limit_pynative
from ms_adapter.pytorch.common.dtype import _TypeDict
def empty(*size, out=None, dtype=None, layout=None, \
device=None, requires_grad=False, pin_memory=False, \
memory_format=None):
@@ -30,10 +32,7 @@ def empty(*size, out=None, dtype=None, layout=None, \
if isinstance(size[0], (tuple, list)):
_size = size[0]
output = ms.numpy.empty(_size, dtype)
if out is not None:
ops.assign(out, output)
return out
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "empty")
def eye(n, m=None, *, out=None, dtype=None, layout=None, \
@@ -48,10 +47,7 @@ def eye(n, m=None, *, out=None, dtype=None, layout=None, \
dtype = ms.float32
output = ms.ops.eye(n, m, dtype)
if out is not None:
ops.assign(out, output)
return out
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "eye")
def cat(tensors, dim=0, *, out=None):
@@ -63,13 +59,18 @@ def cat(tensors, dim=0, *, out=None):
inputs = cast_to_ms_tensor(tensors)
output = ops.concat(inputs, dim)
if out is not None:
ops.assign(out, output)
return out
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "cat")
def concat(tensors, dim=0, *, out=None):
return cat(tensors, dim, out=out)
if tensors is None:
raise ValueError('`tensors` in `{}` should not be None'.format(concat.__name__))
if not isinstance(tensors, (tuple, list)):
raise TypeError('`tensors` in `{}` should be tuple or list'.format(concat.__name__))
inputs = cast_to_ms_tensor(tensors)
output = ops.concat(inputs, dim)
return _out_inplace_assign(out, output, "concat")
def ones(*size, out=None, dtype=None, layout=None,
device=None, requires_grad=False):
@@ -84,21 +85,13 @@ def ones(*size, out=None, dtype=None, layout=None,
output = ms.ops.ones(*size, type=dtype)
else:
output = ms.ops.ones(size, type=dtype)
if out is not None:
ops.assign(out, output)
return out
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "ones")
def stack(tensors, dim=0, *, out=None):
tensors = cast_to_ms_tensor(tensors)
output = ops.stack(tensors, dim)
if out is not None:
ops.assign(out, output)
return out
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "stack")
def meshgrid(*tensors, indexing='ij'):
@@ -116,10 +109,7 @@ def meshgrid(*tensors, indexing='ij'):
def log(input, *, out=None):
input = cast_to_ms_tensor(input)
output = ops.log(input)
if out is not None:
ops.assign(out, output)
return out
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "log")
def chunk(input, chunks, dim=0):
@@ -141,10 +131,7 @@ def diag(input, diagonal=0, *, out=None):
# Maybe use mindspore.ops.diag instead. Currently, that operator does not support CPU.
input = cast_to_ms_tensor(input)
output = ms.numpy.diag(input, diagonal)
if out is not None:
ops.assign(out, output)
return out
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "diag")
def sqrt(input, *, out=None):
@@ -153,10 +140,7 @@ def sqrt(input, *, out=None):
input = cast_to_ms_tensor(input)
output = ops.sqrt(input)
if out is not None:
ops.assign(out, output)
return out
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "sqrt")
def mm(input, mat2, *, out=None):
@@ -168,10 +152,7 @@ def mm(input, mat2, *, out=None):
input2 = cast_to_ms_tensor(mat2)
output = ops.matmul(input1, input2)
output = ops.cast(output, output_type)
if out is not None:
ops.assign(out, output)
return out
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "mm")
def zeros(*size, out=None, dtype=None, device=None, requires_grad=False):
@@ -189,11 +170,7 @@ def zeros(*size, out=None, dtype=None, device=None, requires_grad=False):
dtype = mstype.float32
output = ms.ops.zeros(size, dtype)
if out is not None:
out.assign_value(output)
return out
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "zeros")
def div(input, other, *, rounding_mode=None, out=None):
@@ -217,10 +194,7 @@ def div(input, other, *, rounding_mode=None, out=None):
output = ms.ops.floor_div(input, other)
output = ms.ops.cast(output, input_dtype)
if out is not None:
ops.assign(out, output)
return out
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "div")
def flatten(input, start_dim=0, end_dim=-1):
@@ -332,10 +306,7 @@ def multinomial(input, num_samples, replacement=False, *, generator=None, out=No
warnings.warn("torch.multinomal don't support generator now.")
input_tensor = cast_to_ms_tensor(input).astype(mstype.float32)
output = ms.ops.multinomial(input_tensor, num_samples, replacement)
output = cast_to_adapter_tensor(output)
if out is not None:
return out.assign_value(output)
return output
return _out_inplace_assign(out, output, "multinomial")
def randperm(n, *, generator=None, out=None, dtype=mstype.int64, layout=None, device=None,
@@ -353,9 +324,7 @@ def randperm(n, *, generator=None, out=None, dtype=mstype.int64, layout=None, de
output = np.random.permutation(n)
output = tensor(output, dtype=dtype)
if out is not None:
return out.assign_value(output)
return output
return _out_inplace_assign(out, output, "randperm")
def randint(low, high, size, *, generator=None, out=None, dtype=None, layout=None,
@@ -372,9 +341,7 @@ def randint(low, high, size, *, generator=None, out=None, dtype=None, layout=Non
output = np.random.randint(low, high, size)
output = tensor(output, dtype=dtype)
if out is not None:
return out.assign_value(output)
return output
return _out_inplace_assign(out, output, "randint")
def as_tensor(data, dtype=None, device=None):
@@ -408,7 +375,6 @@ def ones_like(input, dtype=None, layout=None, device=None, requires_grad=False,
def empty_like(input, dtype=None, layout=None, device=None, requires_grad=False, memory_format=None):
unsupported_attr(layout)
unsupported_attr(device)
@@ -425,10 +391,7 @@ def full(size, fill_value, out=None, dtype=None, layout=None, device=None, requi
unsupported_attr(device)
unsupported_attr(requires_grad)
output = ms.numpy.full(size, fill_value, dtype)
output = cast_to_adapter_tensor(output)
if out is not None:
return out.assign_value(output)
return output
return _out_inplace_assign(out, output, "full")
def full_like(input, fill_value, dtype=None, layout=None, device=None, requires_grad=False, memory_format=None):
@@ -466,10 +429,7 @@ def rand(*size, out=None, dtype=None, layout=None, device=None, requires_grad=Fa
if dtype is None:
dtype = ms.float32
output = ms.numpy.rand(*size, dtype=dtype)
output = cast_to_adapter_tensor(output)
if out is not None:
return out.assign_value(output)
return output
return _out_inplace_assign(out, output, "rand")
def linspace(start, end, steps, out=None, dtype=None, device=None, requires_grad=False):
@@ -480,10 +440,7 @@ def linspace(start, end, steps, out=None, dtype=None, device=None, requires_grad
start = ms.Tensor(start, dtype)
end = ms.Tensor(end, dtype)
output = ms.ops.linspace(start, end, steps)
output = cast_to_adapter_tensor(output)
if out is not None:
return out.assign_value(output)
return output
return _out_inplace_assign(out, output, "linspace")
def take(input, index):
@@ -497,40 +454,34 @@ def take(input, index):
def abs(input, out=None):
input = cast_to_ms_tensor(input)
output = ms.numpy.abs(input)
output = cast_to_adapter_tensor(output)
if out is not None:
return out.assign_value(output)
return output
return _out_inplace_assign(out, output, "abs")
def atan2(input, other, out=None):
input = cast_to_ms_tensor(input)
other = cast_to_ms_tensor(other)
output = ms.ops.atan2(input, other)
output = cast_to_adapter_tensor(output)
if out is not None:
return out.assign_value(output)
return output
return _out_inplace_assign(out, output, "atan2")
def clamp(input, min=None, max=None, out=None):
input = cast_to_ms_tensor(input)
min = ms.Tensor(min, ms.float32)
max = ms.Tensor(max, ms.float32)
output = ms.ops.clip_by_value(input, min, max)
output = cast_to_adapter_tensor(output)
if out is not None:
return out.assign_value(output)
return output
input_ms = cast_to_ms_tensor(input)
type = input_ms.dtype
if min is not None and max is not None and min > max:
output = ms.ops.ones_like(input_ms).astype(type)*max
else:
if min is not None:
min = ms.Tensor(min, type)
if max is not None:
max = ms.Tensor(max, type)
output = ms.ops.clip_by_value(input_ms, min, max)
return _out_inplace_assign(out, output, "clamp")
def cos(input, out=None):
input = cast_to_ms_tensor(input)
output = ms.ops.cos(input)
output = cast_to_adapter_tensor(output)
if out is not None:
return out.assign_value(output)
return output
return _out_inplace_assign(out, output, "cos")
class Device():
@@ -575,10 +526,7 @@ def fmod(input, other, out=None):
input = ms.numpy.array(input)
other = ms.numpy.array(other)
output = ms.numpy.fmod(input, other)
output = cast_to_adapter_tensor(output)
if out is not None:
return out.assign_value(output)
return output
return _out_inplace_assign(out, output, "fmod")
def frac(input, out=None):
@@ -586,49 +534,34 @@ def frac(input, out=None):
input = cast_to_ms_tensor(input)
input = ms.numpy.array(input)
output = input - ms.numpy.floor(ms.numpy.abs(input)) * ms.numpy.sign(input)
output = cast_to_adapter_tensor(output)
if out is not None:
return out.assign_value(output)
return output
return _out_inplace_assign(out, output, "frac")
def log10(input, out=None):
input = cast_to_ms_tensor(input)
input = ms.numpy.array(input)
output = ms.numpy.log10(input)
output = cast_to_adapter_tensor(output)
if out is not None:
return out.assign_value(output)
return output
return _out_inplace_assign(out, output, "log10")
def log1p(input, out=None):
input = cast_to_ms_tensor(input)
input = ms.numpy.array(input)
output = ms.numpy.log1p(input)
output = cast_to_adapter_tensor(output)
if out is not None:
return out.assign_value(output)
return output
return _out_inplace_assign(out, output, "log1p")
def log2(input, out=None):
input = cast_to_ms_tensor(input)
input = ms.numpy.array(input)
output = ms.numpy.log2(input)
output = cast_to_adapter_tensor(output)
if out is not None:
return out.assign_value(output)
return output
return _out_inplace_assign(out, output, "log2")
def sin(input, out=None):
input = cast_to_ms_tensor(input)
output = ms.ops.sin(input)
output = cast_to_adapter_tensor(output)
if out is not None:
return out.assign_value(output)
return output
return _out_inplace_assign(out, output, "sin")
def max(input, dim=None, keepdim=False, *, out=None):
@@ -648,8 +581,15 @@ def max(input, dim=None, keepdim=False, *, out=None):
point = collections.namedtuple('max', 'values,indices')
rlt = point(cast_to_adapter_tensor(value), cast_to_adapter_tensor(indice))
if out is not None:
ops.assign(out, rlt)
return out
if pynative_mode_condition():
if len(out) != 2 or not isinstance(out[0], adapter_tensor) or not isinstance(out[1], adapter_tensor):
raise TypeError("In ms_adapter.torch.max(), `out` should be tuple of Tensors.")
out[0].assign_value(value)
out[1].assign_value(indice)
return out
else:
raise ValueError('In MindSpore static graph mode, `out` in `max` should be None, '
'please set out=None and use return value instead of `out`.')
return rlt
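# PyNative-mode usage sketch (names and shapes are hypothetical): preallocate
# a pair of adapter Tensors and pass them as `out` to be filled in place:
#   values, indices = empty(3), empty(3)
#   max(x, dim=1, out=(values, indices))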
@@ -661,12 +601,15 @@ def min(input, dim=None, keepdim=False, *, out=None):
indices, result = ms.ops.min(input, axis=dim, keep_dims=keepdim)
if out is not None:
if len(out) != 2 or not isinstance(out[0], adapter_tensor) or not isinstance(out[1], adapter_tensor):
raise TypeError("In ms_adapter.torch.min(), `out` should be tuple of Tensors.")
out[0].assign_value(result)
out[1].assign_value(indices)
return out
if pynative_mode_condition():
if len(out) != 2 or not isinstance(out[0], adapter_tensor) or not isinstance(out[1], adapter_tensor):
raise TypeError("In ms_adapter.torch.min(), `out` should be tuple of Tensors.")
out[0].assign_value(result)
out[1].assign_value(indices)
return out
else:
raise ValueError('In MindSpore static graph mode, `out` in `min` should be None, '
'please set out=None and use return value instead of `out`.')
return cast_to_adapter_tensor(result), cast_to_adapter_tensor(indices)
@@ -679,10 +622,7 @@ def mean(input, dim=None, keepdim=False, *, dtype=None, out=None):
output = ms.ops.mean(input, axis=dim, keep_dims=keepdim)
else:
output = ms.ops.mean(input, keep_dims=keepdim)
if out is not None:
ops.assign(out, output)
return out
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "mean")
def round(input, *, decimals=0, out=None):
@@ -693,37 +633,25 @@ def round(input, *, decimals=0, out=None):
p = 10**decimals
input = input*p
output = ms.ops.round(input)/p
if out is not None:
ops.assign(out, output)
return out
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "round")
def floor(input, *, out=None):
input = cast_to_ms_tensor(input)
output = ms.ops.floor(input)
if out is not None:
ops.assign(out, output)
return out
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "floor")
def ceil(input, *, out=None):
input = cast_to_ms_tensor(input)
output = ms.ops.ceil(input)
if out is not None:
ops.assign(out, output)
return out
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "ceil")
def sign(input, *, out=None):
input = cast_to_ms_tensor(input)
output = ms.ops.Sign()(input)
if out is not None:
ops.assign(out, output)
return out
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "sign")
def pow(input, exponent, *, out=None):
@@ -733,10 +661,7 @@ def pow(input, exponent, *, out=None):
exponent = cast_to_ms_tensor(exponent)
output = ms.ops.pow(input, exponent)
if out is not None:
ops.assign(out, output)
return out
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "pow")
def exp(input, *, out=None):
@@ -749,10 +674,7 @@ def exp(input, *, out=None):
output = ms.ops.exp(input)
if len(shape) > 7:
output = output.reshape(shape)
if out is not None:
ops.assign(out, output)
return out
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "exp")
def ge(input, other, *, out=None):
@@ -760,10 +682,7 @@ def ge(input, other, *, out=None):
if isinstance(other, adapter_tensor):
other = cast_to_ms_tensor(other)
output = ms.ops.ge(input, other)
if out is not None:
ops.assign(out, output)
return out
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "ge")
def gt(input, other, *, out=None):
@@ -771,10 +690,7 @@ def gt(input, other, *, out=None):
if isinstance(other, adapter_tensor):
other = cast_to_ms_tensor(other)
output = ms.ops.gt(input, other)
if out is not None:
ops.assign(out, output)
return out
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "gt")
def le(input, other, *, out=None):
@@ -782,10 +698,7 @@ def le(input, other, *, out=None):
if isinstance(other, adapter_tensor):
other = cast_to_ms_tensor(other)
output = ms.ops.le(input, other)
if out is not None:
ops.assign(out, output)
return out
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "le")
def lt(input, other, *, out=None):
@@ -794,10 +707,7 @@ def lt(input, other, *, out=None):
other = cast_to_ms_tensor(other)
output = 1 - ms.ops.ge(input, other)
output = output.astype(ms.bool_)
if out is not None:
ops.assign(out, output)
return out
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "lt")
def sum(input, dim=None, keepdim=False, *, dtype=None, out=None):
@@ -805,19 +715,13 @@ def sum(input, dim=None, keepdim=False, *, dtype=None, out=None):
if dtype is not None:
input = input.astype(dtype)
output = input.sum(axis=dim, keepdims=keepdim)
if out is not None:
ops.assign(out, output)
return out
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "sum")
def median(input, dim=None, keepdim=False, *, out=None):
input = cast_to_ms_tensor(input)
if dim is None:
output, _ = ms.ops.median(input, global_median=True, keep_dims=keepdim)
if out is not None:
ops.assign(out, output)
return out
return cast_to_adapter_tensor(output)
else:
output = list(ms.ops.median(input, axis=dim, keep_dims=keepdim))
@@ -826,8 +730,15 @@ def median(input, dim=None, keepdim=False, *, out=None):
point = collections.namedtuple('median', 'values,indices')
rlt = point(cast_to_adapter_tensor(value), cast_to_adapter_tensor(indice))
if out is not None:
ops.assign(out, rlt)
return out
if pynative_mode_condition():
if len(out) != 2 or not isinstance(out[0], adapter_tensor) or not isinstance(out[1], adapter_tensor):
raise TypeError("In ms_adapter.torch.median(), `out` should be tuple of Tensors.")
out[0].assign_value(value)
out[1].assign_value(indice)
return out
else:
raise ValueError('In MindSpore static graph mode, `out` in `median` should be None, '
'please set out=None and use return value instead of `out`.')
return rlt
@@ -835,10 +746,7 @@ def matmul(input, other, *, out=None):
input = cast_to_ms_tensor(input)
other = cast_to_ms_tensor(other)
output = ms.ops.matmul(input, other)
if out is not None:
ops.assign(out, output)
return out
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "matmul")
def norm(input, p='fro', dim=None, keepdim=False, out=None, dtype=None):
@@ -847,10 +755,7 @@ def norm(input, p='fro', dim=None, keepdim=False, out=None, dtype=None):
dtype = ms.float32
input = ms.numpy.array(input, dtype=dtype)
output = ms.numpy.norm(input, ord=p, axis=dim, keepdims=keepdim)
output = cast_to_adapter_tensor(output)
if out is not None:
return out.assign_value(output)
return output
return _out_inplace_assign(out, output, "norm")
def stft(input, n_fft, hop_length=None, win_length=None, window=None, center=True,
@@ -914,11 +819,7 @@ def hann_window(window_length, periodic=False, dtype=None, layout=None, device=N
def cumsum(input, dim, dtype=None, out=None):
input = cast_to_ms_tensor(input)
output = ms.ops.cumsum(input, axis=dim, dtype=dtype)
output = cast_to_adapter_tensor(output)
if out is not None:
ops.assign(out, output)
return out
return output
return _out_inplace_assign(out, output, "cumsum")
def einsum(equation, *operands):
@@ -932,10 +833,7 @@ def histc(input, bins=100, min=0, max=0, out=None):
hist = ms.ops.HistogramFixedWidth(nbins)
rang_op = ms.Tensor([min, max], ms.float32)
output = hist(input, rang_op)
output = cast_to_adapter_tensor(output)
if out is not None:
return out.assign_value(output)
return output
return _out_inplace_assign(out, output, "histc")
def triu(input, diagonal=0, out=None):
@@ -943,9 +841,7 @@ def triu(input, diagonal=0, out=None):
input = ms.numpy.array(input)
output = ms.numpy.triu(input, diagonal)
output = cast_to_adapter_tensor(output)
if out is not None:
return out.assign_value(output)
return output
return _out_inplace_assign(out, output, "triu")
def unbind(input, dim=0):
input = cast_to_ms_tensor(input)
@@ -1007,10 +903,7 @@ def mul(input, other, *, out=None):
input = cast_to_ms_tensor(input)
other = cast_to_ms_tensor(other)
output = ms.ops.mul(input, other)
if out is not None:
out.assign_value(output)
return out
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "mul")
def index_select(input, dim, index, *, out=None):
@@ -1019,27 +912,21 @@ def index_select(input, dim, index, *, out=None):
_input_indices = cast_to_ms_tensor(index)
output = ms.ops.gather(_input_params, _input_indices, _axis)
if out is not None:
out.assign_value(output)
return out
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "index_select")
def sort(input, dim=-1, descending=False, stable=False, *, out=None):
unsupported_attr(stable)
unsupported_attr(out)
input = cast_to_ms_tensor(input)
# TODO: ops.sort() should be replaced.
output = ms.ops.Sort(dim, descending)(input)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "sort")
def msort(input, *, out=None):
unsupported_attr(out)
input = cast_to_ms_tensor(input)
# TODO: ops.sort() should be replaced.
output, _ = ms.ops.Sort(axis=0)(input)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "msort")
def argsort(input, dim=-1, descending=False, stable=False):
@@ -1074,32 +961,24 @@ def from_numpy(np_data):
def absolute(input, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
output = ms.ops.absolute(input)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "absolute")
def acos(input, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
output = ms.ops.acos(input)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "acos")
def arccos(input, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
output = ms.ops.acos(input)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "arccos")
def acosh(input, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
shape = input.shape
if len(shape) > 7:
@@ -1107,12 +986,9 @@ def acosh(input, *, out=None):
output = ms.ops.acosh(input)
if len(shape) > 7:
output = output.reshape(shape)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "acosh")
def arccosh(input, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
shape = input.shape
if len(shape) > 7:
@@ -1120,46 +996,37 @@ def arccosh(input, *, out=None):
output = ms.ops.acosh(input)
if len(shape) > 7:
output = output.reshape(shape)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "arccosh")
def add(input, other, *, alpha=1, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
other = cast_to_ms_tensor(other)
output = ms.ops.add(input, other*alpha)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "add")
def addcdiv(input, tensor1, tensor2, *, value=1, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
tensor1 = cast_to_ms_tensor(tensor1)
tensor2 = cast_to_ms_tensor(tensor2)
value = ms.Tensor(value)
output = ms.ops.addcdiv(input, tensor1, tensor2, value)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "addcdiv")
def addcmul(input, tensor1, tensor2, *, value=1, out=None):
#Todo: use ms.ops.addcmul after it has been fixed
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
tensor1 = cast_to_ms_tensor(tensor1)
tensor2 = cast_to_ms_tensor(tensor2)
value = ms.Tensor(value)
mul = ms.ops.mul(tensor1, tensor2) * value
output = ms.ops.add(input, mul)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "addcmul")
def angle(input, *, out=None):
#TODO: use ms.ops.angle
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
shape = input.shape
if len(shape)>7:
@@ -1182,28 +1049,22 @@ def angle(input, *, out=None):
if len(shape)>7:
output = output.reshape(shape)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "angle")
def asin(input, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
output = ms.ops.asin(input)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "asin")
def arcsin(input, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
output = ms.ops.asin(input)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "arcsin")
def asinh(input, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
shape = input.shape
if len(shape) > 7:
@@ -1211,12 +1072,10 @@ def asinh(input, *, out=None):
output = ms.ops.asinh(input)
if len(shape) > 7:
output = output.reshape(shape)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "asinh")
def arcsinh(input, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
shape = input.shape
if len(shape) > 7:
@@ -1224,12 +1083,10 @@ def arcsinh(input, *, out=None):
output = ms.ops.asinh(input)
if len(shape) > 7:
output = output.reshape(shape)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "arcsinh")
def atan(input, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
shape = input.shape
if len(shape) > 7:
input = input.flatten()
@@ -1237,12 +1094,10 @@ def atan(input, *, out=None):
output = ms.ops.atan(input)
if len(shape) > 7:
output = output.reshape(shape)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "atan")
def arctan(input, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
shape = input.shape
if len(shape) > 7:
input = input.flatten()
@@ -1250,37 +1105,29 @@ def arctan(input, *, out=None):
output = ms.ops.atan(input)
if len(shape) > 7:
output = output.reshape(shape)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "arctan")
def atanh(input, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
output = ms.ops.atanh(input)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "atanh")
def arctanh(input, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
output = ms.ops.atanh(input)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "arctanh")
def arctan2(input, other, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
other = cast_to_ms_tensor(other)
output = ms.ops.atan2(input, other)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "arctan2")
def bitwise_not(input, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
type = input.dtype
if str(type) != 'Bool':
@@ -1288,12 +1135,10 @@ def bitwise_not(input, *, out=None):
else:
output = 1 - input
output = output.astype(ms.bool_)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "bitwise_not")
def bitwise_and(input, other, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
if isinstance(input, adapter_tensor):
input = cast_to_ms_tensor(input)
input_is_bool = str(input.dtype) == 'Bool'
@@ -1312,12 +1157,10 @@ def bitwise_and(input, other, *, out=None):
output = ms.ops.bitwise_and(input, other)
if input_is_bool and other_is_bool:
output = output.astype(ms.bool_)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "bitwise_and")
def bitwise_or(input, other, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
if isinstance(input, adapter_tensor):
input = cast_to_ms_tensor(input)
input_is_bool = str(input.dtype) == 'Bool'
@@ -1336,12 +1179,10 @@ def bitwise_or(input, other, *, out=None):
output = ms.ops.bitwise_or(input, other)
if input_is_bool and other_is_bool:
output = output.astype(ms.bool_)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "bitwise_or")
def bitwise_xor(input, other, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
if isinstance(input, adapter_tensor):
input = cast_to_ms_tensor(input)
input_is_bool = str(input.dtype) == 'Bool'
@@ -1360,29 +1201,25 @@ def bitwise_xor(input, other, *, out=None):
output = ms.ops.bitwise_xor(input, other)
if input_is_bool and other_is_bool:
output = output.astype(ms.bool_)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "bitwise_xor")
def bitwise_left_shift(input, other, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
if isinstance(input, adapter_tensor):
input = cast_to_ms_tensor(input).asnumpy()
if isinstance(other, adapter_tensor):
other = cast_to_ms_tensor(other).asnumpy()
output = ms.Tensor(np.left_shift(input, other))
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "bitwise_left_shift")
def bitwise_right_shift(input, other, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
if isinstance(input, adapter_tensor):
input = cast_to_ms_tensor(input).asnumpy()
if isinstance(other, adapter_tensor):
other = cast_to_ms_tensor(other).asnumpy()
output = ms.Tensor(np.right_shift(input, other))
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "bitwise_right_shift")
def split(tensor, split_size_or_sections, dim=0):
@@ -1440,8 +1277,6 @@ def split(tensor, split_size_or_sections, dim=0):
return res
def nonzero(input, *, out=None, as_tuple=False):
if out is not None:
warnings.warn("Do not support parameter 'out'.")
input = cast_to_ms_tensor(input)
if as_tuple:
if input.ndim == 1:
@@ -1451,34 +1286,28 @@ def nonzero(input, *, out=None, as_tuple=False):
output = []
res = ms.ops.nonzero(input)
res = res.transpose(1,0)
res = ms.ops.split(res, axis=0, output_num= input.ndim)
res = ms.ops.split(res, axis=0, output_num=input.ndim)
for cur in res:
output.append(cast_to_adapter_tensor(cur))
output = tuple(output)
elif input.ndim == 0:
raise ValueError("Do not support input ndim == 0.")
return output
return cast_to_adapter_tensor(ms.ops.nonzero(input))
return output # TODO: out is not assigned
output = ms.ops.nonzero(input)
return _out_inplace_assign(out, output, "nonzero")
def clip(input, min=None, max=None, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
output = input.clip(min, max)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "clip")
def conj_physical(input, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
output = ms.ops.conj(input)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "conj_physical")
def copysign(input, other, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
input_type = input.dtype
input = input.asnumpy()
@@ -1497,28 +1326,25 @@ def copysign(input, other, *, out=None):
output = output.astype(other_type)
elif is_num or 'Int' in str(other_type):
output = output.astype(input_type)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "copysign")
def cosh(input, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
output = ms.ops.cosh(input)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "cosh")
def deg2rad(input, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
if input.dtype not in (ms.float16, ms.float32, ms.float64):
input = input.astype(ms.float32)
output = ms.ops.deg2rad(input)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "deg2rad")
def devide(input, other, *, rounding_mode=None, out=None):
_out_limit_pynative(out, "devide")
return div(input, other, rounding_mode=rounding_mode, out=out)
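# Editor's note: a plausible sketch of `_out_limit_pynative` (imported from
# ms_adapter.pytorch.common._inner), inferred by analogy with the max/min
# branches above -- an assumption, not the verified implementation:
def _out_limit_pynative_sketch(out, op_name):
    if out is not None and not pynative_mode_condition():
        raise ValueError('In MindSpore static graph mode, `out` in `{}` should be None, '
                         'please set out=None and use return value instead of `out`.'.format(op_name))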
#Todo: class Digamma not found
@@ -1526,43 +1352,33 @@ def devide(input, other, *, rounding_mode=None, out=None):
def erf(input, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
output = ms.ops.erf(input)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "erf")
def erfc(input, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
output = ms.ops.erfc(input)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "erfc")
def erfinv(input, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
output = ms.ops.erfinv(input)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "erfinv")
def exp2(input, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
output = ms.ops.exp2(input)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "exp2")
def expm1(input, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
output = ms.ops.expm1(input)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "expm1")
def fake_quantize_per_channel_affine(input, scale, zero_point, axis, quant_min, quant_max):
@@ -1596,39 +1412,38 @@ def fake_quantize_per_tensor_affine(input, scale, zero_point, quant_min, quant_m
def fix(input, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
output = ms.ops.trunc(input)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "fix")
def float_power(input, exponent, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
if isinstance(input, adapter_tensor):
input = cast_to_ms_tensor(input).asnumpy()
if isinstance(exponent, adapter_tensor):
exponent = cast_to_ms_tensor(exponent).asnumpy()
output = ms.Tensor(np.float_power(input, exponent))
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "float_power")
def floor_divide(input, other, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
other = cast_to_ms_tensor(other)
output = ms.ops.floor_div(input, other)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "floor_divide")
def frexp(input, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
_out_limit_pynative(out, "frexp")
input = cast_to_ms_tensor(input).asnumpy()
mantissa, exponent = np.frexp(input)
return cast_to_adapter_tensor(ms.Tensor(mantissa)), cast_to_adapter_tensor(ms.Tensor(exponent))
out1 = ms.Tensor(mantissa)
out2 = ms.Tensor(exponent)
if out is not None:
if len(out) != 2 or not isinstance(out[0], adapter_tensor) or not isinstance(out[1], adapter_tensor):
raise TypeError("In ms_adapter.torch.frexp(), `out` should be tuple of Tensors.")
out[0].assign_value(out1)
out[1].assign_value(out2)
return out
return cast_to_adapter_tensor(out1), cast_to_adapter_tensor(out2)
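# e.g. np.frexp(4.0) -> (0.5, 3), since 4.0 == 0.5 * 2**3 and the mantissa is
# normalized into [0.5, 1).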
def gradient(input, *, spacing=1, dim=None, edge_order=1):
@@ -1652,17 +1467,13 @@ def imag(input):
def ldexp(input, other, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
other = cast_to_ms_tensor(other)
output = ms.ops.ldexp(input, other)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "ldexp")
def lerp(input, end, weight, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
end = cast_to_ms_tensor(end)
if isinstance(weight, adapter_tensor):
@@ -1670,7 +1481,7 @@ def lerp(input, end, weight, *, out=None):
elif not isinstance(weight, float):
weight = float(weight)
output = ms.ops.lerp(input, end, weight)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "lerp")
#Todo
@@ -1678,74 +1489,60 @@ def lerp(input, end, weight, *, out=None):
def logaddexp(input, other, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
other = cast_to_ms_tensor(other)
output = ms.ops.logaddexp(input, other)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "logaddexp")
def logaddexp2(input, other, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
other = cast_to_ms_tensor(other)
output = ms.ops.logaddexp2(input, other)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "logaddexp2")
def logical_and(input, other, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
if isinstance(input, adapter_tensor):
input = cast_to_ms_tensor(input).astype(ms.bool_)
if isinstance(other, adapter_tensor):
other = cast_to_ms_tensor(other).astype(ms.bool_)
output = ms.ops.logical_and(input, other)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "logical_and")
def logical_not(input, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
if isinstance(input, adapter_tensor):
input = cast_to_ms_tensor(input).astype(ms.bool_)
output = ms.ops.logical_not(input)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "logical_not")
def logical_or(input, other, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
if isinstance(input, adapter_tensor):
input = cast_to_ms_tensor(input).astype(ms.bool_)
if isinstance(other, adapter_tensor):
other = cast_to_ms_tensor(other).astype(ms.bool_)
output = ms.ops.logical_or(input, other)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "logical_or")
def logical_xor(input, other, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
if isinstance(input, adapter_tensor):
input = cast_to_ms_tensor(input).astype(ms.bool_)
if isinstance(other, adapter_tensor):
other = cast_to_ms_tensor(other).astype(ms.bool_)
output = ms.ops.logical_xor(input, other)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "logical_xor")
def logit(input, eps=None, *, out=None):
#TODO: ops.logit does not support CPU
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
if eps is not None:
input = ms.ops.clip_by_value(input, eps, 1.0-eps)
output = ms.ops.log(input/(1.0-input))
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "logit")
def frombuffer(buffer, *, dtype=None, count=-1, offset=0, requires_grad=False):
unsupported_attr(requires_grad)
@@ -1770,62 +1567,50 @@ def as_strided(input, size, stride, storage_offset=None):
return cast_to_adapter_tensor(out)
def ne(input, other, *, out=None):
unsupported_attr(out)
input = cast_to_ms_tensor(input)
other = cast_to_ms_tensor(other)
output = ms.ops.ne(input, other)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "ne")
def tanh(input, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
output = ms.ops.tanh(input)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "tanh")
def maximum(input, other, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
other = cast_to_ms_tensor(other)
output = ms.ops.maximum(input, other)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "maximum")
def minimum(input, other, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
other = cast_to_ms_tensor(other)
output = ms.ops.minimum(input, other)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "minimum")
def sigmoid(input, *, out=None):
#TODO: ms.ops.sigmoid() does not support float64
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
output = 1 / (ms.ops.exp(0 - input) + 1)
#output = ms.ops.sigmoid(input)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "sigmoid")
def softmax(input, dim, dtype=None, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
if dtype is not None:
input = input.astype(dtype)
output = ms.ops.softmax(input, dim)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "softmax")
def prod(input, dim=None, keepdim=False, *, dtype=None, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
if dtype is not None:
input = input.astype(dtype)
@@ -1833,13 +1618,11 @@ def prod(input, dim=None, keepdim=False, *, dtype=None, out=None):
output = ms.ops.prod(input)
else:
output = ms.ops.prod(input, axis=dim, keep_dims=keepdim)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "prod")
def eq(input, other, *, out=None):
if out is not None:
warnings.warn("do not support parameter out")
input = cast_to_ms_tensor(input)
other = cast_to_ms_tensor(other)
output = ms.ops.equal(input, other)
return cast_to_adapter_tensor(output)
return _out_inplace_assign(out, output, "eq")