#409 Fixed conflicts in 0327 version

Merged
Erpim merged 8 commits from dev0327 into master 1 year ago
  1. +1
    -0
      SupportedList.md
  2. +2
    -1
      SupportedList_en.md
  3. +15
    -8
      msadapter/pytorch/functional.py
  4. +1
    -0
      msadapter/pytorch/linalg/linalg.py
  5. +20
    -53
      msadapter/pytorch/nn/functional.py
  6. +7
    -15
      msadapter/pytorch/nn/modules/rnn.py
  7. +15
    -18
      msadapter/pytorch/tensor.py
  8. +4
    -1
      testing/ut/pytorch/functional/test_function.py
  9. +6
    -0
      testing/ut/pytorch/nn/functional/test_functional.py
  10. +4
    -1
      testing/ut/pytorch/nn/functional/test_loss.py
  11. +5
    -4
      testing/ut/pytorch/nn/test_activation.py
  12. +5
    -4
      testing/ut/pytorch/nn/test_loss.py
  13. +1
    -1
      testing/ut/pytorch/nn/test_rnn.py
  14. +3
    -1
      testing/ut/pytorch/tensor/test_tensor2.py

+ 1
- 0
SupportedList.md View File

@@ -275,6 +275,7 @@
| torch.view_as_real | 支持 | |
| torch.scatter | 不支持 | |
| torch.manual_seed | 支持 | |
| torch.matrix_exp | 不支持 | |
| torch.bernoulli | 支持 | |
| torch.multinomial | 支持 | Ascend上暂不支持,[输入参数有限制](ConstraintList.md) |
| torch.randint | 支持 | [输入参数有限制](ConstraintList.md) |


+ 2
- 1
SupportedList_en.md View File

@@ -262,6 +262,7 @@ English | [简体中文](SupportedList.md)
| torch.inner | Supported | [Input type is constrained](ConstraintList_en.md) |
| torch.logdet | Supported | Currently not support on Ascend |
| torch.mm | Supported | [Input type is constrained](ConstraintList_en.md) |
| torch.matrix_exp | Unspported | |
| torch.cuda.is_available | Supported | |
| torch.ByteTensor | Supported | |
| torch.CharTensor | Supported | |
@@ -977,4 +978,4 @@ English | [简体中文](SupportedList.md)
- Not support layout, device, requires_grad, memory_format
- Not support 7D and higher dimensions
- Ascend not fully support float64 type value as input, if the function is not applicable for float64, please try float32 and float16 instead.
- For the function with note "Input type is constrained", please check the [contraint list](ConstraintList_en.md) for more details
- For the function with note "Input type is constrained", please check the [contraint list](ConstraintList_en.md) for more details

+ 15
- 8
msadapter/pytorch/functional.py View File

@@ -531,9 +531,8 @@ def max(input, dim=None, keepdim=False, *, out=None):
ops.assign(out, output)
return out
return cast_to_adapter_tensor(output)
output = list(ms.ops.max(input, axis=dim, keep_dims=keepdim))
value = output[1].astype(type)
indice = output[0]
value, indice = ms.ops.max(input, dim, keepdim)
value = value.astype(type)
point = collections.namedtuple('max', 'values,indices')
rlt = point(cast_to_adapter_tensor(value), cast_to_adapter_tensor(indice))
if out is not None:
@@ -555,7 +554,7 @@ def min(input, dim=None, keepdim=False, *, out=None):
if dim is None:
return cast_to_adapter_tensor(input.min())

indices, result = ms.ops.min(input, axis=dim, keep_dims=keepdim)
result, indices = ms.ops.min(input, dim, keepdim)
if out is not None:
if pynative_mode_condition():
if len(out) != 2 or not isinstance(out[0], adapter_tensor) or not isinstance(out[1], adapter_tensor):
@@ -1443,7 +1442,7 @@ def lu_solve(b, LU_data, LU_pivots, *, out=None):
output = b.lu_solve(LU_data, LU_pivots)
return _out_inplace_assign_with_adapter_tensor(out, output, "lu_solve")

#TODO: Enable atfer upgrading
#TODO: Enable after upgrading
def lu_unpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True, *, out=None):
LU_data = cast_to_ms_tensor(LU_data)
LU_pivots = cast_to_ms_tensor(LU_pivots)
@@ -2393,18 +2392,26 @@ def block_diag(*tensors):
inputs = cast_to_ms_tensor(tensors)
output = ms.ops.block_diag(*inputs)
return cast_to_adapter_tensor(output)

def logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=None, device=None, requires_grad=False):
unsupported_attr(layout)
unsupported_attr(device)
unsupported_attr(requires_grad)
start = ms.Tensor(start, dtype=dtype)
end = ms.Tensor(end, dtype=dtype)
# TODO: For later version the base should be int type, enable atfer version upgrading
# base = base.astype(int32)
if base % 1 != 0:
raise ValueError("For logspace, base only support integer")
base = int(base)
if dtype is None:
dtype = ms.float32
_dtype = dtype

if start.dtype in all_int_type or end.dtype in all_int_type or dtype in all_int_type:
start = start.astype(mstype.float32)
end = end.astype(mstype.float32)
output = ms.ops.logspace(start, end, steps, base, dtype=dtype)
_dtype = mstype.float32
output = ms.ops.logspace(start, end, steps, base, dtype=_dtype)
output = output.astype(dtype)
return _out_inplace_assign(out, output, "logspace")

def column_stack(tensors, *, out=None):


+ 1
- 0
msadapter/pytorch/linalg/linalg.py View File

@@ -114,6 +114,7 @@ def lu_factor_ex(A, *, pivot=True, out=None):
else:
output = vmap(ms_linalg.lu_factor, in_axes= (0, None, None))(A, False, True)
#TODO: Mindspore not support check_errors
#TODO: ms.ops.zeros() currently has preblem handling input shape including 0
info = _get_cache_prim(ms.ops.Zeros)()(A.shape[0], ms.int32)
output = output + (info,)
return _out_inplace_assign(out, output, "lu_factor_ex")


+ 20
- 53
msadapter/pytorch/nn/functional.py View File

@@ -306,20 +306,11 @@ def upsample(input, size=None, scale_factor=None, mode='nearest',
raise ValueError("only one of size or scale_factor should be defined")

def linear_func(input):
#TODO: if switch the mindspore version, delete the next four lines
if align_corners is True:
trans_mode = 'align_corners'
else:
trans_mode = 'half_pixel'

_size =_upsample_common_process_size(size=size, scale_factor=scale_factor, shape=input.shape)

input = cast_to_ms_tensor(input)
#TODO: if switch the mindspore version, change the code to
#out = ms.ops.interpolate(input, scale_factor=None, size=_size,
# align_corners=align_corners, mode=mode)
out = ms.ops.interpolate(input, scales=None, sizes=_size,
coordinate_transformation_mode=trans_mode, mode=mode)
out = ms.ops.interpolate(input, scale_factor=None, size=_size,
align_corners=align_corners, mode=mode)

return cast_to_adapter_tensor(out)

@@ -784,15 +775,8 @@ def upsample_bilinear(input, size=None, scale_factor=None, *, align_corners=True
size_ = _upsample_common_process_size(size, scale_factor, input_shape)

input = cast_to_ms_tensor(input)
#TODO: if switch the mindspore version, delete the next four lines
if align_corners is True:
_cor_mode = "align_corners"
else:
_cor_mode = "half_pixel"

#TODO: if switch the mindspore version, change the code to
# result = ms.ops.interpolate(input, size=size_, align_corners=align_corners, mode="bilinear")
result = ms.ops.interpolate(input, sizes=size_, coordinate_transformation_mode=_cor_mode, mode="bilinear")
result = ms.ops.interpolate(input, size=size_, align_corners=align_corners, mode="bilinear")
return cast_to_adapter_tensor(result)

def pairwise_distance(x1, x2, p=2.0, eps=1e-06, keepdim=False):
@@ -894,9 +878,7 @@ def dropout2d(input, p=0.5, training=True, inplace=False):
return dropout1d(input, p, training, inplace)

input_ms = cast_to_ms_tensor(input)
#TODO: if switch the mindspore version, change the code to
# out = ms.ops.dropout2d(input_ms, p)
out, _ = ms.ops.dropout2d(input_ms, p)
out = ms.ops.dropout2d(input_ms, p)
return _inplace_assign_pynative(input, inplace, out, "dropout2d")


@@ -919,9 +901,7 @@ def dropout3d(input, p=0.5, training=True, inplace=False):
input_ms = cast_to_ms_tensor(input)
if not is_batched:
input_ms = ms.ops.expand_dims(input_ms, 0)
#TODO: if switch the mindspore version, change the code to
# out = ms.ops.dropout3d(input_ms, p)
out, _ = ms.ops.dropout3d(input_ms, p)
out = ms.ops.dropout3d(input_ms, p)
if not is_batched:
out = ms.ops.squeeze(out, 0)

@@ -1350,20 +1330,11 @@ def interpolate(input,
if input.dim() != 3:
raise ValueError(f"'linear' mode only support 3D input, but got {input.dim()}D")

#TODO: if switch the mindspore version, delete the next four lines
if align_corners is True:
trans_mode = 'align_corners'
else:
trans_mode = 'half_pixel'

_size =_upsample_common_process_size(size=size, scale_factor=scale_factor, shape=input.shape)

input = cast_to_ms_tensor(input)
#TODO: if switch the mindspore version, change the code to
#out = ms.ops.interpolate(input, scale_factor=None, size=_size,
# align_corners=align_corners, mode=mode)
out = ms.ops.interpolate(input, scales=None, sizes=_size,
coordinate_transformation_mode=trans_mode, mode=mode)
out = ms.ops.interpolate(input, scale_factor=None, size=_size,
align_corners=align_corners, mode=mode)
return cast_to_adapter_tensor(out)

if mode in ['bicubic', 'trilinear', 'area', 'nearest-exact']:
@@ -1426,7 +1397,7 @@ def grid_sample(input, grid, mode='bilinear', padding_mode='zeros', align_corner
grid = cast_to_ms_tensor(grid)
if align_corners is None:
align_corners = False
output = ms.ops.grid_sample(input, grid, interpolation_mode=mode,
output = ms.ops.grid_sample(input, grid, mode=mode,
padding_mode=padding_mode, align_corners=align_corners)
output = cast_to_adapter_tensor(output)
return output
@@ -1446,9 +1417,9 @@ def _get_conv1d_const(stride, padding, dilation):
stride = stride[0]
pad_mode = "pad"
if isinstance(padding, int):
padding = (0, 0, padding, padding)
padding = (0, padding)
elif isinstance(padding, tuple):
padding = (0, 0, padding[0], padding[0])
padding = (0, padding[0])
else:
pad_mode = padding
padding = 0
@@ -1472,7 +1443,7 @@ def conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
_pad_mode, _stride, _padding, _dilation = _get_conv1d_const(stride, padding, dilation)
input_ms = ms.ops.expand_dims(input_ms, 2)
weight_ms = ms.ops.expand_dims(weight_ms, 2)
output = ms.ops.conv2d(input_ms, weight_ms, _pad_mode, _padding, _stride, _dilation, groups)
output = ms.ops.conv2d(input_ms, weight_ms, None, _stride, _pad_mode, _padding, _dilation, groups)
if bias is not None:
# TODO: ms.ops.biasadd also not support float64
if bias.dtype != output.dtype:
@@ -1495,12 +1466,11 @@ def _get_conv2d_const(stride, padding, dilation):
stride = (stride[0], stride[0])
pad_mode = "pad"
if isinstance(padding, int):
padding = (padding, padding, padding, padding)
padding = (padding, padding)
elif isinstance(padding, tuple):
if len(padding)==1:
padding = (padding[0], padding[0], padding[0], padding[0])
else:
padding = (padding[0], padding[0], padding[1], padding[1])
padding = (padding[0], padding[0])

else:
pad_mode = padding
padding = 0
@@ -1525,7 +1495,7 @@ def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
is_float64 = True

_pad_mode, _stride, _padding, _dilation = _get_conv2d_const(stride, padding, dilation)
output = ms.ops.conv2d(input_ms, weight_ms, _pad_mode, _padding, _stride, _dilation, groups)
output = ms.ops.conv2d(input_ms, weight_ms, None, _stride, _pad_mode, _padding, _dilation, groups)
if bias is not None:
# TODO: ms.ops.biasadd also not support float64
if bias.dtype != output.dtype:
@@ -2223,12 +2193,11 @@ def _get_conv3d_const(stride, padding, dilation):
stride = (stride[0], stride[0], stride[0])
pad_mode = "pad"
if isinstance(padding, int):
padding = (padding, padding, padding, padding, padding, padding)
padding = (padding, padding, padding)
elif isinstance(padding, tuple):
if len(padding)==1:
padding = (padding[0], padding[0], padding[0], padding[0], padding[0], padding[0])
else:
padding = (padding[0], padding[0], padding[1], padding[1], padding[2], padding[2])
padding = (padding[0], padding[0], padding[0])

else:
pad_mode = padding
padding = 0
@@ -2250,7 +2219,7 @@ def conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
is_float64 = True

_pad_mode, _padding, _stride, _dilation = _get_conv3d_const(stride, padding, dilation)
output = ms.ops.conv3d(input_ms, weight_ms, _pad_mode, _padding, _stride, _dilation, groups)
output = ms.ops.conv3d(input_ms, weight_ms, None, _stride, _pad_mode, _padding, _dilation, groups)
if bias is not None:
# TODO: ms.ops.biasadd also not support float64
if bias.dtype != output.dtype:
@@ -2267,9 +2236,7 @@ def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
# TODO: do not support on GPU
input_ms = cast_to_ms_tensor(input)
output = ms.ops.unfold(input_ms, kernel_size, dilation, padding, stride)
# TODO: Enable atfer version upgrading
#output = output.reshape(output.shape[0], output.shape[1] * output.shape[2], -1)
output = output.reshape(output.shape[0], output.shape[1], -1)
output = output.reshape(output.shape[0], output.shape[1] * output.shape[2], -1)
return cast_to_adapter_tensor(output)




+ 7
- 15
msadapter/pytorch/nn/modules/rnn.py View File

@@ -235,9 +235,7 @@ class RNNBase(Module):
output, h_t = self.rnn_cell(pre_layer, hx[i], None, w_ih, w_hh, b_ih, b_hh)
h_n += (h_t,)

#TODO:modified after version upgrading
#pre_layer = ms.ops.dropout(output, 1 - self.dropout) \
pre_layer = ms.ops.dropout(output, 1 - self.dropout)[0] \
pre_layer = ms.ops.dropout(output, 1 - self.dropout) \
if (self.dropout != 0 and i < self.num_layers - 1) else output
else:
for i in range(self.num_layers):
@@ -253,10 +251,8 @@ class RNNBase(Module):
h_n += (h_t,)
h_n += (h_t_b,)

#TODO:modified after version upgrading
#pre_layer = ms.ops.dropout(output, 1 - self.dropout) \
pre_layer = ms.ops.dropout(output, 1 - self.dropout)[0] \
if (self.dropout != 0 and i < self.num_layers - 1) else output
pre_layer = ms.ops.dropout(output, 1 - self.dropout) \
if (self.dropout != 0 and i < self.num_layers - 1) else output

h_n = ms.ops.concat(h_n, 0)
h_n = h_n.view(hx.shape)
@@ -366,10 +362,8 @@ class LSTM(RNNBase):
h_n += (h_t,)
c_n += (c_t,)

#TODO:modified after version upgrading
#pre_layer = ms.ops.dropout(output, 1 - self.dropout) \
pre_layer = ms.ops.dropout(output, 1 - self.dropout)[0] \
if (self.dropout != 0 and i < self.num_layers - 1) else output
pre_layer = ms.ops.dropout(output, 1 - self.dropout) \
if (self.dropout != 0 and i < self.num_layers - 1) else output
else:
for i in range(self.num_layers):
w_ih, w_hh, b_ih, b_hh, w_ih_b, w_hh_b, b_ih_b, b_hh_b = \
@@ -390,10 +384,8 @@ class LSTM(RNNBase):
c_n += (c_t,)
c_n += (c_t_b,)

#TODO:modified after version upgrading
#pre_layer = ms.ops.dropout(output, 1 - self.dropout) \
pre_layer = ms.ops.dropout(output, 1 - self.dropout)[0] \
if (self.dropout != 0 and i < self.num_layers - 1) else output
pre_layer = ms.ops.dropout(output, 1 - self.dropout) \
if (self.dropout != 0 and i < self.num_layers - 1) else output

h_n = ms.ops.concat(h_n, 0)
h_n = h_n.view(hx[0].shape)


+ 15
- 18
msadapter/pytorch/tensor.py View File

@@ -461,9 +461,8 @@ class Tensor(ms.Tensor):
out_shape = list(tensor_ms.shape[len(index.shape) - 1:])
out_shape[0] = 0
out_shape = tuple(out_shape)
#TODO: if switch the mindspore version, change the code to
# out = _get_cache_prim(ms.ops.Zeros)()(out_shape, tensor_ms.dtype)
out = ms.ops.zeros(out_shape, dtype=tensor_ms.dtype)
#TODO: ms.ops.zeros() currently has preblem handling input shape including 0
out = _get_cache_prim(ms.ops.Zeros)()(out_shape, tensor_ms.dtype)
return out
ms_shape_len = len(tensor_ms.shape)
index_shape_len = len(index.shape)
@@ -490,9 +489,8 @@ class Tensor(ms.Tensor):
out_shape = list(tmp_out.shape)
for i in range(len(out_shape)):
out_shape[i] = out_shape[i] * scale[i]
#TODO: if switch the mindspore version, change the code to
#out = _get_cache_prim(ms.ops.Zeros)()(tuple(out_shape), tensor_ms.dtype)
out = ms.ops.zeros(tuple(out_shape), dtype=tensor_ms.dtype)
#TODO: ms.ops.zeros() currently has preblem handling input shape including 0
out = _get_cache_prim(ms.ops.Zeros)()(tuple(out_shape), tensor_ms.dtype)
else:
out = tensor_ms.__getitem__(index)
return out
@@ -822,7 +820,7 @@ class Tensor(ms.Tensor):
raise TypeError("For 'Tensor.min', the type of `input` do not support `torch.int64` and "
"`torch.int32`, got {}.".format(dtype_name))

indices, result = P.min(input, axis=dim, keep_dims=keepdim)
indices, result = ms.ops.min(input, axis=dim, keep_dims=keepdim)
return cast_to_adapter_tensor(result), cast_to_adapter_tensor(indices)

def max(self, dim=None, keepdim=False):
@@ -838,7 +836,7 @@ class Tensor(ms.Tensor):
raise TypeError("For 'Tensor.max', the type of `input` do not support `torch.int64` and "
"`torch.int32`, got {}.".format(dtype_name))

indices, result = P.max(input, axis=dim, keep_dims=keepdim)
result, indices = ms.ops.max(input, axis=dim, keepdims=keepdim)
return cast_to_adapter_tensor(result), cast_to_adapter_tensor(indices)

def numel(self):
@@ -930,9 +928,8 @@ class Tensor(ms.Tensor):
if input_size[0] == 0: # only support first element is 0
numel = ms.ops.size(self)
shape = _infer_size(shape, numel)
#TODO: if switch the mindspore version, change the code to
#output = _get_cache_prim(ms.ops.Zeros)()(shape, self.dtype)
output = ms.ops.zeros(shape, self.dtype)
#TODO: ms.ops.zeros() currently has preblem handling input shape including 0
output = _get_cache_prim(ms.ops.Zeros)()(shape, self.dtype)
else:
input = cast_to_ms_tensor(self)
output = tensor_operator_registry.get('reshape')()(input, shape)
@@ -1013,9 +1010,8 @@ class Tensor(ms.Tensor):
if input_size[0] == 0: # only support first element is 0
numel = ms.ops.size(input_ms)
shape = _infer_size(shape, numel)
#TODO: if switch the mindspore version, change the code to
#output = _get_cache_prim(ms.ops.Zeros)()(shape, input_ms.dtype)
output = ms.ops.zeros(shape, input_ms.dtype)
#TODO: ms.ops.zeros() currently has preblem handling input shape including 0
output = _get_cache_prim(ms.ops.Zeros)()(shape, input_ms.dtype)
else:
output = input_ms.reshape(*shape)
return cast_to_adapter_tensor(output)
@@ -1069,14 +1065,14 @@ class Tensor(ms.Tensor):
def amax(self, dim=None, keepdim=False):
input_ms = cast_to_ms_tensor(self)
if dim is not None:
return cast_to_adapter_tensor(input_ms.amax(axis=dim, keep_dims=keepdim))
return cast_to_adapter_tensor(input_ms.amax(keep_dims=keepdim))
return cast_to_adapter_tensor(input_ms.amax(axis=dim, keepdims=keepdim))
return cast_to_adapter_tensor(input_ms.amax(keepdims=keepdim))

def amin(self, dim=None, keepdim=False):
input_ms = cast_to_ms_tensor(self)
if dim is not None:
return cast_to_adapter_tensor(input_ms.amin(axis=dim, keep_dims=keepdim))
return cast_to_adapter_tensor(input_ms.amin(keep_dims=keepdim))
return cast_to_adapter_tensor(input_ms.amin(axis=dim, keepdims=keepdim))
return cast_to_adapter_tensor(input_ms.amin(keepdims=keepdim))

def as_strided(self, size, stride, storage_offset=None):
warnings.warn("not support output as a view.")
@@ -3487,5 +3483,6 @@ def _lu_factor_ex(A, *, pivot=True):
else:
output = vmap(ms_linalg.lu_factor, in_axes= (0, None, None))(A, False, True)
#TODO: Mindspore not support check_errors
#TODO: ms.ops.zeros() currently has preblem handling input shape including 0
info = _get_cache_prim(ms.ops.Zeros)()(A.shape[0], ms.int32)
return output, info

+ 4
- 1
testing/ut/pytorch/functional/test_function.py View File

@@ -1389,6 +1389,8 @@ def test_row_stack():
assert np.allclose(torch_out2.numpy(), ms_out2.numpy())
assert torch_out2.numpy().dtype == ms_out2.numpy().dtype

#TODO:Unsupported op [MatrixExp] on CPU
'''
def test_matrix_exp():
A = np.empty([2, 2, 2])
A[0, :, :] = np.eye(2, 2)
@@ -1402,6 +1404,7 @@ def test_matrix_exp():

assert np.allclose(torch_out.numpy(), ms_out.numpy())
assert torch_out.numpy().dtype == ms_out.numpy().dtype
'''

def test_mv():
mat = np.random.randn(2, 3)
@@ -2370,7 +2373,7 @@ if __name__ == '__main__':
test_swapdims()
test_swapaxes()
test_row_stack()
test_matrix_exp()
#test_matrix_exp()
test_argwhere()
test_mv()
test_blackman_window()


+ 6
- 0
testing/ut/pytorch/nn/functional/test_functional.py View File

@@ -48,6 +48,8 @@ def test_interpolate4():

assert np.allclose(ms_output.asnumpy(), torch_output.numpy())

#TODO:Unsupported op [UpsampleNearest3D] on CPU
'''
def test_interpolate5():
tensor = np.arange(1, 5).reshape((1, 1, 1, 2, 2)).astype(np.float32)
torch_tensor = torch.tensor(tensor)
@@ -57,6 +59,7 @@ def test_interpolate5():
ms_output = interpolate(ms_tensor, size=3, mode="nearest")

assert np.allclose(ms_output.asnumpy(), torch_output.numpy())
'''

def test_adaptive_avg_pool2d():
tensor = np.random.randn(1, 32, 9, 9).astype(np.float32)
@@ -118,6 +121,8 @@ def test_upsample_nearest3():
assert (torch_output.shape == ms_output.shape)
assert np.allclose(ms_output.asnumpy(), torch_output.numpy(), atol=1e-4)

#TODO: Unsupported op [UpsampleNearest3D] on CPU
'''
def test_upsample_nearest4():
data = np.random.randn(2, 3, 4, 5, 6).astype(np.float32)

@@ -153,6 +158,7 @@ def test_upsample_nearest6():

assert (torch_output.shape == ms_output.shape)
assert np.allclose(ms_output.asnumpy(), torch_output.numpy(), atol=1e-4)
'''

def test_upsample_bilinear1():
data = np.random.randn(2, 3, 4, 5).astype(np.float32)


+ 4
- 1
testing/ut/pytorch/nn/functional/test_loss.py View File

@@ -148,6 +148,8 @@ def test_huber_loss():
assert np.allclose(result_ms.asnumpy(), result_torch.numpy())
assert result_ms.shape == result_torch.shape

#TODO: Unsupported op [TripletMarginLoss] on CPU
'''
def test_triplet_margin_loss():
np_anc = np.random.randn(100, 128)
np_pos = np.random.randn(100, 128)
@@ -165,6 +167,7 @@ def test_triplet_margin_loss():

assert np.allclose(result_ms.asnumpy(), result_torch.detach().numpy())
assert result_ms.shape == result_torch.shape
'''

if __name__ == '__main__':
test_ctc_loss()
@@ -175,4 +178,4 @@ if __name__ == '__main__':
test_multilabel_soft_margin_loss()
test_multi_margin_loss()
test_huber_loss()
test_triplet_margin_loss()
#test_triplet_margin_loss()

+ 5
- 4
testing/ut/pytorch/nn/test_activation.py View File

@@ -509,7 +509,8 @@ def test_hardsigmoid():
assert np.allclose(ms_out.asnumpy(), torch_out.numpy(), atol=1e-5)
assert ms_out.asnumpy().dtype == torch_out.numpy().dtype


#TODO: multiheadattention need reconstruct
'''
def test_multi_head_attention1():
_embed_dim = 20
_target_seq_length = 6
@@ -561,7 +562,7 @@ def test_multi_head_attention2():
ms_output = ms_net(ms_query, ms_key, ms_val, need_weights=False)

assert ms_output[0].shape == torch_output[0].shape
'''
def test_prelu():
input = np.array([[[[0.1, 0.6], [0.9, 0.9]]]]).astype(np.float32)
weight_init = 0.25
@@ -647,8 +648,8 @@ if __name__ == '__main__':
test_softsign()
test_glu()
test_hardshrink()
test_multi_head_attention1()
test_multi_head_attention2()
#test_multi_head_attention1()
#test_multi_head_attention2()
test_prelu()
test_softplus()
test_softmax2d()

+ 5
- 4
testing/ut/pytorch/nn/test_loss.py View File

@@ -494,7 +494,8 @@ def test_cosine_embedding_loss_mean():
assert np.allclose(result_ms.asnumpy(), result_torch.numpy())
assert result_ms.asnumpy().dtype == result_torch.numpy().dtype
assert result_ms.shape == result_torch.shape

#TODO:Unsupported op [TripletMarginLoss] on CPU
'''
def test_cosine_triplet_margin_loss_none():
anchor = np.random.randn(100, 128).astype(np.float32)
positive = np.random.randn(100, 128).astype(np.float32)
@@ -557,7 +558,7 @@ def test_cosine_triplet_margin_loss_mean():
assert np.allclose(result_ms.asnumpy(), result_torch.numpy())
assert result_ms.asnumpy().dtype == result_torch.numpy().dtype
assert result_ms.shape == result_torch.shape
'''
def test_multi_margin_loss_none():
x = np.array([[0.1, 0.2, 0.4, 0.8]])
y = np.array([3])
@@ -802,11 +803,11 @@ if __name__ == '__main__':
test_cosine_embedding_loss_none()
test_cosine_embedding_loss_sum()
test_cosine_embedding_loss_mean()
'''
test_cosine_triplet_margin_loss_none()
test_cosine_triplet_margin_loss_sum()
test_cosine_triplet_margin_loss_mean()
'''
test_multi_margin_loss_none()
test_multi_margin_loss_weight()



+ 1
- 1
testing/ut/pytorch/nn/test_rnn.py View File

@@ -374,4 +374,4 @@ if __name__ == '__main__':
test_grucell3()
test_lstmcell1()
test_lstmcell2()
test_lstmcell3()
test_lstmcell3()

+ 3
- 1
testing/ut/pytorch/tensor/test_tensor2.py View File

@@ -312,6 +312,8 @@ def test_fmax():

assert np.allclose(torch_output.numpy(), ms_out.numpy(), equal_nan=True)

#TODO:Unsupported op [Fmin] on CPU
'''
def test_fmin():
a = torch.tensor([1., float('nan'), 3, float('nan')])
b = torch.tensor([float('nan'), 2., 1., float('nan')])
@@ -322,7 +324,7 @@ def test_fmin():
ms_out = a.fmin(b)

assert np.allclose(torch_output.numpy(), ms_out.numpy(), equal_nan=True)
'''
def test_H():
a = torch.tensor([[1+1j, 2], [1-1j, 1]])
torch_out = a.H


Loading…
Cancel
Save