@@ -306,20 +306,11 @@ def upsample(input, size=None, scale_factor=None, mode='nearest',
raise ValueError("only one of size or scale_factor should be defined")
raise ValueError("only one of size or scale_factor should be defined")
def linear_func(input):
def linear_func(input):
#TODO: if switch the mindspore version, delete the next four lines
if align_corners is True:
trans_mode = 'align_corners'
else:
trans_mode = 'half_pixel'
_size =_upsample_common_process_size(size=size, scale_factor=scale_factor, shape=input.shape)
_size =_upsample_common_process_size(size=size, scale_factor=scale_factor, shape=input.shape)
input = cast_to_ms_tensor(input)
input = cast_to_ms_tensor(input)
#TODO: if switch the mindspore version, change the code to
#out = ms.ops.interpolate(input, scale_factor=None, size=_size,
# align_corners=align_corners, mode=mode)
out = ms.ops.interpolate(input, scales=None, sizes=_size,
coordinate_transformation_mode=trans_mode, mode=mode)
out = ms.ops.interpolate(input, scale_factor=None, size=_size,
align_corners=align_corners, mode=mode)
return cast_to_adapter_tensor(out)
return cast_to_adapter_tensor(out)
@@ -784,15 +775,8 @@ def upsample_bilinear(input, size=None, scale_factor=None, *, align_corners=True
size_ = _upsample_common_process_size(size, scale_factor, input_shape)
size_ = _upsample_common_process_size(size, scale_factor, input_shape)
input = cast_to_ms_tensor(input)
input = cast_to_ms_tensor(input)
#TODO: if switch the mindspore version, delete the next four lines
if align_corners is True:
_cor_mode = "align_corners"
else:
_cor_mode = "half_pixel"
#TODO: if switch the mindspore version, change the code to
# result = ms.ops.interpolate(input, size=size_, align_corners=align_corners, mode="bilinear")
result = ms.ops.interpolate(input, sizes=size_, coordinate_transformation_mode=_cor_mode, mode="bilinear")
result = ms.ops.interpolate(input, size=size_, align_corners=align_corners, mode="bilinear")
return cast_to_adapter_tensor(result)
return cast_to_adapter_tensor(result)
def pairwise_distance(x1, x2, p=2.0, eps=1e-06, keepdim=False):
def pairwise_distance(x1, x2, p=2.0, eps=1e-06, keepdim=False):
@@ -894,9 +878,7 @@ def dropout2d(input, p=0.5, training=True, inplace=False):
return dropout1d(input, p, training, inplace)
return dropout1d(input, p, training, inplace)
input_ms = cast_to_ms_tensor(input)
input_ms = cast_to_ms_tensor(input)
#TODO: if switch the mindspore version, change the code to
# out = ms.ops.dropout2d(input_ms, p)
out, _ = ms.ops.dropout2d(input_ms, p)
out = ms.ops.dropout2d(input_ms, p)
return _inplace_assign_pynative(input, inplace, out, "dropout2d")
return _inplace_assign_pynative(input, inplace, out, "dropout2d")
@@ -919,9 +901,7 @@ def dropout3d(input, p=0.5, training=True, inplace=False):
input_ms = cast_to_ms_tensor(input)
input_ms = cast_to_ms_tensor(input)
if not is_batched:
if not is_batched:
input_ms = ms.ops.expand_dims(input_ms, 0)
input_ms = ms.ops.expand_dims(input_ms, 0)
#TODO: if switch the mindspore version, change the code to
# out = ms.ops.dropout3d(input_ms, p)
out, _ = ms.ops.dropout3d(input_ms, p)
out = ms.ops.dropout3d(input_ms, p)
if not is_batched:
if not is_batched:
out = ms.ops.squeeze(out, 0)
out = ms.ops.squeeze(out, 0)
@@ -1350,20 +1330,11 @@ def interpolate(input,
if input.dim() != 3:
if input.dim() != 3:
raise ValueError(f"'linear' mode only support 3D input, but got {input.dim()}D")
raise ValueError(f"'linear' mode only support 3D input, but got {input.dim()}D")
#TODO: if switch the mindspore version, delete the next four lines
if align_corners is True:
trans_mode = 'align_corners'
else:
trans_mode = 'half_pixel'
_size =_upsample_common_process_size(size=size, scale_factor=scale_factor, shape=input.shape)
_size =_upsample_common_process_size(size=size, scale_factor=scale_factor, shape=input.shape)
input = cast_to_ms_tensor(input)
input = cast_to_ms_tensor(input)
#TODO: if switch the mindspore version, change the code to
#out = ms.ops.interpolate(input, scale_factor=None, size=_size,
# align_corners=align_corners, mode=mode)
out = ms.ops.interpolate(input, scales=None, sizes=_size,
coordinate_transformation_mode=trans_mode, mode=mode)
out = ms.ops.interpolate(input, scale_factor=None, size=_size,
align_corners=align_corners, mode=mode)
return cast_to_adapter_tensor(out)
return cast_to_adapter_tensor(out)
if mode in ['bicubic', 'trilinear', 'area', 'nearest-exact']:
if mode in ['bicubic', 'trilinear', 'area', 'nearest-exact']:
@@ -1426,7 +1397,7 @@ def grid_sample(input, grid, mode='bilinear', padding_mode='zeros', align_corner
grid = cast_to_ms_tensor(grid)
grid = cast_to_ms_tensor(grid)
if align_corners is None:
if align_corners is None:
align_corners = False
align_corners = False
output = ms.ops.grid_sample(input, grid, interpolation_mode=mode,
output = ms.ops.grid_sample(input, grid, mode=mode,
padding_mode=padding_mode, align_corners=align_corners)
padding_mode=padding_mode, align_corners=align_corners)
output = cast_to_adapter_tensor(output)
output = cast_to_adapter_tensor(output)
return output
return output
@@ -1446,9 +1417,9 @@ def _get_conv1d_const(stride, padding, dilation):
stride = stride[0]
stride = stride[0]
pad_mode = "pad"
pad_mode = "pad"
if isinstance(padding, int):
if isinstance(padding, int):
padding = (0, 0, padding, padding)
padding = (0, padding)
elif isinstance(padding, tuple):
elif isinstance(padding, tuple):
padding = (0, 0, padding[0], padding[0])
padding = (0, padding[0])
else:
else:
pad_mode = padding
pad_mode = padding
padding = 0
padding = 0
@@ -1472,7 +1443,7 @@ def conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
_pad_mode, _stride, _padding, _dilation = _get_conv1d_const(stride, padding, dilation)
_pad_mode, _stride, _padding, _dilation = _get_conv1d_const(stride, padding, dilation)
input_ms = ms.ops.expand_dims(input_ms, 2)
input_ms = ms.ops.expand_dims(input_ms, 2)
weight_ms = ms.ops.expand_dims(weight_ms, 2)
weight_ms = ms.ops.expand_dims(weight_ms, 2)
output = ms.ops.conv2d(input_ms, weight_ms, _pad_mode, _padding, _stride, _dilation, groups)
output = ms.ops.conv2d(input_ms, weight_ms, None, _stride, _pad_mode, _padding, _dilation, groups)
if bias is not None:
if bias is not None:
# TODO: ms.ops.biasadd also not support float64
# TODO: ms.ops.biasadd also not support float64
if bias.dtype != output.dtype:
if bias.dtype != output.dtype:
@@ -1495,12 +1466,11 @@ def _get_conv2d_const(stride, padding, dilation):
stride = (stride[0], stride[0])
stride = (stride[0], stride[0])
pad_mode = "pad"
pad_mode = "pad"
if isinstance(padding, int):
if isinstance(padding, int):
padding = (padding, padding, padding, padding)
padding = (padding, padding)
elif isinstance(padding, tuple):
elif isinstance(padding, tuple):
if len(padding)==1:
if len(padding)==1:
padding = (padding[0], padding[0], padding[0], padding[0])
else:
padding = (padding[0], padding[0], padding[1], padding[1])
padding = (padding[0], padding[0])
else:
else:
pad_mode = padding
pad_mode = padding
padding = 0
padding = 0
@@ -1525,7 +1495,7 @@ def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
is_float64 = True
is_float64 = True
_pad_mode, _stride, _padding, _dilation = _get_conv2d_const(stride, padding, dilation)
_pad_mode, _stride, _padding, _dilation = _get_conv2d_const(stride, padding, dilation)
output = ms.ops.conv2d(input_ms, weight_ms, _pad_mode, _padding, _stride, _dilation, groups)
output = ms.ops.conv2d(input_ms, weight_ms, None, _stride, _pad_mode, _padding, _dilation, groups)
if bias is not None:
if bias is not None:
# TODO: ms.ops.biasadd also not support float64
# TODO: ms.ops.biasadd also not support float64
if bias.dtype != output.dtype:
if bias.dtype != output.dtype:
@@ -2223,12 +2193,11 @@ def _get_conv3d_const(stride, padding, dilation):
stride = (stride[0], stride[0], stride[0])
stride = (stride[0], stride[0], stride[0])
pad_mode = "pad"
pad_mode = "pad"
if isinstance(padding, int):
if isinstance(padding, int):
padding = (padding, padding, padding, padding, padding, padding)
padding = (padding, padding, padding)
elif isinstance(padding, tuple):
elif isinstance(padding, tuple):
if len(padding)==1:
if len(padding)==1:
padding = (padding[0], padding[0], padding[0], padding[0], padding[0], padding[0])
else:
padding = (padding[0], padding[0], padding[1], padding[1], padding[2], padding[2])
padding = (padding[0], padding[0], padding[0])
else:
else:
pad_mode = padding
pad_mode = padding
padding = 0
padding = 0
@@ -2250,7 +2219,7 @@ def conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
is_float64 = True
is_float64 = True
_pad_mode, _padding, _stride, _dilation = _get_conv3d_const(stride, padding, dilation)
_pad_mode, _padding, _stride, _dilation = _get_conv3d_const(stride, padding, dilation)
output = ms.ops.conv3d(input_ms, weight_ms, _pad_mode, _padding, _stride, _dilation, groups)
output = ms.ops.conv3d(input_ms, weight_ms, None, _stride, _pad_mode, _padding, _dilation, groups)
if bias is not None:
if bias is not None:
# TODO: ms.ops.biasadd also not support float64
# TODO: ms.ops.biasadd also not support float64
if bias.dtype != output.dtype:
if bias.dtype != output.dtype:
@@ -2267,9 +2236,7 @@ def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
# TODO: do not support on GPU
# TODO: do not support on GPU
input_ms = cast_to_ms_tensor(input)
input_ms = cast_to_ms_tensor(input)
output = ms.ops.unfold(input_ms, kernel_size, dilation, padding, stride)
output = ms.ops.unfold(input_ms, kernel_size, dilation, padding, stride)
# TODO: Enable atfer version upgrading
#output = output.reshape(output.shape[0], output.shape[1] * output.shape[2], -1)
output = output.reshape(output.shape[0], output.shape[1], -1)
output = output.reshape(output.shape[0], output.shape[1] * output.shape[2], -1)
return cast_to_adapter_tensor(output)
return cast_to_adapter_tensor(output)