add testcase, adapt adaptive_max_pool3d for Ascend

Co-authored-by: lvhaoyu <lvhaoyu@huawei.com>
Reviewed-on: #424
@@ -3,6 +3,7 @@
 """Functional interface"""
 import math
 import warnings
+from typing import Iterable
 from functools import lru_cache
 import numpy as np
 import mindspore as ms
@@ -12,7 +13,7 @@ from mindspore.ops.operations.nn_ops import TripletMarginLoss as TripletMarginLo
 from mindspore.ops._primitive_cache import _get_cache_prim
 from mindspore.ops.function.math_func import _expand, _check_same_type
-from msadapter.utils import unsupported_attr, _GLOBAL_LRU_CACHE_SIZE_NN
+from msadapter.utils import unsupported_attr, _GLOBAL_LRU_CACHE_SIZE_NN, is_under_ascend_context
 from msadapter.pytorch.tensor import Tensor, cast_to_ms_tensor, cast_to_adapter_tensor
 from msadapter.pytorch.common._inner import _inplace_assign_pynative
 from msadapter.pytorch.common.dtype import all_int_type
@@ -165,9 +166,65 @@ def adaptive_max_pool2d(input, output_size, return_indices=False):
     output = ms.ops.adaptive_max_pool2d(input, output_size, return_indices)
     return cast_to_adapter_tensor(output)
 
+
+@constexpr
+@lru_cache(_GLOBAL_LRU_CACHE_SIZE_NN)
+def _get_adaptive_max_pool3d_output_size(input_shape, output_size):
+    if not isinstance(output_size, Iterable):
+        output_size = [output_size, ] * 3
+    condition = [0,] * 3
+    if None in output_size:
+        output_size = list(output_size)
+        if output_size[0] is None:
+            condition[0] = 1
+            output_size[0] = 0
+        if output_size[1] is None:
+            condition[1] = 1
+            output_size[1] = 0
+        if output_size[2] is None:
+            condition[2] = 1
+            output_size[2] = 0
+    _, _, d, h, w = input_shape
+    out_d = output_size[0] + condition[0] * d
+    out_h = output_size[1] + condition[1] * h
+    out_w = output_size[2] + condition[2] * w
+    return out_d, out_h, out_w
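+
+# Worked example for _get_adaptive_max_pool3d_output_size (hypothetical shapes):
+# input_shape = (N, C, 4, 6, 8) with output_size = (3, None, 5) resolves each
+# None entry to the matching input size and returns (3, 6, 5), mirroring the
+# None semantics of torch.nn.functional.adaptive_max_pool3d.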
+
+
+@constexpr
+@lru_cache(_GLOBAL_LRU_CACHE_SIZE_NN)
+def _get_adaptive_max_pool3d_stride(input_shape, output_size):
+    out_d, out_h, out_w = output_size
+    _, _, d, h, w = input_shape
+    stride_d = d // out_d
+    kernel_d = d - (out_d - 1) * stride_d
+    stride_h = h // out_h
+    kernel_h = h - (out_h - 1) * stride_h
+    stride_w = w // out_w
+    kernel_w = w - (out_w - 1) * stride_w
+    return kernel_d, kernel_h, kernel_w, stride_d, stride_h, stride_w
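+
+# Worked example for _get_adaptive_max_pool3d_stride (hypothetical sizes):
+# for d = 10 and out_d = 3, stride_d = 10 // 3 = 3 and
+# kernel_d = 10 - (3 - 1) * 3 = 4; a pad_mode="valid" MaxPool3D then yields
+# (10 - 4) // 3 + 1 = 3 positions along depth, exactly the requested out_d.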
+
+
 def adaptive_max_pool3d(input, output_size, return_indices=False):
     input = cast_to_ms_tensor(input)
-    output = ms.ops.adaptive_max_pool3d(input, output_size, return_indices)
+    input_shape = ms.ops.shape(input)
+    _output_size = _get_adaptive_max_pool3d_output_size(input_shape, output_size)
+    if is_under_ascend_context():
+        # TODO: Ascend does not support ms.ops.adaptive_max_pool3d yet; use MaxPool3D instead.
+        # MaxPool3D matches adaptive_max_pool3d in output shape, but not in output values.
+        # The implementation below does not affect training convergence.
+        if return_indices:
+            raise NotImplementedError("For adaptive_max_pool3d, return_indices is not supported yet.")
+        kernel_d, kernel_h, kernel_w, stride_d, stride_h, stride_w = \
+            _get_adaptive_max_pool3d_stride(input_shape, _output_size)
+        max_pool = ms.ops.MaxPool3D(kernel_size=(kernel_d, kernel_h, kernel_w),
+                                    strides=(stride_d, stride_h, stride_w),
+                                    pad_mode="valid", data_format="NCDHW")
+        output = max_pool(input)
+    else:
+        output = ms.ops.adaptive_max_pool3d(input, _output_size, return_indices)
     return cast_to_adapter_tensor(output)
 
 def pad(input, pad, mode="constant", value=0):
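
A minimal usage sketch of the functional form added above (a sketch, assuming
msadapter's torch-style module layout, i.e. msadapter.pytorch.nn.functional;
shapes are illustrative). On Ascend the pooled values may differ from CPU/GPU,
as the TODO notes, but the output shape contract holds:

    import numpy as np
    from mindspore import Tensor
    import msadapter.pytorch.nn.functional as F  # assumed import path

    x = Tensor(np.random.randn(1, 2, 8, 9, 10).astype(np.float32))  # NCDHW
    out = F.adaptive_max_pool3d(x, (3, None, 5))  # None keeps that input dim
    print(out.shape)  # (1, 2, 3, 9, 5)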
@@ -279,45 +279,16 @@ class AdaptiveMaxPool2d(_AdaptiveMaxPoolNd):
 class AdaptiveMaxPool3d(_AdaptiveMaxPoolNd):
-    def __init__(self, output_size, return_indices = False):
+    def __init__(self, output_size, return_indices=False):
+        # TODO: `return_indices` is not supported on Ascend yet.
+        if return_indices and is_under_ascend_context():
+            raise NotImplementedError("AdaptiveMaxPool3d doesn't support return_indices on Ascend now.")
         super(AdaptiveMaxPool3d, self).__init__(output_size, return_indices)
-        self.output_size = output_size
-        self.shape = P.Shape()
-        if not isinstance(self.output_size, Iterable):
-            self.output_size = [self.output_size, ] * 3
-        self.condition = [0,] * 3
-        if None in self.output_size:
-            self.output_size = list(self.output_size)
-            if self.output_size[0] is None:
-                self.condition[0] = 1
-                self.output_size[0] = 0
-            if self.output_size[1] is None:
-                self.condition[1] = 1
-                self.output_size[1] = 0
-            if self.output_size[2] is None:
-                self.condition[2] = 1
-                self.output_size[2] = 0
-        if return_indices:
-            raise NotImplementedError('AdaptiveMaxPool3d doesn\'t support return_indices now.')
-        self.return_indices = return_indices
 
     def forward(self, input):
-        input = cast_to_ms_tensor(input)
-        _, _, d, h, w = self.shape(input)
-        out_d = self.output_size[0] + self.condition[0] * d
-        out_h = self.output_size[1] + self.condition[1] * h
-        out_w = self.output_size[2] + self.condition[2] * w
-        stride_d = d // out_d
-        kernel_d = d - (out_d - 1) * stride_d
-        stride_h = h // out_h
-        kernel_h = h - (out_h - 1) * stride_h
-        stride_w = w // out_w
-        kernel_w = w - (out_w - 1) * stride_w
-        avg_pool = P.MaxPool3D(kernel_size=(kernel_d, kernel_h, kernel_w),
-                               strides=(stride_d, stride_h, stride_w),
-                               pad_mode="valid", data_format="NCDHW")
-        outputs = avg_pool(input)
-        return cast_to_adapter_tensor(outputs)
+        outputs = Adapter_F.adaptive_max_pool3d(input, self.output_size, self.return_indices)
+        return outputs
 
 
 class _LPPoolNd(Module):
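
For reference, a shape-only check of the module path, in the spirit of the
tests below (a sketch; input values are arbitrary, and on Ascend only the
shape is comparable with PyTorch, per the workaround above):

    import numpy as np
    from mindspore import Tensor
    from msadapter.pytorch.nn import AdaptiveMaxPool3d

    net = AdaptiveMaxPool3d((3, None, 5))
    x = Tensor(np.random.randn(1, 2, 4, 6, 8).astype(np.float32))
    out = net(x)
    assert out.shape == (1, 2, 3, 6, 5)  # the None dim keeps the input size 6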
@@ -4,6 +4,7 @@ import torch
 from mindspore import Tensor
 from msadapter.pytorch.nn import AdaptiveAvgPool1d, AdaptiveAvgPool2d, AdaptiveAvgPool3d
 from msadapter.pytorch.nn import AdaptiveMaxPool1d, AdaptiveMaxPool2d, AdaptiveMaxPool3d
+from msadapter.utils import is_under_ascend_context
 from mindspore import context
 import mindspore as ms
 
 context.set_context(mode=ms.GRAPH_MODE)
@@ -278,6 +279,9 @@ def test_adaptivemaxpool3d_compare1():
     torch_output = torch_net(torch_input)
     print(ms_output.shape, torch_output.shape)
     assert np.allclose(ms_output.shape, torch_output.shape)
+    if not is_under_ascend_context():
+        assert np.allclose(ms_output.numpy(), torch_output.numpy())
 
 
 def test_adaptivemaxpool3d_compare2():
     ms_net = AdaptiveMaxPool3d(3)
@@ -291,6 +295,8 @@ def test_adaptivemaxpool3d_compare2():
     torch_output = torch_net(torch_input)
     print(ms_output.shape, torch_output.shape)
     assert np.allclose(ms_output.shape, torch_output.shape)
+    if not is_under_ascend_context():
+        assert np.allclose(ms_output.numpy(), torch_output.numpy())
 
 
 def test_adaptivemaxpool3d_compare3():
     ms_net = AdaptiveMaxPool3d(3)
@@ -304,6 +310,8 @@ def test_adaptivemaxpool3d_compare3():
     torch_output = torch_net(torch_input)
     print(ms_output.shape, torch_output.shape)
     assert np.allclose(ms_output.shape, torch_output.shape)
+    if not is_under_ascend_context():
+        assert np.allclose(ms_output.numpy(), torch_output.numpy())
 
 
 def test_adaptivemaxpool3d_compare4():
     ms_net = AdaptiveMaxPool3d((3, None, 5))
@@ -317,6 +325,8 @@ def test_adaptivemaxpool3d_compare4():
     torch_output = torch_net(torch_input)
     print(ms_output.shape, torch_output.shape)
     assert np.allclose(ms_output.shape, torch_output.shape)
+    if not is_under_ascend_context():
+        assert np.allclose(ms_output.numpy(), torch_output.numpy())
 
 
 def test_adaptivemaxpool3d_compare5():
     ms_net = AdaptiveMaxPool3d((None, None, 5))
@@ -330,6 +340,8 @@ def test_adaptivemaxpool3d_compare5():
     torch_output = torch_net(torch_input)
     print(ms_output.shape, torch_output.shape)
     assert np.allclose(ms_output.shape, torch_output.shape)
+    if not is_under_ascend_context():
+        assert np.allclose(ms_output.numpy(), torch_output.numpy())
 
 
 if __name__ == '__main__':
     test_adaptiveavgpool2d_compare1()
@@ -5,6 +5,7 @@ from mindspore import Tensor
 from msadapter.pytorch.nn import MaxPool1d, MaxPool2d, MaxPool3d, AvgPool1d, AvgPool2d, AvgPool3d, \
     AdaptiveAvgPool1d, AdaptiveAvgPool2d, AdaptiveAvgPool3d, AdaptiveMaxPool1d, AdaptiveMaxPool2d, AdaptiveMaxPool3d, \
     LPPool1d, LPPool2d, FractionalMaxPool2d, FractionalMaxPool3d
+from msadapter.utils import is_under_ascend_context
 import mindspore as ms
 
 ms.context.set_context(mode=ms.GRAPH_MODE)
@@ -305,7 +306,8 @@ def test_adaptive_maxpool3d_compare1():
     ms_output = ms_net(ms_input)
     assert (torch_output.shape == ms_output.shape)
-    # TODO: assert np.allclose(ms_output.asnumpy(), torch_output.numpy())
+    if not is_under_ascend_context():
+        assert np.allclose(ms_output.asnumpy(), torch_output.numpy())
 
 
 def test_adaptive_avgpool1d_compare1():