@@ -3,6 +3,7 @@
"""Functional interface"""
import math
import warnings
from typing import Iterable
from functools import lru_cache
import numpy as np
import mindspore as ms
@@ -12,7 +13,7 @@ from mindspore.ops.operations.nn_ops import TripletMarginLoss as TripletMarginLo
from mindspore.ops._primitive_cache import _get_cache_prim
from mindspore.ops.function.math_func import _expand, _check_same_type
from msadapter.utils import unsupported_attr, _GLOBAL_LRU_CACHE_SIZE_NN
from msadapter.utils import unsupported_attr, _GLOBAL_LRU_CACHE_SIZE_NN, is_under_ascend_context
from msadapter.pytorch.tensor import Tensor, cast_to_ms_tensor, cast_to_adapter_tensor
from msadapter.pytorch.common._inner import _inplace_assign_pynative
from msadapter.pytorch.common.dtype import all_int_type
@@ -165,9 +166,65 @@ def adaptive_max_pool2d(input, output_size, return_indices=False):
output = ms.ops.adaptive_max_pool2d(input, output_size, return_indices)
return cast_to_adapter_tensor(output)
@constexpr
@lru_cache(_GLOBAL_LRU_CACHE_SIZE_NN)
def _get_adaptive_max_pool3d_output_size(input_shape, output_size):
    """Resolve the target (D, H, W) for adaptive_max_pool3d.

    A scalar `output_size` is broadcast to all three spatial dims; any
    entry that is None falls back to the corresponding input dimension.
    `input_shape` is expected to be 5-D (NCDHW) — the unpack below
    enforces that.
    """
    if not isinstance(output_size, Iterable):
        output_size = [output_size, ] * 3
    _, _, d, h, w = input_shape
    # None means "keep the input size" for that dimension.
    out_d = d if output_size[0] is None else output_size[0]
    out_h = h if output_size[1] is None else output_size[1]
    out_w = w if output_size[2] is None else output_size[2]
    return out_d, out_h, out_w
@constexpr
@lru_cache(_GLOBAL_LRU_CACHE_SIZE_NN)
def _get_adaptive_max_pool3d_stride(input_shape, output_size):
    """Derive MaxPool3D kernel/stride pairs that produce `output_size`.

    Per spatial dim: stride = floor(in / out) and
    kernel = in - (out - 1) * stride, so a valid-padded pool emits
    exactly `out` elements along that dim.
    """
    _, _, d, h, w = input_shape
    out_d, out_h, out_w = output_size
    strides = (d // out_d, h // out_h, w // out_w)
    kernels = tuple(dim - (out - 1) * stride
                    for dim, out, stride in zip((d, h, w), output_size, strides))
    # (kernel_d, kernel_h, kernel_w, stride_d, stride_h, stride_w)
    return kernels + strides
def adaptive_max_pool3d(input, output_size, return_indices=False):
    """Apply 3-D adaptive max pooling over an NCDHW input.

    Args:
        input: 5-D tensor (N, C, D, H, W) — TODO confirm; the shape helper
            unpacks exactly five dims.
        output_size: target (D, H, W). A scalar broadcasts to all three
            dims; a None entry keeps the corresponding input dimension.
        return_indices: when True, also return the argmax indices.
            Not supported on Ascend (raises NotImplementedError there).

    Returns:
        The pooled tensor (and the indices tensor when return_indices=True),
        cast back to an adapter tensor.
    """
    input = cast_to_ms_tensor(input)
    input_shape = ms.ops.shape(input)
    _output_size = _get_adaptive_max_pool3d_output_size(input_shape, output_size)
    # NOTE: do not call ms.ops.adaptive_max_pool3d before the backend check —
    # on Ascend it is unsupported, and on other backends the result would
    # just be discarded and recomputed below.
    if is_under_ascend_context():
        # TODO: Ascend not support ms.ops.adaptive_max_pool3d, use MaxPool3D instead
        # MaxPool3D result is not the same as adaptive_max_pool3d, but the shape.
        # Implement below do not affect the converge of training.
        if return_indices:
            raise NotImplementedError("For adaptive_max_pool3d, return_indices is not supported yet.")
        kernel_d, kernel_h, kernel_w, stride_d, stride_h, stride_w = \
            _get_adaptive_max_pool3d_stride(input_shape, _output_size)
        max_pool = ms.ops.MaxPool3D(kernel_size=(kernel_d, kernel_h, kernel_w),
                                    strides=(stride_d, stride_h, stride_w),
                                    pad_mode="valid", data_format="NCDHW")
        output = max_pool(input)
    else:
        output = ms.ops.adaptive_max_pool3d(input, _output_size, return_indices)
    return cast_to_adapter_tensor(output)
def pad(input, pad, mode="constant", value=0):