|
- from mindspore.train.serialization import load_param_into_net, load_checkpoint
- from var_init import default_recurisive_init, KaimingNormal
- from mindspore.common import initializer as init
- import math
- import mindspore_hub as mshub
- from mindspore import Tensor
- from mindspore import nn
- from mindspore import context
- from resnet.src.resnet import resnet50
- from mindspore.train.serialization import load_checkpoint, load_param_into_net
-
-
def get_resnet50(pretrain=True):
    """Build a ResNet-50 classifier (1001 classes).

    Args:
        pretrain (bool): when True, restore weights from the local
            checkpoint file ``resnet50_ascend.ckpt``.

    Returns:
        The constructed ResNet-50 network.
    """
    net = resnet50(class_num=1001)

    if pretrain:
        ckpt = load_checkpoint('resnet50_ascend.ckpt')
        load_param_into_net(net, ckpt)

    # NOTE(review): the network is returned in training mode; the original
    # author left set_train(False) disabled.

    return net
-
-
class _DecoderBlock(nn.Cell):
    """Decoder stage: 3x3 conv + BN + ReLU, then a 2x upsampling
    transposed convolution.

    Args:
        in_channels (int): channels of the incoming feature map.
        middle_channels (int): channels after the first convolution.
        out_channels (int): channels produced by the transposed conv.
    """

    def __init__(self, in_channels, middle_channels, out_channels):
        super(_DecoderBlock, self).__init__()
        # BUG FIX: the first convolution previously took `middle_channels`
        # as its input count, silently ignoring `in_channels`. Every caller
        # in this file passes in_channels == middle_channels, so using
        # `in_channels` here preserves existing behavior while making the
        # block correct when in_channels != middle_channels.
        self.decode = nn.SequentialCell(
            nn.Conv2d(in_channels, middle_channels, kernel_size=3,
                      padding=1, pad_mode="pad", has_bias=True),
            nn.BatchNorm2d(middle_channels),
            nn.ReLU(),
            nn.Conv2dTranspose(middle_channels, out_channels, kernel_size=2,
                               stride=2, has_bias=True, pad_mode='valid'),
        )

    def construct(self, x):
        """Run the decode stack on feature map ``x``."""
        return self.decode(x)
-
-
class UCSRNet(nn.Cell):
    """U-shaped network: ResNet-50 frontend, VGG-style dilated backend,
    and a stack of upsampling decoder blocks ending in a 1x1 output conv.

    Args:
        classes (int): number of output channels of the final 1x1 conv.
        use_pretrained (bool): currently UNUSED — pretrained frontend
            weights are always loaded (see NOTE in ``__init__``).
    """

    def __init__(self, classes, use_pretrained=False):
        super(UCSRNet, self).__init__()

        # Backend
        self.relu = nn.ReLU()  # final non-negativity clamp on the output map
        self.backend_feat = [512, 512, 512, 256, 128, 64]
        self.back_in_channels = 512
        self.backend = make_layers(self.backend_feat, in_channels=self.back_in_channels)

        # Decoder: three 2x-upsampling stages followed by two 1x1 conv blocks.
        self.dec4 = _DecoderBlock(64, 64, 32)
        self.dec3 = _DecoderBlock(32, 32, 16)
        self.dec2 = _DecoderBlock(16, 16, 8)
        self.dec1 = nn.SequentialCell(
            nn.Conv2d(8, 4, kernel_size=1, has_bias=True, pad_mode='valid'),
            nn.BatchNorm2d(4),
            nn.ReLU(),
            nn.Conv2d(4, 4, kernel_size=1, has_bias=True, pad_mode='valid'),
            nn.BatchNorm2d(4),
            nn.ReLU())
        # Output layer
        self.output_layer = nn.Conv2d(4, classes, kernel_size=1, has_bias=True, pad_mode='valid')
        # Frontend. Custom init runs BEFORE the frontend is attached, so the
        # pretrained ResNet weights are not overwritten by the re-init pass.
        self.custom_init_weight()
        # NOTE(review): `use_pretrained` is ignored here; pretrained weights
        # are always loaded. Kept as-is to preserve existing behavior —
        # confirm whether `pretrain=use_pretrained` was intended.
        mod = get_resnet50(pretrain=True)
        # First five child cells of ResNet-50 serve as the feature extractor.
        self.frontend = nn.SequentialCell(list(mod.cells())[:5])

    def construct(self, x):
        """Forward pass: frontend -> backend -> decoders -> 1x1 conv -> ReLU."""
        x = self.frontend(x)  # e.g. (2, 512, 30, 30) — per original author's note
        x = self.backend(x)
        x = self.dec4(x)
        x = self.dec3(x)
        x = self.dec2(x)
        x = self.dec1(x)
        x = self.output_layer(x)
        x = self.relu(x)
        return x

    def custom_init_weight(self):
        """
        Init the weight of Conv2d and Dense in the net.

        Conv2d weights get Kaiming-normal init (fan_out, a=sqrt(5)); Dense
        weights get Normal(0.01); all biases are zeroed.
        """
        for _, cell in self.cells_and_names():
            if isinstance(cell, nn.Conv2d):
                cell.weight.set_data(init.initializer(
                    KaimingNormal(a=math.sqrt(5), mode='fan_out'),
                    cell.weight.shape, cell.weight.dtype))
                if cell.bias is not None:
                    cell.bias.set_data(init.initializer(
                        'zeros', cell.bias.shape, cell.bias.dtype))
            elif isinstance(cell, nn.Dense):
                cell.weight.set_data(init.initializer(
                    init.Normal(0.01), cell.weight.shape, cell.weight.dtype))
                if cell.bias is not None:
                    cell.bias.set_data(init.initializer(
                        'zeros', cell.bias.shape, cell.bias.dtype))
-
-
def make_layers(cfg, in_channels=3, dilation=True, dropout=True, dropout_rate=0.5):
    """Build a VGG-style convolutional stack from a configuration list.

    Each integer in ``cfg`` adds a 3x3 (optionally dilated) convolution
    followed by ReLU (and Dropout when enabled); the string 'M' adds a
    2x2 max-pooling layer instead.

    Args:
        cfg (list): sequence of output-channel counts and/or 'M' markers.
        in_channels (int): channels of the first convolution's input.
        dilation (bool): use dilation rate 2 when True, else 1.
        dropout (bool): append Dropout after each ReLU when True.
        dropout_rate (float): drop probability (keep_prob = 1 - rate).

    Returns:
        nn.SequentialCell: the assembled layer stack.
    """
    d_rate = 2 if dilation else 1
    layers = []
    for spec in cfg:
        if spec == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        layers.append(nn.Conv2d(in_channels, spec, kernel_size=3,
                                padding=d_rate, dilation=d_rate,
                                pad_mode="pad", has_bias=True))
        layers.append(nn.ReLU())
        if dropout:
            layers.append(nn.Dropout(keep_prob=1 - dropout_rate))
        in_channels = spec
    return nn.SequentialCell(*layers)
-
-
if __name__ == '__main__':
    import numpy as np
    from mindspore import Tensor

    # Smoke test: push one random batch through the network and report
    # the output shape.
    model = UCSRNet(classes=2)
    dummy = Tensor(np.random.randn(2, 3, 256, 256).astype(np.float32))
    result = model(dummy)
    print(result.shape)
|