#36 Modify LeNet network

Merged
Manson merged 1 commit from Manson/MSAdapter:mod_lenet into master 1 year ago
  1. ms_adapter/pytorch/nn/modules/conv.py (+2 -0)
  2. testing/dataset.py (+0 -36)
  3. testing/network/test_lenet.py (+67 -24)
  4. testing/torch_lenet.py (+16 -16)

ms_adapter/pytorch/nn/modules/conv.py (+2 -0)

@@ -127,6 +127,8 @@ class Conv2d(_ConvNd):
         super(Conv2d, self).__init__(
             in_channels, out_channels, kernel_size_, stride_, padding_, dilation_,
             False, _pair(0), groups, bias, padding_mode, **factory_kwargs)
+        if padding == 0:
+            self.pad_mode = 'valid'
         if isinstance(self.padding, str):
             self.pad_mode = self.padding
             self.padding = 0
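The two added lines make an explicit integer padding of 0 select MindSpore's 'valid' pad mode, matching PyTorch's Conv2d default, while string paddings keep being passed through as the pad mode. A minimal standalone sketch of the resulting mapping (the helper name is illustrative, not part of the patch, and the final branch assumes non-zero integers keep an explicit 'pad' mode, which this hunk does not show):

    # Sketch of the PyTorch-padding -> MindSpore-pad_mode translation that
    # Conv2d.__init__ now performs; map_padding is a hypothetical helper.
    def map_padding(padding):
        if padding == 0:
            return 'valid', 0      # no padding -> MindSpore 'valid' mode
        if isinstance(padding, str):
            return padding, 0      # 'same' / 'valid' pass through as pad_mode
        return 'pad', padding      # assumed: other integers use explicit 'pad'

    print(map_padding(0))       # ('valid', 0)
    print(map_padding('same'))  # ('same', 0)
    print(map_padding(3))       # ('pad', 3)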


testing/dataset.py (+0 -36)

@@ -1,36 +0,0 @@
-import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as CV
-import mindspore.dataset.transforms.c_transforms as C
-from mindspore.dataset.vision import Inter
-from mindspore.common import dtype as mstype
-
-
-def create_dataset(data_path, batch_size=32, num_parallel_workers=1):
-    """
-    create dataset for train or test
-    """
-    # define dataset
-    mnist_ds = ds.MnistDataset(data_path)
-
-    resize_height, resize_width = 32, 32
-    rescale = 1.0 / 255.0
-    rescale_nml = 1 / 0.3081
-    shift_nml = -1 * 0.1307 / 0.3081
-
-    # define map operations
-    resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR)  # Bilinear mode
-    rescale_nml_op = CV.Rescale(rescale_nml * rescale, shift_nml)
-    hwc2chw_op = CV.HWC2CHW()
-    type_cast_op = C.TypeCast(mstype.int32)
-
-    # apply map operations on images
-    mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=num_parallel_workers)
-    mnist_ds = mnist_ds.map(operations=resize_op, input_columns="image", num_parallel_workers=num_parallel_workers)
-    mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns="image", num_parallel_workers=num_parallel_workers)
-    mnist_ds = mnist_ds.map(operations=hwc2chw_op, input_columns="image", num_parallel_workers=num_parallel_workers)
-
-    # apply DatasetOps
-    mnist_ds = mnist_ds.shuffle(buffer_size=1024)
-    mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)
-
-    return mnist_ds
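The deleted helper reappears below in testing/network/test_lenet.py with the resize changed from 32x32 to 28x28; only this standalone copy goes away. One detail worth keeping in mind: the single Rescale(rescale_nml * rescale, shift_nml) op fuses the divide-by-255 rescale with mean/std normalization using the MNIST statistics 0.1307/0.3081. A quick numpy-only check of that identity:

    import numpy as np

    rescale = 1.0 / 255.0
    rescale_nml = 1 / 0.3081
    shift_nml = -1 * 0.1307 / 0.3081

    pixel = np.array([0.0, 127.0, 255.0])
    fused = pixel * (rescale_nml * rescale) + shift_nml  # one Rescale op
    two_step = (pixel * rescale - 0.1307) / 0.3081       # rescale, then normalize
    print(np.allclose(fused, two_step))                  # True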

testing/network/test_lenet.py (+67 -24)

@@ -1,7 +1,17 @@
-import ms_adapter.pytorch.nn
-
 import numpy as np
 import os
 
+from mindspore import Tensor
+import mindspore.dataset as ds
+import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.transforms.c_transforms as C
+from mindspore.dataset.vision import Inter
+from mindspore.common import dtype as mstype
+import mindspore.nn as nn
+from mindspore import context
+from mindspore.train.callback import LossMonitor, TimeMonitor
+from mindspore.train import Model
+from mindspore.nn.metrics import Accuracy
+
 import ms_adapter.pytorch.functional as F
 from ms_adapter.pytorch.nn.modules.pooling import MaxPool2d
@@ -11,18 +21,46 @@ from ms_adapter.pytorch.nn.modules.flatten import Flatten
 from ms_adapter.pytorch.nn.modules.conv import Conv2d
 from ms_adapter.pytorch.nn.modules.module import Module
 
+context.set_context(mode=context.PYNATIVE_MODE)
+
+
+def create_dataset(data_path, batch_size=32, num_parallel_workers=1):
+    """
+    create dataset for train or test
+    """
+    # define dataset
+    mnist_ds = ds.MnistDataset(data_path)
+
+    resize_height, resize_width = 28, 28
+    rescale = 1.0 / 255.0
+    rescale_nml = 1 / 0.3081
+    shift_nml = -1 * 0.1307 / 0.3081
+
+    # define map operations
+    resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR)  # Bilinear mode
+    rescale_nml_op = CV.Rescale(rescale_nml * rescale, shift_nml)
+    hwc2chw_op = CV.HWC2CHW()
+    type_cast_op = C.TypeCast(mstype.int32)
+
+    # apply map operations on images
+    mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=num_parallel_workers)
+    mnist_ds = mnist_ds.map(operations=resize_op, input_columns="image", num_parallel_workers=num_parallel_workers)
+    mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns="image", num_parallel_workers=num_parallel_workers)
+    mnist_ds = mnist_ds.map(operations=hwc2chw_op, input_columns="image", num_parallel_workers=num_parallel_workers)
+
+    # apply DatasetOps
+    mnist_ds = mnist_ds.shuffle(buffer_size=1024)
+    mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)
+    return mnist_ds
+
+
 class LeNet(Module):
     def __init__(self, num_classes=10):
         super(LeNet, self).__init__()
-        self.conv1 = Conv2d(in_channels=1, out_channels=6, kernel_size=5, stride=1, padding=0)
-        self.maxpool1 = MaxPool2d(kernel_size=2, stride=2)
-        self.relu = ReLU()
-        self.conv2 = Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1, padding=0)
+        self.conv1 = Conv2d(in_channels=1, out_channels=6, kernel_size=5, stride=1)
+        self.conv1 = Conv2d(in_channels=1, out_channels=6, kernel_size=5)
+        self.maxpool1 = MaxPool2d(kernel_size=2, stride=2)
+        self.relu = ReLU()
+        self.conv2 = Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1)
+        self.conv2 = Conv2d(in_channels=6, out_channels=16, kernel_size=5)
         self.maxpool2 = MaxPool2d(kernel_size=2, stride=2)
         self.flatten = Flatten(start_dim=1)
         self.fc1 = Linear(16*4*4, 120)
@@ -30,25 +68,30 @@ class LeNet(Module):
         self.fc3 = Linear(84, num_classes)
 
     def forward(self, x):
-        x = self.conv1(x)    # input(1, 32, 32) output(6, 28, 28)
-        x = self.relu(x)     # activation function
-        x = self.maxpool1(x) # output(6, 14, 14)
-        x = self.conv2(x)    # output(16, 10, 10)
-        x = self.relu(x)     # activation function
-        x = self.maxpool2(x) # output(16, 5, 5)
-        x = self.flatten(x)  # output(16*5*5), N is batch_size
-        x = self.fc1(x)      # output(120)
-        x = self.relu(x)     # activation function
+        x = self.conv1(x)    # input(3, 32, 32) output(1, 6, 24, 24)
+        x = self.relu(x)
+        x = self.maxpool1(x) # output(1, 6, 12, 12)
+        x = self.conv2(x)    # output(1, 16, 8, 8)
+        x = self.relu(x)
+        x = self.maxpool2(x) # output(1, 16, 4, 4)
+        x = self.flatten(x)  # output(1, 256)
+        x = self.fc1(x)      # output(1, 120)
+        x = self.relu(x)
         x = self.fc2(x)      # output(84)
-        x = self.relu(x)     # activation function
-        x = self.fc3(x)      # output(num_classes)
-        x = F.log_softmax(x, dim=1)  # compute log(softmax(x))
+        x = self.relu(x)
+        x = self.fc3(x)      # output(10)
         return x
 
 
-a = np.ones([1, 1, 28, 28], dtype=np.float32)
-input1 = Tensor(a)
+input1 = Tensor(np.ones([1, 1, 28, 28], dtype=np.float32))
 net = LeNet()
-aa = net(input1)
+# output = net(input1)
+# print(output)
 
-print(aa)
+# eg: "/Users/czr/work/ms/push_ms/mnist/train"
+ds_train = create_dataset("PATH/TO/MNIST/DATA", 32)
+net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
+net_opt = nn.Momentum(net.trainable_params(), 0.01, 0.9)
+time_cb = TimeMonitor()
+model = Model(net, net_loss, net_opt, metrics={"Accuracy": Accuracy()})
+model.train(2, ds_train, callbacks=[time_cb, LossMonitor(200)])
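The switch from 32x32 to 28x28 inputs is why fc1 is Linear(16*4*4, 120): each 5x5 valid convolution shrinks the spatial side by 4 and each 2x2/stride-2 pool halves it. A plain-Python check of the sizes quoted in the forward() comments:

    side = 28
    side = side - 5 + 1  # conv1, 5x5 kernel, no padding -> 24
    side = side // 2     # maxpool1, 2x2 stride 2        -> 12
    side = side - 5 + 1  # conv2                         -> 8
    side = side // 2     # maxpool2                      -> 4
    print(16 * side * side)  # 256 == 16*4*4, the Flatten output feeding fc1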

testing/torch_lenet.py (+16 -16)

@@ -21,26 +21,26 @@ class LeNet(nn.Module):
         self.fc3 = nn.Linear(84, num_classes)
 
     def forward(self, x):
-        x = self.conv1(x)    # input(3, 32, 32) output(16, 28, 28)
-        x = self.relu(x)     # activation function
-        x = self.maxpool1(x) # output(16, 14, 14)
-        x = self.conv2(x)    # output(32, 10, 10)
-        x = self.relu(x)     # activation function
-        x = self.maxpool2(x) # output(32, 5, 5)
-        x = self.flatten(x)  # output(32*5*5), N is batch_size
+        x = self.conv1(x)    # input(1, 1, 28, 28) output(1, 6, 24, 24)
+        x = self.relu(x)
+        x = self.maxpool1(x) # output(1, 6, 12, 12)
+        x = self.conv2(x)    # output(1, 16, 8, 8)
+        x = self.relu(x)
+        x = self.maxpool2(x) # output(1, 16, 4, 4)
+        x = self.flatten(x)  # output(1, 256)
         x = self.fc1(x)      # output(120)
-        x = self.relu(x)     # activation function
+        x = self.relu(x)
         x = self.fc2(x)      # output(84)
-        x = self.relu(x)     # activation function
-        x = self.fc3(x)      # output(num_classes)
+        x = self.relu(x)
+        x = self.fc3(x)
         x = F.log_softmax(x, dim=1)  # compute log(softmax(x))
         return x
 
 
-data = np.ones([1, 1, 32, 32], dtype=np.int64)
+data = np.ones([1, 1, 28, 28], dtype=np.int64)
 input = torch.Tensor(data)
-net = LeNet()
-#print(net(input))
+# net = LeNet()
+# print(net(input))
 
 
 def train(model, train_loader, optimizer, epoch):
@@ -52,7 +52,7 @@ def train(model, train_loader, optimizer, epoch):
         loss = F.nll_loss(pred, target)
         loss.backward()
         optimizer.step()
-        if idx % 100 == 0:
+        if idx % 200 == 0:
             print("Train Epoch: {}, iteration: {}, Loss: {}".format(epoch, idx, loss.item()))


@@ -86,7 +86,7 @@ test_dataloader = torch.utils.data.DataLoader(
     datasets.MNIST("../../MNIST_data", train=False, download=True,
                    transform=transforms.Compose([
                        transforms.ToTensor(),
-                       transforms.Normalize((0.1307,),(0.3081,))
+                       transforms.Normalize((0.1307,), (0.3081,))
                    ])), batch_size=batch_size, shuffle=True)


@@ -95,7 +95,7 @@ momentum = 0.5
 model = LeNet().to("cpu")
 optimizer = torch.optim.Adam(model.parameters())
 
-num_epoch = 2
+num_epoch = 1
 for epoch in range(num_epoch):
     train(model, train_dataloader, optimizer, epoch)
     test(model, test_dataloader)
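The script calls test(model, test_dataloader), whose body lies outside the changed hunks. For reference, a typical shape such a function could take for a log_softmax + nll_loss model (a hypothetical sketch, not this file's actual code):

    import torch
    import torch.nn.functional as F

    def test(model, test_loader):
        # hypothetical evaluation loop; mirrors the loss choice used in train()
        model.eval()
        total_loss, correct = 0.0, 0
        with torch.no_grad():
            for data, target in test_loader:
                output = model(data)
                total_loss += F.nll_loss(output, target, reduction="sum").item()
                correct += (output.argmax(dim=1) == target).sum().item()
        n = len(test_loader.dataset)
        print("Test Loss: {:.4f}, Accuracy: {:.2%}".format(total_loss / n, correct / n))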
