|
- import torch.nn as nn
- import numpy as np
- import torch
-
-
- import torch.nn.functional as F
- from torchvision import datasets, transforms
-
-
class LeNet(nn.Module):
    """LeNet-5 style CNN for 28x28 single-channel images.

    Returns log-probabilities over ``num_classes`` classes (via
    ``log_softmax``), so it pairs with ``F.nll_loss`` for training.
    """

    def __init__(self, num_classes=10):
        super().__init__()
        # Feature extractor: two conv + ReLU + 2x2 max-pool stages.
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5)
        self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.relu = nn.ReLU()
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5)
        self.maxpool2 = nn.MaxPool2d(kernel_size=2)
        self.flatten = nn.Flatten(start_dim=1)
        # Classifier head: 16 channels x 4 x 4 spatial = 256 features
        # after the conv stack (28 -> 24 -> 12 -> 8 -> 4).
        self.fc1 = nn.Linear(16 * 4 * 4, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, num_classes)

    def forward(self, x):
        """Map a (N, 1, 28, 28) batch to (N, num_classes) log-probabilities."""
        out = self.maxpool1(self.relu(self.conv1(x)))    # (N, 6, 12, 12)
        out = self.maxpool2(self.relu(self.conv2(out)))  # (N, 16, 4, 4)
        out = self.flatten(out)                          # (N, 256)
        out = self.relu(self.fc1(out))                   # (N, 120)
        out = self.relu(self.fc2(out))                   # (N, 84)
        out = self.fc3(out)                              # (N, num_classes)
        return F.log_softmax(out, dim=1)                 # log(softmax(out))
-
-
# Smoke-test input: a batch of one all-ones 28x28 grayscale image.
# Created as float32 up front (the original made int64 data that
# torch.Tensor silently cast to float32 anyway), and named
# `sample_input` so it no longer shadows the builtin `input`.
data = np.ones([1, 1, 28, 28], dtype=np.float32)
sample_input = torch.from_numpy(data)
# Quick manual check of the network:
# net = LeNet()
# print(net(sample_input))
-
-
def train(model, train_loader, optimizer, epoch):
    """Run one epoch of training on CPU.

    Args:
        model: network that returns log-probabilities (trained with NLL loss).
        train_loader: iterable yielding (data, target) mini-batches.
        optimizer: torch optimizer over ``model.parameters()``.
        epoch: epoch index, used only in the progress log line.
    """
    model.train()
    for idx, (data, target) in enumerate(train_loader):
        data, target = data.to('cpu'), target.to('cpu')
        optimizer.zero_grad()
        pred = model(data)
        # nll_loss expects log-probabilities, which the model provides.
        loss = F.nll_loss(pred, target)
        loss.backward()
        optimizer.step()
        if idx % 200 == 0:
            # Fixed typo in the log message: "iterantion" -> "iteration".
            print("Train Epoch: {}, iteration: {}, Loss: {}".format(epoch, idx, loss.item()))
-
-
- def test(model, test_loader):
- model.eval()
- total_loss = 0.
- correct = 0.
- with torch.no_grad():
- for idx, (data, target) in enumerate(test_loader):
- data, target = data.to('cpu'), target.to('cpu')
-
- output = model(data)
- total_loss += F.nll_loss(output, target, reduction="sum").item()
- pred = output.argmax(dim=1)
- correct += pred.eq(target.view_as(pred)).sum().item()
-
- total_loss /= len(test_loader.dataset)
- acc = correct / len(test_loader.dataset) * 100
- print("Test loss: {}, Accuracy: {}".format(total_loss, acc))
-
-
batch_size = 32

# Shared MNIST preprocessing: PIL image -> tensor, then normalize with the
# dataset-wide mean/std (0.1307 / 0.3081).
mnist_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])

train_dataloader = torch.utils.data.DataLoader(
    datasets.MNIST("../../MNIST_data", train=True, download=True,
                   transform=mnist_transform),
    batch_size=batch_size, shuffle=True)

test_dataloader = torch.utils.data.DataLoader(
    datasets.MNIST("../../MNIST_data", train=False, download=True,
                   transform=mnist_transform),
    batch_size=batch_size, shuffle=True)
-
-
# Training driver: LeNet on CPU, Adam with its default hyper-parameters.
# Removed the unused `lr`/`momentum` variables: they were never passed to
# the Adam optimizer below, so they only suggested a configuration that
# was not actually in effect.
model = LeNet().to("cpu")
optimizer = torch.optim.Adam(model.parameters())

num_epoch = 1
for epoch in range(num_epoch):
    train(model, train_dataloader, optimizer, epoch)
    test(model, test_dataloader)
|