|
- """
- 继续训练功能:修改训练任务时,若勾选复用上次结果,则可在新训练任务的输出路径中读取到上次结果
-
- 示例用法
-
- - 增加两个训练参数
- 'ckpt_save_name' 此次任务的输出文件名称
- 'ckpt_load_name' 上一次任务的输出文件名,用于加载上一次输出的模型文件名称,默认为空,则不读取任何文件
-
- - 训练代码中判断 'ckpt_load_name' 是否为空
- 若不为空,则使用以下路径读取: "/tmp/output/" + args.ckpt_load_name + '.pkl'
- """
-
- from model import Model
- import numpy as np
- import torch
- from torchvision.datasets import mnist
- from torch.nn import CrossEntropyLoss
- from torch.optim import SGD
- from torch.utils.data import DataLoader
- from torchvision.transforms import ToTensor
- import argparse
- import os
-
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
# The dataset location is placed under /dataset
parser.add_argument('--traindata', default="/dataset/train", help='path to train dataset')
parser.add_argument('--testdata', default="/dataset/test", help='path to test dataset')
parser.add_argument('--epoch_size', type=int, default=10, help='how much epoch to train')
parser.add_argument('--batch_size', type=int, default=256, help='how much batch_size in epoch')
# Checkpoint names: the load name resumes from a previous run's output under
# /tmp/output; the save name prefixes this run's checkpoint files.
# FIX: both help strings previously read 'pretrain model path' (copy-paste).
parser.add_argument('--ckpt_load_name', default="",
                    help='file name of a previous checkpoint to resume from (empty = train from scratch)')
parser.add_argument('--ckpt_save_name', default="checkpoint",
                    help='file name prefix for checkpoints saved by this run')

# Globals shared by train()/test()/main()
WORKERS = 0  # number of DataLoader worker threads
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = Model().to(device)  # Model comes from the project-local `model` module
optimizer = SGD(model.parameters(), lr=1e-1)
cost = CrossEntropyLoss()
-
# Model training
def train(model, train_loader, epoch):
    """Train `model` for one epoch over `train_loader` and print the mean loss.

    Uses the module-level `optimizer`, `cost` and `device`.
    `epoch` is only used for the progress printout.
    """
    model.train()
    train_loss = 0.0
    num_batches = 0
    for x, y in train_loader:
        x = x.to(device)
        y = y.to(device)
        optimizer.zero_grad()
        y_hat = model(x)
        loss = cost(y_hat, y)
        loss.backward()
        optimizer.step()
        # FIX: accumulate the detached scalar. `train_loss += loss` kept the
        # loss *tensor* (and its whole autograd graph) alive for every batch,
        # leaking memory across the epoch.
        train_loss += loss.item()
        num_batches += 1
    # Guard against an empty loader (the original raised NameError here).
    loss_mean = train_loss / max(num_batches, 1)
    print('Train Epoch: {}\t Loss: {:.6f}'.format(epoch, loss_mean))
-
# Model evaluation
def test(model, test_loader, test_data):
    """Evaluate `model` on `test_loader`; print mean loss and accuracy.

    `test_data` is only used for its length (total sample count).
    Uses the module-level `cost` and `device`. Runs under no_grad.
    """
    model.eval()
    test_loss = 0.0
    correct = 0
    with torch.no_grad():
        for x, y in test_loader:
            x = x.to(device)
            y = y.to(device)
            # FIX: removed the stray optimizer.zero_grad() — evaluation must
            # not touch the optimizer, and no gradients exist under no_grad.
            y_hat = model(x)
            test_loss += cost(y_hat, y).item()
            pred = y_hat.max(1, keepdim=True)[1]
            correct += pred.eq(y.view_as(pred)).sum().item()
    # FIX: divide by the batch count instead of the leaked loop index, which
    # raised NameError when the loader was empty.
    test_loss /= max(len(test_loader), 1)
    print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_data), 100. * correct / len(test_data)))
-
def main():
    """Run the training loop, optionally resuming from a saved checkpoint.

    If --ckpt_load_name is non-empty, restores model/optimizer/epoch from
    /tmp/output/<name>.pkl and continues from the next epoch; otherwise
    trains from scratch. Saves a checkpoint after every epoch and triggers
    the Qizhi uploader on /tmp/output.
    """
    # If a saved model exists, load it and continue training from it.
    if args.ckpt_load_name != '':
        load_path = "/tmp/output/" + args.ckpt_load_name + '.pkl'
        # BUG FIX: the original called torch.load(args.ckpt_url) — an
        # attribute that is never defined — instead of the path just built,
        # so resuming always crashed with AttributeError.
        checkpoint = torch.load(load_path)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_epoch = checkpoint['epoch']
        print('加载 epoch {} 权重成功!'.format(start_epoch))
    else:
        start_epoch = 0
        print('无保存模型,将从头开始训练!')

    for epoch in range(start_epoch + 1, epochs):
        train(model, train_loader, epoch)
        test(model, test_loader, test_dataset)
        # Save model/optimizer/epoch together so a later run can resume.
        state = {'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'epoch': epoch}
        torch.save(state, '/tmp/output/{}_{}.pkl'.format(args.ckpt_save_name, epoch))
        # After calling uploader_for_gpu, after each epoch training, the result
        # file under /tmp/output will be sent back to Qizhi.
        os.system("cd /tmp/script_for_grampus/ &&./uploader_for_gpu " + "/tmp/output/")
-
-
if __name__ == '__main__':
    # parse_known_args tolerates extra platform-injected CLI flags.
    args, unknown = parser.parse_known_args()
    # log output
    print('cuda is available:{}'.format(torch.cuda.is_available()))
    # Rebinds the module-level `device` (same value as computed at import time).
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    batch_size = args.batch_size
    epochs = args.epoch_size
    # download=False: the datasets must already exist at the given roots
    # (mounted under /dataset by the platform).
    train_dataset = mnist.MNIST(root=args.traindata, train=True, transform=ToTensor(),download=False)
    test_dataset = mnist.MNIST(root=args.testdata, train=False, transform=ToTensor(),download=False)
    # These module-level names are read directly by main()/train()/test().
    train_loader = DataLoader(train_dataset, batch_size=batch_size)
    test_loader = DataLoader(test_dataset, batch_size=batch_size)
    main()
-
|