|
- import numpy as np
- from scipy.signal import resample
- from torch.autograd import Variable
- import torch
- import torch.nn as nn
- from torch.utils.data import Dataset, ConcatDataset, DataLoader
- import glob
- import argparse
- import time
- import torchsummary
- import os
- # from model.SHMnet import *
- # from model.VGG_16 import *
- # from model.ResNet import *
- from data.labdata import *
- from model.mcftransformer import *
-
-
-
# Command-line configuration for training the MCF-transformer classifier.
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', type=int, default=1000, help='number of epochs of training')
parser.add_argument('--batch_size', type=int, default=32, help='size of the batches')
parser.add_argument('--lr', type=float, default=0.0001, help='adam: learning rate')
parser.add_argument('--b1', type=float, default=0.9, help='adam: decay of first order momentum of gradient')
# Fixed: b1/b2 help text was identical; b2 controls the *second* order moment.
parser.add_argument('--b2', type=float, default=0.999, help='adam: decay of second order momentum of gradient')
# Fixed typo: "structral" -> "structural".
parser.add_argument('--num_class', type=int, default=10, help='total structural conditions')
parser.add_argument('--num_repeats', type=int, default=10, help='number of repeated tests for training')
parser.add_argument('--path', type=str, default='/tmp/dataset', help='path of training data')
parser.add_argument('--out_path', type=str, default='/tmp/output', help='path of output')
parser.add_argument('--d_model', type=int, default=100, help='Input dimension')
parser.add_argument('--d_ff', type=int, default=2048, help='Number of neurons in the feedforward layer')
parser.add_argument('--d_k', type=int, default=64, help='K matrix dimension')
parser.add_argument('--d_v', type=int, default=64, help='V matrix dimension')
parser.add_argument('--n_layers', type=int, default=1, help='number of layers')
parser.add_argument('--n_heads', type=int, default=8, help='number of heads')
opt = parser.parse_args()
-
# Hardware flag plus convenient module-level aliases for the parsed options.
cuda = torch.cuda.is_available()

num_class = opt.num_class
num_repeats = opt.num_repeats
d_model = opt.d_model
d_ff = opt.d_ff
d_k = opt.d_k
d_v = opt.d_v
n_layers = opt.n_layers
n_heads = opt.n_heads

# Running sample/hit counters and per-epoch accuracy histories
# (acc = training accuracy, acc1 = testing accuracy).
total = 0
correct = 0
acc = []
acc1 = []
-
### SHMnet #####
#model=SHMnet(num_class)


### Vgg16 ###
#model=Vgg16(num_class)

### ResNet ###
#model=ResNet18(num_class)

# Fixed: torch.device("cuda") was hard-coded and crashed on CPU-only hosts;
# fall back to the CPU when no GPU is available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Transformer().to(device)
print(model)
-
-
-
-
# Objective and Adam optimizer configured from the CLI options.
learning_rate = opt.lr
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(
    model.parameters(),
    lr=learning_rate,
    betas=(opt.b1, opt.b2),
)

if cuda:
    model = model.cuda()
    loss = loss.cuda()

# Tensor constructors matching the selected device; used when casting batches.
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor
-
# Build one per-class dataset and merge them with a single ConcatDataset.
# The original chained ConcatDataset([previous, new]) per class starting
# from a plain [], creating an O(num_class)-deep recursive lookup chain;
# collecting the parts in a list and concatenating once is flat and clear.
train_parts = [epDataset(label, num_repeats) for label in range(num_class)]
traindataset = ConcatDataset(train_parts)

# Test set: 40 repeats per class (fixed, independent of --num_repeats).
test_parts = [eptDataset(label, 40) for label in range(num_class)]
testdataset = ConcatDataset(test_parts)

dataloader = DataLoader(traindataset, batch_size=opt.batch_size, shuffle=True)
viddataloader = DataLoader(testdataset, batch_size=opt.batch_size, shuffle=True)
# Loss / prediction bookkeeping and the checkpoint output directory.
loss_r = []          # every training batch loss, across all epochs
loss_vid = []        # every validation batch loss, across all epochs
start = time.time()
#%%
predicted1 = []      # per-batch predictions (moved to CPU)
label1 = []          # per-batch ground-truth labels (moved to CPU)
train_loss = []      # fixed: this assignment was duplicated in the original
avg_train_losses = []
avg_valid_losses = []


# Checkpoint location: <out_path>/ckpt/model.pth.
# exist_ok=True replaces the race-prone exists()/makedirs() pair.
out_dir_ckpt = os.path.join(opt.out_path, "ckpt")  # checkpoint
os.makedirs(out_dir_ckpt, exist_ok=True)
save_model_dir = os.path.join(out_dir_ckpt, "model.pth")
-
# Track the best validation accuracy across ALL epochs so the checkpoint
# keeps the best model. The original reset its threshold (min_mae=60)
# inside the epoch loop, so every epoch above 60% overwrote the file and
# the final checkpoint was merely the *last* such epoch, not the best.
best_acc = 60.0

for epoch in range(opt.n_epochs):

    print('Epoch: %d' % epoch)
    total = 0      # training samples seen this epoch
    correct = 0    # training hits this epoch
    total1 = 0     # test samples seen this epoch
    correct1 = 0   # test hits this epoch
    # Per-epoch loss buffers, so the epoch averages below reflect only this
    # epoch (the original averaged loss_r/loss_vid, which accumulate across
    # ALL epochs, so avg_*_losses mixed in the entire history).
    epoch_train_losses = []
    epoch_valid_losses = []

    # ---------------- training ----------------
    model.train()
    for i, (sdata, label) in enumerate(dataloader):
        batch_size = sdata.shape[0]
        sdata = Variable(sdata.type(FloatTensor))
        # Multiplicative Gaussian noise (mean 1, std 0.1) as augmentation.
        noise = Variable(torch.from_numpy(np.random.normal(1, 0.1, sdata.shape))).type(FloatTensor)
        x = torch.mul(sdata, noise).view(sdata.size(0), 1, sdata.size(1))
        label = Variable(label.type(LongTensor))

        optimizer.zero_grad()
        output = model(x)
        loss1 = loss(output, label)
        loss1.backward()
        optimizer.step()

        # Fixed: reuse the forward pass above instead of running model(x)
        # a second time just to read predictions.
        _, predicted = torch.max(output.data, 1)
        correct += (predicted == label).sum()
        total += batch_size
        print(i, loss1.item())
        predicted1.append(predicted.cpu())
        label1.append(label.cpu())
        loss_r.append(loss1.item())
        epoch_train_losses.append(loss1.item())

    print('Accuracy of training data is: %d %%' % (100 * correct / total))
    acc.append(100 * correct / total)

    # ---------------- validation ----------------
    # Fixed: evaluate in eval mode and without gradient tracking; the
    # original kept the model in train mode and built graphs during test.
    model.eval()
    with torch.no_grad():
        for i, (sdata, label) in enumerate(viddataloader):
            batch_size = sdata.shape[0]
            sdata = Variable(sdata.type(FloatTensor))
            x = sdata.view(sdata.size(0), 1, sdata.size(1))
            label = Variable(label.type(LongTensor))

            # Fixed: single forward pass serves both the loss and the
            # predictions (the original ran model(x) twice per batch).
            output = model(x)
            loss2 = loss(output, label)
            _, predicted = torch.max(output.data, 1)
            correct1 += (predicted == label).sum()
            total1 += batch_size
            loss_vid.append(loss2.item())
            epoch_valid_losses.append(loss2.item())

    print('Accuracy of testing data is: %d %%' % (100 * correct1 / total1))
    test_acc = 100 * (correct1 / total1).item()
    if test_acc > best_acc:
        best_acc = test_acc
        if os.path.exists(save_model_dir):
            os.remove(save_model_dir)
            print('存在文件,删除文件成功!!')
        print("save model")
        torch.save(model.state_dict(), save_model_dir)

    acc1.append(100 * correct1 / total1)
    # Epoch-level averages (this epoch only — see the buffers above).
    train_loss = np.average(epoch_train_losses)
    valid_loss = np.average(epoch_valid_losses)
    avg_train_losses.append(train_loss)
    avg_valid_losses.append(valid_loss)

    epoch_len = len(str(opt.n_epochs))
    print_msg = (f'[{epoch:>{epoch_len}}/{opt.n_epochs:>{epoch_len}}] '
                 + f'train_loss: {train_loss:.5f} '
                 + f'valid_loss: {valid_loss:.5f}')
    print(print_msg)

end = time.time()
print(end - start)
#%%
- #%%
|