|
- import numpy as np
- from scipy.signal import resample
- import matplotlib.pyplot as plt
- from torch.autograd import Variable
- import torch
- import torch.nn as nn
- from torch.utils.data import Dataset, ConcatDataset, DataLoader
- import glob
- import argparse
- import time
- #from pytorchtools import EarlyStopping
- import torchsummary
- from matplotlib.pyplot import MultipleLocator
- from model.Vgg16 import *
- import os
-
def prep(dataset, norm_size=5000):
    """Resample a 1-D signal to a fixed length.

    Parameters
    ----------
    dataset : array_like
        Raw time-series sample as loaded from disk.
    norm_size : int, optional
        Target number of samples after resampling. Defaults to 5000,
        the input length the downstream networks were sized for.

    Returns
    -------
    numpy.ndarray
        The signal resampled to ``norm_size`` points.
    """
    # scipy.signal.resample is FFT-based (assumes the signal is periodic);
    # adequate here since every recording is normalised to one length.
    return resample(dataset, norm_size)
-
class epDataset(Dataset):
    """Training split: one resampled sensor recording per repeat of a label.

    Recordings are loaded lazily from ``/dataset/data-target`` using the
    naming scheme ``X_<label>_1_<repeat>.txt`` with repeats numbered from 1.
    """

    def __init__(self, label, num_repeats, Normalise=True):
        # NOTE(review): `Normalise` is stored on self.transform but never
        # applied in __getitem__ — looks like a leftover hook; confirm
        # before removing.
        self.list_IDs = np.arange(num_repeats)
        self.label = label
        self.transform = Normalise

    def __len__(self):
        # One item per repeated test of this structural condition.
        return self.list_IDs.size

    def __getitem__(self, idx):
        path = '/dataset/data-target/X_' + str(self.label) + '_1_' + str(idx + 1) + '.txt'
        raw = np.loadtxt(path)
        return prep(raw), self.label
-
-
class eptDataset(Dataset):
    """Test split: held-out repeats of a label, starting at repeat 9.

    Same on-disk layout as ``epDataset`` but the repeat index is offset by
    9 so it reads the recordings the training split never touches.
    """

    def __init__(self, label, num_repeats, Normalise=True):
        # NOTE(review): `Normalise` is stored but never used, mirroring
        # epDataset — presumably a leftover hook; confirm before removing.
        self.list_IDs = np.arange(num_repeats)
        self.label = label
        self.transform = Normalise

    def __len__(self):
        # One item per held-out repeat.
        return self.list_IDs.size

    def __getitem__(self, idx):
        path = '/dataset/data-target/X_' + str(self.label) + '_1_' + str(idx + 9) + '.txt'
        raw = np.loadtxt(path)
        return prep(raw), self.label
-
-
class SHMnet(nn.Module):
    """1-D CNN for classifying structural-condition signals.

    Five Conv1d/ReLU/MaxPool1d stages feed a three-layer fully-connected
    head. The hard-coded flatten size ``1024 * 153`` corresponds to a
    5000-sample input (the `prep` resample length); other input lengths
    will fail at the first Linear layer.

    NOTE(review): this class is defined but never instantiated in this
    script — the model actually fine-tuned below is Vgg16_net.
    """
    def __init__(self,num_classes=37):
        super(SHMnet,self).__init__()
        # Feature extractor: channels widen 1->16->64->256->512->1024 while
        # each MaxPool1d(kernel_size=3, stride=2) roughly halves the length.
        self.features = nn.Sequential(
            nn.Conv1d(1, 16, kernel_size=7),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=3, stride=2),
            nn.Conv1d(16, 64, kernel_size=5),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=3, stride=2),
            nn.Conv1d(64, 256, kernel_size=3),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=3, stride=2),
            nn.Conv1d(256, 512, kernel_size=3),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=3, stride=2),
            nn.Conv1d(512, 1024, kernel_size=3),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=3, stride=2),
        )
        # Classifier head; Dropout() uses the default p=0.5.
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(1024 * 153, 2048),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(2048, 1024),
            nn.ReLU(inplace=True),
            nn.Linear(1024, num_classes),
        )

    def forward(self, x):
        # x: (batch, 1, 5000) -> logits of shape (batch, num_classes).
        x = self.features(x)
        #print ( x.shape)
        x = x.view(x.size(0), -1)  # flatten all feature maps per sample
        x = self.classifier(x)
        return x
-
-
# Command-line configuration for the transfer-learning run.
# Fixes two help-text defects: 'structral' typo, and --b2 incorrectly
# described as first-order momentum (Adam's beta2 is the second moment).
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', type=int, default=1000, help='number of epochs of training')
parser.add_argument('--batch_size', type=int, default=32, help='size of the batches')
parser.add_argument('--lr', type=float, default=0.0001, help='adam: learning rate')
parser.add_argument('--b1', type=float, default=0.9, help='adam: decay of first order momentum of gradient')
parser.add_argument('--b2', type=float, default=0.999, help='adam: decay of second order momentum of gradient')
parser.add_argument('--num_class', type=int, default=11, help='total structural conditions')
parser.add_argument('--num_repeats', type=int, default=8, help='number of repeated tests for training')
parser.add_argument('--path', type=str, default='/dataset/baonengxin/data-target', help='path of training data')
parser.add_argument('--out_path', type=str, default='/model', help='path of output')
opt = parser.parse_args()
-
cuda = True if torch.cuda.is_available() else False
num_class = opt.num_class
num_repeats = opt.num_repeats
# Running counters (re-initialised at the start of every epoch below).
total = 0
correct = 0
acc = []   # per-epoch training accuracy
acc1 = []  # per-epoch testing accuracy

# Load the source-domain model: a 37-class Vgg16_net pre-trained elsewhere.
model=Vgg16_net(37)
model.load_state_dict(torch.load('/dataset/model.pth'))
#print(list(model.parameters()))

#%% transfer strategies 1
# Replace the final classifier layer: drop the 37-way output head and
# append a fresh Linear(256, 11) for the 11 target-domain conditions.
# NOTE(review): assumes Vgg16_net exposes `fc` (a Sequential whose last
# layer takes 256 features) and a `conv` submodule — confirm against
# model/Vgg16.py.
model.fc = nn.Sequential(*list(model.fc.children())[:-1])
model.fc.add_module("6", nn.Linear(256, 11))
# Strategy 1: fine-tune everything — backbone and new head both trainable.
for p in model.conv.parameters():
    p.requires_grad=True
for p in model.fc.parameters():
    p.requires_grad=True
#print(model)
print(list(model.parameters()))

learning_rate = opt.lr
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, betas=(opt.b1, opt.b2))


if cuda:
    model = model.cuda()
    loss = loss.cuda()
# Tensor constructors matching the selected device; used to move batches.
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor
-
# Build the training set: one epDataset per structural condition, joined
# into a single indexable dataset. (Previously this folded an empty list
# into nested ConcatDatasets one label at a time — same item order, but
# O(num_class)-deep wrappers and a plain list masquerading as a Dataset.)
traindataset = ConcatDataset([epDataset(label, num_repeats) for label in range(num_class)])
# Test set: 2 held-out repeats per condition (eptDataset starts at repeat 9).
testdataset = ConcatDataset([eptDataset(label, 2) for label in range(num_class)])

dataloader = DataLoader(traindataset, batch_size=opt.batch_size, shuffle=True)
viddataloader = DataLoader(testdataset, batch_size=opt.batch_size, shuffle=True)

# Per-batch loss histories, accumulated across ALL epochs.
loss_r = []
loss_vid = []
start = time.time()
#%%
# Per-batch prediction/label history (kept on CPU) and per-epoch averages.
predicted1 = []
label1 = []
train_loss = []
avg_train_losses = []
avg_valid_losses = []
#early_stopping = EarlyStopping(patience=5, verbose=True,path='transfer_model_params_SHMnet2.0.pt')

# Checkpoint directory; exist_ok avoids the racy exists()/makedirs() pair.
out_dir_ckpt = os.path.join(opt.out_path, "ckpt")  # checkpoint
os.makedirs(out_dir_ckpt, exist_ok=True)
save_model_dir = os.path.join(out_dir_ckpt, "model.pth")
-
for epoch in range(opt.n_epochs):

    print('Epoch: %d' % epoch)
    total = 0
    correct = 0
    total1 = 0
    correct1 = 0
    # Epoch-local loss buffers: previously np.average ran over the entire
    # loss_r / loss_vid history, so the reported "epoch" loss smeared all
    # earlier epochs into every later report.
    epoch_train_losses = []
    epoch_valid_losses = []

    # ---- training ----
    model.train()  # make sure dropout is active for training passes
    for i, (sdata, label) in enumerate(dataloader):
        batch_size = sdata.shape[0]
        sdata = Variable(sdata.type(FloatTensor))
        # Multiplicative Gaussian noise (mean 1, std 0.1) as augmentation.
        noise = Variable(torch.from_numpy(np.random.normal(1, 0.1, sdata.shape))).type(FloatTensor)
        x = torch.mul(sdata, noise).view(sdata.size(0), 1, sdata.size(1))
        label = Variable(label.type(LongTensor))
        optimizer.zero_grad()
        output = model(x)
        loss1 = loss(output, label)
        loss1.backward()
        optimizer.step()
        # Accuracy is measured with the just-updated weights (as before),
        # but without building an autograd graph.
        with torch.no_grad():
            _, predicted = torch.max(model(x).data, 1)
        correct += (predicted == label).sum()
        total += batch_size
        print(i, loss1.item())
        predicted1.append(predicted.cpu())
        label1.append(label.cpu())
        loss_r.append(loss1.item())
        epoch_train_losses.append(loss1.item())
    print('Accuracy of training data is: %d %%' % (100 * correct / total))
    acc.append(100 * correct / total)

    # ---- validation ----
    # Fix: the original evaluated with dropout still enabled (no eval())
    # and ran two forward passes per batch; eval mode makes the single
    # forward deterministic so loss and predictions can share it.
    model.eval()
    with torch.no_grad():
        for i, (sdata, label) in enumerate(viddataloader):
            batch_size = sdata.shape[0]
            sdata = Variable(sdata.type(FloatTensor))
            x = sdata.view(sdata.size(0), 1, sdata.size(1))
            label = Variable(label.type(LongTensor))
            output = model(x)
            loss2 = loss(output, label)
            _, predicted = torch.max(output.data, 1)
            correct1 += (predicted == label).sum()
            total1 += batch_size
            loss_vid.append(loss2.item())
            epoch_valid_losses.append(loss2.item())
    print('Accuracy of testing data is: %d %%' % (100 * correct1 / total1))
    acc1.append(100 * correct1 / total1)

    # Average only this epoch's losses (see fix note above).
    train_loss = np.average(epoch_train_losses)
    valid_loss = np.average(epoch_valid_losses)
    avg_train_losses.append(train_loss)
    avg_valid_losses.append(valid_loss)

    epoch_len = len(str(opt.n_epochs))
    print_msg = (f'[{epoch:>{epoch_len}}/{opt.n_epochs:>{epoch_len}}] '
                 + f'train_loss: {train_loss:.5f} '
                 + f'valid_loss: {valid_loss:.5f}')
    print(print_msg)
    #early_stopping(valid_loss, model)

    #if early_stopping.early_stop:
        #print("Early stopping")
        #break

#model.load_state_dict(torch.load('transfer_model_params_SHMnet2.0.pt'))
end = time.time()
print(end - start)
#%%
torch.save(model.state_dict(), save_model_dir)
- #%%
- #acc_draw = []
- #acc1_draw = []
- #for i in range(1000):
- #acc_draw.append(acc[i].cpu())
- #acc1_draw.append(acc1[i].cpu())
-
- #x1 = range(0, 1000)
- #plt.plot(x1, acc1_draw, label='Testing Accuracy', linewidth=1, color='r')
- #plt.plot(x1, acc_draw, label='Training Accuracy', linewidth=1, color='g')
- #plt.xlabel('Epoch', fontsize=20)
- #plt.ylabel('Accuracy(%)', fontsize=20)
- #x_major_locator = MultipleLocator(100)
- #y_major_locator = MultipleLocator(10)
- #plt.xticks(fontsize=12)
- #plt.yticks(fontsize=12)
- #ax = plt.gca()
- #ax.xaxis.set_major_locator(x_major_locator)
- #ax.yaxis.set_major_locator(y_major_locator)
- #plt.legend(fontsize=12)
- #plt.show()
-
- #x2 = range(0, 1000)
- #plt.plot(x2, loss_r, label='Loss', linewidth=1, color='b')
- # plt.plot(x2, loss_vid, label='Loss', linewidth=1, color='b')
- # plt.xlabel('Epoch', fontsize=20)
- # plt.ylabel('Loss', fontsize=20)
- # x_major_locator = MultipleLocator(100)
- # y_major_locator = MultipleLocator(0.1)
- # plt.xticks(fontsize=12)
- # plt.yticks(fontsize=12)
- # ax = plt.gca()
- # ax.xaxis.set_major_locator(x_major_locator)
- # ax.yaxis.set_major_locator(y_major_locator)
- # plt.legend(fontsize=12)
- # plt.show()
-
-
- # np.savetxt('acc_train.txt', acc_draw)
- # np.savetxt('acc_test.txt', acc1_draw)
- # np.savetxt('loss_train.txt', loss_r)
- # np.savetxt('loss_test.txt', loss_vid)
- # #%%
- # fig, ax1 = plt.subplots()
- # x1 = range(1000)
- # color = 'tab:blue'
- # ax1.set_xlabel('Epoch')
- # ax1.set_ylabel('Accuracy(%)', color=color)
- # ax1.plot(x1, acc_draw, color=color, label='Training Accuracy')
- # ax1.plot(x1, acc1_draw, color=color,label='Testing Accuracy',linestyle='dashed')
- # ax1.set_ylim(ymin=40, ymax=100)
- # ax1.tick_params(axis='y', labelcolor=color)
- # plt.legend(loc=(0.6, 0.35))
-
- # ax2 = ax1.twinx()
- # color = 'tab:red'
- # ax2.set_ylabel('Loss', color=color)
- # ax2.plot(x1, loss_r, color=color, label='Training Loss')
- # ax2.plot(x1, loss_vid, color=color,label='Testing Loss',linestyle='dashed')
- # ax2.tick_params(axis='y', labelcolor=color)
- # plt.legend(loc=(0.6, 0.20))
- # fig.tight_layout()
- # plt.show()
|