|
- import torch
- import torch.nn as nn
- from scipy import signal
- import numpy as np
- from data import get_data, prep
- from torch.autograd import Variable
- # from model.SHMnet import *
- # from model.LSTM import *
- # from model.ResNet import *
- from model.Bert.SHM_bert import *
- from model.Vgg16 import *
- from dataset.SHM import *
- import glob
- import argparse
- import time
- import os
-
def get_parser():
    """Build the command-line argument parser for training.

    Returns:
        argparse.ArgumentParser: parser exposing training hyper-parameters
        (epochs, batch size, Adam settings), dataset layout options, and
        noise-augmentation parameters. All options have defaults, so
        ``parser.parse_args([])`` yields a fully usable config.
    """
    parser = argparse.ArgumentParser()
    # Optimization hyper-parameters.
    parser.add_argument('--n_epochs', type=int, default=600, help='number of epochs of training')
    parser.add_argument('--batch_size', type=int, default=60, help='size of the batches')
    parser.add_argument('--lr', type=float, default=0.0001, help='adam: learning rate')
    parser.add_argument('--b1', type=float, default=0.9, help='adam: decay of first order momentum of gradient')
    # Fixed: b2 is Adam's SECOND-order moment decay (beta2), not first order.
    parser.add_argument('--b2', type=float, default=0.999, help='adam: decay of second order momentum of gradient')
    # Dataset layout: one class per structural condition, with several
    # repeated test recordings per condition split into train/validation.
    parser.add_argument('--num_class', type=int, default=83, help='total structural conditions')
    parser.add_argument('--num_train', type=int, default=6, help='number of repeated tests for training')
    parser.add_argument('--num_validate', type=int, default=4, help='number of repeated tests for validation')
    # parser.add_argument('--num_test', type=int, default=4, help='number of repeated tests for testing')
    parser.add_argument('--data_path', type=str, default='/tmp/dataset', help='path of data')
    parser.add_argument('--out_path', type=str, default='/tmp/output', help='path of output')
    parser.add_argument('--sensor', type=str, default='Acceleration.csv', help='Acceleration_1~Acceleration_7')
    # Multiplicative Gaussian noise augmentation applied to training batches.
    parser.add_argument('--u', type=float, default=1.0, help='mean value for noise')
    parser.add_argument('--std', type=float, default=0.2, help='standard deviation for noise')
    # parser.add_argument('--model_name', type=str, default='3storeys/Three_storeys', help="Four_storeys or Three_storeys")
    parser.add_argument('--model_name', type=str, default='Four_storeys', help="Four_storeys or Three_storeys")
    return parser
-
-
def main(opt):
    """Train a classifier on SHM (structural health monitoring) sensor data.

    Workflow: build model + Adam optimizer, assemble per-class train/validation
    datasets, train for ``opt.n_epochs`` epochs with multiplicative Gaussian
    noise augmentation, checkpoint the best validation accuracy seen after
    epoch 100, then reload the checkpoint and report final train/validation
    accuracy plus a summary of which scenarios were confused with which.

    Args:
        opt: argparse.Namespace produced by ``get_parser().parse_args()``.
    """
    cuda = True if torch.cuda.is_available() else False
    num_class = opt.num_class
    data_path = opt.data_path
    sensor = opt.sensor
    total=0
    correct=0
    # acc=[]
    # acc1=[]

    #### SHMnet ########
    # model = SHMnet(num_class)

    #### Transformer ########
    # Active architecture: BERT-style transformer from model.Bert.SHM_bert.
    # The alternatives below are kept for quick swapping during experiments.
    model = Bert_base()
    # model = hubert_large(num_class)
    # model = hubert_huge(num_class)

    #### Vgg16_net ########
    # model = Vgg16_net(num_class)

    #### ResNet ########
    # model = ResNet18(num_class)
    # model = ResNet34(num_class)
    # model = ResNet50(num_class)
    # model = ResNet101(num_class)
    # model = ResNet152(num_class)

    #### LSTM ########
    # model = LSTM_classification(num_class, seq_l=5000, input_size=1)

    learning_rate = opt.lr
    print(model)
    loss = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,betas=(opt.b1,opt.b2))
    if cuda:
        model=model.cuda()
        loss=loss.cuda()
    # Legacy tensor-type aliases used to move batches to the right device.
    # NOTE(review): Variable and torch.cuda.FloatTensor are deprecated;
    # modern code would use .to(device). Left as-is to preserve behavior.
    FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
    LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor

    # One epDataset per class label, folded together with ConcatDataset.
    # The initial [] acts as a zero-length seed dataset for the fold.
    path = os.path.join(data_path, opt.model_name)
    traindataset = []
    for label in range(num_class):
        epdataset = epDataset(label,opt.num_train,path,sensor)
        traindataset = ConcatDataset([traindataset,epdataset])
    validataset =[]
    for label in range(num_class):
        # The literal 16 is passed straight to epvDataset; presumably an
        # offset/count into the repeated tests — TODO confirm against dataset.SHM.
        epvdataset = epvDataset(label,opt.num_validate,16,path,sensor)
        validataset = ConcatDataset([validataset,epvdataset])
    # testdataset =[]
    # for label in range(num_class):
    #     eptdataset = eptDataset(label,opt.num_test,opt.num_validate,opt.num_train,path,sensor)
    #     testdataset = ConcatDataset([testdataset,eptdataset])
    dataloader = DataLoader (traindataset, batch_size=opt.batch_size, shuffle=True)
    viddataloader = DataLoader (validataset,batch_size = opt.batch_size)
    # testdataloader = DataLoader (testdataset,batch_size = opt.batch_size)
    out_dir_ckpt = os.path.join(opt.out_path, "ckpt")
    if not os.path.exists(out_dir_ckpt):
        os.makedirs(out_dir_ckpt)
    save_model_dir = os.path.join(out_dir_ckpt,"model.pth")
    loss_r=[]
    start = time.time()
    # Best-validation-accuracy threshold; a checkpoint is only written once
    # validation accuracy exceeds this (80%) AND epoch > 100.
    # NOTE(review): the name min_mae is misleading — it tracks accuracy, not MAE.
    min_mae = 80
    for epoch in range(opt.n_epochs):
        start1 = time.time()
        print('Epoch: %d' % epoch)
        total = 0
        correct = 0
        total1 = 0
        correct1 = 0
        # ---- training pass: noisy inputs, one optimizer step per batch ----
        for i, (sdata, label) in enumerate(dataloader):
            model.train()
            batch_size = sdata.shape[0]
            sdata = Variable(sdata.type(FloatTensor))
            # Multiplicative Gaussian noise N(opt.u, opt.std) as augmentation.
            noise=Variable(torch.from_numpy( np.random.normal(opt.u, opt.std, sdata.shape))).type(FloatTensor)
            # Reshape to (batch, 1 channel, sequence_length) for the model.
            x = torch.mul(sdata,noise).view(sdata.size(0),1,sdata.size(1))
            label = Variable (label.type (LongTensor))
            optimizer.zero_grad()
            output = model(x)
            loss1 = loss(output, label)
            loss1.backward()
            optimizer.step()
            # Training accuracy measured with a second forward pass in eval mode
            # (on the same noisy batch, after the weight update).
            model.eval()
            _, predicted = torch.max(model(x).data, 1)
            temp = (predicted == label).sum()
            correct += temp
            total += batch_size
            # print(i, loss1.item())
            loss_r.append(loss1.item())
        print('Accuracy of training data is: %f %%' % (100 * correct / total))
        # acc.append(100 * (correct / total).item())

        # ---- validation pass: clean (noise-free) inputs ----
        for i, (sdata, label) in enumerate(viddataloader):
            model.eval()
            batch_size = sdata.shape[0]
            sdata = Variable(sdata.type(FloatTensor))

            x = sdata.view(sdata.size(0),1,sdata.size(1))
            label = Variable (label.type (LongTensor))
            _, predicted = torch.max(model(x).data, 1)
            temp = (predicted == label).sum()
            correct1+= temp
            total1+= batch_size
        # NOTE(review): if viddataloader is empty, total1 stays 0 and this
        # divides by zero; aaa would also be unbound below. Assumes non-empty
        # validation data — confirm.
        print ('Accuracy of validating data is: %f %%' % (100 * correct1 / total1))
        aaa = 100 * (correct1 / total1).item()
        # acc1.append(aaa)
        end1 = time.time()
        print(end1 - start1)


        # Checkpoint best-so-far model (only after a 100-epoch warm-up).
        if aaa > min_mae and epoch > 100:
            min_mae = aaa
            if os.path.exists(save_model_dir):
                os.remove(save_model_dir)
                # Runtime message below is Chinese for:
                # "File exists, deleted the file successfully!!"
                print('存在文件,删除文件成功!!')
            print("save model")
            torch.save(model.state_dict(),save_model_dir)




    # ---- final evaluation using the best checkpoint ----
    total = 0
    correct = 0
    total1 = 0
    correct1 = 0
    total2 = 0
    correct2 = 0
    # NOTE(review): if validation accuracy never exceeded 80% after epoch 100,
    # no checkpoint was ever written and this load raises FileNotFoundError.
    model.load_state_dict(torch.load(save_model_dir))
    for i, (sdata, label) in enumerate(dataloader):
        model.eval()
        batch_size = sdata.shape[0]
        sdata = Variable(sdata.type(FloatTensor))

        x = sdata.view(sdata.size(0),1,sdata.size(1))
        label = Variable (label.type (LongTensor))
        _, predicted = torch.max(model(x).data, 1)
        temp = (predicted == label).sum()
        correct+= temp
        total+= batch_size
    print ('Accuracy of training data is: %f %%' % (100 * correct / total))
    # Collect every validation misclassification as (predicted, true) pairs
    # for the confusion summary printed at the end.
    result_p = []
    result_l = []
    for i, (sdata, label) in enumerate(viddataloader):
        model.eval()
        batch_size = sdata.shape[0]
        sdata = Variable(sdata.type(FloatTensor))
        # print('batch_size:',batch_size)
        x = sdata.view(sdata.size(0),1,sdata.size(1))
        label = Variable (label.type (LongTensor))
        _, predicted = torch.max(model(x).data, 1)
        temp = (predicted == label).sum()
        # print(predicted)
        # print(len(predicted))
        # print(predicted.size())
        for ii in range(len(predicted)):
            if predicted[ii] != label[ii]:
                result_p.append(predicted[ii].item())
                result_l.append(label[ii].item())
        correct1+= temp
        total1+= batch_size

    print ('Accuracy of validating data is: %f %%' % (100 * correct1 / total1))
    # for i, (sdata, label) in enumerate(testdataloader):
    #     model.eval()
    #     batch_size = sdata.shape[0]
    #     sdata = Variable(sdata.type(FloatTensor))

    #     x = sdata.view(sdata.size(0),1,sdata.size(1))
    #     label = Variable (label.type (LongTensor))
    #     _, predicted = torch.max(model(x).data, 1)
    #     temp = (predicted == label).sum()
    #     for ii in range(len(predicted)):
    #         if predicted[ii] != label[ii]:
    #             result_p.append(predicted[ii].item())
    #             result_l.append(label[ii].item())
    #     correct2+= temp
    #     total2+= batch_size
    # print ('Accuracy of testing data is: %d %%' % (100 * correct2 / total2))

    # Confusion summary: for each (true label, predicted label) pair that
    # actually occurred, count and report how often it was confused.
    error_p = list(set(result_p))
    error_l = list(set(result_l))
    for i in range(len(error_l)):
        label = error_l[i]
        for ii in range(len(error_p)):
            predict = error_p[ii]
            error_sum = 0
            for iii in range(len(result_p)):
                if (result_p[iii]==predict) and (result_l[iii]==label):
                    error_sum = error_sum + 1
            if error_sum != 0:
                print('Scenario %d was misjudged to be %d for %d times!' % (label, predict, error_sum))










    # df_acc = pd.DataFrame(np.array(acc))
    # df_acc1 = pd.DataFrame(np.array(acc1))
    # out_dir_acc = os.path.join(opt.out_path, "acc")
    # if not os.path.exists(out_dir_acc):
    #     os.makedirs(out_dir_acc)
    # save_acc_dir = os.path.join(out_dir_acc,"acc.xlsx")
    # save_acc1_dir = os.path.join(out_dir_acc,"acc1.xlsx")
    # writer = pd.ExcelWriter(save_acc_dir)
    # df_acc.to_excel(writer, 'sheet_1', float_format='%.5f', header=False, index=False)
    # writer.close()
    # writer1 = pd.ExcelWriter(save_acc1_dir)
    # df_acc1.to_excel(writer1, 'sheet_1', float_format='%.5f', header=False, index=False)
    # writer1.close()

    # Total wall-clock time for the whole run, in seconds.
    end = time.time()
    print(end - start)
-
-
# Script entry point: parse CLI arguments and launch training.
if __name__ == "__main__":
    main(get_parser().parse_args())
|