|
import argparse
import os
import time

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
from sklearn import metrics
from torch import optim
from torch.optim import lr_scheduler

from newx import ei_Net_Resnet50, ei_Net_Resnet50_base
from columbia_train import *
#from casia2_train import *
# Kept after the star import so these names shadow any same-named exports,
# matching the original import order's effective bindings.
from evaluation_precision import accuracy, AverageMeter, metrics, F_measure
from tensorboardX import SummaryWriter
-
# Training settings (command-line overridable hyper-parameters)
parser = argparse.ArgumentParser(description='PyTorch Columbia')
# Datasets are expected under /dataset  (translated from the original Chinese comment)
parser.add_argument('--epochs', type=int, default=1)
parser.add_argument('--train_batch', type=int, default=10)
parser.add_argument('--val_batch', type=int, default=10)

# Use the first CUDA device when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-
def adjust_learning_rate(optimizer, lr, gamma):
    """Multiply the learning rate by ``gamma`` and apply it everywhere.

    Writes the decayed rate into every param group of ``optimizer`` and
    returns the new learning rate so the caller can track it.
    """
    new_lr = lr * gamma
    for group in optimizer.param_groups:
        group['lr'] = new_lr
    return new_lr
-
# Best validation metrics seen so far; mutated as globals inside train_net.
best_auc = 0.
best_loss = 3.  # initial "worst" loss for the LR-decay patience check
def train_net(net,
              epochs=5,
              lr=1e-2,
              gpu=False,
              save=None,
              train_batch=16,
              val_batch=16,
              ckpt=None):
    """Train `net` on the Columbia split, validate each epoch, checkpoint the best.

    Args:
        net: segmentation network to optimize (raw logits; loss/metrics use sigmoid).
        epochs: number of epochs to run.
        lr: initial Adam learning rate; decayed x0.1 after >8 epochs without a
            validation-loss improvement, floored at 1e-7.
        gpu: if True, move the model and criterion to the module-level `device`.
        save: directory where '.pt' checkpoints are written.
        train_batch: training mini-batch size.
        val_batch: validation mini-batch size.
        ckpt: directory for TensorBoard event files.

    Side effects: updates module-level `best_auc` / `best_loss`, writes
    checkpoints under `save` and TensorBoard scalars under `ckpt`.
    """

    print('''
    Starting training:
        Epochs: {}, Learning rate: {}
        Train Batch size: {}, Val Batch size: {}
        datasets: bench
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs,lr,train_batch,val_batch,str(save),str(gpu)))

    global best_auc
    global best_loss
    writer = SummaryWriter(ckpt)

    # Only optimize parameters that are not frozen (requires_grad=True).
    optimizer = optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=lr, weight_decay=0.0005)
    criterion = nn.BCELoss()

    if gpu:
        net = net.to(device)
        criterion = criterion.to(device)
    print('Total params: %.2fM' % (sum(p.numel() for p in net.parameters())/1000000.0))

    clr = 0        # epochs elapsed since the last validation-loss improvement
    best_f1 = 0.   # NOTE(review): assigned but never updated/used below

    start_time = time.time()

    for epoch in range(epochs):
        # Patience-based LR schedule: decay by 0.1 after more than 8 stagnant
        # epochs, but never below 1e-7.
        if clr > 8:
            if lr > 1e-7:
                lr = adjust_learning_rate(optimizer, lr, 0.1)
            clr = 0
        print('\nEpoch: %d | LR: %.8f' % (epoch + 1, lr))

        train_loss_seg,train_acc_mask = train(net, criterion, optimizer,train_batch, gpu)
        print( 'train_loss_seg:', train_loss_seg, 'train_acc_mask:', train_acc_mask)

        val_loss_seg,val_acc_mask,auc,f1 = val(net, criterion, val_batch, gpu)
        print( 'val_loss_seg:', val_loss_seg, 'val_acc_mask:', val_acc_mask,'auc:',auc,'f1:',f1)

        # Visualization train
        writer.add_scalar('train/loss/seg', train_loss_seg, epoch)
        writer.add_scalar('train/acc/mask', train_acc_mask, epoch)
        # Visualization val
        writer.add_scalar('val/loss/seg', val_loss_seg, epoch)
        writer.add_scalar('val/acc/mask', val_acc_mask, epoch)
        writer.add_scalar('val/auc/mask', auc, epoch)
        writer.add_scalar('val/f1/mask', f1, epoch)

        # visualization learning rate
        writer.add_scalar('lr', lr, epoch)

        # Checkpoint whenever validation AUC reaches a new best.
        isauc_best = auc > best_auc
        best_auc = max(auc, best_auc)
        print('best_auc',best_auc)
        if isauc_best:
            torch.save(net.state_dict(),save + '/columbia_tiny_best_auc.pt')

        # Track best (lowest) validation loss; drives the LR patience counter.
        isf_best = val_loss_seg < best_loss
        best_loss = min(val_loss_seg, best_loss)
        print('best_loss',best_loss)

        #torch.save(net.state_dict(),save + '/vvvvv.pt')
        if isf_best:
            clr = 0
        else:
            clr += 1
        print('clr', clr)
        # Always save the latest weights regardless of metric improvement.
        torch.save(net.state_dict(),save + '/columbia_tiny_newx.pt')
    writer.close()
    print('Spend time: {:.3f}s'.format(time.time() - start_time))
-
def train(net, criterion, optimizer, batchsize, gpu):
    """Run one training epoch over the Columbia training split.

    Args:
        net: segmentation network; its raw logits are squashed with sigmoid
             before the loss and the accuracy metric.
        criterion: pixel-wise loss expecting probabilities (e.g. nn.BCELoss).
        optimizer: optimizer over net's trainable parameters.
        batchsize: mini-batch size for the DataLoader.
        gpu: if True, move each batch to the module-level `device`.

    Returns:
        (average segmentation loss, average mask accuracy) over the epoch.
    """
    train_data = columbia("train", newsize=320)
    #train_data = casia2("train", newsize=320)
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=batchsize, shuffle=True)

    losses = AverageMeter()
    acces_mask = AverageMeter()

    # switch to train mode
    net.train()

    for i, data in enumerate(train_loader):

        img, mask = data

        if gpu:
            img = img.to(device)
            mask = mask.to(device)

        pred = net(img)
        pred = torch.sigmoid(pred)

        loss = criterion(pred.view(-1), mask.view(-1))
        losses.update(loss.item(), img.size(0))
        if i % 5 == 0:
            print('{} --- loss: {:.4f}'.format(i, loss.item()))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # BUG FIX: `pred` is already a sigmoid output; the original applied
        # torch.sigmoid a second time here, which squashes probabilities
        # toward 0.5 and distorts the accuracy metric.
        acc_mask = accuracy(pred, mask.squeeze(1))
        acces_mask.update(acc_mask.item(), img.size(0))

    return losses.avg, acces_mask.avg
-
def val(net, criterion, batchsize, gpu):
    """Evaluate `net` over the Columbia validation split.

    Args:
        net: segmentation network; raw logits are squashed with sigmoid.
        criterion: pixel-wise loss expecting probabilities (e.g. nn.BCELoss).
        batchsize: mini-batch size for the DataLoader.
        gpu: if True, move each batch to the module-level `device`.

    Returns:
        (average loss, average mask accuracy, mean per-image AUC, mean per-image F1).
    """
    val_data = columbia("val", newsize=320)
    #val_data = casia2("val", newsize=320)
    val_loader = torch.utils.data.DataLoader(val_data, batch_size=batchsize, shuffle=True)

    losses = AverageMeter()
    acces_mask = AverageMeter()
    f1es = []
    auces = []

    # switch to eval mode (disables dropout / batch-norm updates)
    net.eval()

    for i, data in enumerate(val_loader):

        img, mask = data

        if gpu:
            img = img.to(device)
            mask = mask.to(device)

        pred = net(img)
        pred = torch.sigmoid(pred)

        loss = criterion(pred.view(-1), mask.view(-1))
        losses.update(loss.item(), img.size(0))
        if i % 4 == 0:
            print('{} --- loss: {:.4f}'.format(i, loss.item()))

        # BUG FIX: `pred` is already a sigmoid output; the original applied
        # torch.sigmoid a second time before computing accuracy, distorting
        # the metric (F_measure below correctly used `pred` directly).
        acc_mask = accuracy(pred, mask.squeeze(1))
        acces_mask.update(acc_mask.item(), img.size(0))

        aucs, f1s = F_measure(mask.detach().cpu(), pred.detach().cpu())
        auces.extend(aucs)
        f1es.extend(f1s)

    # Guard against an empty validation set / empty metric lists so an empty
    # loader reports zero metrics instead of raising ZeroDivisionError.
    mean_auc = sum(auces) / len(auces) if auces else 0.
    mean_f1 = sum(f1es) / len(f1es) if f1es else 0.

    return losses.avg, acces_mask.avg, mean_auc, mean_f1
-
if __name__ == '__main__':

    args = parser.parse_args()
    #epochs, train_batch, val_batch = 100, 6, 4  # manual override used during development

    # Fixed run configuration.
    use_gpu = True
    learning_rate = 1e-3
    resume_finetune = False

    # TensorBoard log directory and checkpoint directory.
    tb_dir = "/model/"
    #os.makedirs(tb_dir, exist_ok=True)
    model_dir = "/model"
    #os.makedirs(model_dir, exist_ok=True)

    net = ei_Net_Resnet50()

    # Optionally warm-start from a pre-trained checkpoint.
    if resume_finetune:
        pretrained_path = "/home/gaojintong/AM_Net/pt/pre.pt"
        net.load_state_dict(torch.load(pretrained_path))
        print('Model loaded from {}'.format(pretrained_path))

    train_net(net=net,
              epochs=args.epochs,
              lr=learning_rate,
              gpu=use_gpu,
              save=model_dir,
              train_batch=args.train_batch,
              val_batch=args.val_batch,
              ckpt=tb_dir)
|