|
- import os.path
- import numpy as np
- from datetime import datetime
- import imageio
- import cv2
-
- import torch.nn
- from torch.nn import CrossEntropyLoss
- # from torch.nn import Loss
- from torch.utils.data import DataLoader
- from torch.utils.tensorboard import SummaryWriter # tensorboard --> 可视化
- from torch.optim.lr_scheduler import ExponentialLR, CosineAnnealingLR
- from torch.nn.functional import one_hot
- # from torch.cuda.amp import GradScaler as GradScaler
- # from torch.cuda.amp import autocast as autocast
-
- from utils.dataset.TT_Dataset import MyDataset # 读取数据所用函数
- from utils.loss import FocalLoss, SoftDiceLossV2,DSCLoss,softmax_focalloss
- from Model.UNet.unet import UNet
- from utils.utils import Logger
-
# Run on GPU when available; the model and every batch are moved to this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-
def estimate(y_label, y_pred):
    """Return the element-wise accuracy of *y_pred* against *y_label*.

    Both arguments are torch tensors (possibly resident on GPU); they are
    copied to the CPU for the comparison. The prediction tensor is handed
    back unchanged alongside the accuracy.
    """
    labels = y_label.cpu().numpy()
    preds = y_pred.cpu().numpy()
    # Fraction of positions where prediction exactly matches the label.
    acc = (labels == preds).mean()
    return acc, y_pred
-
-
# ---- Global training configuration ----
num_classes = 8      # number of segmentation classes predicted by the model
epochs = 200         # total training epochs
batch_size = 8
modelname = "UNet"

# Directory where predicted result tifs are written.
tif_save_path = os.path.join(os.getcwd(), 'Train', "result", modelname)
# exist_ok=True makes this idempotent and avoids the check-then-create race
# of the previous `if os.path.exists(...) == False:` pattern.
os.makedirs(tif_save_path, exist_ok=True)
-
-
def train():
    """Train the UNet segmentation model and log progress to file + TensorBoard.

    Loads images/labels from the hard-coded dataset directories, trains for
    ``epochs`` epochs with AdamW and cosine warm restarts, logs per-epoch
    loss and learning rate to TensorBoard, and builds a per-epoch checkpoint
    path under ``Train/save_model/<modelname>`` (the actual ``torch.save``
    call is currently disabled).
    """
    # ---- logging setup ----
    now = datetime.now()
    # NOTE(review): "+ 8" looks like a manual UTC->local timezone offset and
    # can produce an hour value >= 24 — confirm intended timezone handling.
    now = str(now.month) + '_' + str(now.day) + '_' + str(now.hour + 8) + '_' + str(now.minute)
    log_dir = os.path.join(os.getcwd(), 'Train', "logs", modelname + "_logs")
    os.makedirs(log_dir, exist_ok=True)
    # os.path.join instead of the previous hard-coded "\\" separator, which
    # broke the path on non-Windows systems.
    log_path = os.path.join(log_dir, now + ".log")

    # Touch the log file so the Logger can append to it.
    open(log_path, 'w').close()

    log = Logger(log_path, level='debug')
    log.logger.info('coastline imagesize=256')

    # TensorBoard run directory (inspect with: tensorboard --logdir <path>).
    tensorboard_path = os.path.join(os.getcwd(), 'logs', modelname, now)
    os.makedirs(tensorboard_path, exist_ok=True)
    writer = SummaryWriter(tensorboard_path)

    # ---- data ----
    imagePath = r"E:\yqj\try\code\torch\Train\Data\coastline\img"
    labelPath = r"E:\yqj\try\code\torch\Train\Data\coastline\lab_type"

    trainDataset = MyDataset(imagePath, labelPath)
    trainDatasetloader = DataLoader(trainDataset, batch_size, shuffle=True)
    trainLen = len(trainDatasetloader)

    # ---- model ----
    model = UNet(num_classes=num_classes).to(device)
    total = sum(param.nelement() for param in model.parameters())
    print("Number of parameter: %.2fM" % (total / 1e6))

    # ---- optimizer / LR schedule ----
    # Loss is softmax_focalloss, applied directly in the batch loop below.
    optimizer = torch.optim.AdamW(model.parameters(), lr=0.001, weight_decay=0.01, betas=(0.9, 0.95))
    scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
        optimizer,
        T_0=3,         # epochs before the first warm restart
        T_mult=13,     # period multiplier after each restart: T_0 *= T_mult
        eta_min=1e-5   # lower bound of the learning rate
    )

    # ---- training loop ----
    for epoch in range(epochs):
        print(
            f"\n----------------------------------------------epoch: {epoch}----------------------------------------------")
        total_loss = 0.0
        # Initialized here so the post-loop logging below cannot hit a
        # NameError if the dataloader yields no batches.
        lr = optimizer.param_groups[0]["lr"]

        for i, data in enumerate(trainDatasetloader):
            img, lab = data
            img = img.to(device)
            lab = lab.to(device)

            optimizer.zero_grad()
            output = model(img)
            model_loss = softmax_focalloss(output, lab.long())
            model_loss.backward()
            optimizer.step()
            # .item() detaches the scalar; accumulating the loss tensor itself
            # kept every batch's autograd graph alive and leaked GPU memory.
            total_loss += model_loss.item()

            lr = optimizer.param_groups[0]["lr"]  # current learning rate
            print("\r train: epoch: {}, step: {}/{}, lr: {}, loss: {} ".format(epoch, i, trainLen, lr,
                                                                               round(float(total_loss / (i + 1)), 8)),
                  end='')

        # Mean loss over the epoch.
        epoch_loss = total_loss * 1.0 / trainLen
        scheduler.step()

        writer.add_scalar('Train Epoch Loss', round(float(epoch_loss), 8), epoch + 1)
        writer.add_scalar('Train Epoch Lr ', round(float(lr), 8), epoch + 1)
        print("\r epoch: {}, epoch_loss: {}".format(epoch, round(float(epoch_loss), 8)), end='')

        # Checkpoint path: "<epoch>-<loss>.pth" under Train/save_model/<modelname>.
        save_name = str(epoch + 1) + '-' + str(round(float(epoch_loss), 5)) + ".pth"
        savepath = os.path.join(os.getcwd(), 'Train', "save_model", modelname)
        os.makedirs(savepath, exist_ok=True)
        checkpoint_path = os.path.join(savepath, save_name)
        # NOTE(review): checkpoint saving is currently disabled in SOURCE;
        # re-enable once a save schedule is decided, e.g.:
        # torch.save(model.state_dict(), checkpoint_path)
-
-
if __name__ == '__main__':
    # Script entry point: run a full training session.
    train()
-
-
|