#2 上传文件至 'tra'

Merged
chengKun merged 1 commit from devtrain into master 1 year ago
  1. +33
    -0
      tra/Timer.py
  2. +87
    -0
      tra/_01TrainMain.py
  3. +122
    -0
      tra/_02PipeDatasetLoader.py
  4. +140
    -0
      tra/_03Unet.py
  5. +23
    -0
      tra/_04Loss.py
  6. +57
    -0
      tra/_10ShowTrainingProcess.py
  7. +82
    -0
      tra/_20TestMain.py
  8. +65
    -0
      tra/_21CalEvaluationIndicator.py
  9. +64
    -0
      tra/_30Predict.py

+ 33
- 0
tra/Timer.py View File

@@ -0,0 +1,33 @@
#-*- coding:utf-8 _*-
"""
@Author : Xiaoqi Cheng Yutao
@Time : 2019/10/26 8:34
"""
import time

def timer(long=-1):
    """Start a stopwatch and return a closure that prints the elapsed time.

    The returned callable prints ``"<name> took : <seconds> seconds."``,
    where the seconds string is ``str(elapsed)`` truncated to ``long``
    characters (the default -1 drops the final character).
    """
    started_at = time.time()

    def end(method_name="Unnamed function"):
        elapsed_text = str(time.time() - started_at)[0:long]
        print(method_name + " took : " + elapsed_text + " seconds.")

    return end

class Timer():
    """Stopwatch whose ``str()`` reports the seconds elapsed since construction."""

    def __init__(self, long=6):
        # Number of characters kept from str(elapsed seconds).
        self.long = long
        self.start = time.time()

    def __str__(self):
        # Magic method: printing the instance prints the elapsed time.
        elapsed = time.time() - self.start
        return "Time: " + str(elapsed)[0:self.long] + " seconds."

if __name__ == '__main__':
    # Demo: function-style timer, seconds string truncated to 8 characters.
    end = timer(long=8)
    time.sleep(1)
    end("Test")

    # Demo: class-style timer; each print reports the time since construction.
    a = Timer(8)
    time.sleep(1)
    print(a)
    time.sleep(1)
    print(a)


+ 87
- 0
tra/_01TrainMain.py View File

@@ -0,0 +1,87 @@
# -*- coding:utf-8 _*-
"""
@Author : Cui Baoyi
@Time : 2021/03/16 9:52
"""
import logging, os, torch
from Timer import *
from _02PipeDatasetLoader import *
from _03Unet import *
from _04Loss import *

# Training script: weighted-BCE U-Net training with periodic validation,
# checkpointing, and a step learning-rate schedule.
WeightCoefficient = 2   # extra BCE weight applied to positive (foreground) pixels
Lr = 0.01               # initial learning rate
Epochs = 300
LrDecay = 0.1           # multiplicative LR decay factor
BatchSize = 30
LrDecayPerEpoch = 100   # decay the learning rate every this many epochs
ValidPerEpoch = 50      # run validation every this many epochs
SavePerEpoch = 300      # save a checkpoint every this many epochs
torch.cuda.set_device(0)  # select the GPU device

# %% Load the data, initialise the network, define the objective
FolderPath = '../Dataset'
TrainDataset, TrainDataLoader, ValDataset, ValDataLoader = PipeDatasetLoader(FolderPath, BatchSize)
# %% Unet_BCELoss_Adam
Unet = UNet(in_channels=3, out_channels=1, init_features=4, WithActivateLast=True, ActivateFunLast=torch.sigmoid).to('cuda')
SaveFolder = 'Output'
Criterion = nn.BCELoss().to('cuda')
Optimizer = torch.optim.Adam(Unet.parameters(), lr=Lr)
# BUG FIX: exist_ok expects a bool; the original passed the folder name, which
# only worked because a non-empty string happens to be truthy.
os.makedirs(SaveFolder, exist_ok=True)
logging.basicConfig(filename=os.path.join(SaveFolder, 'log.txt'), filemode='w', level=logging.WARNING, format='%(asctime)s %(message)s', datefmt='%Y-%m-%d-%H:%M:%S')
# Unet.load_state_dict(torch.load(os.path.join(SaveFolder, 'PreTrained.pt'), map_location = 'cuda'))
logging.warning('WeightCoefficient:{0:03d}'.format(WeightCoefficient))

# %% Training loop
LrScheduler = torch.optim.lr_scheduler.StepLR(Optimizer, step_size=LrDecayPerEpoch, gamma=LrDecay)  # LR schedule
for Epoch in range(1, Epochs + 1):
    # %% Train one epoch
    Unet.train()  # training mode
    # torch.cuda.empty_cache()  # release cached GPU memory; costs roughly 0.05 s
    TrainLoss = 0
    # get_last_lr() replaces the deprecated get_lr(), which warns and can report
    # an over-decayed value when called outside of step().
    print('Epoch:%d, LR:%.8f ' % (Epoch, LrScheduler.get_last_lr()[0]), end='>> ', flush=True)
    for Iter, (InputImg, Label, SampleName) in enumerate(TrainDataLoader):
        print(Iter, end=' ', flush=True)
        InputImg = InputImg.float().to('cuda')
        Label = Label.float().to('cuda')
        # Per-pixel BCE weight: foreground gets WeightCoefficient, background 1.
        Weight = Label * (WeightCoefficient - 1) + 1
        Criterion.weight = Weight
        Optimizer.zero_grad()
        with torch.set_grad_enabled(True):
            OutputImg = Unet(InputImg)
            BatchLoss = Criterion(OutputImg, Label)
            BatchLoss.backward()
            Optimizer.step()
        TrainLoss += BatchLoss.item()
    AveTrainLoss = TrainLoss / TrainDataset.__len__() * BatchSize  # average loss per image
    print(", Total loss is: %.6f" % float(AveTrainLoss))
    logging.warning('\tTrain\tEpoch:{0:04d}\tLearningRate:{1:08f}\tLoss:{2:08f}'.format(Epoch, LrScheduler.get_last_lr()[0], AveTrainLoss))

    # %% Validate
    if Epoch % ValidPerEpoch == 0 or Epoch == 1:
        Unet.eval()  # evaluation mode (the original comment wrongly said "training mode")
        torch.cuda.empty_cache()  # release cached GPU memory
        ValLoss = 0
        print('Validate:', end='>>', flush=True)
        for Iter, (InputImg, Label, SampleName) in enumerate(ValDataLoader):
            print(Iter, end=' ', flush=True)
            InputImg = InputImg.float().to('cuda')
            Label = Label.float().to('cuda')
            Weight = Label * (WeightCoefficient - 1) + 1
            Criterion.weight = Weight
            with torch.set_grad_enabled(False):  # equivalent to torch.no_grad()
                OutputImg = Unet(InputImg)
                BatchLoss = Criterion(OutputImg, Label)
            ValLoss += BatchLoss.item()
        AveValLoss = ValLoss / ValDataset.__len__()  # val loader uses batch_size=1
        print("Total loss is: %.6f" % float(AveValLoss))
        logging.warning('\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tValid\tEpoch:{0:04d}\tLearningRate:{1:08f}\tLoss:{2:08f}'.format(Epoch, LrScheduler.get_last_lr()[0], AveValLoss))

    # %% Save a checkpoint
    if Epoch % SavePerEpoch == 0:
        torch.save(Unet.state_dict(), os.path.join(SaveFolder, '{0:04d}.pt'.format(Epoch)))

    # %% Advance the LR schedule once per epoch
    LrScheduler.step()


+ 122
- 0
tra/_02PipeDatasetLoader.py View File

@@ -0,0 +1,122 @@
# -*- coding:utf-8 _*-
"""
@Author : Cui Baoyi
@Time : 2021/03/16 9:52
"""
import torch, os, cv2
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import transforms
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import random

InputImgSize=(128,128)  # spatial size (H, W) every sample is resized/cropped to
# %% Transforms applied to TRAINING images
TrainImgTransform = transforms.Compose([
    # transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.5, 2.), shear=10),
    # transforms.RandomHorizontalFlip(),
    # transforms.RandomVerticalFlip(),
    # scale=(1., 1.) keeps the whole image, so this amounts to a resize to InputImgSize.
    transforms.RandomResizedCrop(InputImgSize, scale=(1., 1.), interpolation=Image.BILINEAR),
    transforms.ToTensor(),
    # NOTE(review): single-element mean/std implies one-channel input here, yet
    # the training script builds UNet with in_channels=3 -- confirm the data format.
    transforms.Normalize(mean=[0.46], std=[0.10]),
])
# Labels use NEAREST interpolation so class values are never blended.
TrainLabelTransform = transforms.Compose([
    # transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.5, 2.), shear=10),
    # transforms.RandomHorizontalFlip(),
    # transforms.RandomVerticalFlip(),
    transforms.RandomResizedCrop(InputImgSize, scale=(1., 1.), interpolation=Image.NEAREST),
    # transforms.RandomResizedCrop(InputImgSize, scale=(1., 1.)),
    transforms.ToTensor(),
])

# %% Transforms applied to VALIDATION images
ValImgTransform = transforms.Compose([
    transforms.Resize(InputImgSize),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.46], std=[0.10]),
])
ValLabelTransform = transforms.Compose([
    transforms.Resize(InputImgSize, interpolation=Image.NEAREST),
    transforms.ToTensor(),
])
# TODO: transforms for the prediction pipeline
# Transforms applied to PREDICTION images
PredictImgTransform = transforms.Compose([
    transforms.Resize(InputImgSize),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.46], std=[0.10])
])

class PipeDataset(Dataset):
    """Dataset of sample folders, each containing 'img.png' and 'label.png'.

    ``__getitem__`` returns (transformed image tensor, transformed label
    tensor, sample folder name).
    """

    def __init__(self, DatasetFolderPath, ImgTransform, LabelTransform, ShowSample=False):
        self.DatasetFolderPath = DatasetFolderPath  # root folder: one sub-folder per sample
        self.ImgTransform = ImgTransform            # transform applied to the input image
        self.LabelTransform = LabelTransform        # transform applied to the label mask
        self.ShowSample = ShowSample                # when True, visualise each loaded sample
        self.SampleFolders = os.listdir(self.DatasetFolderPath)

    def __len__(self):
        # One sample per sub-folder.
        return len(self.SampleFolders)

    def __getitem__(self, item):
        SampleFolderPath = os.path.join(self.DatasetFolderPath, self.SampleFolders[item])  # sample folder path
        FusionImgPath = os.path.join(SampleFolderPath, 'img.png')
        LabelImgPath = os.path.join(SampleFolderPath, 'label.png')
        FusionImg = Image.open(FusionImgPath)
        LabelImg = Image.open(LabelImgPath)
        # Scale label values up to 0-255. NOTE(review): assumes the stored mask
        # is 0/1 -- values > 1 would overflow uint8 arithmetic; confirm.
        LabelImg = np.array(LabelImg)*255
        LabelImg = Image.fromarray(LabelImg)

        # %% Re-seed before each transform so image and label receive the SAME
        # random crop parameters (presumably torchvision draws them from the
        # `random` module -- verify for the torchvision version in use).
        seed = np.random.randint(2147483647)
        random.seed(seed)
        FusionImg = self.ImgTransform(FusionImg)
        random.seed(seed)
        LabelImg = self.LabelTransform(LabelImg)

        # %% Optionally show the sample with the label overlaid in one channel
        if self.ShowSample:
            plt.figure(self.SampleFolders[item])
            Img = FusionImg.numpy()[0]
            Label = LabelImg.numpy()[0]
            Img = (Normalization(Img) * 255).astype(np.uint8)
            Label = (Normalization(Label) * 255).astype(np.uint8)
            Img = cv2.cvtColor(Img, cv2.COLOR_GRAY2RGB)
            Img[..., 2] = Label  # overlay the label in channel 2
            plt.imshow(Img)
            plt.show()
        return FusionImg, LabelImg, self.SampleFolders[item]


def PipeDatasetLoader(FolderPath, BatchSize=1, ShowSample=False):
    """Build train/val datasets and loaders from FolderPath/{Train,Val}.

    Training batches are shuffled at the requested batch size; validation
    always runs one sample at a time in a fixed order.
    """
    train_set = PipeDataset(os.path.join(FolderPath, 'Train'), TrainImgTransform, TrainLabelTransform, ShowSample)
    train_loader = DataLoader(train_set, batch_size=BatchSize, shuffle=True, drop_last=False, num_workers=0, pin_memory=True)
    val_set = PipeDataset(os.path.join(FolderPath, 'Val'), ValImgTransform, ValLabelTransform, ShowSample)
    val_loader = DataLoader(val_set, batch_size=1, shuffle=False, drop_last=False, num_workers=0, pin_memory=True)
    # TODO: also build a prediction dataset/loader
    # predict_set = PipeDataset(os.path.join(FolderPath))
    return train_set, train_loader, val_set, val_loader


def Normalization(Array):
    """Linearly rescale an array to the [0, 1] range.

    A constant array (max == min) is returned unchanged to avoid division by
    zero. The original implementation shadowed the ``min``/``max`` builtins;
    the locals are renamed here.
    """
    lo = np.min(Array)
    hi = np.max(Array)
    if hi - lo == 0:
        # No spread to normalise; return the input as-is.
        return Array
    return (Array - lo) / (hi - lo)


if __name__ == '__main__':
    # Smoke test: iterate the training loader once with sample visualisation on.
    FolderPath = '../Dataset'
    TrainDataset, TrainDataLoader, ValDataset, ValDataLoader = PipeDatasetLoader(FolderPath, BatchSize=1, ShowSample=True)
    for epoch in range(1):
        for i, (Img, Label, SampleName) in enumerate(TrainDataLoader):
            print(SampleName)
            print(Img.shape)
            print(Label.max())

+ 140
- 0
tra/_03Unet.py View File

@@ -0,0 +1,140 @@
# -*- coding:utf-8 _*-
"""
@Author : Cui Baoyi
@Time : 2021/03/14 16:00
"""
''' 该文件定义了图像分割使用的Unet '''
from collections import OrderedDict
import torch
import torch.nn as nn
import numpy as np


class UNet(nn.Module):
    """Standard U-Net for image segmentation.

    Parameters
    ----------
    in_channels : channels of the input image.
    out_channels : channels of the output map (number of classes).
    init_features : channel count of the first encoder stage; doubled at each
        deeper stage.
    WithActivateLast : when True, apply ``ActivateFunLast`` to the final 1x1
        convolution output; when False, return raw logits.
    ActivateFunLast : activation callable (e.g. ``torch.sigmoid``) used when
        ``WithActivateLast`` is True.

    BUG FIX: with the original defaults (``WithActivateLast=True`` and
    ``ActivateFunLast=None``) ``forward`` called ``None(...)`` and raised
    TypeError. A ``None`` activation is now treated as "no activation", which
    is backward-compatible for every caller that passed a real function.

    Input H and W must be divisible by 16 (four 2x2 poolings).
    """

    def __init__(self, in_channels=3, out_channels=1, init_features=32, WithActivateLast=True, ActivateFunLast=None):
        super(UNet, self).__init__()
        features = init_features
        self.WithActivateLast = WithActivateLast  # True: activate the final output
        self.ActivateFunLast = ActivateFunLast    # activation used when WithActivateLast is True
        # Encoder: four conv blocks, each followed by 2x2 max pooling.
        self.encoder1 = UNet._block(in_channels, features, name="enc1")
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.encoder2 = UNet._block(features, features * 2, name="enc2")
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.encoder3 = UNet._block(features * 2, features * 4, name="enc3")
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.encoder4 = UNet._block(features * 4, features * 8, name="enc4")
        self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.bottleneck = UNet._block(features * 8, features * 16, name="bottleneck")

        # Decoder: transposed convolutions double the resolution; each decoder
        # block sees 2x channels because of the skip-connection concatenation.
        self.upconv4 = nn.ConvTranspose2d(
            features * 16, features * 8, kernel_size=2, stride=2
        )
        self.decoder4 = UNet._block((features * 8) * 2, features * 8, name="dec4")
        self.upconv3 = nn.ConvTranspose2d(
            features * 8, features * 4, kernel_size=2, stride=2
        )
        self.decoder3 = UNet._block((features * 4) * 2, features * 4, name="dec3")
        self.upconv2 = nn.ConvTranspose2d(
            features * 4, features * 2, kernel_size=2, stride=2
        )
        self.decoder2 = UNet._block((features * 2) * 2, features * 2, name="dec2")
        self.upconv1 = nn.ConvTranspose2d(
            features * 2, features, kernel_size=2, stride=2
        )
        self.decoder1 = UNet._block(features * 2, features, name="dec1")
        # Final 1x1 convolution maps to the requested number of output channels.
        self.conv = nn.Conv2d(
            in_channels=features, out_channels=out_channels, kernel_size=1
        )

    def forward(self, x):
        # Encoder path; keep each stage's output for the skip connections.
        enc1 = self.encoder1(x)
        enc2 = self.encoder2(self.pool1(enc1))
        enc3 = self.encoder3(self.pool2(enc2))
        enc4 = self.encoder4(self.pool3(enc3))

        bottleneck = self.bottleneck(self.pool4(enc4))

        # Decoder path: upsample, concatenate the matching encoder output, convolve.
        dec4 = self.upconv4(bottleneck)
        dec4 = torch.cat((dec4, enc4), dim=1)
        dec4 = self.decoder4(dec4)
        dec3 = self.upconv3(dec4)
        dec3 = torch.cat((dec3, enc3), dim=1)
        dec3 = self.decoder3(dec3)
        dec2 = self.upconv2(dec3)
        dec2 = torch.cat((dec2, enc2), dim=1)
        dec2 = self.decoder2(dec2)
        dec1 = self.upconv1(dec2)
        dec1 = torch.cat((dec1, enc1), dim=1)
        dec1 = self.decoder1(dec1)
        out = self.conv(dec1)  # BS*out_channels*H*W
        if self.WithActivateLast and self.ActivateFunLast is not None:
            # Guard against ActivateFunLast=None: the original crashed here
            # when constructed with its own defaults.
            return self.ActivateFunLast(out)
        return out

    @staticmethod
    def _block(in_channels, features, name):
        """Two (Conv3x3 -> BatchNorm -> ReLU) layers.

        A static method: it needs no instance state, it is simply a builder
        function namespaced under the class. The OrderedDict gives each layer
        a readable name in the module tree.
        """
        return nn.Sequential(
            OrderedDict(
                [
                    (
                        name + "conv1",
                        nn.Conv2d(
                            in_channels=in_channels,
                            out_channels=features,
                            kernel_size=3,
                            padding=1,
                            bias=False,  # BatchNorm follows, so the conv bias is redundant
                        ),
                    ),
                    (name + "norm1", nn.BatchNorm2d(num_features=features)),
                    (name + "relu1", nn.ReLU(inplace=True)),
                    (
                        name + "conv2",
                        nn.Conv2d(
                            in_channels=features,
                            out_channels=features,
                            kernel_size=3,
                            padding=1,
                            bias=False,
                        ),
                    ),
                    (name + "norm2", nn.BatchNorm2d(num_features=features)),
                    (name + "relu2", nn.ReLU(inplace=True)),
                ]
            )
        )


if __name__ == '__main__':
    # Sanity check: compare nn.CrossEntropyLoss against a hand-written version.
    Input = torch.randn((2, 1, 256, 256))  # random samples
    Target = torch.empty((2, 1, 256, 256), dtype=torch.long).random_(2)  # random 0/1 labels

    Unet = UNet(in_channels=1, out_channels=2)  # two output channels for binary classification
    LossFun = nn.CrossEntropyLoss()
    Output = Unet(Input)
    print(Output.shape)
    print(Target.shape)

    # %% CrossEntropyLoss via the official implementation
    BatchLoss = LossFun(Output, Target[:, 0, :, :])  # CrossEntropyLoss wants targets without a channel dim: (BatchSize, W, H)
    print(BatchLoss)

    # %% Hand-written verification of CrossEntropyLoss (slow: loops over every pixel)
    Errs = []
    for i, Sample in enumerate(Output):  # iterate over every sample and pixel
        for j in range(256):
            for k in range(256):
                temppredict = Output[i, :, j, k]  # predicted logits for this pixel (here 2 channels; the original comment said 5)
                temptarget = Target[i, 0, j, k]  # true class of this pixel
                err = -temppredict[temptarget] + torch.log(torch.sum(np.e ** temppredict))  # per-pixel cross entropy
                Errs.append(err.detach().numpy())
    print(np.mean(Errs))

+ 23
- 0
tra/_04Loss.py View File

@@ -0,0 +1,23 @@
#-*- coding:utf-8 _*-
"""
@Author : Cui Baoyi
@Time : 2021/03/16 9:52
"""
import numpy as np
import torch.nn as nn
import torch

class DiceLoss(nn.Module):
    """1 - Dice coefficient as a segmentation loss.

    https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient
    """

    def __init__(self):
        super(DiceLoss, self).__init__()
        # Laplace-style smoothing keeps the ratio defined when both masks are empty.
        self.smooth = 1.0

    def forward(self, y_pred, y_true):  # both BS*1*H*W
        assert y_pred.size() == y_true.size()
        # Flatten channel 0; .contiguous() because .view() requires dense memory.
        pred_flat = y_pred[:, 0].contiguous().view(-1)
        true_flat = y_true[:, 0].contiguous().view(-1)
        # Overlap of positive pixels (where the mask is 1).
        overlap = (pred_flat * true_flat).sum()
        dice = (2. * overlap + self.smooth) / (pred_flat.sum() + true_flat.sum() + self.smooth)
        return 1. - dice


+ 57
- 0
tra/_10ShowTrainingProcess.py View File

@@ -0,0 +1,57 @@
#-*- coding:utf-8 _*-
"""
@Author : Cui Baoyi
@Time : 2021/03/16 9:52
"""
""" 该函数用于展示训练进度 """
import os
import numpy as np
np.set_printoptions(suppress=True, precision=8)
import matplotlib.pyplot as plt


SaveFolders = ['Output']

for SaveFolder in SaveFolders:
    # Parse log.txt into (epoch, lr, loss) rows for train and validation lines.
    TrainLosses = []
    ValidLosses = []
    with open(os.path.join(SaveFolder, 'log.txt'), 'r') as f:
        lines = f.readlines()
    for i, line in enumerate(lines):
        fields = line.strip().split('\t')
        if 'Train' in line:
            # Field layout written by the training script: Epoch/LR/Loss at 2/3/4.
            record = np.array([float(fields[2].split(':')[1]),
                               float(fields[3].split(':')[1]),
                               float(fields[4].split(':')[1])])
            if record[2] < 1:  # drop early divergent values
                TrainLosses.append(record)
        elif 'Valid' in line:
            # Validation lines are tab-padded, shifting the fields to 16/17/18.
            record = np.array([float(fields[16].split(':')[1]),
                               float(fields[17].split(':')[1]),
                               float(fields[18].split(':')[1])])
            if record[2] < 1:
                ValidLosses.append(record)

    TrainLosses = np.vstack(TrainLosses)
    ValidLosses = np.vstack(ValidLosses)
    Lrs = np.unique(TrainLosses[..., 1])
    Colors = ['r','g','b','y','c','m','k']  # palette (kept from the original, currently unused)

    # %% Split TrainLosses into one segment per learning-rate value
    NewTrainLosses = []
    for lr_value in Lrs:
        segment_index = np.where(TrainLosses[:, 1] == lr_value)
        NewTrainLosses.append(TrainLosses[segment_index, :][0])

    fig = plt.figure(SaveFolder)
    for segment in NewTrainLosses:
        plt.plot(segment[..., 0], segment[..., 2])
    plt.plot(ValidLosses[..., 0], ValidLosses[..., 2])
    # plt.xticks(np.arange(0,2500,500))
    plt.show()






+ 82
- 0
tra/_20TestMain.py View File

@@ -0,0 +1,82 @@
#-*- coding:utf-8 _*-
"""
@Author : Cui Baoyi
@Time : 2021/03/16 9:52
"""
import torch, os
from Timer import *
from _02PipeDatasetLoader import *
from _03Unet import *
from _21CalEvaluationIndicator import *
from PIL import Image
Device = torch.device("cuda:0")

def read_images(path):  # list image files in a folder
    """Return the file names in *path* whose extension is jpg or png.

    BUG FIX: the original located the FIRST dot in the name, so a file such
    as 'd.tar.png' was classified by 'tar.png' and wrongly excluded, and a
    dotless name took its whole tail as the "extension". ``os.path.splitext``
    splits on the last dot, which is the intended behaviour; the comparison
    is also made case-insensitive ('A.PNG' now matches).
    """
    img_files = []
    for file in os.listdir(path):
        ext = os.path.splitext(file)[1].lstrip('.').lower()
        if ext in ('jpg', 'png'):
            img_files.append(file)
    return img_files

# %% Load the data and the trained model
# FolderPath = '/home/cxq/workspace2/2019.10.23PipeEdgeDetecion/2019.10.23LossFunctionTest/Test/Dataset'
FolderPath = '../Dataset'
UnconvertedPath = './Output/Unconverted'  # where the overlay result images are written
ConvertedPath = './Output/Converted'
MaskPath = './Output/Mask'
TrainDataset, TrainDataLoader, ValDataset, ValDataLoader = PipeDatasetLoader(FolderPath, 1)
# Unet_BCELoss_Adam
SaveFolder = 'Output'
Unet = UNet(in_channels=3, out_channels=1, init_features=4, WithActivateLast = True, ActivateFunLast = torch.sigmoid).to(Device)
Unet.load_state_dict(torch.load(os.path.join(SaveFolder, '0300.pt'), map_location = Device))

# %% Evaluate on the validation set
Unet.eval()  # evaluation mode
torch.set_grad_enabled(False)
OutputS = []  # predictions, kept for metric computation
LabelS = []   # ground-truth labels, kept for metric computation
for Iter, (Input, Label, SampleName) in enumerate(ValDataLoader):
    end = timer(8)  # time this sample's inference
    print(SampleName)
    InputImg = Input.float().to(Device)
    OutputImg = Unet(InputImg)
    Output = OutputImg.cpu().numpy()[0]
    Label = Label.detach().cpu().numpy()[0]
    OutputS.append(Output)
    LabelS.append(Label)
    end('5555')
    # Build a visualisation: prediction overlaid on the input's channel 2
    OutputImg = OutputImg.cpu().numpy()[0, 0]
    OutputImg = (OutputImg*255).astype(np.uint8)
    Input = Input.numpy()[0][0]
    Input = (Normalization(Input) * 255).astype(np.uint8)
    ResultImg = cv2.cvtColor(Input, cv2.COLOR_GRAY2RGB)
    ResultImg[...,2] = OutputImg
    plt.show()  # NOTE(review): no figure is created before this call -- likely a no-op
    Mask = Label[0]

    cv2.imwrite(os.path.join(UnconvertedPath, SampleName[0] + '.png'), ResultImg)
    #cv2.imwrite(os.path.join(MaskPath, SampleName[0] + '.png'), MaskImg)
    # image_array is a normalised 2-D float matrix
    # Mask *= 255  # rescale to 0-255 grey values
    # MaskImg = Image.fromarray(Mask)
    # Mask = Label[0]
    # MaskImg = MaskImg.convert('L')  # converts to greyscale; use 'RGB' for colour
    # MaskImg.save(os.path.join(MaskPath, SampleName[0] + '.png'))
    # The approach above saves the image, but it broke the metric computation.

# Flatten all predictions/labels into 1-D vectors for the metrics below.
OutputFlatten = np.vstack(OutputS).ravel()
LabelFlatten = np.vstack(LabelS).ravel()
#%% ROC, AUC
fpr, tpr, AUC = ROC_AUC(LabelFlatten, OutputFlatten, ShowROC = True)
print('AUC:', AUC)
#%% PRC, AP
recall, precision, MF, AP = PRC_AP_MF(LabelFlatten, OutputFlatten, ShowPRC = True)
# mIOU = iou_mean(LabelFlatten, OutputFlatten, n_classes=1)
# print(mIOU)
print('MF:', MF)
print('AP:', AP)
plt.show()

+ 65
- 0
tra/_21CalEvaluationIndicator.py View File

@@ -0,0 +1,65 @@
# -*- coding:utf-8 _*-
"""
@Author : Cui Baoyi
@Time : 2021/03/16 9:52
"""
import sys
import numpy as np
import torch

np.set_printoptions(suppress=True, precision=4)
import numpy.random as r
import sklearn.metrics as m
import matplotlib.pyplot as plt


def ROC_AUC(LabelFlatten, OutputFlatten, ShowROC=False):
    """Compute the ROC curve and its AUC; optionally draw the curve.

    Returns (false positive rates, true positive rates, AUC).
    """
    false_pos, true_pos, _thresholds = m.roc_curve(LabelFlatten, OutputFlatten)
    AUC = m.auc(false_pos, true_pos)  # AUC is the area under the ROC curve
    if ShowROC:
        plt.figure('ROC curve')
        plt.plot(false_pos, true_pos)
        plt.xlabel('fpr')
        plt.ylabel('tpr')
        # plt.show()
    return false_pos, true_pos, AUC


def PRC_AP_MF(LabelFlatten, OutputFlatten, ShowPRC=False):
    """Compute the precision-recall curve plus max-F1 and average precision.

    Returns (recall, precision, MF, AP) where MF is the maximum F1 score over
    all thresholds and AP is the area under the PR curve.
    """
    precision, recall, _thresholds = m.precision_recall_curve(LabelFlatten, OutputFlatten)
    # Guard the denominator with the smallest positive float to avoid 0/0.
    F1ScoreS = 2 * (precision * recall) / ((precision + recall) + sys.float_info.min)
    MF = F1ScoreS.max()  # Maximum F-measure at optimal dataset scale
    AP = m.average_precision_score(LabelFlatten, OutputFlatten)  # AP is the area under the PR curve
    if ShowPRC:
        plt.figure('Precision recall curve')
        plt.plot(recall, precision)
        plt.ylim([0.0, 1.0])
        plt.xlabel('recall')
        plt.ylabel('precision')
        # plt.show()
    return recall, precision, MF, AP


def iou_mean(pred, target, n_classes=1):
    """Mean intersection-over-union over classes 1..n_classes.

    ``pred``/``target`` are integer class masks (numpy arrays, sequences, or
    tensors -- ``torch.as_tensor`` generalises the original numpy-only input).
    ``n_classes`` excludes the background class 0, which is ignored. A class
    absent from both masks (union == 0) contributes nothing to the average,
    matching the original, which recorded NaN for it and skipped it in the sum.

    Cleanups vs the original: the dead ``ious`` list and the redundant
    ``max(union, 1)`` guard (union > 0 is already ensured) are removed.
    """
    pred = torch.as_tensor(pred).reshape(-1)
    target = torch.as_tensor(target).reshape(-1)
    iousSum = 0.0
    for cls in range(1, n_classes + 1):  # class "0" (background) is ignored
        pred_inds = pred == cls
        target_inds = target == cls
        # Cast to long so summing boolean masks cannot overflow.
        intersection = (pred_inds & target_inds).long().sum().item()
        union = pred_inds.long().sum().item() + target_inds.long().sum().item() - intersection
        if union:
            iousSum += float(intersection) / float(union)
    return iousSum / n_classes

+ 64
- 0
tra/_30Predict.py View File

@@ -0,0 +1,64 @@
#-*- coding:utf-8 _*-
"""
@Author : Cui Baoyi
@Time : 2021/03/16 9:52
"""
import torch, os
from Timer import *
from _02PipeDatasetLoader import *
from _03Unet import *
from _21CalEvaluationIndicator import *
from PIL import Image
Device = torch.device("cuda:0")

def read_images(path):  # list image files in a folder
    """Return the file names in *path* whose extension is jpg or png.

    BUG FIX: the original located the FIRST dot in the name, so a file such
    as 'd.tar.png' was classified by 'tar.png' and wrongly excluded, and a
    dotless name took its whole tail as the "extension". ``os.path.splitext``
    splits on the last dot, which is the intended behaviour; the comparison
    is also made case-insensitive ('A.PNG' now matches).
    """
    img_files = []
    for file in os.listdir(path):
        ext = os.path.splitext(file)[1].lstrip('.').lower()
        if ext in ('jpg', 'png'):
            img_files.append(file)
    return img_files

# %% Load the data and the trained model
# FolderPath = '/home/cxq/workspace2/2019.10.23PipeEdgeDetecion/2019.10.23LossFunctionTest/Test/Dataset'
FolderPath = '../Dataset'
UnconvertedPath = './Output/Unconverted'  # where the overlay result images are written
ConvertedPath = './Output/Converted'
MaskPath = './Output/Mask'

TrainDataset, TrainDataLoader, ValDataset, ValDataLoader = PipeDatasetLoader(FolderPath, 1)
# Unet_BCELoss_Adam
SaveFolder = 'Output'
Unet = UNet(in_channels=3, out_channels=1, init_features=4, WithActivateLast = True, ActivateFunLast = torch.sigmoid).to(Device)
Unet.load_state_dict(torch.load(os.path.join(SaveFolder, '0300.pt'), map_location = Device))

# %% Run inference
Unet.eval()  # evaluation mode
torch.set_grad_enabled(False)
OutputS = []  # predictions, kept for metric computation
LabelS = []   # ground-truth labels, kept for metric computation
print(ValDataLoader)
for Iter, (Input, Label, SampleName) in enumerate(ValDataLoader):
    end = timer(8)  # time this sample's inference
    print(SampleName)
    InputImg = Input.float().to(Device)
    OutputImg = Unet(InputImg)
    Output = OutputImg.cpu().numpy()[0]
    Label = Label.detach().cpu().numpy()[0]
    OutputS.append(Output)
    LabelS.append(Label)
    end('5555')
    # Build a visualisation: prediction overlaid on the input's channel 2
    OutputImg = OutputImg.cpu().numpy()[0, 0]
    OutputImg = (OutputImg*255).astype(np.uint8)
    Input = Input.numpy()[0][0]
    Input = (Normalization(Input) * 255).astype(np.uint8)
    ResultImg = cv2.cvtColor(Input, cv2.COLOR_GRAY2RGB)
    ResultImg[...,2] = OutputImg

    plt.show()  # NOTE(review): no figure is created before this call -- likely a no-op

    cv2.imwrite(os.path.join(UnconvertedPath, SampleName[0] + '.png'), ResultImg)


Loading…
Cancel
Save