|
- import glob
- import os
-
- import cv2
- import numpy as np
- import torch
- from torch.utils.data import Dataset
- from torchvision import transforms
- from PIL import Image
-
# Standard ImageNet normalization applied to each input image tensor.
transform_train = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])

# Target spatial size (width, height) used when resizing images and masks.
res_shape = (1024, 1024)
# Edge length used by the commented-out cv2/Canny edge-mask code below.
size = 1024
class MyDataset(Dataset):
    """Paired change-detection dataset.

    Expects a directory layout under ``data_path``:
        A/      *.png  -- first-epoch images
        B/      *.png  -- second-epoch images
        label/  *.png  -- change label masks
        A_b/    *.png  -- boundary masks for A
        B_b/    *.png  -- boundary masks for B

    Each sample is (image, labels, b_images1_mask, b_images2_mask) where
    ``image`` is the two normalized inputs concatenated on the channel
    axis (6, H, W) and the masks are numpy arrays in [0, 1].
    """

    def __init__(self, data_path):
        # Sort every listing: glob order is filesystem-dependent, so without
        # sorting, index i is NOT guaranteed to refer to the same scene
        # across A / B / label / A_b / B_b (assumes matching filenames per
        # directory -- TODO confirm against the dataset layout).
        self.img_path_A = sorted(glob.glob(os.path.join(data_path, 'A', '*.png')))
        self.img_path_B = sorted(glob.glob(os.path.join(data_path, 'B', '*.png')))
        self.mask_path = sorted(glob.glob(os.path.join(data_path, 'label', '*.png')))
        self.b_img_path_A = sorted(glob.glob(os.path.join(data_path, 'A_b', '*.png')))
        self.b_img_path_B = sorted(glob.glob(os.path.join(data_path, 'B_b', '*.png')))

    def __getitem__(self, index):
        # Load, resize and normalize both input images (ImageNet statistics),
        # then stack them along the channel axis -> (6, H, W) tensor.
        images1 = transform_train(np.array(Image.open(self.img_path_A[index]).resize(res_shape, Image.BILINEAR)))
        images2 = transform_train(np.array(Image.open(self.img_path_B[index]).resize(res_shape, Image.BILINEAR)))
        image = torch.cat([images1, images2], 0)

        # Change label mask, scaled from [0, 255] to [0, 1].
        labels = np.array(Image.open(self.mask_path[index]).resize(res_shape, Image.BILINEAR)) / 255

        # Boundary masks: bilinear resizing introduces intermediate grey
        # values, so binarize -- any non-zero pixel counts as boundary.
        b_images1_mask = np.array(Image.open(self.b_img_path_A[index]).resize(res_shape, Image.BILINEAR)) / 255
        b_images2_mask = np.array(Image.open(self.b_img_path_B[index]).resize(res_shape, Image.BILINEAR)) / 255
        b_images1_mask[b_images1_mask != 0] = 1
        b_images2_mask[b_images2_mask != 0] = 1

        return image, labels, b_images1_mask, b_images2_mask

    def __len__(self):
        # Total number of samples (assumes all five listings are equally long).
        return len(self.img_path_A)
-
-
def Mydataset_collate(batch):
    """Collate a list of (images1, images2, masks) samples into three
    stacked numpy arrays, one per field.

    Bug fix: the original loop variables shadowed the accumulator lists
    of the same names, so each iteration rebound the accumulator to a
    batch element and then tried to append the element to itself --
    crashing on arrays and corrupting the result otherwise. Distinct
    loop names restore the intended behavior.
    """
    images1 = []
    images2 = []
    masks = []
    for img1, img2, mask in batch:
        images1.append(img1)
        images2.append(img2)
        masks.append(mask)
    return np.array(images1), np.array(images2), np.array(masks)
|