|
- import glob
- import os
- import numpy as np
- import torch
- from torch.utils.data import Dataset
- from torchvision import transforms
- from PIL import Image
-
# Normalisation statistics: ImageNet mean/std.
# (LEVIR-CD dataset alternative: mean=[0.356, 0.346, 0.295],
#  std=[0.186, 0.177, 0.167] — swap in if training on LEVIR-CD only.)
_NORM_MEAN = [0.485, 0.456, 0.406]
_NORM_STD = [0.229, 0.224, 0.225]

# Shared train-time preprocessing: HWC uint8 -> CHW float in [0,1] -> normalised.
transform_train = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=_NORM_MEAN, std=_NORM_STD),
])

# Target (width, height) every image and mask is resized to before use.
res_shape = (1024, 1024)
-
-
class MyDataset(Dataset):
    """Bi-temporal change-detection dataset (pre/post image pair + binary mask).

    Expects ``data_path`` to contain three sub-directories:
      A/      pre-change RGB images  (*.png)
      B/      post-change RGB images (*.png)
      label/  change masks           (*.png); nonzero pixels mark change
    Corresponding files in A, B and label must share the same filename so
    that sorting aligns the triples.
    """

    def __init__(self, data_path):
        # Sort the glob results: glob.glob returns paths in arbitrary
        # filesystem order, so without sorting the A/B/label triples
        # could be silently misaligned across the three directories.
        self.img_path_A = sorted(glob.glob(os.path.join(data_path, 'A', '*.png')))
        self.img_path_B = sorted(glob.glob(os.path.join(data_path, 'B', '*.png')))
        self.mask_path = sorted(glob.glob(os.path.join(data_path, 'label', '*.png')))

    def __getitem__(self, index):
        """Return ``(image_A, image_B, label)`` for one sample.

        image_A / image_B: normalised float32 tensors, shape (3, H, W).
        label: int64 tensor, shape (H, W), values in {0, 1}.
        """
        images1 = transform_train(np.array(Image.open(self.img_path_A[index]).resize(res_shape, Image.BILINEAR)))
        images2 = transform_train(np.array(Image.open(self.img_path_B[index]).resize(res_shape, Image.BILINEAR)))
        # NEAREST for the mask: bilinear resampling would invent intermediate
        # values along class boundaries and, combined with the "!= 0"
        # threshold below, would dilate the foreground region.
        labels = np.array(Image.open(self.mask_path[index]).resize(res_shape, Image.NEAREST)) / 255
        labels[labels != 0] = 1  # binarise: any nonzero pixel counts as change
        # int64 class indices, as expected by e.g. cross-entropy losses.
        return images1, images2, torch.tensor(labels).to(torch.int64)

    def __len__(self):
        # One sample per pre-change image.
        return len(self.img_path_A)
-
-
def Mydataset_collate(batch):
    """Collate a list of ``(image_A, image_B, mask)`` samples into batched arrays.

    Args:
        batch: iterable of 3-tuples as produced by ``MyDataset.__getitem__``.

    Returns:
        Tuple of three numpy arrays with a leading batch dimension:
        stacked image_A, stacked image_B, stacked masks.

    Note: the previous implementation reused ``images1``/``images2``/``masks``
    as both the accumulator lists and the loop variables, so each iteration
    clobbered the lists and ``.append`` was called on the unpacked samples —
    fixed by giving the per-sample variables distinct names.
    """
    images1, images2, masks = [], [], []
    for img_a, img_b, mask in batch:
        images1.append(img_a)
        images2.append(img_b)
        masks.append(mask)
    return np.array(images1), np.array(images2), np.array(masks)
|