|
- diff a/utils/change_data.py b/utils/change_data.py (rejected hunks)
- @@ -98,119 +98,119 @@
- image_B = cv2.flip(image_B, 0)
- mask = cv2.flip(mask, 0)
-
- return image_A, image_B, mask
-
-
def randomRotate90(image_A, image_B, mask, ratio=0.5):
    """With probability `ratio`, rotate both images and the mask by 90 degrees.

    The same rotation is applied to all three arrays so the pair of images
    stays aligned with its change mask. `.copy()` materialises the rotated
    view so downstream code gets a contiguous array.
    """
    if np.random.random() < ratio:
        rotated = [np.rot90(arr).copy() for arr in (image_A, image_B, mask)]
        image_A, image_B, mask = rotated

    return image_A, image_B, mask
-
-
def data_agu(image_A, image_B, label):
    """Run the full random-augmentation pipeline on one change-detection sample.

    Colour jitter is applied to the two images only; the geometric
    transforms (shift/scale/rotate, flips, 90-degree rotation) are applied
    jointly to both images and the label mask so they stay aligned.
    Each step fires with its own internal probability.
    """
    image_A, image_B = randomHueSaturationValue(image_A, image_B)

    # Geometric augmentations share the (image_A, image_B, label) signature,
    # so they can be chained uniformly.
    for augment in (randomShiftScaleRotate,
                    randomHorizontalFlip,
                    randomVerticleFlip,
                    randomRotate90):
        image_A, image_B, label = augment(image_A, image_B, label)

    return image_A, image_B, label
-
-
- # def transform_2(image1, mask):
- # aug = A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0, always_apply=False,
- # p=1.0)
- # augmented = aug(image=image1, mask=mask)
- # return augmented
- #
- #
- # def transform(image1, image2, mask):
- # masks = [image2, mask]
- # aug = A.Compose([
- # A.Resize(128, 128),
- # A.VerticalFlip(p=0.5),
- # A.RandomRotate90(p=0.5),
- #
- # ])
- #
- # augmented = aug(image=image1, masks=masks)
- # return augmented
- # resize = transforms.Resize([256, 256])
# ImageNet-style preprocessing for the network inputs: HWC uint8 array ->
# CHW float tensor in [0, 1], then channel-wise normalisation.
transform_train = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

# Target spatial size for optional resizing (not used in this chunk).
res_shape = (224, 224)
-
-
class MyDataset(Dataset):
    """Paired change-detection dataset.

    Expects `data_path` to contain three sub-directories:
      A/      pre-change images  (*.png)
      B/      post-change images (*.png)
      label/  binary change masks (*.png)

    Each item is ``(image_A, image_B, label)``: two normalised float image
    tensors plus the mask scaled to {0, 1}.
    """

    def __init__(self, data_path):
        # glob's result order is OS-dependent; sort each list so the A/B/label
        # triplets stay aligned by index (assumes matching filenames across
        # the three folders -- TODO confirm against the dataset layout).
        self.img_path_A = sorted(glob.glob(os.path.join(data_path, 'A', '*.png')))
        self.img_path_B = sorted(glob.glob(os.path.join(data_path, 'B', '*.png')))
        self.mask_path = sorted(glob.glob(os.path.join(data_path, 'label', '*.png')))

    def __getitem__(self, index):
        # Joint random augmentation keeps both images and the mask aligned.
        images1, images2, labels = data_agu(
            np.array(Image.open(self.img_path_A[index])),
            np.array(Image.open(self.img_path_B[index])),
            np.array(Image.open(self.mask_path[index])))

        images1 = transform_train(images1)
        images2 = transform_train(images2)
        # Masks are stored as {0, 255} pixels; scale to {0, 1} targets.
        labels = np.array(labels) / 255
        return images1, images2, labels

    def __len__(self):
        # Number of samples == number of pre-change images.
        return len(self.img_path_A)
-
-
def Mydataset_collate(batch):
    """Collate a list of ``(image_A, image_B, mask)`` samples into batched arrays.

    Fixes the original implementation, where the loop variables shadowed the
    accumulator lists (``for images1, images2, masks in batch`` rebound the
    lists themselves), so every ``append`` targeted a per-sample value and
    the function could never build a batch.

    Returns three numpy arrays with the batch dimension first.
    """
    images1, images2, masks = [], [], []
    for img_a, img_b, mask in batch:
        images1.append(img_a)
        images2.append(img_b)
        masks.append(mask)
    return np.array(images1), np.array(images2), np.array(masks)
|