# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
- """
- create train or eval dataset.
- """
- import os
- import mindspore.common.dtype as mstype
- import mindspore.dataset.engine as de
- import mindspore.dataset.vision.c_transforms as C
- import mindspore.dataset.transforms.c_transforms as C2
- from mindspore.communication.management import init, get_rank, get_group_size
- from mindspore.mindrecord.tools.cifar10_to_mr import Cifar10ToMR
- import pickle
- import numpy as np
-
class GetDatasetGenerator:
    """Generator that loads the python-version CIFAR-10 batches into memory.

    Expects the standard `cifar-10-batches-py` directory under `root`.
    """

    def __init__(self, root, train=True):
        self.root = root
        self.train = train
        self.data = []
        self.targets = []
        if self.train:
            batch_list = ['data_batch_1', 'data_batch_2', 'data_batch_3', 'data_batch_4', 'data_batch_5']
        else:
            batch_list = ['test_batch']
        for file_name in batch_list:
            file_path = os.path.join(self.root, 'cifar-10-batches-py', file_name)
            with open(file_path, 'rb') as f:
                entry = pickle.load(f, encoding='latin1')
                self.data.append(entry['data'])
                self.targets.extend(entry['labels'])

        # stack all batches into one (N, 3, 32, 32) array, then convert to HWC
        self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
        self.data = self.data.transpose((0, 2, 3, 1))

    def __getitem__(self, index):
        return self.data[index], self.targets[index]

    def __len__(self):
        return len(self.data)

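# A minimal usage sketch for the generator (the path below is a placeholder,
# not part of this module): wrap it in a GeneratorDataset to obtain a pipeline
# with 'image' (32x32x3 uint8, HWC) and 'label' (int) columns.
#
#   generator = GetDatasetGenerator("/path/to/cifar10", train=True)
#   print(len(generator))  # 50000 for the training split, 10000 for test
#   ds = de.GeneratorDataset(generator, ["image", "label"], shuffle=True)
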
def create_dataset1(dataset_path, do_train, repeat_num=1, batch_size=32, target="Ascend"):
    """
    Create a training or evaluation CIFAR-10 dataset for ResNet-50.

    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): whether the dataset is used for training or evaluation.
        repeat_num(int): the repeat times of dataset. Default: 1
        batch_size(int): the batch size of dataset. Default: 32
        target(str): the device target. Default: Ascend

    Returns:
        dataset
    """
    # The generator loads the full CIFAR-10 split on every process, so the
    # dataset is not sharded here and rank information is not needed; `target`
    # is kept for interface compatibility with the other create_dataset*
    # functions.
    ds = de.GeneratorDataset(GetDatasetGenerator(dataset_path, do_train), ['image', 'label'],
                             num_parallel_workers=8, shuffle=do_train)

    # define map operations
    trans = []
    if do_train:
        trans += [
            C.RandomCrop((32, 32), (4, 4, 4, 4)),
            C.RandomHorizontalFlip(prob=0.5)
        ]

    trans += [
        C.Rescale(1.0 / 255.0, 0.0),
        C.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
        C.HWC2CHW()
    ]

    type_cast_op = C2.TypeCast(mstype.int32)

    ds = ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=8)
    ds = ds.map(operations=trans, input_columns="image", num_parallel_workers=8)

    # apply batch operations
    ds = ds.batch(batch_size, drop_remainder=True)
    # apply dataset repeat operation
    ds = ds.repeat(repeat_num)

    return ds
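
# A minimal usage sketch (placeholder path; assumes the pickled CIFAR-10
# batches are present under dataset_path/cifar-10-batches-py):
#
#   train_ds = create_dataset1("/path/to/cifar10", do_train=True, batch_size=32)
#   for batch in train_ds.create_dict_iterator():
#       print(batch["image"].shape, batch["label"].shape)  # (32, 3, 32, 32), (32,)
#       break
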


def create_dataset2(dataset_path, do_train, repeat_num=1, batch_size=32, target="Ascend"):
    """
    Create a training or evaluation ImageNet2012 dataset for ResNet-50.

    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): whether the dataset is used for training or evaluation.
        repeat_num(int): the repeat times of dataset. Default: 1
        batch_size(int): the batch size of dataset. Default: 32
        target(str): the device target. Default: Ascend

    Returns:
        dataset
    """
    if target == "Ascend":
        device_num, rank_id = _get_rank_info()
    else:
        # on other targets, query rank information from the communication framework
        init()
        rank_id = get_rank()
        device_num = get_group_size()

    if device_num == 1:
        ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True)
    else:
        ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True,
                                   num_shards=device_num, shard_id=rank_id)

    image_size = 224
    # ImageNet channel statistics, scaled to the [0, 255] pixel range
    mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
    std = [0.229 * 255, 0.224 * 255, 0.225 * 255]

    # define map operations
    if do_train:
        trans = [
            C.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
            C.RandomHorizontalFlip(prob=0.5),
            C.Normalize(mean=mean, std=std),
            C.HWC2CHW()
        ]
    else:
        trans = [
            C.Decode(),
            C.Resize(256),
            C.CenterCrop(image_size),
            C.Normalize(mean=mean, std=std),
            C.HWC2CHW()
        ]

    type_cast_op = C2.TypeCast(mstype.int32)

    ds = ds.map(operations=trans, input_columns="image", num_parallel_workers=8)
    ds = ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=8)

    # apply batch operations
    ds = ds.batch(batch_size, drop_remainder=True)

    # apply dataset repeat operation
    ds = ds.repeat(repeat_num)

    return ds
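
# A minimal usage sketch (placeholder paths; ImageFolderDataset expects one
# sub-directory per class, e.g. dataset_path/n01440764/*.JPEG for ImageNet):
#
#   train_ds = create_dataset2("/path/to/imagenet/train", do_train=True,
#                              batch_size=256, target="Ascend")
#   print(train_ds.get_dataset_size())  # number of batches per epoch
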


def create_dataset3(dataset_path, do_train, repeat_num=1, batch_size=32, target="Ascend"):
    """
    Create a training or evaluation ImageNet2012 dataset for ResNet-101.

    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): whether the dataset is used for training or evaluation.
        repeat_num(int): the repeat times of dataset. Default: 1
        batch_size(int): the batch size of dataset. Default: 32
        target(str): the device target. Default: Ascend

    Returns:
        dataset
    """
    device_num, rank_id = _get_rank_info()

    if device_num == 1:
        ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True)
    else:
        ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True,
                                   num_shards=device_num, shard_id=rank_id)

    image_size = 224
    mean = [0.475 * 255, 0.451 * 255, 0.392 * 255]
    std = [0.275 * 255, 0.267 * 255, 0.278 * 255]

    # define map operations
    if do_train:
        trans = [
            C.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
            # note: the flip probability rank_id / (rank_id + 1) depends on the
            # rank (0 on rank 0, 1/2 on rank 1, ...), so augmentation strength
            # differs across devices
            C.RandomHorizontalFlip(rank_id / (rank_id + 1)),
            C.Normalize(mean=mean, std=std),
            C.HWC2CHW()
        ]
    else:
        trans = [
            C.Decode(),
            C.Resize(256),
            C.CenterCrop(image_size),
            C.Normalize(mean=mean, std=std),
            C.HWC2CHW()
        ]

    type_cast_op = C2.TypeCast(mstype.int32)

    ds = ds.map(operations=trans, input_columns="image", num_parallel_workers=8)
    ds = ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=8)

    # apply batch operations
    ds = ds.batch(batch_size, drop_remainder=True)
    # apply dataset repeat operation
    ds = ds.repeat(repeat_num)

    return ds

def create_dataset4(dataset_path, do_train, repeat_num=1, batch_size=32, target="Ascend"):
    """
    Create a training or evaluation ImageNet2012 dataset for SE-ResNet-50.

    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): whether the dataset is used for training or evaluation.
        repeat_num(int): the repeat times of dataset. Default: 1
        batch_size(int): the batch size of dataset. Default: 32
        target(str): the device target. Default: Ascend

    Returns:
        dataset
    """
    if target == "Ascend":
        device_num, rank_id = _get_rank_info()
    else:
        # mirror create_dataset2: on other targets, query rank information from
        # the communication framework so device_num/rank_id are always defined
        init()
        rank_id = get_rank()
        device_num = get_group_size()

    if device_num == 1:
        ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=12, shuffle=True)
    else:
        ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=12, shuffle=True,
                                   num_shards=device_num, shard_id=rank_id)

    image_size = 224
    mean = [123.68, 116.78, 103.94]
    std = [1.0, 1.0, 1.0]

    # define map operations
    if do_train:
        trans = [
            C.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
            C.RandomHorizontalFlip(prob=0.5),
            C.Normalize(mean=mean, std=std),
            C.HWC2CHW()
        ]
    else:
        trans = [
            C.Decode(),
            C.Resize(292),
            C.CenterCrop(256),
            C.Normalize(mean=mean, std=std),
            C.HWC2CHW()
        ]

    type_cast_op = C2.TypeCast(mstype.int32)
    ds = ds.map(operations=trans, input_columns="image", num_parallel_workers=12)
    ds = ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=12)

    # apply batch operations
    ds = ds.batch(batch_size, drop_remainder=True)

    # apply dataset repeat operation
    ds = ds.repeat(repeat_num)

    return ds

def _get_rank_info():
    """
    Get rank size and rank id.
    """
    rank_size = int(os.environ.get("RANK_SIZE", 1))

    if rank_size > 1:
        # in a distributed job, trust the communication framework over the env var
        rank_size = get_group_size()
        rank_id = get_rank()
    else:
        rank_size = 1
        rank_id = 0

    return rank_size, rank_id
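

# A minimal launch/smoke-test sketch. Everything below is illustrative: the
# dataset path is a placeholder, and the environment variables are only
# needed for multi-device runs (RANK_TABLE_FILE applies to Ascend jobs).
#
#   export RANK_SIZE=8          # makes _get_rank_info() query the framework
#   export RANK_TABLE_FILE=...  # deployment-specific rank table
#
if __name__ == "__main__":
    # single-device sanity check, assuming a local CIFAR-10 copy at the
    # placeholder path below (adjust before running)
    sanity_ds = create_dataset1("/path/to/cifar10", do_train=False, batch_size=8)
    for sample in sanity_ds.create_dict_iterator():
        print(sample["image"].shape, sample["label"].shape)  # (8, 3, 32, 32) (8,)
        break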