|
- # Copyright 2022 Huawei Technologies Co., Ltd
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # ============================================================================
- """post process for 310 inference"""
- import argparse
- import mindspore as ms
- import mindspore.dataset as ds
- import mindspore.dataset.vision.c_transforms as vision
- import mindspore.dataset.transforms.c_transforms as C
- from mindspore import nn
- from mindspore import dtype as mstype
-
- from src.regnet import regnet20
-
# CLI definition. NOTE: get_result() reads many more keys out of vars(args)
# than just the two paths; declare each of them here with a sensible default,
# otherwise evaluation crashes with a KeyError before doing any work.
parser = argparse.ArgumentParser(description="regnet inference")
parser.add_argument("--result_path", type=str, required=True, help="result files path.")
parser.add_argument("--label_path", type=str, required=True, help="label file path.")
parser.add_argument("--device_id", type=int, default=0, help="target device id.")
parser.add_argument("--device_target", type=str, default="Ascend", help="device target (Ascend/GPU/CPU).")
parser.add_argument("--image_size", type=int, default=32, help="input image size (CIFAR-10 is 32).")
parser.add_argument("--class_num", type=int, default=10, help="number of classes (CIFAR-10 is 10).")
parser.add_argument("--dataset_path", type=str, default="", help="CIFAR-10 dataset directory.")
parser.add_argument("--rank_size", type=int, default=1, help="number of dataset shards.")
parser.add_argument("--batch_size", type=int, default=1, help="evaluation batch size.")
parser.add_argument("--checkpoint_path", type=str, default="", help="trained checkpoint (.ckpt) path.")
args_opt = parser.parse_args()

batch_size = 1
num_classes = 1000
-
-
def evaluate_dataset(data_path, repeat_num=1, rank_id=0, rank_size=1, batch_num=1):
    """Build the CIFAR-10 test-split pipeline used for evaluation.

    Images are resized to 32x32, rescaled to [0, 1], normalized, and
    transposed from HWC to CHW; labels are cast to int32. The dataset is
    sharded by (rank_id, rank_size), batched, then repeated.
    """
    height, width = 32, 32
    scale, offset = 1.0 / 255.0, 0.0

    dataset = ds.Cifar10Dataset(data_path, num_shards=rank_size, shard_id=rank_id, usage='test')

    image_ops = [
        vision.Resize((height, width)),
        vision.Rescale(scale, offset),
        vision.Normalize((0.4465, 0.4822, 0.4914), (0.2010, 0.1994, 0.2023)),
        vision.HWC2CHW(),
    ]

    dataset = dataset.map(operations=C.TypeCast(mstype.int32), input_columns="label")
    dataset = dataset.map(operations=image_ops, input_columns="image")
    dataset = dataset.batch(batch_size=batch_num, drop_remainder=False)
    return dataset.repeat(repeat_num)
-
-
def get_result(args):
    """Evaluate a trained RegNet-20 checkpoint on the CIFAR-10 test split.

    Args:
        args: parsed CLI namespace; must provide device_id, device_target,
            image_size, class_num, dataset_path, rank_size, batch_size and
            checkpoint_path (read via vars(args)).

    Side effects: configures the MindSpore context and prints the dataset
    size and the final top-1 accuracy.
    """
    config = vars(args)
    device_id = config['device_id']
    ms.set_context(mode=ms.GRAPH_MODE, device_target=config['device_target'], save_graphs=False,
                   device_id=device_id)
    if ms.get_context("device_target") == "GPU":
        # Graph-kernel fusion is enabled only for the GPU backend.
        ms.set_context(enable_graph_kernel=True, graph_kernel_flags="--enable_parallel_fusion")
    image_size = config['image_size']
    class_num = config['class_num']
    dataset_path = config['dataset_path']
    rank_size = config['rank_size']
    # NOTE(review): device_id is reused as the dataset shard id — presumably
    # one shard per device; confirm this matches the launch configuration.
    ds_eval = evaluate_dataset(data_path=dataset_path, repeat_num=1, rank_id=device_id,
                               rank_size=rank_size, batch_num=config['batch_size'])
    print('dataset size is : \n', ds_eval.get_dataset_size())
    regnet = regnet20(batch_size=config['batch_size'], im_size=image_size, class_num=class_num)
    regnet.set_train(False)
    param_dict = ms.load_checkpoint(config['checkpoint_path'])
    print('Load trained model done. {}'.format(config['checkpoint_path']))
    regnet.init_parameters_data()
    ms.load_param_into_net(regnet, param_dict)
    top1_acc = nn.Top1CategoricalAccuracy()
    for data in ds_eval.create_dict_iterator():
        logits = regnet(data['image'])
        top1_acc.update(logits, data['label'])
    acc = top1_acc.eval()
    # BUG FIX: Top1CategoricalAccuracy.eval() returns a fraction in [0, 1];
    # scale by 100 so the printed value matches the '%' suffix.
    print('top-1: %s%%' % (acc * 100))
-
-
if __name__ == '__main__':
    # Script entry point: evaluate with the command-line arguments parsed above.
    get_result(args_opt)
|