|
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
- """FCOS EVAL"""
- import json
- import argparse
- from tqdm import tqdm
- import mindspore
- from mindspore import context
- from pycocotools.coco import COCO
- from pycocotools.cocoeval import COCOeval
-
- from src.fcos import FCOSDetector
- from src.eval_utils import post_process
- from src.eval_utils import ClipBoxes
- from src import COCO_dataset
-
- def _has_only_empty_bbox(annot):
- return all(any(o <= 1 for o in obj['bbox'][2:]) for obj in annot)
-
def _has_valid_annotation(annot):
    """Return True if *annot* is a non-None list holding at least one non-degenerate box."""
    return annot is not None and not _has_only_empty_bbox(annot)
-
def evaluate_coco(val_dataset, _model, threshold=0.05, anno_path=None):
    """Run COCO bbox evaluation of ``_model`` over ``val_dataset``.

    Args:
        val_dataset: MindSpore dataset yielding dicts with "img" and "scales".
        _model: detector callable returning (scores, labels, boxes) per batch.
        threshold (float): score below which detections are discarded.
        anno_path (str|None): COCO annotation json path; when None, falls back
            to the module-level CLI option ``opt.anno_path`` (original behavior).

    Returns:
        The 12-element COCOeval ``stats`` summary vector (AP/AR metrics).
    """
    coco = COCO(anno_path if anno_path is not None else opt.anno_path)
    # Keep only images carrying at least one non-degenerate annotation,
    # in sorted order -- assumed to match the dataset's iteration order.
    ids = [
        i for i in sorted(coco.imgs.keys())
        if _has_valid_annotation(coco.loadAnns(coco.getAnnIds(imgIds=i, iscrowd=None)))
    ]
    category2id = {v: i + 1 for i, v in enumerate(coco.getCatIds())}
    # Inverse map: contiguous model label -> original COCO category id.
    id2category = {v: k for k, v in category2id.items()}
    results = []
    image_ids = []
    for index, item in enumerate(tqdm(val_dataset.create_dict_iterator())):
        img = item["img"]
        scale = item["scales"].asnumpy()
        scores, labels, boxes = _model(img)
        # BUGFIX: forward *threshold* instead of the hard-coded 0.05 so the
        # parameter actually controls post-processing (default unchanged).
        scores, labels, boxes = post_process(
            [scores.asnumpy(), labels.asnumpy(), boxes.asnumpy()], threshold, 0.6)
        boxes = ClipBoxes(img, boxes)
        boxes /= scale                    # undo the resize applied by the dataset
        boxes[:, :, 2] -= boxes[:, :, 0]  # xyxy -> xywh as COCO expects
        boxes[:, :, 3] -= boxes[:, :, 1]

        for box, score, label in zip(boxes[0], scores[0], labels[0]):
            # post_process presumably emits detections sorted by descending
            # score, so stopping at the first low score is safe -- TODO confirm.
            if score < threshold:
                break
            results.append({
                'image_id': ids[index],
                'category_id': id2category[label],
                'score': float(score),
                'bbox': box.tolist(),
            })
            image_ids.append(ids[index])
    # BUGFIX: close (and flush) the results file before loadRes re-reads it;
    # the original leaked the handle and relied on refcounting to flush.
    with open('coco_bbox_results.json', 'w') as result_file:
        json.dump(results, result_file, indent=4)
    coco_pred = coco.loadRes('coco_bbox_results.json')
    # run COCO evaluation
    coco_eval = COCOeval(coco, coco_pred, 'bbox')
    coco_eval.params.imgIds = image_ids
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    return coco_eval.stats
-
# CLI options are parsed at import time because evaluate_coco reads the
# global ``opt`` directly.
_cli = argparse.ArgumentParser()
for _flag, _spec in (
        ("--device_id", dict(type=int, default=5, help="DEVICE_ID to run ")),
        ("--eval_path", dict(type=str, default="/data/coco/val2017")),
        ("--anno_path", dict(type=str, default="/data/coco/annotations/instances_val2017.json")),
        ("--ckpt_path", dict(type=str, default="/data1/FCOS/checkpoint/backbone/s1.ckpt")),
):
    _cli.add_argument(_flag, **_spec)
opt = _cli.parse_args()
if __name__ == "__main__":
    # Graph-mode execution on the Ascend device selected via --device_id.
    context.set_context(mode=context.GRAPH_MODE, device_target='Ascend', device_id=opt.device_id)
    eval_ds, _ = COCO_dataset.create_coco_dataset(opt.eval_path, opt.anno_path, 1, shuffle=False, train=False,
                                                  transform=None, num_parallel_workers=1)
    # Build the detector in inference mode and restore the checkpoint weights.
    net = FCOSDetector(mode="inference")
    net.set_train(False)
    mindspore.load_param_into_net(net, mindspore.load_checkpoint(opt.ckpt_path))
    evaluate_coco(eval_ds, net)
|