|
- # Builtin configurations (DO NOT CHANGE these configurations unless you know exactly what you are doing)
- enable_modelarts: False
- data_url: ""
- train_url: ""
- checkpoint_url: ""
- data_path: "/cache/data"
- output_path: "/zzq_data/mycode/hrnetv2_mindspore/hrnetv2_mindspore/HRNetV2_det/hrnetv2p_fastercnn/cancel"
- load_path: "/cache/checkpoint_path"
- device_target: GPU
- enable_profiling: False
-
- # ==============================================================================
- # config
- img_width: 1280
- img_height: 768
- # The value of keep_ratio must be the same as the [KEEP_RATIO] parameter in Ascend310 inference
- keep_ratio: True
- flip_ratio: 0.5
- expand_ratio: 1.0
-
- # loss
- without_bg_loss: True
-
- # hrnetv2 W32
- FINAL_CONV_KERNEL: 1
- STAGE1_NUM_MODULES: 1
- STAGE1_NUM_BRANCHES: 1
- STAGE1_BLOCK: "BOTTLENECK"
- STAGE1_NUM_BLOCKS: [4]
- STAGE1_NUM_CHANNELS: [64]
- STAGE1_FUSE_METHOD: "SUM"
-
- STAGE2_NUM_MODULES: 1
- STAGE2_NUM_BRANCHES: 2
- STAGE2_BLOCK: "BASIC"
- STAGE2_NUM_BLOCKS: [4, 4]
- STAGE2_NUM_CHANNELS: [32, 64]
- STAGE2_FUSE_METHOD: "SUM"
-
- STAGE3_NUM_MODULES: 4
- STAGE3_NUM_BRANCHES: 3
- STAGE3_BLOCK: "BASIC"
- STAGE3_NUM_BLOCKS: [4, 4, 4]
- STAGE3_NUM_CHANNELS: [32, 64, 128]
- STAGE3_FUSE_METHOD: "SUM"
-
- STAGE4_NUM_MODULES: 3
- STAGE4_NUM_BRANCHES: 4
- STAGE4_BLOCK: "BASIC"
- STAGE4_NUM_BLOCKS: [4, 4, 4, 4]
- STAGE4_NUM_CHANNELS: [32, 64, 128, 256]
- STAGE4_FUSE_METHOD: "SUM"
-
- # anchor
- anchor_scales: [8]
- anchor_ratios: [0.5, 1.0, 2.0]
- anchor_strides: [4, 8, 16, 32, 64]
- num_anchors: 3
-
-
- # fpn
- fpn_in_channels: [32, 64, 128, 256]  # must match STAGE4_NUM_CHANNELS of the HRNetV2 backbone above
- fpn_out_channels: 256  # must match rpn_in_channels and roi_align_out_channels below
- fpn_num_outs: 5  # must match the number of entries in anchor_strides
-
- # rpn
- rpn_in_channels: 256
- rpn_feat_channels: 256
- rpn_loss_cls_weight: 1.0
- rpn_loss_reg_weight: 1.0
- rpn_cls_out_channels: 1
- rpn_target_means: [0., 0., 0., 0.]  # TODO(review): confirm zero means are intended for RPN bbox regression targets
- rpn_target_stds: [1.0, 1.0, 1.0, 1.0]  # TODO(review): confirm unit stds are intended for RPN bbox regression targets
-
- # bbox_assign_sampler
- neg_iou_thr: 0.3
- pos_iou_thr: 0.7
- min_pos_iou: 0.3
- num_gts: 128
- num_expected_neg: 256
- num_expected_pos: 128
-
- # proposal
- activate_num_classes: 2
- use_sigmoid_cls: True
-
- # roi_align
- roi_layer: {type: 'RoIAlign', out_size: 7, sample_num: 2}
- roi_align_out_channels: 256
- roi_align_featmap_strides: [4, 8, 16, 32]
- roi_align_finest_scale: 56
- roi_sample_num: 640
-
- # bbox_assign_sampler_stage2
- neg_iou_thr_stage2: 0.5
- pos_iou_thr_stage2: 0.5
- min_pos_iou_stage2: 0.5
- num_bboxes_stage2: 2000
- num_expected_pos_stage2: 128
- num_expected_neg_stage2: 512
- num_expected_total_stage2: 512
-
- # rcnn
- rcnn_num_layers: 2
- rcnn_in_channels: 256
- rcnn_fc_out_channels: 1024
- rcnn_loss_cls_weight: 1
- rcnn_loss_reg_weight: 1
- rcnn_target_means: [0., 0., 0., 0.]
- rcnn_target_stds: [0.1, 0.1, 0.2, 0.2]
-
- # train proposal
- rpn_proposal_nms_across_levels: False
- rpn_proposal_nms_pre: 2000
- rpn_proposal_nms_post: 2000
- rpn_proposal_max_num: 2000
- rpn_proposal_nms_thr: 0.7
- rpn_proposal_min_bbox_size: 0
-
- # test proposal
- rpn_nms_across_levels: False
- rpn_nms_pre: 1000
- rpn_nms_post: 1000
- rpn_max_num: 1000
- rpn_nms_thr: 0.7
- rpn_min_bbox_min_size: 0
- test_score_thr: 0.05
- test_iou_thr: 0.5
- test_max_per_img: 100
- test_batch_size: 2
-
- rpn_head_use_sigmoid: True
- rpn_head_weight: 1.0
-
- # LR
- base_lr: 0.04
- warmup_step: 500
- warmup_ratio: 0.0625
- sgd_step: [8, 11]
- sgd_momentum: 0.9
- lr_type: "dynamic"
-
- # train
- batch_size: 2
- loss_scale: 256
- momentum: 0.91  # NOTE(review): differs from sgd_momentum (0.9) above — confirm which value the optimizer actually uses
- weight_decay: 0.00001
- epoch_size: 20
- run_eval: False
- interval: 1
- save_checkpoint: True
- save_checkpoint_epochs: 1
- keep_checkpoint_max: 5
- save_checkpoint_path: "./"
- opt_type: "sgd"
- finetune: False
-
- # Number of threads used to process the dataset in parallel
- num_parallel_workers: 8
- # Parallelize Python operations with multiple worker processes
- python_multiprocessing: True
- mindrecord_dir: "/zzq_data/mycode/hrnetv2_mindspore/hrnetv2_mindspore/HRNetV2_det/hrnetv2p_fastercnn/cancel"
- # fixme "/disk2/dataset/COCO2017/MindRecord_COCO_TRAIN"
- coco_root: "/zzq_data/mmdetection-master/data/coco/"
- # fixme "/disk2/dataset/COCO2017"
- train_data_type: "val2017"
- # FIXME: train_data_type should be "train2017" for real training; "val2017" here is only suitable for quick debugging
- val_data_type: "val2017"
- instance_set: "annotations/instances_{}.json"
- coco_classes: ['background', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
- 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
- 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
- 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra',
- 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
- 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
- 'kite', 'baseball bat', 'baseball glove', 'skateboard',
- 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
- 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
- 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
- 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
- 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
- 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
- 'refrigerator', 'book', 'clock', 'vase', 'scissors',
- 'teddy bear', 'hair drier', 'toothbrush']
- num_classes: 81
- prefix: ""
-
- # train.py FasterRcnn training
- run_distribute: False
- dataset: "coco"
- pre_trained: ""
- device_id: 0
- device_num: 1
- rank_id: 0
- image_dir: ''
- anno_path: '/zzq_data/mmdetection-master/data/coco/annotations/instances_val2017.json'
- backbone: 'hrnetv2p'
- log_summary: False
- grad_clip: False
-
- # eval.py FasterRcnn evaluation
- checkpoint_path: "/zzq_data/mycode/hrnetv2_mindspore/hrnetv2_mindspore/HRNetV2_det/hrnetv2p_fastercnn/ckpt_0/faster_rcnn_2-20_2500.ckpt"
-
- # export.py fasterrcnn_export
- file_name: "faster_rcnn"
- file_format: "MINDIR"
- ckpt_file: "/cache/train/fasterrcnn/faster_rcnn-12_7393.ckpt"
-
- # postprocess ("./src/config_50.yaml")
- result_path: ''
-
- ---
- # Config description for each option
- enable_modelarts: 'Whether training on modelarts, default: False'
- data_url: 'Dataset url for obs'
- train_url: 'Training output url for obs'
- data_path: 'Dataset path for local'
- output_path: 'Training output path for local'
- result_dir: "result files path."
- label_dir: "image file path."
-
- device_target: "device where the code will be implemented, default is Ascend"
- file_name: "output file name."
- dataset: "Dataset, either cifar10 or imagenet2012"
- parameter_server: 'Run parameter server train'
- width: 'input width'
- height: 'input height'
- enable_profiling: 'Whether enable profiling while training, default: False'
- only_create_dataset: 'If set it true, only create Mindrecord, default is false.'
- run_distribute: 'Run distribute, default is false.'
- do_train: 'Do train or not, default is true.'
- do_eval: 'Do eval or not, default is false.'
- pre_trained: 'Pretrained checkpoint path'
- device_id: 'Device id, default is 0.'
- device_num: 'Use device nums, default is 1.'
- rank_id: 'Rank id, default is 0.'
- file_format: 'file format'
- checkpoint_path: "Checkpoint file path."
- ckpt_file: 'fasterrcnn ckpt file.'
- result_path: "result file path."
- backbone: "backbone network name, options:resnet_v1_50, resnet_v1.5_50, resnet_v1_101, resnet_v1_152"
- interval: "val interval"
-
- ---
- device_target: ['Ascend', 'GPU', 'CPU']
- file_format: ["AIR", "ONNX", "MINDIR"]
- dataset_name: ["cifar10", "imagenet2012"]
|