|
- # Copyright 2020 Huawei Technologies Co., Ltd
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
# Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # ============================================================================
-
- """train FasterRcnn and get checkpoint files."""
-
- import os
- import time
- import numpy as np
-
- import mindspore.common.dtype as mstype
- from mindspore import context, Tensor, Parameter
- from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, TimeMonitor
- from mindspore.train import Model
- from mindspore.context import ParallelMode
- from mindspore.train.serialization import load_checkpoint, load_param_into_net
- from mindspore.nn import SGD
- from mindspore.common import set_seed
- from mindspore.communication.management import init, get_rank, get_group_size
-
- from src.FasterRcnn.faster_rcnn_r50 import Faster_Rcnn_Resnet50
- from src.network_define import LossCallBack, WithLossCell, TrainOneStepCell, LossNet
- # from src.config import config
- from src.model_utils.config import config
- from src.dataset import data_to_mindrecord_byte_image, create_fasterrcnn_dataset
- from src.lr_schedule import dynamic_lr
- from src.model_utils.moxing_adapter import moxing_wrapper
- from src.model_utils.device_adapter import get_device_id, get_device_num
-
# Fix the global random seed so training runs are reproducible.
set_seed(1)


# Run in graph mode on Ascend; the device id comes from the device adapter
# (environment-driven on ModelArts / multi-device servers).
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=get_device_id())
-
-
def modelarts_pre_process():
    '''ModelArts pre-process: unzip the staged dataset (once per server) and
    redirect checkpoint saving to the job output path.'''
    def unzip(zip_file, save_dir):
        # Extract zip_file into save_dir, printing coarse progress; skips work
        # if the dataset directory already exists under save_dir.
        import zipfile
        s_time = time.time()
        if not os.path.exists(os.path.join(save_dir, config.modelarts_dataset_unzip_name)):
            if zipfile.is_zipfile(zip_file):
                # Context manager closes the archive even if extraction fails
                # (the original leaked the ZipFile handle).
                with zipfile.ZipFile(zip_file, 'r') as fz:
                    names = fz.namelist()
                    data_num = len(names)
                    print("Extract Start...")
                    print("unzip file num: {}".format(data_num))
                    data_print = int(data_num / 100) if data_num > 100 else 1
                    for i, file in enumerate(names):
                        if i % data_print == 0:
                            print("unzip percent: {}%".format(int(i * 100 / data_num)), flush=True)
                        fz.extract(file, save_dir)
                print("cost time: {}min:{}s.".format(int((time.time() - s_time) / 60),
                                                     int(int(time.time() - s_time) % 60)))
                print("Extract Done.")
            else:
                print("This is not zip.")
        else:
            print("Zip has been extracted.")

    if config.need_modelarts_dataset_unzip:
        zip_file_1 = os.path.join(config.data_path, config.modelarts_dataset_unzip_name + ".zip")
        save_dir_1 = os.path.join(config.data_path)
        for f in os.listdir(config.data_path):
            print(f)
        sync_lock = "/tmp/unzip_sync.lock"

        # Each server contains 8 devices at most. Only the first device on
        # each server extracts; the others wait on the lock file below.
        if get_device_id() % min(get_device_num(), 8) == 0 and not os.path.exists(sync_lock):
            # =========== start unzip ==================================#
            print("Zip file path: ", zip_file_1)
            print("Unzip file save dir: ", save_dir_1)
            unzip(zip_file_1, save_dir_1)
            print("===Finish extract data synchronization===")
            try:
                os.mknod(sync_lock)
            except IOError:
                pass

        # All devices block here until the extracting device creates the lock.
        while True:
            if os.path.exists(sync_lock):
                break
            time.sleep(1)
        print("Device: {}, Finish sync unzip data from {} to {}.".format(get_device_id(), zip_file_1, save_dir_1))
    config.save_checkpoint_path = config.output_path
    print("save_checkpoint_path:")
    print(config.save_checkpoint_path)
-
-
@moxing_wrapper(pre_process=modelarts_pre_process)
def train_cascade_rcnn():
    """Train FasterRcnn and save checkpoint files.

    Builds the mindrecord dataset on rank 0 if it does not exist, optionally
    loads pretrained backbone weights, then runs the training loop.

    Relies on module-level globals ``rank`` and ``device_num`` set in the
    ``__main__`` guard, and on the global ``config`` object.
    """
    load_path = config.pre_trained
    if config.enable_modelarts:
        # On ModelArts the pretrained checkpoint is staged under config.load_path.
        model_name = os.listdir(config.load_path)[0]
        load_path = os.path.join(config.load_path, model_name)

    print("Start create dataset!")
    # Mindrecord files are generated in config.mindrecord_dir with names
    # FasterRcnn.mindrecord0, 1, ... file_num.
    prefix = "FasterRcnn.mindrecord"
    mindrecord_dir = config.mindrecord_dir
    mindrecord_file = os.path.join(mindrecord_dir, prefix + "0")
    print("CHECKING MINDRECORD FILES DONE!")

    loss_scale = float(config.loss_scale)
    # Only rank 0 converts the raw dataset; other ranks wait in the loop below.
    if rank == 0 and not os.path.exists(mindrecord_file + ".db"):
        if not os.path.isdir(mindrecord_dir):
            os.makedirs(mindrecord_dir)
        if config.dataset == "coco":
            if os.path.isdir(config.coco_root):
                print("Create Mindrecord. It may take some time.")
                data_to_mindrecord_byte_image(config, "coco", True, prefix)
                print("Create Mindrecord Done, at {}".format(mindrecord_dir))
            else:
                print("coco_root not exits.")
        else:
            if os.path.isdir(config.image_dir) and os.path.exists(config.anno_path):
                print("Create Mindrecord. It may take some time.")
                data_to_mindrecord_byte_image(config, "other", True, prefix)
                print("Create Mindrecord Done, at {}".format(mindrecord_dir))
            else:
                print("image_dir or anno_path not exits.")

    # Block until the mindrecord files exist (created by rank 0 above or by a
    # previous run).
    while not os.path.exists(mindrecord_file + ".db"):
        time.sleep(5)

    print("CHECKING MINDRECORD FILES DONE!")

    # When creating MindDataset, use the first mindrecord file, e.g. FasterRcnn.mindrecord0.
    dataset = create_fasterrcnn_dataset(mindrecord_file, batch_size=config.batch_size,
                                        device_num=device_num, rank_id=rank)

    dataset_size = dataset.get_dataset_size()
    print("Create dataset done!")

    net = Faster_Rcnn_Resnet50(config=config)
    net = net.set_train()
    if load_path != "":
        # Keep only backbone weights from the pretrained checkpoint.
        param_dict = load_checkpoint(load_path)
        for item in list(param_dict.keys()):
            if not item.startswith('backbone'):
                param_dict.pop(item)

        # Cast all loaded weights to float32 before loading into the net.
        for key, value in param_dict.items():
            tensor = value.asnumpy().astype(np.float32)
            param_dict[key] = Parameter(tensor, key)
        print("param_dict.keys():")
        print(param_dict.keys())
        load_param_into_net(net, param_dict)

    loss = LossNet()
    lr = Tensor(dynamic_lr(config, dataset_size), mstype.float32)

    opt = SGD(params=net.trainable_params(), learning_rate=lr, momentum=float(config.momentum),
              weight_decay=float(config.weight_decay), loss_scale=loss_scale)
    net_with_loss = WithLossCell(net, loss)
    if config.run_distribute:
        net = TrainOneStepCell(net_with_loss, net, opt, sens=config.loss_scale, reduce_flag=True,
                               mean=True, degree=device_num)
    else:
        net = TrainOneStepCell(net_with_loss, net, opt, sens=config.loss_scale)

    log_dir = './log_4_14_master_class.log'
    time_cb = TimeMonitor(data_size=dataset_size)
    loss_cb = LossCallBack(dir=log_dir, rank_id=rank)
    cb = [time_cb, loss_cb]
    if config.save_checkpoint:
        ckptconfig = CheckpointConfig(save_checkpoint_steps=config.save_checkpoint_epochs * dataset_size,
                                      keep_checkpoint_max=config.keep_checkpoint_max)
        save_checkpoint_path = os.path.join(config.save_checkpoint_path, "ckpt_" + str(rank) + "/")
        ckpoint_cb = ModelCheckpoint(prefix='faster_rcnn', directory=save_checkpoint_path, config=ckptconfig)
        cb += [ckpoint_cb]

    model = Model(net)
    model.train(config.epoch_size, dataset, callbacks=cb, dataset_sink_mode=False)
-
-
if __name__ == '__main__':
    if config.run_distribute:
        # Initialize the communication group exactly once (the original called
        # init() a second time after set_auto_parallel_context), then configure
        # data-parallel training across the group.
        init()
        rank = get_rank()
        device_num = get_group_size()
        context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
                                          gradients_mean=True)
    else:
        # Single-device run.
        rank = 0
        device_num = 1
    train_cascade_rcnn()
|