# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
- """
- ######################## train alexnet example ########################
- train alexnet and get network model files(.ckpt) :
- python finetune.py --data_path /YourDataPath
- """

import ast
import argparse
import os
from src.finetune.config import alexnet_imagenet_cfg
from src.dataset import create_finetune_dataset
from src.alexnet.generator_lr import get_lr_imagenet, Finetune_Loss
from src.alexnet.alexnet import alexnet_finetune
from src.alexnet.get_param_groups import get_param_groups
import mindspore.nn as nn
from mindspore.communication.management import init, get_rank
from mindspore import dataset as de
from mindspore import context
from mindspore import Tensor
from mindspore.train import Model
from mindspore.context import ParallelMode
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor, Callback
from mindspore.train.loss_scale_manager import DynamicLossScaleManager, FixedLossScaleManager
from mindspore.common import set_seed
import moxing as mox

set_seed(1)
de.config.set_seed(1)

class Monitor(Callback):
    """Callback that prints the learning rate used at each training step."""

    def __init__(self, lr_init=None):
        super(Monitor, self).__init__()
        self.lr_init = lr_init
        self.lr_init_len = len(lr_init)

    def step_end(self, run_context):
        cb_params = run_context.original_args()
        print("lr:[{:8.6f}]".format(self.lr_init[cb_params.cur_step_num - 1]), flush=True)

# Preprocessed data: src/data/finetune/train/fine_data.pickle, mapping image id -> boxes.
local_predata_url = './cache/predata'
# VOC2007 dataset (JPEG images).
local_jpeg_root_url = './cache/jpeg_root'
# Location of the pretrained checkpoint.
local_preckpt_url = './cache/preckpt'
# Where the training results (checkpoints) are stored.
local_ckpt_url = './cache/ckpt'


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='MindSpore AlexNet Example')
    parser.add_argument('--device_target', type=str, default="Ascend", choices=['Ascend', 'GPU'],
                        help='device where the code will be implemented (default: Ascend)')
    parser.add_argument('--sink_size', type=int, default=-1, help='control the amount of data in each sink')
    parser.add_argument('--jpeg_root_url', type=str, default="src/data/VOCdevkit/VOC2007/JPEGImages",
                        help='path where the dataset is saved')
    parser.add_argument('--ckpt_url', type=str, default="./ckpt",
                        help='path the trained ckpt files are copied to')
    parser.add_argument('--dataset_sink_mode', type=ast.literal_eval,
                        default=True, help='dataset_sink_mode is False or True')
    parser.add_argument('--device_id', type=int, default=0, help='device id of GPU or Ascend. (Default: 0)')
    args_opt = parser.parse_args()

    # Check whether we are running on multiple devices.
    device_num = int(os.getenv("RANK_SIZE", "1"))
    device_id = int(os.getenv("DEVICE_ID", "0"))

    # OBS source locations, copied down to the local cache directories above.
    data_path = 'obs://'
    preckpt_path = 'obs://hit-lmx/lmx/sort_rcnn/output'
    mox.file.copy_parallel(data_path, local_predata_url)
    mox.file.copy_parallel(args_opt.jpeg_root_url, local_jpeg_root_url)
    mox.file.copy_parallel(preckpt_path, local_preckpt_url)

    cfg = alexnet_imagenet_cfg
    # Target platform.
    device_target = args_opt.device_target
    context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target)
    context.set_context(save_graphs=False)
    # Multi-device (distributed) setup.
    if device_target == "Ascend":
        context.set_context(device_id=device_id)

        if device_num > 1:
            context.reset_auto_parallel_context()
            context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
                                              gradients_mean=True)
            init()
    elif device_target == "GPU":
        if device_num > 1:
            init()
            context.reset_auto_parallel_context()
            context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
                                              gradients_mean=True)
    else:
        raise ValueError("Unsupported platform.")
    # End of multi-device setup.

    # Create the training dataset.
    ds_train = create_finetune_dataset(predata_path=local_predata_url, jpeg_root_path=local_jpeg_root_url,
                                       batch_size=cfg.batch_size, num_shards=device_num, shard_id=device_id,
                                       device_target=args_opt.device_target)
    # Make sure the dataset is not empty.
    if ds_train.get_dataset_size() == 0:
        raise ValueError("Please check dataset size > 0 and batch_size <= dataset size")
    # TODO: loading the pretrained weights into the network is not done here.
    network, _, _ = alexnet_finetune(cfg.num_classes, phase='train')
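
    # A minimal sketch of how the pretrained weights copied into local_preckpt_url
    # could be loaded; the file name "alexnet_pretrain.ckpt" is an assumption,
    # not something defined by this repository:
    #
    #     from mindspore.train.serialization import load_checkpoint, load_param_into_net
    #     param_dict = load_checkpoint(os.path.join(local_preckpt_url, "alexnet_pretrain.ckpt"))
    #     load_param_into_net(network, param_dict)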

    loss_scale_manager = None
    metrics = None
    # Get the number of batches in an epoch.
    step_per_epoch = ds_train.get_dataset_size() if args_opt.sink_size == -1 else args_opt.sink_size

    loss = Finetune_Loss()
    lr = Tensor(get_lr_imagenet(cfg.learning_rate, cfg.epoch_size, step_per_epoch))
    opt = nn.Momentum(params=get_param_groups(network),
                      learning_rate=lr,
                      momentum=cfg.momentum,
                      weight_decay=cfg.weight_decay,
                      loss_scale=cfg.loss_scale)

    if cfg.is_dynamic_loss_scale == 1:
        loss_scale_manager = DynamicLossScaleManager(init_loss_scale=65536, scale_factor=2, scale_window=2000)
    else:
        loss_scale_manager = FixedLossScaleManager(cfg.loss_scale, drop_overflow_update=False)
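
    # Note on the two managers: DynamicLossScaleManager starts at 65536, divides
    # the scale by scale_factor on overflow and grows it again after scale_window
    # clean steps; FixedLossScaleManager keeps cfg.loss_scale constant, matching
    # the loss_scale already passed to the Momentum optimizer above.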

    if device_target == "Ascend":
        model = Model(network, loss_fn=loss, optimizer=opt, metrics=metrics, amp_level="O2",
                      keep_batchnorm_fp32=False, loss_scale_manager=loss_scale_manager)
    elif device_target == "GPU":
        model = Model(network, loss_fn=loss, optimizer=opt, metrics=metrics, loss_scale_manager=loss_scale_manager)
    else:
        raise ValueError("Unsupported platform.")
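
    # amp_level="O2" runs the Ascend model in mixed precision (float16 compute);
    # keep_batchnorm_fp32=False additionally casts batch-norm layers to float16,
    # which is why a loss scale manager is attached to guard against underflow.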

    if device_num > 1:
        ckpt_save_dir = local_ckpt_url + "_" + str(get_rank())
    else:
        ckpt_save_dir = local_ckpt_url

    time_cb = TimeMonitor(data_size=step_per_epoch)
    loss_cb = LossMonitor()
    cb = [time_cb, loss_cb]
    cb += [Monitor(lr_init=lr.asnumpy())]
    config_ck = CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps,
                                 keep_checkpoint_max=cfg.keep_checkpoint_max)
    ckpoint_cb = ModelCheckpoint(prefix="alexnet_finetune", directory=ckpt_save_dir, config=config_ck)
- print("ckpt:::::::::::::::::::::::::::::::::::")
    # Only the first device saves checkpoints.
    if device_id == 0:
        cb += [ckpoint_cb]
- print("============== Starting Training ==============")
- model.train(cfg.epoch_size, ds_train, callbacks=cb,
- dataset_sink_mode=args_opt.dataset_sink_mode, sink_size=args_opt.sink_size)
- print("dfsdfsdf++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
    # Copy the trained checkpoints back to OBS from the first device.
    if device_id == 0:
        mox.file.copy_parallel(ckpt_save_dir, args_opt.ckpt_url)
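
# Example launch (single device; paths are illustrative assumptions):
#     python finetune.py --device_target Ascend --device_id 0 \
#         --jpeg_root_url src/data/VOCdevkit/VOC2007/JPEGImages --ckpt_url ./ckpt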