|
- """Use the PYNATIVE mode to train the network"""
- import os
- import random
- import time
- import numpy as np
-
- import mindspore as ms
- from mindspore.context import ParallelMode
- from mindspore import context, nn, Tensor, ops
- from mindspore.communication.management import init, get_rank, get_group_size
- #from mindcv.utils.amp import NoLossScaler, DynamicLossScaler, StaticLossScaler, auto_mixed_precision, all_finite
-
- from mindcv.models import create_model
- from mindcv.data import create_dataset, create_transforms, create_loader
- from mindcv.loss import create_loss
- from mindcv.optim import create_optimizer
- from mindcv.scheduler import create_scheduler
- from mindcv.utils import CheckpointManager
- from config import parse_args
-
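- # fix random seeds (numpy, python random, mindspore) for reproducible runs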
- def set_seed(seed=2):
- np.random.seed(seed)
- random.seed(seed)
- ms.set_seed(seed)
-
- def train(args):
- """Train network."""
- set_seed()
-
- if args.enable_modelarts:
- from mindcv.utils.modelarts import sync_data
- args.data_dir = "/cache/data/"
- os.makedirs(args.data_dir, exist_ok=True)
- sync_data(args.data_url, args.data_dir)
- print(f"Data dir on modelarts:{os.listdir(args.data_dir)}")
- # update data_dir path for create_dataset
- if "imagenet" in os.listdir(args.data_dir):
- data_dir = os.path.join(args.data_dir, "imagenet")
- args.data_dir = data_dir
-
- # TODO:
- #rank, rank_size = args.rank, args.rank_size
- # the main process (rank 0, or a standalone run) handles logging, validation and checkpoint saving
- is_main_process = args.rank in [None, 0]
-
- # create dataset
- dataset_train = create_dataset(
- name=args.dataset,
- root=args.data_dir,
- split=args.train_split,
- shuffle=args.shuffle,
- num_samples=args.num_samples,
- num_shards=args.rank_size,
- shard_id=args.rank,
- num_parallel_workers=args.num_parallel_workers,
- download=args.dataset_download,
- num_aug_repeats=args.aug_repeats)
-
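- # infer the number of classes from the dataset when it is not given explicitly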
- if args.num_classes is None:
- num_classes = dataset_train.num_classes()
- else:
- num_classes = args.num_classes
-
- # create transforms
- transform_list = create_transforms(
- dataset_name=args.dataset,
- is_training=True,
- image_resize=args.image_resize,
- scale=args.scale,
- ratio=args.ratio,
- hflip=args.hflip,
- vflip=args.vflip,
- color_jitter=args.color_jitter,
- interpolation=args.interpolation,
- auto_augment=args.auto_augment,
- mean=args.mean,
- std=args.std,
- re_prob=args.re_prob,
- re_scale=args.re_scale,
- re_ratio=args.re_ratio,
- re_value=args.re_value,
- re_max_attempts=args.re_max_attempts
- )
-
- # load dataset
- loader_train = create_loader(
- dataset=dataset_train,
- batch_size=args.batch_size,
- drop_remainder=False,
- is_training=True,
- mixup=args.mixup,
- cutmix=args.cutmix,
- cutmix_prob=args.cutmix_prob,
- num_classes=num_classes,
- transform=transform_list,
- num_parallel_workers=args.num_parallel_workers,
- )
-
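- # build the evaluation pipeline only on the main process when validating during training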
- if args.val_while_train and is_main_process:
- dataset_eval = create_dataset(
- name=args.dataset,
- root=args.data_dir,
- split=args.val_split,
- num_parallel_workers=args.num_parallel_workers,
- download=args.dataset_download)
-
- transform_list_eval = create_transforms(
- dataset_name=args.dataset,
- is_training=False,
- image_resize=args.image_resize,
- crop_pct=args.crop_pct,
- interpolation=args.interpolation,
- mean=args.mean,
- std=args.std
- )
-
- loader_eval = create_loader(
- dataset=dataset_eval,
- batch_size=args.batch_size,
- drop_remainder=False,
- is_training=False,
- transform=transform_list_eval,
- num_parallel_workers=args.num_parallel_workers,
- )
-
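- # number of batches per epoch (size of the batched training loader)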
- num_batches = loader_train.get_dataset_size()
- # total number of training samples
- train_count = dataset_train.get_dataset_size()
-
- # create model
- network = create_model(model_name=args.model,
- num_classes=num_classes,
- in_channels=args.in_channels,
- drop_rate=args.drop_rate,
- drop_path_rate=args.drop_path_rate,
- pretrained=args.pretrained,
- checkpoint_path=args.ckpt_path)
-
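- # total number of parameter elements, reported in the training summary below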
- num_params = sum(param.size for param in network.get_parameters())
-
- # create loss
- loss_fn = create_loss(name=args.loss,
- reduction=args.reduction,
- label_smoothing=args.label_smoothing,
- aux_factor=args.aux_factor)
-
- # create learning rate schedule
- lr_scheduler = create_scheduler(num_batches,
- scheduler=args.scheduler,
- lr=args.lr,
- min_lr=args.min_lr,
- warmup_epochs=args.warmup_epochs,
- warmup_factor=args.warmup_factor,
- decay_epochs=args.decay_epochs,
- decay_rate=args.decay_rate,
- milestones=args.multi_step_decay_milestones,
- num_epochs=args.epoch_size)
-
- # resume training if ckpt_path is given
- if args.ckpt_path != '' and args.resume_opt:
- opt_ckpt_path = os.path.join(args.ckpt_save_dir, f'optim_{args.model}.ckpt')
- else:
- opt_ckpt_path = ''
-
- # create optimizer
- optimizer = create_optimizer(network.trainable_params(),
- opt=args.opt,
- lr=lr_scheduler,
- weight_decay=args.weight_decay,
- momentum=args.momentum,
- nesterov=args.use_nesterov,
- filter_bias_and_bn=args.filter_bias_and_bn,
- #loss_scale=args.loss_scale,
- checkpoint_path=opt_ckpt_path)
-
- # amp
- # ms.amp.auto_mixed_precision(network, amp_level=args.amp_level)
- # TODO: auto_mixed_precision changed in MS 1.9.1; use a custom auto_mixed_precision to support a customized blacklist
- #from mindcv.utils.amp import auto_mixed_precision
- #auto_mixed_precision(network, amp_level=args.amp_level)
-
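- # in distributed training, all-reduce gradients across devices (averaged when gradients_mean is set);
- # in standalone mode, gradients pass through unchanged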
- if args.distribute:
- mean = context.get_auto_parallel_context("gradients_mean")
- degree = context.get_auto_parallel_context("device_num")
- grad_reducer = nn.DistributedGradReducer(optimizer.parameters, mean, degree)
- else:
- grad_reducer = ops.identity
-
- # set loss scale for mixed precision training
- # from mindspore.amp import StaticLossScaler
- # if args.amp_level != 'O0':
- # if args.dynamic_loss_scale:
- # #from mindcv.utils.amp import DynamicLossScaler
- # from mindspore.amp import DynamicLossScaler
- # loss_scaler = DynamicLossScaler(2**12, 2, 1000)
- # else:
- # # Fixes bugs in MS 1.8.1 (missing adjust)
- # #from mindcv.utils.amp import StaticLossScaler
- # loss_scaler = StaticLossScaler(args.loss_scale)
- # else:
- # from mindcv.utils.amp import NoLossScaler
- # #loss_scaler = NoLossScaler(1)
- # loss_scaler = StaticLossScaler(1)
- #from mindspore.amp import StaticLossScaler
- #loss_scaler = StaticLossScaler(args.loss_scale)
-
- # set up train step
- # from mindspore.amp import all_finite # Bugs before MindSpore 1.9.0
- # from mindcv.utils.all_finite import all_finite
-
- # Define forward function
- def forward_fn(data, label):
- logits = network(data)
- loss = loss_fn(logits, label)
- # loss = loss_scaler.scale(loss)
- return loss, logits
-
- # Get gradient function
- # TODO: ops.value_and_grad was updated in MS 1.9.1; compare it with the rewritten value_and_grad (commented out below)
- grad_fn = ops.value_and_grad(forward_fn, None, optimizer.parameters)
- # from mindcv.utils.grad import value_and_grad
- # grad_fn = value_and_grad(forward_fn, None, optimizer.parameters)
-
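- # compile the train step into a graph with ms_function: forward + backward pass, gradient reduction, optimizer update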
- @ms.ms_function
- def train_step(data, label):
- (loss, logits), grads = grad_fn(data, label)
- grads = grad_reducer(grads)
- # status = all_finite(grads)
- #loss = loss_scaler.unscale(loss)
- #grads = loss_scaler.unscale(grads)
- # if status:
- loss = ops.depend(loss, optimizer(grads))
-
- # TODO: adjust has no return value in MS 1.8.1; fixed in MS 1.9.1
- #loss = ops.depend(loss, loss_scaler.adjust(status))
- return loss, logits
-
- # resume
- begin_step = 0
- begin_epoch = 0
- if args.ckpt_path != '':
- begin_step = optimizer.global_step.asnumpy()[0]
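- # recover the last finished epoch from a checkpoint file named "<model>-<epoch>_<num_batches>.ckpt"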
- begin_epoch = args.ckpt_path.split('/')[-1].split('_')[0].split('-')[-1]
- begin_epoch = int(begin_epoch)
-
- # log
- if is_main_process:
- print(f"-" * 40)
- print(f"Num devices: {args.rank_size if args.rank_size is not None else 1} \n"
- f"Distributed mode: {args.distribute} \n"
- f"Num training samples: {train_count}")
- print(f"Num classes: {num_classes} \n"
- f"Num batches: {num_batches} \n"
- f"Batch size: {args.batch_size} \n"
- f"Auto augment: {args.auto_augment} \n"
- f"Model: {args.model} \n"
- f"Model param: {num_params} \n"
- f"Num epochs: {args.epoch_size} \n"
- f"Optimizer: {args.opt} \n"
- f"LR: {args.lr} \n"
- f"LR Scheduler: {args.scheduler}")
- print(f"-" * 40)
-
- if args.ckpt_path != '':
- print(f"Resume training from {args.ckpt_path}, last step: {begin_step}, last epoch: {begin_epoch}")
- else:
- print('Start training')
-
- if (not os.path.exists(args.ckpt_save_dir)) and (args.ckpt_save_dir != ''):
- os.makedirs(args.ckpt_save_dir)
-
- log_txt_fp = os.path.join(args.ckpt_save_dir, 'result.log')
- with open(log_txt_fp, 'w', encoding="utf-8") as fp:
- header = 'Epoch\tTrainLoss\tLR\tTime\tTop1\tTop5\n'
- fp.write(header)
-
- best_acc = 0
-
- # Training
- need_flush_from_cache = True
- assert args.ckpt_save_policy != 'top_k' or args.val_while_train, \
- "When ckpt_save_policy is 'top_k', val_while_train must be True."
- manager = CheckpointManager(ckpt_save_policy=args.ckpt_save_policy, async_save=False)
-
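- # log every log_interval batches; default to once per epoch (on the last batch) when not set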
- log_interval = num_batches if args.log_interval is None else args.log_interval
-
- for epoch in range(begin_epoch, args.epoch_size):
- epoch_start = time.time()
-
- # train
- network.set_train()
- optimizer.set_train()
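- # accuracy counters accumulated by the in-training validation loop below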
- total, correct = 0, 0
-
- for batch, (data, label) in enumerate(loader_train.create_tuple_iterator()):
- loss, logits = train_step(data, label)
- # loss_scaler.adjust(grad_finite)
-
- if (batch + 1) % log_interval == 0 or (batch + 1) >= num_batches:
- step = epoch * num_batches + batch
- if optimizer.dynamic_lr:
- cur_lr = optimizer.learning_rate(Tensor(step)).asnumpy()
- else:
- cur_lr = optimizer.learning_rate.asnumpy()
- print(f"Epoch:[{epoch+1}/{args.epoch_size}], "
- f"batch:[{batch+1}/{num_batches}], "
- f"loss:{loss.asnumpy():.6f}, lr: {cur_lr:.7f}, time:{time.time() - epoch_start:.6f}s")
-
- # val while train
- test_acc = Tensor(-1.0)
- if args.val_while_train and is_main_process:
- if ((epoch + 1) % args.val_interval == 0) or (epoch + 1 == args.epoch_size):
- val_start = time.time()
- network.set_train(False)
- optimizer.set_train(False)
- for data, label in loader_eval.create_tuple_iterator():
- pred = network(data)
- total += len(data)
- if len(label.shape) == 1:
- correct += (pred.argmax(1) == label).asnumpy().sum()
- else: #one-hot or soft label
- correct += (pred.argmax(1) == label.argmax(1)).asnumpy().sum()
-
- test_acc = 100 * correct / total
- val_time = time.time() - val_start
- print(f"Val time: {val_time:.2f}, \t Val acc: {test_acc:0.5f}%")
- if test_acc > best_acc:
- best_acc = test_acc
- save_best_path = os.path.join(args.ckpt_save_dir, f"{args.model}-best.ckpt")
- ms.save_checkpoint(network, save_best_path)
- print(f"=> New best val acc: {test_acc:0.3f}")
-
- if args.enable_modelarts:
- sync_data(save_best_path, args.train_url + "/" + save_best_path.split("/")[-1])
-
- # Save checkpoint
- if is_main_process:
- if ((epoch + 1) % args.ckpt_save_interval == 0) or (epoch + 1 == args.epoch_size):
- if need_flush_from_cache:
- need_flush_from_cache = flush_from_cache(network)
-
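- # save the optimizer state alongside the model so training can be resumed (loaded via resume_opt)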
- ms.save_checkpoint(optimizer, os.path.join(args.ckpt_save_dir, f'optim_{args.model}.ckpt'))
- save_path = os.path.join(args.ckpt_save_dir, f"{args.model}-{epoch + 1}_{num_batches}.ckpt")
- ckpoint_filelist = manager.save_ckpoint(network, num_ckpt=args.keep_checkpoint_max,
- metric=test_acc, save_path=save_path)
- if args.enable_modelarts:
- sync_data(save_path, args.train_url + "/weights/" + save_path.split("/")[-1])
-
- if args.ckpt_save_policy == 'top_k':
- checkpoints_str = "Top K accuracy checkpoints: \n"
- for ch in ckpoint_filelist:
- checkpoints_str += '{}\n'.format(ch)
- print(checkpoints_str)
- else:
- print(f"Saving model to {save_path}")
-
- epoch_time = time.time() - epoch_start
- print(f'=> Epoch {epoch + 1}, total time cost:{epoch_time:.3f}s')
-
- with open(log_txt_fp, 'a', encoding="utf-8") as fp:
- values = f'{epoch}\t{loss.asnumpy():.6f}\t{cur_lr:.7f}\t{epoch_time:.5f}\t-\t-\n'
- fp.write(values)
- if args.enable_modelarts:
- sync_data(log_txt_fp, args.train_url + '/' + log_txt_fp.split("/")[-1])
-
- print("Done!")
-
- def flush_from_cache(network):
- """Flush cached parameter data back to host if any parameter has cache enabled."""
- has_cache_params = False
- for param in network.get_parameters():
- if param.cache_enable:
- has_cache_params = True
- Tensor(param).flush_from_cache()
- # keep flushing on later epochs only if cached parameters were actually found
- return has_cache_params
-
- if __name__ == '__main__':
- args = parse_args()
-
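- # run in GRAPH_MODE when args.mode == 0, otherwise PYNATIVE_MODE, on the selected device target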
- ms_mode = context.GRAPH_MODE if args.mode == 0 else context.PYNATIVE_MODE
- context.set_context(mode=ms_mode, device_target=args.device_target)
-
- if args.device_target == "Ascend":
- device_id = int(os.getenv('DEVICE_ID', 0))
- context.set_context(device_id=device_id)
- else:
- #raise NotImplementedError
- device_id = None
-
- # Distribute Train
- rank, rank_size, parallel_mode = 0, 1, ParallelMode.STAND_ALONE
- if args.distribute:
- init()
- rank, rank_size, parallel_mode = get_rank(), get_group_size(), ParallelMode.DATA_PARALLEL
- context.set_auto_parallel_context(parallel_mode=parallel_mode, gradients_mean=True, device_num=rank_size)
-
- args.rank_size = rank_size
- args.rank = rank
-
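- # force ModelArts data syncing on, overriding the value parsed from the command line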
- args.enable_modelarts = True
-
- # Train
- train(args)
|