|
- # encoding: utf-8
- """
- @author: sherlock
- @contact: sherlockliao01@gmail.com
- """
-
- import logging
-
- import torch
- import torch.nn as nn
- from ignite.engine import Engine, Events
- from ignite.handlers import ModelCheckpoint, Timer
- from ignite.metrics import RunningAverage
- import torch.nn.functional as F
- from utils.reid_metric import R1_mAP
- import numpy as np
-
-
-
# Global iteration counter shared by the per-iteration logging handlers below.
global ITER  # NOTE(review): `global` at module scope is a no-op; kept as-is.
ITER = 0


from .triplet_loss_anti import CrossEntropyLabelSmooth_neg, TripletLoss_anti
# NOTE(review): magic numbers — 395 is presumably the number of training
# identities (dataset-specific; confirm), and -0.3 the (negative) margin
# passed to the anti-triplet loss.
softmin = CrossEntropyLabelSmooth_neg(395)
antitriplet = TripletLoss_anti(-0.3)
-
def normalize(x):
    """Return x L2-normalized along its last dimension.

    A small epsilon keeps the division stable when a row has zero norm.
    """
    denom = torch.norm(x, 2, -1, keepdim=True).expand_as(x) + 1e-12
    return 1. * x / denom
-
def euclidean_dist(x, y):
    """Pairwise Euclidean distance between the rows of x and y.

    Args:
        x (Tensor): shape [m, d].
        y (Tensor): shape [n, d].

    Returns:
        Tensor: shape [m, n], where out[i, j] = ||x[i] - y[j]||_2,
        clamped at sqrt(1e-12) for numerical stability.
    """
    m, n = x.size(0), y.size(0)
    # Fix: keepdim=True is required so the [m, 1] / [n, 1] column vectors can
    # broadcast-expand to [m, n]; without it .expand raises for m != n.
    xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
    yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
    dist = xx + yy
    # ||x||^2 + ||y||^2 - 2 x.y  (keyword beta/alpha: the positional
    # addmm_(beta, alpha, ...) form is deprecated/removed in modern torch)
    dist.addmm_(x, y.t(), beta=1, alpha=-2)
    dist = dist.clamp(min=1e-12).sqrt()  # for numerical stability
    return dist
-
-
def triplet(feat1, feat2):
    """Cross-modality triplet loss with a fixed margin of 0.5.

    Positive pairs are the aligned rows (i-th row of feat1 vs i-th row of
    feat2); each anchor's negative distance is the smallest off-diagonal
    entry in its row or column of the pairwise distance map.

    Args:
        feat1, feat2 (Tensor): [N, D] feature matrices with matching row order.

    Returns:
        Tensor: scalar mean hinge loss ``max(0, 0.5 + d_ap - d_an)``.
    """
    distmap = euclidean_dist(feat1, feat2)

    # aligned rows are the positives
    distap = torch.diag(distmap)
    # Add a large constant on the diagonal so positives never win the min.
    # Fix: build the mask on distmap's device instead of hard-coding 'cuda',
    # so the loss also runs on CPU-only machines.
    masked = distmap + (torch.eye(distmap.shape[0], device=distmap.device) * 100)
    distan_row = torch.min(masked, 1)[0]
    distan_col = torch.min(masked, 0)[0]
    distan = torch.min(distan_row, distan_col)
    cross_triplet_loss = torch.mean(torch.clamp(0.5 + distap - distan, min=0.0))
    return cross_triplet_loss
-
-
-
def triplet_minus(feat1, feat2):
    """Triplet-style loss (margin 0.5) whose negatives are mined within
    per-identity sub-blocks of the distance map.

    NOTE(review): the reshape below hard-codes a 384-row batch laid out as
    12 x 32 (presumably 12 ids x 32 images, or vice versa) — this function
    breaks for any other batch shape; confirm against the sampler.
    NOTE(review): the torch.eye mask is hard-coded to 'cuda' and will fail
    on CPU-only machines.
    """
    distmap = euclidean_dist(feat1, feat2)
    # positives: aligned rows of the cross map
    distap = torch.diag(distmap)

    '======================================================================================='
    # Regroup the [384, 384] map into 1024 blocks of [12, 12] so the min is
    # taken only inside each block; the diagonal is masked out with +100.
    distmap1 = distmap.view(12,32,12,32).permute(1,3,0,2).contiguous().view(-1,12,12)
    distmap1 = distmap1 + (torch.eye(12) * 100).to('cuda')
    distan = torch.min(torch.min(distmap1, 1)[0], torch.min(distmap1, 2)[0])
    # keep one block per group and flatten back to one negative per anchor
    distan = distan[np.arange(32)*32].permute(1,0).contiguous().view(-1)
    '======================================================================================='

    cross_triplet_loss = torch.mean(torch.clamp(0.5 + distap - distan, min=0.0))
    return cross_triplet_loss
-
-
def _min_offdiag(distmap):
    """Per-anchor hardest (smallest) off-diagonal distance, symmetrized over
    rows and columns of a square distance map."""
    # A large constant on the diagonal keeps self/positive distances out of
    # the min; build the mask on distmap's device instead of hard-coding
    # 'cuda' so this also runs on CPU-only machines.
    masked = distmap + (torch.eye(distmap.shape[0], device=distmap.device) * 100)
    return torch.min(torch.min(masked, 1)[0], torch.min(masked, 0)[0])


def triplet_tri(feat1, feat2):
    """Triplet loss (margin 0.3) whose negative is the hardest one taken
    across the cross-modality map and both intra-modality maps.

    Args:
        feat1, feat2 (Tensor): [N, D] feature matrices with matching row order
            (aligned rows are the positive pairs).

    Returns:
        Tensor: scalar mean hinge loss ``max(0, 0.3 + d_ap - d_an)``.
    """
    distmap1 = euclidean_dist(feat1, feat2)   # cross-modality distances
    distmap2 = euclidean_dist(feat1, feat1)   # intra-modality, feat1
    distmap3 = euclidean_dist(feat2, feat2)   # intra-modality, feat2

    # positives: aligned rows of the cross-modality map
    distap = torch.diag(distmap1)

    distan1 = _min_offdiag(distmap1)
    distan2 = _min_offdiag(distmap2)
    distan3 = _min_offdiag(distmap3)

    # hardest negative over all three maps
    hardest_neg = torch.min(torch.min(distan1, distan2), distan3)
    cross_triplet_loss = torch.mean(torch.clamp(0.3 + distap - hardest_neg, min=0.0))
    return cross_triplet_loss
-
-
def full_triplet(fi, fv, label):
    """Symmetric cross-modality triplet loss (margin 0.3).

    Applies the standard TripletLoss in both directions (fi vs fv and
    fv vs fi) and returns the sum of the two loss terms.
    """
    from .triplet_loss_ori import TripletLoss
    criterion = TripletLoss(0.3)
    loss_a = criterion(fi, fv, label)[0]
    loss_b = criterion(fv, fi, label)[0]
    return loss_a + loss_b
-
-
def create_supervised_trainer(model, optimizer, loss_fn, device=None):
    """Build an ignite Engine that runs one optimization step per batch for
    the cross-modality (visible/infrared) re-id model.

    Args:
        model (torch.nn.Module): network to train; in the active variant it
            is expected to return features only (``feat = model(img)``).
        optimizer (torch.optim.Optimizer): stepped once per batch.
        loss_fn (callable): criterion called as ``loss_fn(score, feat, target)``.
        device (str, optional): device the model and batches are moved to.

    Returns:
        Engine: ignite engine whose per-iteration output is ``(loss_value, 0)``.
    """
    if device:
        # wrap for multi-GPU before moving to the target device
        if torch.cuda.device_count() > 1:model = nn.DataParallel(model)
        model.to(device)

    def _update(engine, batch):
        # Even positions are one modality, odd positions the other, with
        # target[indx] == target[indx+1] for paired samples.
        # NOTE(review): inferred from the indx / indx+1 usage below — confirm
        # the data-loader layout.
        indx = np.arange(batch[0].shape[0]//2)*2
        bs = indx.shape[0]  # unused in the active branch; kept for the removed variants

        model.train()
        optimizer.zero_grad()
        img, target = batch
        img = img.to(device) if torch.cuda.device_count() >= 1 else img
        target = target.to(device) if torch.cuda.device_count() >= 1 else target

        # NOTE(review): the original kept several commented-out experimental
        # variants here ('baseline', 'attention', 'cross', 'aaai'); only the
        # 'triplet' variant below is active.

        'triplet========================================================================================='
        feat = model(img)
        # loss_fn receives feat in the "score" slot as well — this model
        # variant produces no classification logits.
        loss_rgb = loss_fn(feat[indx], feat[indx], target[indx])
        # paired modalities share identity labels, hence target[indx] again
        loss_infrared = loss_fn(feat[indx+1], feat[indx+1], target[indx])

        # Average-pool groups of 4 consecutive per-modality features before
        # the cross-modality triplet term.
        # NOTE(review): the .view(-1, 64) hard-codes a 64-dim feature — confirm.
        visible = feat[indx].unsqueeze(0).unsqueeze(0)
        infrared = feat[indx+1].unsqueeze(0).unsqueeze(0)
        visible = F.avg_pool2d(visible, kernel_size=(4,1), stride=(4,1)).view(-1, 64)
        infrared = F.avg_pool2d(infrared, kernel_size=(4,1), stride=(4,1)).view(-1, 64)
        loss_cmt_iv = triplet(infrared, visible) + triplet(visible, infrared)

        loss = loss_rgb + loss_infrared + 0.1*loss_cmt_iv
        'triplet========================================================================================='

        loss.backward()
        optimizer.step()
        # second element is a placeholder for accuracy (no logits available)
        return loss.item(),0

    return Engine(_update)
-
-
def create_supervised_trainer_attention(model, optimizer, loss_fn, device=None):
    """Variant trainer for models that return ``(score, feat)``.

    Unlike :func:`create_supervised_trainer`, the model output here is
    indexed as two contiguous halves ([:bs] and [bs:]) matched against the
    interleaved targets.

    Returns:
        Engine: ignite engine whose output is ``(loss_value, acc_value)``.
    """
    if device:
        if torch.cuda.device_count() > 1: model = nn.DataParallel(model)
        model.to(device)

    def _update(engine, batch):
        # even positions of the interleaved batch: visible samples
        indx = np.arange(batch[0].shape[0]//2)*2
        bs = indx.shape[0]
        BS = bs*2  # full batch size; unused in the active branch

        model.train()
        optimizer.zero_grad()
        img, target = batch
        img = img.to(device) if torch.cuda.device_count() >= 1 else img
        target = target.to(device) if torch.cuda.device_count() >= 1 else target

        # NOTE(review): an earlier variant with a negative-attention loss
        # (softmin over score_n) was commented out in the original.

        'Attention========================================================================================='
        score, feat = model(img)
        # first half of the outputs vs visible targets, second half vs infrared
        # NOTE(review): assumes the model regroups the interleaved batch into
        # modality halves — confirm against the model's forward.
        loss_rgb = loss_fn(score[:bs], feat[:bs], target[indx])
        loss_infrared = loss_fn(score[bs:], feat[bs:], target[indx+1])

        loss = loss_rgb + loss_infrared

        # accuracy on the visible half only
        acc = (score[:bs].max(1)[1] == target[indx]).float().mean()
        loss.backward()
        optimizer.step()
        return loss.item(), acc.item()
        # NOTE(review): the marker string below is unreachable (after return); kept as-is.
        'Attention========================================================================================='

    return Engine(_update)
-
-
def do_train(cfg, model, train_loader, val_loader, optimizer, scheduler, loss_fn, num_query,start_epoch):
    """Assemble the ignite trainer/evaluator, attach logging, checkpointing
    and timing handlers, and run training for cfg.SOLVER.MAX_EPOCHS epochs.

    Args:
        cfg: config node providing SOLVER.*, MODEL.*, TEST.* and OUTPUT_DIR.
        model / optimizer / scheduler / loss_fn: training components.
        train_loader / val_loader: data loaders for training and evaluation.
        num_query (int): number of query images at the head of val_loader's
            dataset (consumed by the R1_mAP metric).
        start_epoch (int): epoch to resume counting from.
    """
    log_period = cfg.SOLVER.LOG_PERIOD
    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    eval_period = cfg.SOLVER.EVAL_PERIOD
    output_dir = cfg.OUTPUT_DIR
    device = cfg.MODEL.DEVICE
    epochs = cfg.SOLVER.MAX_EPOCHS

    logger = logging.getLogger("reid_baseline.train")
    logger.info("Start training")

    # The feature-only trainer is active; the (score, feat) "attention"
    # variant is kept below for reference.
    trainer = create_supervised_trainer(model, optimizer, loss_fn, device=device)
    # trainer = create_supervised_trainer_attention(model, optimizer, loss_fn, device=device)

    evaluator = create_supervised_evaluator(model, metrics={'r1_mAP': R1_mAP(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)}, device=device)
    checkpointer = ModelCheckpoint(output_dir, cfg.MODEL.NAME, checkpoint_period, n_saved=1, require_empty=False)
    timer = Timer(average=True)

    # save model + optimizer at the end of every epoch (keep only the latest)
    trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpointer, {'model': model, 'optimizer': optimizer})

    # time each iteration; per-epoch averages are reported in print_times
    timer.attach(trainer, start=Events.EPOCH_STARTED, resume=Events.ITERATION_STARTED,
                 pause=Events.ITERATION_COMPLETED, step=Events.ITERATION_COMPLETED)

    # average metric to attach on trainer
    RunningAverage(output_transform=lambda x: x[0]).attach(trainer, 'avg_loss')
    RunningAverage(output_transform=lambda x: x[1]).attach(trainer, 'avg_acc')

    @trainer.on(Events.STARTED)
    def start_training(engine):
        # resume epoch numbering when restarting from a checkpoint
        engine.state.epoch = start_epoch

    @trainer.on(Events.EPOCH_STARTED)
    def adjust_learning_rate(engine):
        scheduler.step()

    @trainer.on(Events.ITERATION_COMPLETED)
    def log_training_loss(engine):
        # ITER is module-global so the counter is shared across handlers
        global ITER
        ITER += 1

        if ITER % log_period == 0:
            logger.info("Epoch[{}] Iteration[{}/{}] Loss: {:.3f}, Acc: {:.3f}, Base Lr: {:.2e}"
                        .format(engine.state.epoch, ITER, len(train_loader),
                                engine.state.metrics['avg_loss'], engine.state.metrics['avg_acc'],
                                scheduler.get_lr()[0]))
        # reset the counter at the end of each epoch
        if len(train_loader) == ITER:
            ITER = 0

    # adding handlers using `trainer.on` decorator API
    @trainer.on(Events.EPOCH_COMPLETED)
    def print_times(engine):
        logger.info('Epoch {} done. Time per batch: {:.3f}[s] Speed: {:.1f}[samples/s]'
                    .format(engine.state.epoch, timer.value() * timer.step_count,
                            train_loader.batch_size / timer.value()))
        logger.info('-' * 10)
        timer.reset()

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_validation_results(engine):
        # run the re-id evaluation (mAP + CMC ranks) every eval_period epochs
        if engine.state.epoch % eval_period == 0:
            evaluator.run(val_loader)
            cmc, mAP = evaluator.state.metrics['r1_mAP']
            logger.info("Validation Results - Epoch: {}".format(engine.state.epoch))
            logger.info("mAP: {:.1%}".format(mAP))
            for r in [1, 5, 10]:
                logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))

    trainer.run(train_loader, max_epochs=epochs)
-
-
def create_supervised_trainer_with_center(model, center_criterion, optimizer, optimizer_center, loss_fn, cetner_loss_weight,device=None):
    """
    Factory function for creating a trainer for supervised models that also
    trains a center loss (the centers are updated by a separate optimizer).

    Args:
        model (`torch.nn.Module`): the model to train
        center_criterion: center-loss module whose parameters (the centers)
            are updated by ``optimizer_center``
        optimizer (`torch.optim.Optimizer`): the optimizer to use
        optimizer_center (`torch.optim.Optimizer`): optimizer for the centers
        loss_fn (torch.nn loss function): the loss function to use
        cetner_loss_weight (float): center-loss weight (parameter-name typo
            kept for interface compatibility)
        device (str, optional): device type specification (default: None).
            Applies to both model and batches.

    Returns:
        Engine: a trainer engine with supervised update function
    """
    if device:
        if torch.cuda.device_count() > 1:
            model = nn.DataParallel(model)
        model.to(device)

    def _update(engine, batch):

        # even positions of the batch: visible samples; indx+1: infrared
        indx = np.arange(batch[0].shape[0] // 2) * 2

        model.train()
        optimizer.zero_grad()
        optimizer_center.zero_grad()
        img, target = batch
        img = img.to(device) if torch.cuda.device_count() >= 1 else img
        target = target.to(device) if torch.cuda.device_count() >= 1 else target

        # NOTE(review): the gray-image augmentation that originally extended
        # `img` is commented out, but `target` is still extended to match it.
        # As a result `score`/`feat` are shorter than `target`, the
        # `score[batch[0].shape[0]:]` slice below is empty, and the final
        # `acc` compares tensors of different lengths. This path looks stale
        # — verify before using this trainer.
        target = torch.cat((target, target[indx]), dim=0)

        score, feat = model(img)

        loss_rgb = loss_fn(score[indx], feat[indx], target[indx])
        loss_infrared = loss_fn(score[indx+1], feat[indx+1], target[indx])
        loss_gray = loss_fn(score[batch[0].shape[0]:], feat[batch[0].shape[0]:], target[indx])

        # NOTE(review): `trip3` is not defined anywhere in this module —
        # executing this line raises NameError. Presumably a cross-modality
        # triplet helper was removed or renamed; restore it before use.
        loss_cross_triplet = 0.1 * trip3(feat)

        loss = loss_rgb + loss_infrared + loss_gray + loss_cross_triplet

        loss.backward()
        optimizer.step()
        # rescale the center gradients so the centers' effective learning
        # rate is independent of the center-loss weight
        for param in center_criterion.parameters():
            param.grad.data *= (1. / cetner_loss_weight)
        optimizer_center.step()

        # compute acc
        acc = (score.max(1)[1] == target).float().mean()
        return loss.item(), acc.item()

    return Engine(_update)
-
-
def create_supervised_evaluator(model, metrics, device=None):
    """
    Factory function for creating an evaluator for supervised models.

    Args:
        model (`torch.nn.Module`): the model to evaluate
        metrics (dict of str - :class:`ignite.metrics.Metric`): a map of
            metric names to Metrics, attached to the returned engine
        device (str, optional): device type specification (default: None).
            Applies to both model and batches.

    Returns:
        Engine: an evaluator engine with supervised inference function
    """
    if device:
        if torch.cuda.device_count() > 1:
            model = nn.DataParallel(model)
        model.to(device)

    def _inference(engine, batch):
        model.eval()
        with torch.no_grad():
            data, pids, camids = batch
            if torch.cuda.device_count() >= 1:
                data = data.to(device)
            feat = model(data)
            return feat, pids, camids

    eval_engine = Engine(_inference)

    for metric_name, metric in metrics.items():
        metric.attach(eval_engine, metric_name)

    return eval_engine
-
-
def do_train_with_center(cfg,model,center_criterion,train_loader,val_loader,optimizer,optimizer_center,scheduler,loss_fn,num_query,start_epoch):
    """Same wiring as :func:`do_train`, but for the center-loss trainer:
    checkpoints additionally include ``optimizer_center`` and up to 10
    checkpoints are retained.

    Args:
        cfg: config node providing SOLVER.*, MODEL.*, TEST.* and OUTPUT_DIR.
        center_criterion / optimizer_center: center-loss module and its optimizer.
        num_query (int): number of query images at the head of val_loader's
            dataset (consumed by the R1_mAP metric).
        start_epoch (int): epoch to resume counting from.
    """
    log_period = cfg.SOLVER.LOG_PERIOD
    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    eval_period = cfg.SOLVER.EVAL_PERIOD
    output_dir = cfg.OUTPUT_DIR
    device = cfg.MODEL.DEVICE
    epochs = cfg.SOLVER.MAX_EPOCHS

    logger = logging.getLogger("reid_baseline.train")
    logger.info("Start training")

    trainer = create_supervised_trainer_with_center(model, center_criterion, optimizer, optimizer_center, loss_fn, cfg.SOLVER.CENTER_LOSS_WEIGHT, device=device)
    evaluator = create_supervised_evaluator(model, metrics={'r1_mAP': R1_mAP(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)}, device=device)
    checkpointer = ModelCheckpoint(output_dir, cfg.MODEL.NAME, checkpoint_period, n_saved=10, require_empty=False)

    timer = Timer(average=True)

    # checkpoint model + both optimizers at the end of every epoch
    trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpointer, {'model': model,
                                                                     'optimizer': optimizer,
                                                                     'optimizer_center': optimizer_center})

    # time each iteration; per-epoch averages are reported in print_times
    timer.attach(trainer, start=Events.EPOCH_STARTED, resume=Events.ITERATION_STARTED,
                 pause=Events.ITERATION_COMPLETED, step=Events.ITERATION_COMPLETED)

    # average metric to attach on trainer
    RunningAverage(output_transform=lambda x: x[0]).attach(trainer, 'avg_loss')
    RunningAverage(output_transform=lambda x: x[1]).attach(trainer, 'avg_acc')

    @trainer.on(Events.STARTED)
    def start_training(engine):
        # resume epoch numbering when restarting from a checkpoint
        engine.state.epoch = start_epoch

    @trainer.on(Events.EPOCH_STARTED)
    def adjust_learning_rate(engine):
        scheduler.step()

    @trainer.on(Events.ITERATION_COMPLETED)
    def log_training_loss(engine):
        # ITER is module-global so the counter is shared across handlers
        global ITER
        ITER += 1

        if ITER % log_period == 0:
            logger.info("Epoch[{}] Iteration[{}/{}] Loss: {:.3f}, Acc: {:.3f}, Base Lr: {:.2e}"
                        .format(engine.state.epoch, ITER, len(train_loader),
                                engine.state.metrics['avg_loss'], engine.state.metrics['avg_acc'],
                                scheduler.get_lr()[0]))
        # reset the counter at the end of each epoch
        if len(train_loader) == ITER:
            ITER = 0

    # adding handlers using `trainer.on` decorator API
    @trainer.on(Events.EPOCH_COMPLETED)
    def print_times(engine):
        logger.info('Epoch {} done. Time per batch: {:.3f}[s] Speed: {:.1f}[samples/s]'
                    .format(engine.state.epoch, timer.value() * timer.step_count,
                            train_loader.batch_size / timer.value()))
        logger.info('-' * 10)
        timer.reset()

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_validation_results(engine):
        # run the re-id evaluation (mAP + CMC ranks) every eval_period epochs
        if engine.state.epoch % eval_period == 0:
            evaluator.run(val_loader)
            cmc, mAP = evaluator.state.metrics['r1_mAP']
            logger.info("Validation Results - Epoch: {}".format(engine.state.epoch))
            logger.info("mAP: {:.1%}".format(mAP))
            for r in [1, 5, 10]:
                logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))

    trainer.run(train_loader, max_epochs=epochs)
|