|
- import torch
- import torch.utils.data
- import torch.nn.parallel
- import os
- import numpy as np
- import scipy.io as sio
- import argparse
- import fnmatch
- import Datasets
- import models
- from torch.autograd.variable import Variable
-
- from torchvision.utils import save_image
- '''for evaluation part packages'''
- from eval import Eval
- from Datasets.plyfile.plyfile import PlyData
- from metrics.evaluation_metrics import EMD_CD
- from metrics.evaluation_metrics import jsd_between_point_cloud_sets as JSD
- from metrics.evaluation_metrics import compute_all_metrics
-
# Fix the RNG seeds so evaluation runs are reproducible.
np.random.seed(5)
torch.manual_seed(5)

# Registered dataset / model identifiers, sorted for stable --help output.
dataset_names = sorted(Datasets.__all__)
model_names = sorted(models.__all__)
-
# ---------------------------------------------------------------------------
# Command-line interface and device selection.
# NOTE: module-level side effects — parses sys.argv and configures CUDA on
# import.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Eval_metrics: JSD MMD 1-NNA',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)

# Saving models and results
parser.add_argument('--save', default=True, help='Save Models or not ?')
# NOTE(review): a CLI override such as `--save False` yields the truthy string
# 'False'; consider action='store_true' or BooleanOptionalAction — confirm callers.
parser.add_argument('--result', default='./results/', help='output result directory')
parser.add_argument('--pretrained',
                    default='./ckpts/AE_ckpts/shapenet/08-19-08_33/ae_pointnet,Adam,400epochs,b24,lr0.001/model_best.pth.tar',
                    help='Use Pretrained Model for testing or resuming training')  # TODO: site-specific path

# Dataset locations / processing
parser.add_argument('-d', '--data', metavar='DIR',
                    default='/data/rayna/datasets/shape_net_core_uniform_samples_2048/four/',
                    help='Path to Complete Point Cloud Data Set')  # TODO: site-specific path
parser.add_argument('-dw', '--datatest',
                    default='/data/rayna/datasets/shape_net_core_uniform_samples_2048/four/',
                    help='Path to Complete Point Cloud Data Set')  # TODO: site-specific path
parser.add_argument('-n', '--dataName', metavar='Data Set Name', default='shapenet',
                    choices=dataset_names)
parser.add_argument('-ad', '--adddata', metavar='aDIR', default='',
                    help='Additional path to dataset')
parser.add_argument('-s', '--split_value', default=[0.85, 0.3333],
                    help='Ratio of train and validation data split')

# Torch DataLoader
parser.add_argument('-b', '--batch_size', type=int, default=1, help='input batch size')
parser.add_argument('-w', '--workers', type=int, default=8, help='Set the number of workers')

# Model settings
parser.add_argument('-nt', '--net_name', default='auto_encoder',
                    help='Chose The name of your network',
                    choices=['auto_encoder', 'shape_completion'])

# Optimizer settings.
# Fix: type= added to every numeric option below; without it a CLI override
# (e.g. `--lr 0.01`) arrives as a *string* and breaks downstream arithmetic.
parser.add_argument('-op', '--optim', default='Adam', help='Specify the Optimizer to use')
parser.add_argument('--lr', type=float, default=0.001, help='Learning Rate for the optimizer')
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for the adam optimizer')
parser.add_argument('--beta', type=float, default=0.999, help='beta for the adam optimizer')
parser.add_argument('--milestones', type=int, nargs='+', default=[60, 120, 180, 500, 800],
                    help='For learning rate scheduler, will decay learning rate by gamma after each milestone')
parser.add_argument('--gamma', type=float, default=0.5, help='gamma for the learning rate scheduler')
parser.add_argument('--bias_decay', type=float, default=0, help='bias decay')

# Training settings
parser.add_argument('--epochs', type=int, default=400, help='Number of epochs to run')
parser.add_argument('--start_epoch', type=int, default=0, help='Starting Epoch')

# Model parameters
parser.add_argument('--output_fc_pc_num', type=int, default=256, help='# of fc decoder output points')
parser.add_argument('--output_conv_pc_num', type=int, default=4096, help='# of conv decoder output points')
parser.add_argument('--feature_num', type=int, default=1024, help='length of encoded feature')
parser.add_argument('--activation', type=str, default='relu', help='activation function: relu, elu')
parser.add_argument('--normalization', type=str, default='batch', help='normalization function: batch, instance')

# OT pipeline stage selection
parser.add_argument('--action', nargs='+', type=str, default='',
                    help=' mode of OT: extract_feature, train_OT'
                         ', generate, decode_feature, decode_test')
parser.add_argument('-a', '--angle', type=float, default=0.7,
                    help='angle threshold to generate features')
parser.add_argument('-dis', '--dissim', type=float, default=0.0, help='dissimilarity')

# GPU settings
parser.add_argument('--gpu_id', type=int, default=1, help='gpu ids: e.g. 0, 1. -1 is no GPU')

args = parser.parse_args()
# Device handle used for the chamfer-loss computation.
args.device = torch.device("cuda:%d" % (args.gpu_id) if torch.cuda.is_available() else "cpu")
# Fix: only touch the CUDA runtime when it exists — the unconditional
# set_device() raised on CPU-only machines.
if torch.cuda.is_available():
    torch.cuda.set_device(args.gpu_id)
    print('Using GPU : ', torch.cuda.current_device())
# NOTE(review): CUDA_VISIBLE_DEVICES is read when CUDA initializes, so setting
# it *after* set_device() above has no effect — confirm the intended ordering.
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)

# Default to running every OT stage when --action is not supplied.
if args.action == '':
    actions = ['extract_feature', 'train_OT', 'generate', 'decode_feature', 'decode_test']
else:
    actions = args.action
-
def _report_metrics(title, sample_pcs, ref_pcs, batch_size):
    """Print MMD/COV/1-NNA metrics and JSD for one sample-vs-reference pair.

    Args:
        title: banner line printed before the metrics.
        sample_pcs: generated/reconstructed point clouds (torch tensor on GPU).
        ref_pcs: reference point clouds (torch tensor on GPU).
        batch_size: batch size handed to compute_all_metrics.
    """
    print(title)
    results = compute_all_metrics(sample_pcs, ref_pcs, batch_size, accelerated_cd=True)
    # Tensors -> plain floats so the dict prints readably.
    results = {k: (v.cpu().detach().item() if not isinstance(v, float) else v)
               for k, v in results.items()}
    # Fix: one branch of the original called the never-imported `pprint`
    # (NameError at runtime); use print() consistently with the other branches.
    print(results)

    jsd = JSD(sample_pcs.cpu().detach().numpy(), ref_pcs.cpu().detach().numpy())
    print("JSD:%s" % jsd)


def main():
    """Load previously generated point-cloud .npy dumps and report generative
    metrics (MMD/COV/1-NNA via compute_all_metrics, plus JSD) for:
      1. OT-generated samples vs. reference,
      2. AE reconstructions vs. reference,
      3. dropped-point AE reconstructions vs. dropped reference.
    Creates the result directory tree as a side effect.
    """
    # Generation hyper-parameters (see paper for the OT generator details).
    angle_threshold = args.angle      # angle threshold in [0, 1]
    rec_gen_distance = args.dissim    # rec/gen dissimilarity in [0, 1]; smaller = more similar

    # Checkpoint path layout: .../<time_stamp>/<model>,<optim>,.../model_best.pth.tar
    # Fix: negative indices instead of the hard-coded [5], and plain
    # str.split instead of re.split('\/', ...) (invalid escape sequence).
    args.model = args.pretrained.split('/')[-2].split(',')[0]
    time_stamp = args.pretrained.split('/')[-3]

    # ---- result directory tree ------------------------------------------
    result_root_path = args.result + time_stamp
    print('==> results dir:', result_root_path)
    if not os.path.exists(result_root_path):
        os.mkdir(result_root_path)

    # 1. extract_feature output dir: GFV/<dataset>/<model>
    save_path = os.path.join(result_root_path, 'GFV', args.dataName, args.model)
    print('==> GFV dir:', save_path)  # fix: was print('...{}', path) — literal braces
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    # 2-3. train_OT / generate output dirs; <angle>_<dissim> keys the run.
    ot_path = os.path.join(result_root_path, 'OT')
    save_dir = os.path.join(ot_path, str(angle_threshold) + '_' + str(rec_gen_distance))
    h_path = os.path.join(ot_path, 'h.pt')
    print('==> generate h path', h_path, 'and outP_path', save_dir)
    if not os.path.exists(ot_path):
        os.makedirs(ot_path)

    # 4. decode_feature output dir (train-set evaluation pairs).
    gen_pc_pair_path = os.path.join(save_dir, 'gen_pcs_pairs/')
    print('==> gen_pc_pair_path:', gen_pc_pair_path)
    if not os.path.exists(gen_pc_pair_path):
        os.makedirs(gen_pc_pair_path)
    eval_train_path = os.path.join(save_dir, 'eval_train.csv')

    # 5. decode_test output dir (test-set evaluation).
    gen_pc_path = os.path.join(save_dir, 'gen_pcs/')
    print('==> gen_pc_path:', gen_pc_path)
    if not os.path.exists(gen_pc_path):
        os.makedirs(gen_pc_path)

    print('save_dir path:', save_dir)
    path_g = os.path.join(save_dir, "model_out_smp_g.npy")
    path_AE = os.path.join(save_dir, "model_out_smp_AE.npy")
    path_ref = os.path.join(save_dir, "model_out_ref.npy")
    path_drop_AE = os.path.join(save_dir, "model_drop_smp.npy")
    path_drop_ref = os.path.join(save_dir, "model_drop_ref.npy")

    print('-----------------------------------Evaluations---------------------------------------------')

    # Remove any stale train-eval CSV from a previous run.
    if os.path.exists(eval_train_path):
        os.remove(eval_train_path)

    print('Already have data')
    sample_pcs_g = torch.from_numpy(np.load(path_g)).cuda()
    sample_pcs_AE = torch.from_numpy(np.load(path_AE)).cuda()
    ref_pcs = torch.from_numpy(np.load(path_ref)).cuda()
    print("Generation sample size:%s reference size: %s"
          % (sample_pcs_g.size(), ref_pcs.size()))

    args.batch_size = 128  # metrics batch size (deliberately overrides the CLI value)
    _report_metrics('Generated Evaluation: ours vs org\n',
                    sample_pcs_g, ref_pcs, args.batch_size)
    _report_metrics('Generated Evaluation: AE vs org\n',
                    sample_pcs_AE, ref_pcs, args.batch_size)

    print('Already have dropped data')
    sample_pcs_AE = torch.from_numpy(np.load(path_drop_AE)).cuda()
    ref_pcs = torch.from_numpy(np.load(path_drop_ref)).cuda()
    _report_metrics('Dropped Evaluation: AE vs org\n',
                    sample_pcs_AE, ref_pcs, args.batch_size)

    print('Complete.')
-
# Script entry point: run the metrics-evaluation pipeline.
if __name__=='__main__':
    main()
|