|
-
- import argparse
- import os, sys
- sys.path.append("../")
-
# Command-line setup: pick the GPU *before* torch/TF are imported below,
# so CUDA_VISIBLE_DEVICES takes effect.
parser = argparse.ArgumentParser(description="Arg parser")
parser.add_argument('--gpu', type=int, default=0, help='GPU to use')

args = parser.parse_args()
gpu_id = args.gpu
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
-
- import torch
- import torch.nn as nn
- from torch.utils.data import DataLoader
- from utils.xyz_util import save_xyz_file
-
- from model import Generator
-
- import tensorflow as tf
- from tf_ops import pc_util
- from tf_ops.sampling.tf_sampling import farthest_point_sample
- import logging
- import os
- from tqdm import tqdm
- from glob import glob
- import math
- from time import time
- import numpy as np
-
-
def patch_prediction(patch_point):
    """Upsample a single patch with the global generator ``Gen``.

    The patch is normalized to the unit sphere, pushed through the network
    on the GPU, and the prediction is mapped back to the patch's original
    scale and position.

    NOTE(review): the result keeps a leading singleton axis — shape
    (1, M, 3) — which the caller squeezes away. This mirrors the original
    ``[pred] * furthest_distance`` broadcast; do not "fix" it here without
    also changing the caller.
    """
    # Center at the centroid and scale to the unit sphere.
    patch_point, centroid, furthest_distance = pc_util.normalize_point_cloud(patch_point)

    # (N, 3) -> (1, 3, N) for the network; back to a (1, M, 3) numpy array.
    batch = np.expand_dims(patch_point, axis=0)
    net_in = torch.from_numpy(batch).permute(0, 2, 1).float().cuda()
    pred = Gen(net_in).permute(0, 2, 1).cpu().detach().numpy()

    # Denormalize. ``pred[np.newaxis]`` reproduces the original
    # list-to-array broadcast, so after the squeeze the shape is (1, M, 3).
    pred = np.squeeze(centroid + pred[np.newaxis] * furthest_distance, axis=0)
    return pred
-
def pc_prediction(pc):
    """Split ``pc`` into kNN patches seeded by farthest-point sampling and
    upsample each patch.

    Returns a pair ``(input_list, up_point_list)``: the extracted 256-point
    input patches and their upsampled counterparts (one array per patch).
    """
    patch_num_point = 256

    # Seed selection via FPS on the (batched) cloud; ~3x patch coverage.
    batched_pc = tf.convert_to_tensor(np.expand_dims(pc, axis=0), dtype=tf.float32)
    start = time()
    print('------------------patch_num_point:', patch_num_point)
    seed1_num = int(pc.shape[0] / 256 * 3)

    with tf.Session() as sess:
        seed = farthest_point_sample(seed1_num, batched_pc).eval()[0]
    seed_list = seed[:seed1_num]

    print("farthest distance sampling cost", time() - start)
    print("number of patches: %d" % len(seed_list))

    # One 256-point kNN patch around every seed point.
    patches = pc_util.extract_knn_patch(pc[np.asarray(seed_list), :], pc, patch_num_point)

    input_list = []
    up_point_list = []
    for patch in tqdm(patches, total=len(patches)):
        upsampled = patch_prediction(patch)
        # patch_prediction returns (1, M, 3); drop the batch axis here.
        upsampled = np.squeeze(upsampled, axis=0)
        input_list.append(patch)
        up_point_list.append(upsampled)

    return input_list, up_point_list
-
-
if __name__ == '__main__':
    # Without basicConfig the logging.info calls below were silently
    # dropped; configure a minimal handler so progress is visible.
    logging.basicConfig(level=logging.INFO)

    # Build the generator and load the trained weights onto the GPU.
    Gen = Generator()
    checkpoint = torch.load('./checkpoints/pugan2/G_iter_99.pth')
    Gen.load_state_dict(checkpoint)
    Gen.eval().cuda()

    samples = glob('/userhome/zyc/PUGAN-pytorch-master/data/test/*.xyz')

    # Create the output directory once, before the loop (it was re-checked
    # on every iteration).
    out_folder = './data/test/output'
    if not os.path.exists(out_folder):
        os.makedirs(out_folder)

    up_ratio = 4  # target upsampling factor

    for point_path in samples:
        logging.info(point_path)
        start = time()

        pc = pc_util.load(point_path)[:, :3]
        # BUG FIX: the target point count must come from *this* file, not
        # from samples[0] as before — inputs of different sizes were all
        # forced to the first file's 4x count.
        out_point_num = int(pc.shape[0] * up_ratio)

        pc, centroid, furthest_distance = pc_util.normalize_point_cloud(pc)

        input_list, pred_list = pc_prediction(pc)

        end = time()
        print("total time: ", end - start)

        # Stitch the per-patch predictions together and undo the global
        # normalization applied above.
        pred_pc = np.concatenate(pred_list, axis=0)
        pred_pc = (pred_pc * furthest_distance) + centroid
        pred_pc = np.reshape(pred_pc, [-1, 3])

        # The patches overlap, so FPS-downsample the union to exactly the
        # target number of points.
        with tf.Session() as sess:
            idx = farthest_point_sample(out_point_num, pred_pc[np.newaxis, ...]).eval()[0]
        pred_pc = pred_pc[idx, 0:3]

        # Same final name as before ('.ply' was only ever sliced off);
        # os.path helpers replace the fragile split('/') handling.
        stem = os.path.splitext(os.path.basename(point_path))[0]
        np.savetxt(os.path.join(out_folder, stem + '.xyz'), pred_pc, fmt='%.6f')
-
|