|
- import argparse
- import os
- import numpy as np
- import tensorflow as tf
- from glob import glob
- import re
- import csv
- from collections import OrderedDict
- import os
- from Common import pc_util
- from Common.pc_util import load, save_ply_property,get_pairwise_distance
- from Common.ops import normalize_point_cloud
- from tf_ops.nn_distance import tf_nndistance
- from sklearn.neighbors import NearestNeighbors
- import math
- from time import time
-
-
-
- precentages = np.array([0.008, 0.012])
-
- def cal_nearest_distance(queries, pc, k=2):
- """
- """
- knn_search = NearestNeighbors(n_neighbors=k, algorithm='auto')
- knn_search.fit(pc)
- dis,knn_idx = knn_search.kneighbors(queries, return_distance=True)
- return dis[:,1]
-
- def analyze_uniform(idx_file,radius_file,map_points_file):
- start_time = time()
- points = load(map_points_file)[:,4:]
- radius = np.loadtxt(radius_file)
- print('radius:',radius)
- with open(idx_file) as f:
- lines = f.readlines()
-
- sample_number = 1000
- rad_number = radius.shape[0]
-
- uniform_measure = np.zeros([rad_number,1])
-
- densitys = np.zeros([rad_number,sample_number])
-
-
- expect_number = precentages * points.shape[0]
- expect_number = np.reshape(expect_number, [rad_number, 1])
-
- for j in range(rad_number):
- uniform_dis = []
-
- for i in range(sample_number):
-
- density, idx = lines[i*rad_number+j].split(':')
- densitys[j,i] = int(density)
- coverage = np.square(densitys[j,i] - expect_number[j]) / expect_number[j]
-
- num_points = re.findall("(\d+)", idx)
-
- idx = list(map(int, num_points))
- if len(idx) < 5:
- continue
-
- idx = np.array(idx).astype(np.int32)
- map_point = points[idx]
-
- shortest_dis = cal_nearest_distance(map_point,map_point,2)
- disk_area = math.pi * (radius[j] ** 2) / map_point.shape[0]
- expect_d = math.sqrt(2 * disk_area / 1.732)##using hexagon
-
- dis = np.square(shortest_dis - expect_d) / expect_d
- dis_mean = np.mean(dis)
- uniform_dis.append(coverage*dis_mean)
-
- uniform_dis = np.array(uniform_dis).astype(np.float32)
- uniform_measure[j, 0] = np.mean(uniform_dis)
-
- print('time cost for uniform :',time()-start_time)
- return uniform_measure
-
- with tf.Session() as sess:
-
- fieldnames = ["name", "p2f avg", "p2f std"]
-
- fieldnames += ["uniform_%d" % d for d in range(precentages.shape[0])]
-
- print("{:60s} ".format("name"), "|".join(["{:>15s}".format(d) for d in fieldnames[1:]]))
-
- D = '/userhome/zyc/PUGAN-master/evaluation_code/'
- pred_paths = glob(os.path.join(D, "*.xyz"))
-
- with open(os.path.join(os.getcwd(), "evaluation.csv"), "w") as f:
- writer = csv.DictWriter(f, fieldnames=fieldnames, restval="-", extrasaction="ignore")
- writer.writeheader()
-
- for pred_path in pred_paths:
-
- global_p2f = []
- global_density = []
- global_uniform = []
-
- row = {}
-
-
- row["name"] = os.path.basename(pred_path)
-
-
- #save_ply_property(np.squeeze(pred), cd_forward_value, pred_path[:-4]+"_cdF.ply", property_max=0.003, cmap_name="jet")
- #save_ply_property(np.squeeze(gt), cd_backward_value, pred_path[:-4]+"_cdB.ply", property_max=0.003, cmap_name="jet")
-
-
- if os.path.isfile(pred_path[:-4] + "_point2mesh_distance.txt"):
- point2mesh_distance = load(pred_path[:-4] + "_point2mesh_distance.txt")
- if point2mesh_distance.size == 0:
- continue
- point2mesh_distance = point2mesh_distance[:, 3]
- row["p2f avg"] = np.nanmean(point2mesh_distance)
- row["p2f std"] = np.nanstd(point2mesh_distance)
- global_p2f.append(point2mesh_distance)
-
- if os.path.isfile(pred_path[:-4] + "_disk_idx.txt"):
-
- idx_file = pred_path[:-4] + "_disk_idx.txt"
- radius_file = pred_path[:-4] + '_radius.txt'
- map_points_file = pred_path[:-4] + '_point2mesh_distance.txt'
-
- disk_measure = analyze_uniform(idx_file, radius_file, map_points_file)
- global_uniform.append(disk_measure)
-
- for i in range(2):
- row["uniform_%d" % i] = disk_measure[i, 0]
-
-
- writer.writerow(row)
- row = OrderedDict()
-
- if global_p2f:
- global_p2f = np.concatenate(global_p2f, axis=0)
- mean_p2f = np.nanmean(global_p2f)
- std_p2f = np.nanstd(global_p2f)
- row["p2f avg"] = mean_p2f
- row["p2f std"] = std_p2f
-
- if global_uniform:
- global_uniform = np.array(global_uniform)
- uniform_mean = np.mean(global_uniform, axis=0)
- for i in range(precentages.shape[0]):
- row["uniform_%d" % i] = uniform_mean[i, 0]
-
- writer.writerow(row)
- print("|".join(["{:>15.8f}".format(d) for d in row.values()]))
|