- #!/usr/bin/env python
- # -*- coding: utf-8 -*-
- """
- @Author: Yue Wang
- @Contact: yuewangx@mit.edu
- @File: model.py
- @Time: 2018/10/13 6:35 PM
-
- Modified by
- @Author: An Tao
- @Contact: ta19@mails.tsinghua.edu.cn
- @Time: 2020/3/9 9:32 PM
-
- Modified by
- @Author: Dinghao Yang
- @Contact: dinghaoyang@gmail.com
- @Time: 2020/9/28 7:29 PM
-
- Modified by
- @Author: Yu Deng
- @Contact: dengy02@pcl.ac.cn
- @Time: 2022/7/5 2:20 PM
- """
-
- import numpy as np
- import tensorflow as tf
- from sklearn import manifold
-
-
- def knn(x, k):
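- # Brute-force k-nearest-neighbour search over a (batch_size, num_dims, num_points)
- # tensor; returns the indices of each point's k closest points (itself included).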
- x_trans = tf.transpose(x, [0, 2, 1])
- inner = -2*tf.matmul(x_trans, x)
- xx = tf.reduce_sum(x**2, axis=1, keepdims=True)
- xx_trans = tf.transpose(xx, [0, 2, 1])
- pairwise_distance = -xx - inner - xx_trans
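- # pairwise_distance[i, j] = -||x_i - x_j||^2, so top_k picks the k closest points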
- # tf.math.top_k, tf.raw_ops.TopKV2
- idx = tf.math.top_k(input=pairwise_distance, k=k)[1] # (batch_size, num_points, k)
- print("knn idx shape", idx.shape) # (32, 1024, 2)
- print("batch_size, num_dims, num_points", x.shape) # (32, 64, 1024)
- return idx
-
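- # Minimal sanity check for knn (an illustrative sketch, not part of the training
- # pipeline; the tensor below is made up). Three 2-D points at x = 0, 1, 10:
- # pts = tf.constant([[[0.0, 1.0, 10.0],
- #                     [0.0, 0.0,  0.0]]])   # (batch=1, dims=2, points=3)
- # print(knn(pts, k=2))                      # [[[0 1] [1 0] [2 1]]]: each point is its own nearest neighbour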
-
- def get_graph_feature(x, k=20, idx=None, dim9=False):
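- # Builds DGCNN-style edge features: for every point, gather its k nearest
- # neighbours and stack (neighbour - point, point) along the channel axis,
- # turning (batch_size, num_dims, num_points) into (batch_size, 2*num_dims, num_points, k).
- # With dim9=True only the last 3 of the 9 input channels are used for the kNN search.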
- # x shape: (2, 128, 1024)
- # tf.reshape(x, shape=(batch_size, -1, num_points)) shape: (2, 128, 1024)
- batch_size = x.shape[0]
- num_dims = x.shape[1]
- num_points = x.shape[2]
- # x = tf.reshape(x, shape=(batch_size, -1, num_points))
- k = k if x.shape[-1] >= k else x.shape[-1] ## add
- # print("x get_graph_feature shape", x.shape)
- if idx is None:
- if not dim9:
- idx = knn(x, k=k) # (batch_size, num_points, k)
- else:
- idx = knn(x[:, 6:], k=k)
-
- # idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1)*num_points
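- # offset each batch's neighbour indices so they address rows of the flattened
- # (batch_size*num_points, num_dims) tensor gathered below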
- idx_range = tf.range(start=0, limit=batch_size, dtype=tf.int32) # start, limit
- idx_base = tf.reshape(idx_range, (-1, 1, 1))*num_points
- idx = idx + idx_base
-
- # idx = idx.view(-1)
- # idx = tf.reshape(idx, -1)
- idx = tf.reshape(idx, (-1, ))
-
- # _, num_dims, _ = x.size()
- # _, num_dims, _ = x.shape
-
- ## batch_size, -1, num_points
- x = tf.transpose(x, perm=[0, 2, 1]) # (batch_size, num_points, num_dims) -> (batch_size*num_points, num_dims) # batch_size * num_points * k + range(0, batch_size*num_points)
-
- # feature = x.view(batch_size*num_points, -1)[idx, :]
- feature = tf.gather(params=tf.reshape(x, (batch_size*num_points, num_dims)), indices=idx)
-
- # feature = feature.view(batch_size, num_points, k, num_dims)
- feature = tf.reshape(feature, (batch_size, num_points, k, num_dims))
- # x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)
- x = tf.reshape(x, (batch_size, num_points, 1, num_dims))
- x = tf.tile(x, [1, 1, k, 1])
- print('x after repeat shape', x.shape) # (32, 1024, 2, 64)
-
- # feature = torch.cat((feature-x, x), dim=3).permute(0, 3, 1, 2).contiguous()
- feature = tf.concat([feature-x, x], axis=3)
- feature = tf.transpose(feature, [0, 3, 1, 2])
- print("feature shape", feature.shape) # (32, 128, 1024, 2)
- print("batch_size, 2*num_dims, num_points, k", batch_size, 2*num_dims, num_points, k) # 32 128 1024 2
- print()
-
- return feature # (batch_size, 2*num_dims, num_points, k)
-
-
- class PointManifold_LLE(tf.keras.Model):
- def __init__(self, args, output_channels=40): # mode='fan_in', distribution='uniform'
- super(PointManifold_LLE, self).__init__()
- self.args = args
- self.k = args.k
- self.batch_size = args.batch_size
- self.num_points = args.num_points
- self.emb_dims = args.emb_dims
- self.dropout = args.dropout
-
- # (batch_size, 3*2, num_points, k)
- # self.conv1 = tf.keras.Sequential(
- # [
- # tf.keras.layers.Conv2D(64, kernel_size=1, use_bias=False, padding='same', data_format="channels_first"),
- # tf.keras.layers.BatchNormalization(axis=1), # axis=-1, momentum=0.1, epsilon=0.00001
- # tf.keras.layers.LeakyReLU(alpha=0.2)
- # ])
- self.layers1_0 = tf.keras.layers.Conv2D(64, kernel_size=1, use_bias=False, padding='same', data_format="channels_first")
- self.layers1_1 = tf.keras.layers.BatchNormalization(axis=1)
- self.layers1_2 = tf.keras.layers.LeakyReLU(alpha=0.2)
-
- # self.conv2 = tf.keras.Sequential(
- # [
- # tf.keras.layers.Conv2D(64, kernel_size=1, use_bias=False, padding='same', data_format="channels_first"),
- # tf.keras.layers.BatchNormalization(axis=1),
- # tf.keras.layers.LeakyReLU(alpha=0.2)
- # ])
- self.layers2_0 = tf.keras.layers.Conv2D(64, kernel_size=1, use_bias=False, padding='same', data_format="channels_first")
- self.layers2_1 = tf.keras.layers.BatchNormalization(axis=1)
- self.layers2_2 = tf.keras.layers.LeakyReLU(alpha=0.2)
-
- # self.conv3 = tf.keras.Sequential(
- # [
- # tf.keras.layers.Conv2D(128, kernel_size=1, use_bias=False, padding='same', data_format="channels_first"),
- # tf.keras.layers.BatchNormalization(axis=1),
- # tf.keras.layers.LeakyReLU(alpha=0.2)
- # ])
- self.layers3_0 = tf.keras.layers.Conv2D(128, kernel_size=1, use_bias=False, padding='same', data_format="channels_first")
- self.layers3_1 = tf.keras.layers.BatchNormalization(axis=1)
- self.layers3_2 = tf.keras.layers.LeakyReLU(alpha=0.2)
-
- # self.conv4 = tf.keras.Sequential(
- # [
- # tf.keras.layers.Conv2D(256, kernel_size=1, use_bias=False, padding='same', data_format="channels_first"),
- # tf.keras.layers.BatchNormalization(axis=1),
- # tf.keras.layers.LeakyReLU(alpha=0.2)
- # ])
- self.layers4_0 = tf.keras.layers.Conv2D(256, kernel_size=1, use_bias=False, padding='same', data_format="channels_first")
- self.layers4_1 = tf.keras.layers.BatchNormalization(axis=1)
- self.layers4_2 = tf.keras.layers.LeakyReLU(alpha=0.2)
-
- # self.conv5 = tf.keras.Sequential(
- # [
- # tf.keras.layers.Conv1D(self.emb_dims, kernel_size=1, use_bias=False, padding='same', data_format="channels_first"),
- # tf.keras.layers.BatchNormalization(axis=1),
- # tf.keras.layers.LeakyReLU(alpha=0.2)
- # ])
- self.layers5_0 = tf.keras.layers.Conv1D(self.emb_dims, kernel_size=1, use_bias=False, padding='same', data_format="channels_first")
- self.layers5_1 = tf.keras.layers.BatchNormalization(axis=1)
- self.layers5_2 = tf.keras.layers.LeakyReLU(alpha=0.2)
-
- self.linear1 = tf.keras.layers.Dense(512, use_bias=False)
- self.bn6 = tf.keras.layers.BatchNormalization(axis=1)
- self.dp1 = tf.keras.layers.Dropout(rate=self.dropout)
- self.linear2 = tf.keras.layers.Dense(256)
- self.bn7 = tf.keras.layers.BatchNormalization(axis=1)
- self.dp2 = tf.keras.layers.Dropout(rate=self.dropout)
- self.linear3 = tf.keras.layers.Dense(output_channels)
-
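- # Forward pass: four EdgeConv blocks (64, 64, 128, 256 channels), concatenate the
- # per-point features, lift them to emb_dims with a 1x1 Conv1D, global max+avg pool,
- # then an MLP head 512 -> 256 -> output_channels.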
- def call(self, x, training=True):
- # print("ori x:", x.shape)
- batch_size = x.shape[0]
-
- x = get_graph_feature(x, k=self.k) # (batch_size, 3, num_points) -> (batch_size, 3*2, num_points, k)
- # print("x get_graph_feature shape", x.shape)
- # x = self.conv1(x) # (batch_size, 3*2, num_points, k) -> (batch_size, 64, num_points, k)
- x = self.layers1_0(x)
- x = self.layers1_1(x, training=training)
- x = self.layers1_2(x)
- # x1 = x.max(dim=-1, keepdim=False)[0] # (batch_size, 64, num_points, k) -> (batch_size, 64, num_points)
- x1 = tf.math.reduce_max(x, axis=-1)
- # print("x1 shape", x1.shape)
-
- x = get_graph_feature(x1, k=self.k) # (batch_size, 64, num_points) -> (batch_size, 64*2, num_points, k)
- # x = self.conv2(x) # (batch_size, 64*2, num_points, k) -> (batch_size, 64, num_points, k)
- x = self.layers2_0(x)
- x = self.layers2_1(x, training=training)
- x = self.layers2_2(x)
- # x2 = x.max(dim=-1, keepdim=False)[0] # (batch_size, 64, num_points, k) -> (batch_size, 64, num_points)
- x2 = tf.math.reduce_max(x, axis=-1)
-
- x = get_graph_feature(x2, k=self.k) # (batch_size, 64, num_points) -> (batch_size, 64*2, num_points, k)
- # x = self.conv3(x) # (batch_size, 64*2, num_points, k) -> (batch_size, 128, num_points, k)
- x = self.layers3_0(x)
- x = self.layers3_1(x, training=training)
- x = self.layers3_2(x)
- # x3 = x.max(dim=-1, keepdim=False)[0] # (batch_size, 128, num_points, k) -> (batch_size, 128, num_points)
- x3 = tf.math.reduce_max(x, axis=-1)
-
- x = get_graph_feature(x3, k=self.k) # (batch_size, 128, num_points) -> (batch_size, 128*2, num_points, k)
- # x = self.conv4(x) # (batch_size, 128*2, num_points, k) -> (batch_size, 256, num_points, k)
- x = self.layers4_0(x)
- x = self.layers4_1(x, training=training)
- x = self.layers4_2(x)
- # x4 = x.max(dim=-1, keepdim=False)[0] # (batch_size, 256, num_points, k) -> (batch_size, 256, num_points)
- x4 = tf.math.reduce_max(x, axis=-1)
-
- # x = torch.cat((x1, x2, x3, x4), dim=1) # (batch_size, 64+64+128+256, num_points)
- x = tf.keras.layers.concatenate([x1, x2, x3, x4], axis=1) # (batch_size, 64+64+128+256, num_points)
-
- # max_pool_1d = tf.keras.layers.MaxPooling1D(pool_size=2,strides=1, padding='valid')
- # x = self.conv5(x) # (batch_size, 64+64+128+256, num_points) -> (batch_size, emb_dims, num_points)
- x = self.layers5_0(x)
- x = self.layers5_1(x, training=training)
- x = self.layers5_2(x)
-
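- # pool over all num_points at once to mimic PyTorch's adaptive_max_pool1d / adaptive_avg_pool1d with output size 1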
- x1 = tf.keras.layers.MaxPooling1D(pool_size=x.shape[-1], data_format='channels_first')(x)
- x1 = tf.squeeze(x1, axis=-1)
- x2 = tf.keras.layers.AveragePooling1D(pool_size=x.shape[-1], data_format='channels_first')(x)
- x2 = tf.squeeze(x2, axis=-1)
- print("x1 adaptive_max_pool1d shape", x1.shape) # (32, 1024)
- print("x2 adaptive_avg_pool1d shape", x2.shape) # (32, 1024)
- x = tf.keras.layers.concatenate((x1, x2), 1) # (batch_size, emb_dims*2)
-
- # tf.keras.layers.LeakyReLU
- x = self.linear1(x)
- x = self.bn6(x, training=training)
- x = tf.keras.layers.LeakyReLU(alpha=0.2)(x) # (batch_size, emb_dims*2) -> (batch_size, 512)
- # print("linear1 x", x.shape)
- x = self.dp1(x, training=training)
- # print()
- x = self.linear2(x)
- x = self.bn7(x, training=training)
- x = tf.keras.layers.LeakyReLU(alpha=0.2)(x) # (batch_size, 512) -> (batch_size, 256)
- x = self.dp2(x, training=training)
- # print("linear2 x", x.shape)
- x = self.linear3(x) # (batch_size, 256) -> (batch_size, output_channels)
- print("x final shape", x.shape) # (32, 40)
-
- return x
-
-
- class PointManifold_NNML(tf.keras.Model):
- def __init__(self, args, output_channels=40):
- super(PointManifold_NNML, self).__init__()
- self.args = args
- self.k = args.k
- self.hyper_times = args.hyper_times
- self.emb_dims = args.emb_dims
- self.dropout = args.dropout
-
- # self.conv0_0 = tf.keras.Sequential(
- # [tf.keras.layers.Conv1D(2, kernel_size=1, use_bias=False, padding='same', data_format="channels_first"),
- # tf.keras.layers.BatchNormalization(axis=1),
- # tf.keras.layers.LeakyReLU(alpha=0.2)]
- # )
- self.layers1_0 = tf.keras.layers.Conv1D(2, kernel_size=1, use_bias=False, padding='same', data_format="channels_first")
- self.layers1_1 = tf.keras.layers.BatchNormalization(axis=1)
- self.layers1_2 = tf.keras.layers.LeakyReLU(alpha=0.2)
-
- # self.conv0_1 = tf.keras.Sequential(
- # [tf.keras.layers.Conv1D(2, kernel_size=1, use_bias=False, padding='same', data_format="channels_first"),
- # tf.keras.layers.BatchNormalization(axis=1),
- # tf.keras.layers.LeakyReLU(alpha=0.2)]
- # )
- self.layers2_0 = tf.keras.layers.Conv1D(2, kernel_size=1, use_bias=False, padding='same', data_format="channels_first")
- self.layers2_1 = tf.keras.layers.BatchNormalization(axis=1)
- self.layers2_2 = tf.keras.layers.LeakyReLU(alpha=0.2)
-
- # self.conv0_2 = tf.keras.Sequential(
- # [tf.keras.layers.Conv1D(2, kernel_size=1, use_bias=False, padding='same', data_format="channels_first"),
- # tf.keras.layers.BatchNormalization(axis=1),
- # tf.keras.layers.LeakyReLU(alpha=0.2)]
- # )
- self.layers3_0 = tf.keras.layers.Conv1D(2, kernel_size=1, use_bias=False, padding='same', data_format="channels_first")
- self.layers3_1 = tf.keras.layers.BatchNormalization(axis=1)
- self.layers3_2 = tf.keras.layers.LeakyReLU(alpha=0.2)
-
- # self.conv1 = tf.keras.Sequential(
- # [tf.keras.layers.Conv2D(128*self.hyper_times, kernel_size=1, use_bias=False, padding='same', data_format="channels_first"),
- # tf.keras.layers.BatchNormalization(axis=1),
- # tf.keras.layers.LeakyReLU(alpha=0.2)]
- # )
- self.layers4_0 = tf.keras.layers.Conv2D(128*self.hyper_times, kernel_size=1, use_bias=False, padding='same', data_format="channels_first")
- self.layers4_1 = tf.keras.layers.BatchNormalization(axis=1)
- self.layers4_2 = tf.keras.layers.LeakyReLU(alpha=0.2)
-
- # self.conv2 = tf.keras.Sequential(
- # [tf.keras.layers.Conv2D(128*self.hyper_times, kernel_size=1, use_bias=False, padding='same', data_format="channels_first"),
- # tf.keras.layers.BatchNormalization(axis=1),
- # tf.keras.layers.LeakyReLU(alpha=0.2)]
- # )
- self.layers5_0 = tf.keras.layers.Conv2D(128*self.hyper_times, kernel_size=1, use_bias=False, padding='same', data_format="channels_first")
- self.layers5_1 = tf.keras.layers.BatchNormalization(axis=1)
- self.layers5_2 = tf.keras.layers.LeakyReLU(alpha=0.2)
-
- # self.conv3 = tf.keras.Sequential(
- # [tf.keras.layers.Conv2D(256*self.hyper_times, kernel_size=1, use_bias=False, padding='same', data_format="channels_first"),
- # tf.keras.layers.BatchNormalization(axis=1),
- # tf.keras.layers.LeakyReLU(alpha=0.2)]
- # )
- self.layers6_0 = tf.keras.layers.Conv2D(256*self.hyper_times, kernel_size=1, use_bias=False, padding='same', data_format="channels_first")
- self.layers6_1 = tf.keras.layers.BatchNormalization(axis=1)
- self.layers6_2 = tf.keras.layers.LeakyReLU(alpha=0.2)
-
- # self.conv4 = tf.keras.Sequential(
- # [tf.keras.layers.Conv2D(512*self.hyper_times, kernel_size=1, use_bias=False, padding='same', data_format="channels_first"),
- # tf.keras.layers.BatchNormalization(axis=1),
- # tf.keras.layers.LeakyReLU(alpha=0.2)]
- # )
- self.layers7_0 = tf.keras.layers.Conv2D(512*self.hyper_times, kernel_size=1, use_bias=False, padding='same', data_format="channels_first")
- self.layers7_1 = tf.keras.layers.BatchNormalization(axis=1)
- self.layers7_2 = tf.keras.layers.LeakyReLU(alpha=0.2)
-
- # self.conv5 = tf.keras.Sequential(
- # [tf.keras.layers.Conv1D(self.emb_dims, kernel_size=1, use_bias=False, padding='same', data_format="channels_first"),
- # tf.keras.layers.BatchNormalization(axis=1),
- # tf.keras.layers.LeakyReLU(alpha=0.2)]
- # )
- self.layers8_0 = tf.keras.layers.Conv1D(self.emb_dims, kernel_size=1, use_bias=False, padding='same', data_format="channels_first")
- self.layers8_1 = tf.keras.layers.BatchNormalization(axis=1)
- self.layers8_2 = tf.keras.layers.LeakyReLU(alpha=0.2)
-
- self.linear1 = tf.keras.layers.Dense(512, use_bias=False)
- self.bn6 = tf.keras.layers.BatchNormalization(axis=1)
- self.dp1 = tf.keras.layers.Dropout(rate=self.dropout)
- self.linear2 = tf.keras.layers.Dense(256)
- self.bn7 = tf.keras.layers.BatchNormalization(axis=1)
- self.dp2 = tf.keras.layers.Dropout(rate=self.dropout)
- self.linear3 = tf.keras.layers.Dense(output_channels)
-
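- # Forward pass: lift xyz into three coordinate-plane features (9 channels total),
- # then run the same EdgeConv pipeline as PointManifold_LLE with wider layers
- # (128, 128, 256, 512 channels, each scaled by hyper_times).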
- def call(self, x, training=True):
- # print("NNML x shape", x.shape)
- # x (b, 3, n)
- # x = LLE(x, 12, 2)
-
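- # Each pair of coordinates is passed through a 1x1 Conv1D and scaled by the
- # remaining coordinate, giving three 2-channel "plane" features that are later
- # concatenated with the raw xyz to form the 9-channel input.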
- # x_2d_z
- x01 = tf.stack((x[:,0,:], x[:,1,:]), axis=1) # x[:,:2,:]
- # x_2d_z = self.conv0_0(x01) # (b, 2, n)
- x01 = self.layers1_0(x01)
- x01 = self.layers1_1(x01, training=training)
- x_2d_z = self.layers1_2(x01)
- # y = tf.raw_ops.Mul(x=a, y=b, name=None)
- x_reshape = tf.expand_dims(input=x[:, 2, :], axis=1)
- x_2d_z = tf.keras.layers.multiply([x_2d_z, x_reshape]) # (b, 2, n) x (b, 1, n)
-
- # x_2d_y
- x02 = tf.stack((x[:,0,:], x[:,2,:]), axis=1) # x[:,[0,2],:]
- # x_2d_y = self.conv0_1(x02)
- x02 = self.layers2_0(x02)
- x02 = self.layers2_1(x02, training=training)
- x_2d_y = self.layers2_2(x02)
- x_reshape = tf.expand_dims(input=x[:, 1, :], axis=1)
- x_2d_y = tf.keras.layers.multiply([x_2d_y, x_reshape])
-
- # x_2d_x
- x12 = tf.stack((x[:,1,:], x[:,2,:]), axis=1) # x[:,1:3,:]
- # x_2d_x = self.conv0_2(x12)
- x12 = self.layers3_0(x12)
- x12 = self.layers3_1(x12, training=training)
- x_2d_x = self.layers3_2(x12)
- x_reshape = tf.expand_dims(input=x[:, 0, :], axis=1)
- x_2d_x = tf.keras.layers.multiply([x_2d_x, x_reshape])
-
- # concatenate channels
- x = tf.keras.layers.concatenate([x, x_2d_x, x_2d_y, x_2d_z], axis=1) # (b, 9, n)
-
- x = get_graph_feature(x, k=self.k, dim9=True) # (batch_size, 9, num_points) -> (batch_size, 9*2, num_points, k)
- # x = self.conv1(x) # (batch_size, 9*2, num_points, k) -> (batch_size, 128, num_points, k) (8, 128, 1024, 2)
- x = self.layers4_0(x)
- x = self.layers4_1(x, training=training)
- x = self.layers4_2(x)
- # x1 = x.max(dim=-1, keepdim=False)[0] # (batch_size, 128, num_points, k) -> (batch_size, 128, num_points)
- x1 = tf.math.reduce_max(x, axis=-1) # (8, 128, 1024)
-
- x = get_graph_feature(x1, k=self.k) # (batch_size, 128, num_points) -> (batch_size, 128*2, num_points, k)
- # x = self.conv2(x) # (batch_size, 128*2, num_points, k) -> (batch_size, 128, num_points, k)
- x = self.layers5_0(x)
- x = self.layers5_1(x, training=training)
- x = self.layers5_2(x)
- # x2 = x.max(dim=-1, keepdim=False)[0] # (batch_size, 128, num_points, k) -> (batch_size, 128, num_points)
- x2 = tf.math.reduce_max(x, axis=-1)
-
- x = get_graph_feature(x2, k=self.k) # (batch_size, 128, num_points) -> (batch_size, 128*2, num_points, k)
- # x = self.conv3(x) # (batch_size, 128*2, num_points, k) -> (batch_size, 256, num_points, k)
- x = self.layers6_0(x)
- x = self.layers6_1(x, training=training)
- x = self.layers6_2(x)
- # x3 = x.max(dim=-1, keepdim=False)[0] # (batch_size, 256, num_points, k) -> (batch_size, 256, num_points)
- x3 = tf.math.reduce_max(x, axis=-1)
-
- x = get_graph_feature(x3, k=self.k) # (batch_size, 256, num_points) -> (batch_size, 256*2, num_points, k)
- # x = self.conv4(x) # (batch_size, 256*2, num_points, k) -> (batch_size, 512, num_points, k)
- x = self.layers7_0(x)
- x = self.layers7_1(x, training=training)
- x = self.layers7_2(x)
- # x4 = x.max(dim=-1, keepdim=False)[0] # (batch_size, 512, num_points, k) -> (batch_size, 512, num_points)
- x4 = tf.math.reduce_max(x, axis=-1)
-
- # x = torch.cat((x1, x2, x3, x4), dim=1) # (batch_size, 128+128+256+512, num_points)
- x = tf.keras.layers.concatenate((x1, x2, x3, x4), axis=1) # (batch_size, 128+128+256+512, num_points)
-
- # x = self.conv5(x) # (batch_size, 128+128+256+512, num_points) -> (batch_size, emb_dims, num_points)
- x = self.layers8_0(x)
- x = self.layers8_1(x, training=training)
- x = self.layers8_2(x)
- x1 = tf.keras.layers.MaxPooling1D(pool_size=x.shape[-1], data_format='channels_first')(x)
- x1 = tf.squeeze(x1, axis=-1) # (batch_size, emb_dims, num_points) -> (batch_size, emb_dims)
- x2 = tf.keras.layers.AveragePooling1D(pool_size=x.shape[-1], data_format='channels_first')(x)
- x2 = tf.squeeze(x2, axis=-1) # (batch_size, emb_dims, num_points) -> (batch_size, emb_dims)
- print("x1 adaptive_max_pool1d shape", x1.shape)
- print("x2 adaptive_avg_pool1d shape", x2.shape)
- x = tf.keras.layers.concatenate((x1, x2), 1) # (batch_size, emb_dims*2)
-
- # tf.keras.layers.LeakyReLU
- x = self.linear1(x)
- x = self.bn6(x, training=training)
- x = tf.keras.layers.LeakyReLU(alpha=0.2)(x) # (batch_size, emb_dims*2) -> (batch_size, 512)
- x = self.dp1(x, training=training)
- x = self.linear2(x)
- x = self.bn7(x, training=training)
- x = tf.keras.layers.LeakyReLU(alpha=0.2)(x) # (batch_size, 512) -> (batch_size, 256)
- x = self.dp2(x, training=training)
- x = self.linear3(x) # (batch_size, 256) -> (batch_size, output_channels)
- print("x final shape", x.shape)
-
- return x
-
-
- if __name__ == "__main__":
- # import torch  # only needed for the commented-out PyTorch comparison further below
- import argparse
- parser = argparse.ArgumentParser(description='Point Cloud Recognition')
- parser.add_argument('--num_points', type=int, default=1024,
- help='num of points to use')
- parser.add_argument('--dropout', type=float, default=0.5,
- help='initial dropout rate')
- parser.add_argument('--emb_dims', type=int, default=1024, metavar='N',
- help='Dimension of embeddings')
- parser.add_argument('--k', type=int, default=2, metavar='N',
- help='Num of nearest neighbors to use')
- parser.add_argument('--batch_size', type=int, default=32, metavar='batch_size',
- help='Size of batch')
- parser.add_argument('--hyper_times', type=int, default=1, metavar='N',
- help='The time of model size')
- parser.add_argument('--eval', action='store_true', default=False,
- help='evaluate the model')
-
- args = parser.parse_args()
-
- batch_size = 32
-
- x = tf.convert_to_tensor(np.random.rand(batch_size, 5, 1024))
- x = tf.cast(x, dtype=tf.float32)
- model = PointManifold_LLE(args)
- # model.build(input_shape=(batch_size, 5, 1024))
- # model.summary()
- # for v in model.trainable_variables:
- # print(v.name,v.shape,v.dtype)
- logits = model(x, training=False)
- print("LLE RESULT TF", logits.shape)
- print()
-
- # # input shape: (b, 3, n)
- # # output shape: (batch_size, output_channels)
- # x = tf.convert_to_tensor(np.random.rand(batch_size, 3, 1024))
- # x = tf.cast(x, dtype=tf.float32)
- # model = PointManifold_NNML(args)
- # model.build(input_shape=(batch_size, 3, 1024))
- # # model.summary()
- # # for v in model.trainable_variables:
- # # print(v.name,v.shape,v.dtype)
- # logits = model(x, training=False)
- # print("NNML RESULT TF", logits.shape)
-
-
- # x = np.random.randn(1024, 5) # n, d
- # x = tf.convert_to_tensor(x, dtype=tf.float32)
- # k = 20
- # out_dim = 3
- # get_lle = get_LLE(x, k, out_dim)
- # print("get_lle type", isinstance(get_lle, np.ndarray))
- # print("get_lle shape", get_lle.shape) # n, out_dim
- # print()
-
- # # # x (b, d, n)
- # # x = np.random.randn(32, 5, 1024) # b, d, n
- # # x = np.transpose(x, axes=(0,2,1))
- # # # x = tf.transpose(x, perm=[0,2,1]) # b, n, d
- # # print("b, n, d shape:", x.shape)
- # # k = 20
- # # out_dim = 3
- # # points_r = tf.convert_to_tensor([get_LLE(points, k, out_dim) for points in x])
- # # print("points_r shape", points_r.shape)
-
- # k = 5
- # # input x (b, d, n)
- # x = np.random.randn(32, 8, 16).astype(np.float32)
- # x = tf.convert_to_tensor(x, dtype=tf.float32)
- # samples = adaptive_sample(x, k) # (b, n, k)
- # print("samples shape", samples.shape)
-
- # # # input x (b, d, n)
- # x_pca = pca_points(x) # [32, 5, 16], (32, 5, 16) [b, k, n]
- # print("x_pca shape", x_pca.shape)
-
-
- # idx=None
- # dim9 = False
- # k = 2
- # # x = np.random.randint(1, 5, (2, 2, 3)).astype(np.int64) # 8, 128, 1024
- # x = np.array([[[1, 2], [4, 1]], [[3, 4], [1, 5]]], dtype=np.int64)
- # # print("x", x.tolist())
- # x = tf.convert_to_tensor(x, dtype=tf.int64)
-
- # x_new = get_graph_feature(x, k=2, idx=None, dim9=False)
- # print("x_new", x_new)
- # # x_new tf.Tensor(
- # # [[[[ 0 1]
- # # [ 0 -1]]
-
- # # [[ 0 -3]
- # # [ 0 3]]
-
- # # [[ 1 1]
- # # [ 2 2]]
-
- # # [[ 4 4]
- # # [ 1 1]]]
-
-
- # # [[[ 0 1]
- # # [ 0 -1]]
-
- # # [[ 0 4]
- # # [ 0 -4]]
-
- # # [[ 3 3]
- # # [ 4 4]]
-
- # # [[ 1 1]
- # # [ 5 5]]]], shape=(2, 4, 2, 2), dtype=int64)
-
-
- # # x shape: (2, 128, 1024)
- # # tf.reshape(x, shape=(batch_size, -1, num_points)) shape: (2, 128, 1024)
- # # 8, 128, 1024: batch_size, channels, num_points
- # batch_size = x.shape[0]
- # channels = x.shape[1]
- # num_points = x.shape[2]
- # # x = tf.reshape(x, shape=(batch_size, -1, num_points))
- # k = k if x.shape[-1] >= k else x.shape[-1] ## add
- # # print("x get_graph_feature shape", x.shape)
- # if idx is None:
- # if dim9 == False:
- # # print("x dim9 == False shape", x.shape)
- # idx = knn(x, k=k) # (batch_size, num_points, k)
- # else:
- # # print("x[:, 6:] dim9 == True shape", x[:, 6:].shape)
- # idx = knn(x[:, 6:], k=k)
- # # idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1)*num_points
- # idx_range = tf.range(start=0, limit=batch_size, dtype=tf.int32) # start, limit
- # idx_base = tf.reshape(idx_range, (-1, 1, 1))*num_points
- # idx = idx + idx_base
-
- # # idx = idx.view(-1)
- # # idx = tf.reshape(idx, -1)
- # idx = tf.reshape(idx, (-1, ))
- # # print("idx shape", idx.shape)
- # # _, num_dims, _ = x.size()
- # _, num_dims, _ = x.shape
- # assert num_dims == channels
- # ## batch_size, channels, num_points
- # # 8, 128, 1024: batch_size, channels, num_points
- # x = tf.transpose(x, perm=[0, 2, 1]) # (batch_size, num_points, num_dims) -> (batch_size*num_points, num_dims) # batch_size * num_points * k + range(0, batch_size*num_points)
- # print()
- # # (8, 1024, 128)
- # print("x shape is:", x.shape)
- # # (8192, 128)
- # print("tf.reshape(x, (batch_size*num_points, -1) shape", tf.reshape(x, (batch_size*num_points, -1)).shape)
- # print()
- # print("tf.reshape(x, (batch_size*num_points, num_dims)) shape", tf.reshape(x, (batch_size*num_points, num_dims)).shape)
- # print()
- # print(tf.reshape(x, (batch_size*num_points, -1)))
- # print(tf.reshape(x, (batch_size*num_points, num_dims)))
- # feature = tf.gather(params=tf.reshape(x, (batch_size*num_points, num_dims)), indices=idx)
- # print("feature shape", feature.shape)
- # # print("feature", feature)
- # print()
-
- # # idx = torch.from_numpy(idx.numpy()).to(torch.long)
- # # x = torch.from_numpy(x.numpy()).to(torch.long)
- # # feature_t = x.view(batch_size*num_points, -1)[idx, :]
- # # print("feature_t shape", feature_t.shape)
- # # print("feature_t", feature_t)
-
- # print()
- # print()
- # print()
- # x = np.random.randint(1, 20, (2, 3, 2)).astype(np.int64) # 8, 128, 1024
- # print("x ori", x)
- # # x = np.array([[[1, 2], [4, 1]], [[3, 4], [1, 5]]], dtype=np.int64)
- # x = tf.convert_to_tensor(x, dtype=tf.int64)
- # # 8, 128, 1024: batch_size, channels, num_points
- # batch_size = x.shape[0]
- # channels = x.shape[1]
- # num_points = x.shape[2]
- # xt = tf.transpose(x, perm=[0, 2, 1]) # (batch_size, num_points, num_dims)
- # print("xt", xt)
- # xr = tf.reshape(x, [2, 2, 3])
- # print("xr", xr)
- # # feature = x.view(batch_size*num_points, -1)[idx, :]
- # # feature = tf.gather(params=tf.reshape(x, (batch_size*num_points, num_dims)), indices=idx)