import time, os, sys, glob, argparse
# Must be set BEFORE tensorlayer is imported so TL picks the TensorFlow backend.
os.environ['TL_BACKEND'] = 'tensorflow'
import importlib
import numpy as np
import tensorlayer as tl
import h5py
import random
random.seed()  # NOTE(review): original comment claims this has no effect on the random.sample calls below
from trainer import Trainer
from pcc_model import PCCModel
from tensorlayer.dataflow import Dataset, Dataloader
from dataprocess.inout_points import points2voxels  # framework-independent helper

# Pin training to the first GPU.
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
class PCDataset(Dataset):
    """Map-style dataset yielding (voxel, voxel) autoencoder pairs.

    Each item is built from one .h5 file holding an xyz point cloud under
    the key 'data'; the points are voxelized into a 64^3 occupancy grid.
    """

    def __init__(self, files):
        # files: list of .h5 file paths to draw samples from.
        self.files = files

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        filedir = self.files[idx]
        # Fix: use a context manager so the HDF5 handle is released promptly —
        # the original left every file open, leaking handles across an epoch.
        with h5py.File(filedir, 'r') as f:
            points = f['data'][:].astype('int')  # dataset stores only xyz coordinates
        # Voxelize into a 64^3 grid; assumes result carries a leading batch
        # dim plus trailing channel dim — TODO confirm against points2voxels.
        x_np = points2voxels([points], 64).astype('float32')
        x = tl.convert_to_tensor(x_np, dtype=tl.float32)
        # Drop the leading dim, keeping (D, H, W, C) for the dataloader to batch.
        x = tl.ops.reshape(x, (x.shape[1], x.shape[2], x.shape[3], x.shape[4]))
        # Input and target are identical (autoencoder-style reconstruction).
        return x, x
-
def parse_args():
    """Parse command-line arguments controlling training.

    Returns:
        argparse.Namespace with dataset paths, loss weights, learning rate,
        epoch count, checkpoint locations and batch size.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument("--dataset", type=str,
                        default='/userhome/PCGCv1/training-data-set/points64_part1/')
    # Fix: default was the float 3.0e5 — argparse applies `type` only to
    # command-line strings, so the default leaked through as a float and had
    # to be re-cast by the caller. Use a true int (~300k files available).
    parser.add_argument("--dataset_num", type=int, default=300000)
    parser.add_argument(
        "--alpha", type=float, default=6, dest="alpha",  # previously 10
        help="weights for distortion.")
    parser.add_argument(
        "--beta", type=float, default=3., dest="beta",
        help="Weight for empty position.")
    parser.add_argument(
        "--gamma", type=float, default=1.0, dest="gamma",
        help="Weight for hyper likelihoods.")
    parser.add_argument(
        "--delta", type=float, default=1., dest="delta",
        help="Weight for latent likelihoods.")
    parser.add_argument(
        "--lr", type=float, default=2e-4, dest="lr",
        help="learning rate.")
    parser.add_argument("--epoch", type=int, default=35)  # previously 45
    parser.add_argument(
        "--prefix", type=str, default='hyper_tl1', dest="prefix",
        help="prefix of checkpoints/logger.")
    parser.add_argument(
        "--init_ckpt", type=str, default='/userhome/PCGCv1/tensorlayer2/ckpts/hyper_tl1/epoch_35.npz', dest="init_ckpt",
        help='initial checkpoint directory.')
    parser.add_argument(
        "--lower_bound", type=float, default=1e-9, dest="lower_bound",
        help="lower bound of scale. 1e-5 or 1e-9")
    # NOTE: 32 does not fit on a T4 GPU; use 16 there.
    parser.add_argument(
        "--batch_size", type=int, default=32, dest="batch_size",
        help='batch_size')

    args = parser.parse_args()

    return args
-
class TrainingConfig():
    """Container for training hyper-parameters and output locations.

    Creates the log and checkpoint directories on construction if they
    do not already exist.
    """

    def __init__(self, logdir, ckptdir, init_ckpt, alpha, beta, gamma, delta, lr):
        self.logdir = logdir
        self.ckptdir = ckptdir
        # Make sure both output directories exist before training starts.
        for directory in (self.logdir, self.ckptdir):
            if not os.path.exists(directory):
                os.makedirs(directory)
        self.init_ckpt = init_ckpt
        self.alpha = alpha
        self.beta = beta
        self.gamma = gamma    # weight of hyper prior.
        self.delta = delta    # weight of latent representation.
        self.lr = lr
-
# Script entry point: build config, model and trainer, then alternate
# training and evaluation for args.epoch epochs.
if __name__ == '__main__':
    args = parse_args()
    # 1/RATIO_EVAL of the files are held out for evaluation.
    RATIO_EVAL = 9

    training_config = TrainingConfig(
        logdir=os.path.join('./logs', args.prefix),
        ckptdir=os.path.join('./ckpts', args.prefix),  # where this run's checkpoints are saved
        init_ckpt=args.init_ckpt,  # checkpoint used to initialize the model
        alpha=args.alpha,
        beta=args.beta,
        gamma=args.gamma,
        delta=args.delta,
        lr=args.lr)
    # model
    model = PCCModel(lower_bound=args.lower_bound)
    # This step is required: one forward pass fills in parameters that were
    # left unset (e.g. in_channels) before build is executed.
    model.init_build(tl.layers.Input(shape=(1, 64, 64, 64, 1)))

    # trainer
    train_dataloader_len = 1000
    test_dataloader_len = 12
    trainer = Trainer(config=training_config, model=model, train_dataloader_len=train_dataloader_len, test_dataloader_len=test_dataloader_len)

    # dataset: sorted for determinism, truncated to dataset_num files.
    filedirs = sorted(glob.glob(args.dataset+'*.h5'))[:int(args.dataset_num)]
    train_dir = filedirs[len(filedirs)//RATIO_EVAL:]
    test_dir = filedirs[:len(filedirs)//RATIO_EVAL]
    # training
    for epoch in range(0, args.epoch):
        # Every 3rd epoch after epoch 5, rebuild the model and halve the lr
        # (floored at 1e-5), resuming from the configured checkpoint.
        if epoch>5 and trainer.config.lr > 1e-5 and epoch%3==0:
            model = PCCModel(lower_bound=args.lower_bound)
            model.init_build(tl.layers.Input(shape=(1, 64, 64, 64, 1)))
            # NOTE(review): reset()'s return value is assigned onto
            # trainer.train_one_step — verify this is the intended API use.
            trainer.train_one_step = trainer.reset(model,trainer.config.init_ckpt,max(trainer.config.lr/2.0, 1e-5)) #update lr
 
        # 1000 iterations per epoch; resampled every epoch so samples stay
        # evenly distributed across the dataset (occasional repeats possible).
        train_list = random.sample(train_dir, train_dataloader_len*args.batch_size)
        train_dataset = PCDataset(train_list)
        train_dataset = tl.dataflow.FromGenerator(train_dataset,output_types=(tl.float32,tl.float32))
        train_dataloader = Dataloader(train_dataset, batch_size=args.batch_size)
        trainer.train(train_dataloader)

        # Evaluate on a fresh random subset of the held-out files.
        eval_list = random.sample(test_dir, test_dataloader_len*args.batch_size)
        test_dataset = PCDataset(eval_list)
        test_dataset = tl.dataflow.FromGenerator(test_dataset,output_types=(tl.float32,tl.float32))
        test_dataloader = Dataloader(test_dataset, batch_size=args.batch_size)
        trainer.test(test_dataloader, 'Test')
|