A Detailed Walkthrough of the PointNet Semantic Segmentation Inference Code
If you have not yet worked out how the inference code fits together, read the annotated code below patiently; it should help you sort out the whole flow.
The following files are involved:
meta
all_data_list: lists all the training data contained in the dataset, stored as files ending in .npy;
anno_path: the relative paths where the corresponding files are stored;
area6_data_label: the list of data files used when validating the trained model;
class_name: the 13 semantic classes contained in the dataset;
batch_inference.py: runs the segmentation prediction and exports the results
collect_indoor3d_data.py: generates the .npy files
indoor3d_util.py: the utility functions written by the authors
batch_inference.py
Only the key pieces of code are shown and explained below.
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MODEL_PATH = FLAGS.model_path
GPU_INDEX = FLAGS.gpu
DUMP_DIR = FLAGS.dump_dir
if not os.path.exists(DUMP_DIR): os.mkdir(DUMP_DIR)
LOG_FOUT = open(os.path.join(DUMP_DIR, 'log_evaluate.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
DIR = os.path.join(ROOT_DIR,'data/npy')
"""我⾃⼰创建的⽂件夹,提供collect_indoor3d_data.py⽣成的npy⽂件存储路径"""
ROOM_PATH_LIST =[os.path.join(DIR,line.rstrip())for line in _data_filelist)]
"""room_data_filelist为数据集npy按顺序排列,将其⽂件路径⼀⼀列出"""
NUM_CLASSES =13gaim
"""13类分类"""
These lines define the global variables: the dump folder is created under sem_seg, a log file is created inside it, and all of the FLAGS are written into that log file.
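The log_string() helper that shows up later (for "Model restored." and the final accuracy line) is not reproduced in this excerpt; it is a small wrapper around LOG_FOUT. A minimal sketch, following the pattern used throughout the PointNet code:
def log_string(out_str):
    # Append the message to the evaluation log and echo it to the console.
    LOG_FOUT.write(out_str + '\n')
    LOG_FOUT.flush()
    print(out_str)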
def evaluate():
is_training = False
with tf.device('/gpu:'+str(GPU_INDEX)):
pointclouds_pl, labels_pl = placeholder_inputs(BATCH_SIZE, NUM_POINT)
is_training_pl = tf.placeholder(tf.bool, shape=())
"""定义输⼊输出占位符 """
# simple model
pred = get_model(pointclouds_pl, is_training_pl)
""" 模型训练 """
loss = get_loss(pred, labels_pl)
""" 计算损失"""
pred_softmax = tf.nn.softmax(pred)
bolton""" 将预测结果归⼀化"""
#tf.nn.softmax()就是把⼀个N*1的向量归⼀化为(0,1)之间的值
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
""" 设置保存模型"""
This builds the graph of the whole network pipeline; nothing unusual here.
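As a quick illustration of what pred_softmax produces (not part of the script itself), here is a tiny NumPy example with made-up per-class scores for a single point:
import numpy as np

scores = np.array([2.0, 1.0, 0.1])                 # made-up raw scores for 3 classes
probs = np.exp(scores) / np.sum(np.exp(scores))    # softmax computed by hand
print(probs)             # approximately [0.659 0.242 0.099] - each in (0, 1), summing to 1
print(np.argmax(probs))  # 0 - the predicted class index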
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth =True
config.allow_soft_placement =True
config.log_device_placement =True
sess = tf.Session(config=config)
# Restore variables from disk.
saver.restore(sess, MODEL_PATH)
log_string("Model restored.")
Create the session and restore the trained model, so everything is ready for prediction.
ops ={'pointclouds_pl': pointclouds_pl,
'labels_pl': labels_pl,
'is_training_pl': is_training_pl,
'pred': pred,
'pred_softmax': pred_softmax,
'loss': loss}
total_correct =0
""" 总正确数"""
相信用英语怎么说total_en =0
""" 总个数 """
构建需要⽤到的输⼊的字典
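For context, inside eval_one_epoch this ops dictionary is combined with a feed_dict and run through the session roughly as follows; this is a sketch of the usual pattern (start_idx and end_idx are the boundaries of the current batch of blocks), and the exact batch slicing in the full script may differ slightly:
feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
             ops['labels_pl']: current_label[start_idx:end_idx],
             ops['is_training_pl']: is_training}
loss_val, pred_val = sess.run([ops['loss'], ops['pred_softmax']], feed_dict=feed_dict)
pred_label = np.argmax(pred_val, 2)   # (BATCH_SIZE, NUM_POINT) predicted class per point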
for room_path in ROOM_PATH_LIST:
"""将272个房间npy⽂件路径⼀⼀进⾏预测"""
out_data_label_filename = os.path.baname(room_path)[:-4]+'_'
"""⽣成存储预测对应房间名称的点云数据的空⽂本"""
out_data_label_filename = os.path.join(DUMP_DIR, out_data_label_filename) """ 添加存放路径"""
out_gt_label_filename = os.path.basename(room_path)[:-4]+'_gt.txt'
"""Name of the text file that will hold the ground-truth point-cloud data for this room"""
out_gt_label_filename = os.path.join(DUMP_DIR, out_gt_label_filename)
"""添加存储路径"""
print(room_path, out_data_label_filename)
a, b = eval_one_epoch(sess, ops, room_path, out_data_label_filename, out_gt_label_filename)
""" Evaluate a single room """
total_correct += a
total_seen += b
fout_out_filelist.write(out_data_label_filename+'\n')
fout_out_filelist.close()
log_string('all room eval accuracy: %f'%(total_correct / float(total_seen)))
Each room file is predicted in turn, and the overall accuracy over all rooms is logged at the end.
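Note that fout_out_filelist is never opened in the excerpt above. In the full batch_inference.py it is a plain text file collecting one prediction-output path per room, opened before the loop roughly like this (the flag name output_filelist follows the PointNet repo and is an assumption here):
# Opened before the room loop (not shown in the excerpt above).
fout_out_filelist = open(FLAGS.output_filelist, 'w')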
def eval_one_epoch(sess, ops, room_path, out_data_label_filename, out_gt_label_filename):
error_cnt =0
is_training = False
total_correct =0
total_seen = 0
loss_sum =0
total_en_class =[0for _ in range(NUM_CLASSES)]
""" 创建13个元素的列表,⽤于存放总的每个类的个数 """
total_correct_class =[0for _ in range(NUM_CLASSES)]
""" 创建13个元素的列表,存放总的每个类的正确个数"""
if FLAGS.visu:
fout = open(os.path.join(DUMP_DIR, os.path.basename(room_path)[:-4]+'_pred.obj'),'w')
fout_gt = open(os.path.join(DUMP_DIR, os.path.basename(room_path)[:-4]+'_gt.obj'),'w')
fout_data_label = open(out_data_label_filename,'w')
""" The _pred.txt file, used to write the predicted point-cloud data """
fout_gt_label = open(out_gt_label_filename,'w')
""" The _gt.txt file, used to write the ground-truth point-cloud data """
current_data, current_label = room2blocks_wrapper_normalized(room_path, NUM_POINT)
"""
Split the room's point cloud into blocks:
current_data holds the point-cloud data of every block,
current_label holds the per-point labels of every block"""
current_data = current_data[:,0:NUM_POINT,:]
current_label = np.squeeze(current_label)
# Get room dimensions
data_label = np.load(room_path)
data = data_label[:,0:6]
max_room_x =max(data[:,0])
max_room_y =max(data[:,1])
max_room_z =max(data[:,2])
file_size = current_data.shape[0]
num_batches = file_size // BATCH_SIZE
print(file_size)
Everything above is data preprocessing. Let's now step into the following function and look in detail at how current_data and current_label are produced:
current_data, current_label = room2blocks_wrapper_normalized(room_path, NUM_POINT)
room2blocks_wrapper_normalized(): extracts the point-cloud information from the .npy file
def room2blocks_wrapper_normalized(data_label_filename, num_point, block_size=1.0, stride=1.0,
random_sample=False, sample_num=None, sample_aug=1):
if data_label_filename[-3:]=='txt':
data_label = np.loadtxt(data_label_filename)
elif data_label_filename[-3:]=='npy':
data_label = np.load(data_label_filename)
""" 将房间⾥的所有点信息读取出,包含xyzRGBL,命为data_label"""
else:
print('Unknown file type! exiting.')
exit()
return room2blocks_plus_normalized(data_label, num_point, block_size, stride,
random_sample, sample_num, sample_aug)
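As a quick sanity check, a hypothetical call to the wrapper on one room file would return arrays with shapes like the following (the file name and the block count of 30 are made-up examples):
current_data, current_label = room2blocks_wrapper_normalized('Area_6_office_1.npy', 4096)
print(current_data.shape)    # e.g. (30, 4096, 9) - 30 blocks, 4096 points each, 9 channels
print(current_label.shape)   # e.g. (30, 4096)    - one class label per point (np.squeeze above removes any extra axis)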
After all the point-cloud information of a single room has been read in, it goes into room2blocks_plus_normalized() for further processing.
room2blocks_plus_normalized():
def room2blocks_plus_normalized(data_label, num_point, block_size, stride,
random_sample, sample_num, sample_aug):
"""
具有输⼊⽂件名和RGB预处理。对于每个块集中XYZ,将归⼀化XYZ添加为678通道
"""
data = data_label[:,0:6]
"""
将所有点的XYZRGB读出,data为列表
"""
光棍节的歌
data[:,3:6]/=255.0
"""
将RGB进⾏归⼀化
"""
label = data_label[:,-1].astype(np.uint8)
"""
婚庆培训
提取出所有label
"""
max_room_x =max(data[:,0])
max_room_y =max(data[:,1])
max_room_z =max(data[:,2])
"""
将最⼤的xyz取出,进⾏xyz归⼀化做准备
"""
data_batch, label_batch = room2blocks(data, label, num_point, block_size, stride,
random_sample, sample_num, sample_aug)
"""
data_batch包含了所有块的点云信息,包含xyzrgb
label_batch包含了所有块的点云标签
"""
new_data_batch = np.zeros((data_batch.shape[0], num_point,9))
"""
创建个(30,4096,9),将所有信息都填⼊新的数组中
"""
for b in range(data_batch.shape[0]):
"""
⼀共有30个块,每个块进⾏读取
"""
new_data_batch[b,:,6]= data_batch[b,:,0]/max_room_x
new_data_batch[b,:,7]= data_batch[b,:,1]/max_room_y
new_data_batch[b,:,8]= data_batch[b,:,2]/max_room_z
"""
将房间的归⼀化坐标填⼊678
"""
minx =min(data_batch[b,:,0])
"""
最⼩x坐标
"""
miny =min(data_batch[b,:,1])
"""
最⼩y坐标
"""
data_batch[b,:,0]-=(minx+block_size/2)
data_batch[b,:,1]-=(miny+block_size/2)
new_data_batch[:,:,0:6]= data_batch
return new_data_batch, label_batch
data holds all the XYZRGB information of a single room, with RGB normalized. room2blocks() splits the point cloud into blocks, producing data_batch (the XYZRGB of the 4096 points in each of the 30 blocks) and label_batch (the label of every point in each block). A new array new_data_batch of shape (30, 4096, 9) is then created; the XYZ coordinates divided by the room's maximum XYZ are written into channels 6-8 as normalized coordinates, and the block-centered XYZRGB fills channels 0-5.
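To make the 9-channel layout concrete, here is a tiny self-contained NumPy example that repeats the same construction on one made-up block of 4 points (all numbers are invented for illustration):
import numpy as np

# One made-up block: columns are x, y, z, R, G, B (RGB already scaled to [0, 1]).
block = np.array([[0.2, 3.1, 0.0, 0.5, 0.5, 0.5],
                  [0.9, 3.8, 1.2, 0.1, 0.2, 0.3],
                  [0.4, 3.3, 2.0, 0.9, 0.8, 0.7],
                  [0.7, 3.6, 0.5, 0.4, 0.4, 0.4]])
max_room_x, max_room_y, max_room_z = 10.0, 8.0, 3.0    # made-up room extents
block_size = 1.0

new_block = np.zeros((block.shape[0], 9))
# Channels 6-8: coordinates normalized by the room extents.
new_block[:, 6] = block[:, 0] / max_room_x
new_block[:, 7] = block[:, 1] / max_room_y
new_block[:, 8] = block[:, 2] / max_room_z
# Channels 0-5: XYZRGB, with x and y re-centered on the block center.
block[:, 0] -= (block[:, 0].min() + block_size / 2)
block[:, 1] -= (block[:, 1].min() + block_size / 2)
new_block[:, 0:6] = block
print(new_block.shape)    # (4, 9)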
room2blocks(): splits data into 1 m x 1 m blocks
def room2blocks(data, label, num_point, block_size=1.0, stride=1.0,
random_sample=Fal, sample_num=None, sample_aug=1):
assert(stride <= block_size)
# np.amax(data, 0) takes the column-wise maximum
limit = np.amax(data,0)[0:3]
"""
Take the maximum x, y, z coordinates, in order to work out how many blocks there are in total
"""
# Get the corner location for our sampling blocks
xbeg_list =[]
ybeg_list =[]
if not random_sample:
num_block_x = int(np.ceil((limit[0] - block_size) / stride)) + 1
"""
Number of blocks along the x direction (6 in this example)
"""
num_block_y = int(np.ceil((limit[1] - block_size) / stride)) + 1
"""
Number of blocks along the y direction (5 in this example)
"""
for i in range(num_block_x):
"""先定x坐标,按y⽅向逐个移动分块"""
for j in range(num_block_y):
xbeg_list.append(i*stride)
ybeg_list.append(j*stride)
"""
将块索引填⼊列表中
"""
This sweep can be illustrated with the figure below (diagram not reproduced here).
First x is fixed at 0 and blocks are laid out one after another along the y direction; once that column is done, the x coordinate is advanced by one stride and the y sweep repeats, until the whole room has been divided into blocks. Let's continue with the code next.
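A small runnable sketch of that corner-generation logic, using made-up room extents chosen so that num_block_x = 6 and num_block_y = 5 as in the example above:
import numpy as np

limit = np.array([5.3, 4.6, 3.0])    # made-up room extent (max x, y, z)
block_size, stride = 1.0, 1.0

num_block_x = int(np.ceil((limit[0] - block_size) / stride)) + 1   # ceil(4.3) + 1 = 6
num_block_y = int(np.ceil((limit[1] - block_size) / stride)) + 1   # ceil(3.6) + 1 = 5

xbeg_list, ybeg_list = [], []
for i in range(num_block_x):          # fix an x position ...
    for j in range(num_block_y):      # ... then sweep block by block along y
        xbeg_list.append(i * stride)
        ybeg_list.append(j * stride)

print(list(zip(xbeg_list, ybeg_list))[:6])
# [(0.0, 0.0), (0.0, 1.0), (0.0, 2.0), (0.0, 3.0), (0.0, 4.0), (1.0, 0.0)] - 30 corners in total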