This article collects typical usage examples of the Python method tf_util.conv1d. If you are unsure what tf_util.conv1d does, how to call it, or where to find concrete usage, the curated code samples below may help. You can also explore further examples from the tf_util module itself.
The following 15 code examples of tf_util.conv1d are listed, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
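Before the examples, here is a minimal, self-contained sketch of how tf_util.conv1d is typically invoked. It assumes the PointNet-style tf_util.py helper (a thin wrapper around a 1-D convolution with optional batch normalization, used with kernel size 1 as a shared per-point fully connected layer); the shapes, scope names and bn_decay value are illustrative assumptions, not values from any specific example below.

import tensorflow as tf
import tf_util

# B x N x C per-point features: 8 clouds, 1024 points, 128 channels (assumed shapes)
points = tf.placeholder(tf.float32, shape=(8, 1024, 128))
is_training = tf.placeholder(tf.bool, shape=())

# 1x1 convolution with batch norm + ReLU, followed by a linear per-point classifier
net = tf_util.conv1d(points, 64, 1, padding='VALID', bn=True,
                     is_training=is_training, scope='demo_fc1', bn_decay=0.9)
logits = tf_util.conv1d(net, 13, 1, padding='VALID', activation_fn=None, scope='demo_fc2')
# logits: 8 x 1024 x 13 per-point scores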
Example 1: get_model
# Required import: import tf_util [as alias]
# Or: from tf_util import conv1d [as alias]
def get_model(point_cloud, is_training, bn_decay=None):
""" Part segmentation PointNet, input is BxNx6 (XYZ NormalX NormalY NormalZ), output Bx50 """
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
end_points = {}
l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
l0_points = tf.slice(point_cloud, [0,0,3], [-1,-1,3])
# Set Abstraction layers
l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=64, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')
# Feature Propagation layers
l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer1')
l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer2')
l0_points = pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([l0_xyz,l0_points],axis=-1), l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer3')
# FC layers
net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
end_points['feats'] = net
net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
net = tf_util.conv1d(net, 50, 1, padding='VALID', activation_fn=None, scope='fc2')
return net, end_points
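A hypothetical graph-construction snippet for Example 1 follows; the batch size, point count and bn_decay value are assumptions, and the original file's module-level imports (tensorflow as tf, tf_util, pointnet_sa_module, pointnet_fp_module) are presumed to be in scope.

with tf.Graph().as_default():
    pc_pl = tf.placeholder(tf.float32, shape=(16, 2048, 6))   # XYZ + normals
    is_training_pl = tf.placeholder(tf.bool, shape=())
    pred, end_points = get_model(pc_pl, is_training_pl, bn_decay=0.9)
    # pred: 16 x 2048 x 50 per-point part logits
    # end_points['feats']: 16 x 2048 x 128 intermediate per-point features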
Example 2: get_model
# Required import: import tf_util [as alias]
# Or: from tf_util import conv1d [as alias]
def get_model(point_cloud, chained_flowed, num_frames, is_training, bn_decay=None):
""" input is BxNx3, output Bxnum_class """
end_points = {}
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value // num_frames
l0_xyz = point_cloud[:, :, 0:3]
l0_xyz_flowed = chained_flowed
l0_time = tf.concat([tf.ones([batch_size, num_point, 1]) * i for i in range(num_frames)], \
axis=-2)
l0_points = tf.concat([point_cloud[:, :, 3:], l0_time], axis=-1)
RADIUS1 = np.array([0.98, 0.99, 1.0], dtype='float32')
RADIUS2 = RADIUS1 * 2
RADIUS3 = RADIUS1 * 4
RADIUS4 = RADIUS1 * 8
l1_xyz, l1_xyz_flowed, l1_time, l1_points, l1_indices = meteor_chained_flow_module(l0_xyz, l0_xyz_flowed, l0_time, l0_points, npoint=2048, radius=RADIUS1, nsample=32*num_frames, mlp=[32,32,128], mlp2=None, group_all=False, knn=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
l2_xyz, l2_time, l2_points, l2_indices = meteor_direct_module(l1_xyz, l1_time, l1_points, npoint=512, radius=RADIUS2, nsample=32, mlp=[64,64,256], mlp2=None, group_all=False, knn=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
l3_xyz, l3_time, l3_points, l3_indices = meteor_direct_module(l2_xyz, l2_time, l2_points, npoint=128, radius=RADIUS3, nsample=32, mlp=[128,128,512], mlp2=None, group_all=False, knn=False, is_training=is_training, bn_decay=bn_decay, scope='layer3')
l4_xyz, l4_time, l4_points, l4_indices = meteor_direct_module(l3_xyz, l3_time, l3_points, npoint=64, radius=RADIUS4, nsample=32, mlp=[256,256,1024], mlp2=None, group_all=False, knn=False, is_training=is_training, bn_decay=bn_decay, scope='layer4')
# Feature Propagation layers
l3_points = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256,256], is_training, bn_decay, scope='fa_layer1')
l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer2')
l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer3')
l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128,128], is_training, bn_decay, scope='fa_layer4')
##### debug
net = tf_util.conv1d(l0_points, 12, 1, padding='VALID', activation_fn=None, scope='fc2')
return net, end_points
Example 3: get_model
# Required import: import tf_util [as alias]
# Or: from tf_util import conv1d [as alias]
def get_model(point_cloud, num_frames, is_training, bn_decay=None):
""" Semantic segmentation PointNet, input is BxNx3, output Bxnum_class """
end_points = {}
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value // num_frames
l0_xyz = point_cloud[:, :, 0:3]
l0_time = tf.concat([tf.ones([batch_size, num_point, 1]) * i for i in range(num_frames)], \
axis=-2)
l0_points = tf.concat([point_cloud[:, :, 3:], l0_time], axis=-1)
RADIUS1 = np.array([0.98, 0.99, 1.0], dtype='float32')
RADIUS2 = RADIUS1 * 2
RADIUS3 = RADIUS1 * 4
RADIUS4 = RADIUS1 * 8
l1_xyz, l1_time, l1_points, l1_indices = meteor_direct_module(l0_xyz, l0_time, l0_points, npoint=2048, radius=RADIUS1, nsample=32, mlp=[32,32,128], mlp2=None, group_all=False, knn=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
l2_xyz, l2_time, l2_points, l2_indices = meteor_direct_module(l1_xyz, l1_time, l1_points, npoint=512, radius=RADIUS2, nsample=32, mlp=[64,64,256], mlp2=None, group_all=False, knn=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
l3_xyz, l3_time, l3_points, l3_indices = meteor_direct_module(l2_xyz, l2_time, l2_points, npoint=128, radius=RADIUS3, nsample=32, mlp=[128,128,512], mlp2=None, group_all=False, knn=False, is_training=is_training, bn_decay=bn_decay, scope='layer3')
l4_xyz, l4_time, l4_points, l4_indices = meteor_direct_module(l3_xyz, l3_time, l3_points, npoint=64, radius=RADIUS4, nsample=32, mlp=[256,256,1024], mlp2=None, group_all=False, knn=False, is_training=is_training, bn_decay=bn_decay, scope='layer4')
# Feature Propagation layers
l3_points = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256,256], is_training, bn_decay, scope='fa_layer1')
l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer2')
l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer3')
l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128,128], is_training, bn_decay, scope='fa_layer4')
##### debug
net = tf_util.conv1d(l0_points, 12, 1, padding='VALID', activation_fn=None, scope='fc2')
return net, end_points
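A hypothetical call to the multi-frame (MeteorNet-style) model in Example 3; the frame count, per-frame point count and channel width are assumptions, and the original file's imports (tensorflow as tf, numpy as np, meteor_direct_module, pointnet_fp_module, tf_util) are presumed available.

num_frames = 3
pc_pl = tf.placeholder(tf.float32, shape=(4, 2048 * num_frames, 6))   # XYZ + 3 extra channels per point
is_training_pl = tf.placeholder(tf.bool, shape=())
pred, end_points = get_model(pc_pl, num_frames, is_training_pl, bn_decay=0.9)
# pred: 4 x (2048 * num_frames) x 12 per-point logits across all frames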
Example 4: fea_trans_net
# Required import: import tf_util [as alias]
# Or: from tf_util import conv1d [as alias]
def fea_trans_net(input_fea, mlp_list, scope, is_training, bn_decay):
with tf.variable_scope(scope) as myscope:
net = input_fea
nlayer = len(mlp_list)
for i,num_out_channel in enumerate(mlp_list):
if i<nlayer-1:
net = tf_util.conv1d(net, num_out_channel, 1, padding='VALID', bn=True, is_training=is_training,
scope='conv%d'%i, bn_decay=bn_decay)
else:
net = tf_util.conv1d(net, num_out_channel, 1, padding='VALID', activation_fn=None, scope='conv%d'%i)
return net
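A hypothetical call to fea_trans_net above; the input shape and mlp_list widths are assumptions. Note that only the last layer is linear (no batch norm or activation).

feats = tf.placeholder(tf.float32, shape=(8, 1024, 64))   # B x N x C point features
is_training_pl = tf.placeholder(tf.bool, shape=())
out = fea_trans_net(feats, mlp_list=[128, 128, 32], scope='fea_trans',
                    is_training=is_training_pl, bn_decay=None)
# out: 8 x 1024 x 32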
Example 5: classification_head
# Required import: import tf_util [as alias]
# Or: from tf_util import conv1d [as alias]
def classification_head(pc, pc_fea, num_category, mlp_list, mlp_list2, is_training, bn_decay, scope, bn=True):
''' Classification head for both class id prediction and bbox delta regression
Inputs:
pc: [B, NUM_ROIS, NUM_POINT_PER_ROI, 3]
pc_fea: [B, NUM_ROIS, NUM_POINT_PER_ROI, NFEA]
num_category: scalar
Returns:
logits: [B, NUM_ROIS, NUM_CATEGORY]
probs: [B, NUM_ROIS, NUM_CATEGORY]
bbox_deltas: [B, NUM_ROIS, NUM_CATEGORY, (dz, dy, dx, log(dh), log(dw), log(dl))]
'''
with tf.variable_scope(scope) as myscope:
num_rois = pc.get_shape()[1].value
grouped_points = tf.concat((pc_fea, pc), -1)
for i,num_out_channel in enumerate(mlp_list):
grouped_points = tf_util.conv2d(grouped_points, num_out_channel, [1, 1],
padding='VALID', stride=[1,1], bn=bn, is_training=is_training,
scope='conv_prev_%d'%i, bn_decay=bn_decay)
new_points = tf.reduce_max(grouped_points, axis=2)
for i,num_out_channel in enumerate(mlp_list2):
new_points = tf_util.conv1d(new_points, num_out_channel, 1,
padding='VALID', stride=1, bn=bn, is_training=is_training,
scope='conv_post_%d'%i, bn_decay=bn_decay)
logits = tf_util.conv1d(new_points, num_category, 1, padding='VALID',
stride=1, scope='conv_classify', activation_fn=None)
probs = tf.nn.softmax(logits, 2)
bbox_deltas = tf_util.conv1d(new_points, num_category*6, 1, padding='VALID',
stride=1, scope='conv_bbox_regress', activation_fn=None)
bbox_deltas = tf.reshape(bbox_deltas, [-1, num_rois, num_category, 6])
return logits, probs, bbox_deltas
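A hypothetical call to classification_head; the ROI count, points per ROI, feature width, number of categories and MLP widths are assumptions chosen only to make the documented shapes concrete.

pc = tf.placeholder(tf.float32, shape=(4, 32, 512, 3))        # B x NUM_ROIS x NUM_POINT_PER_ROI x 3
pc_fea = tf.placeholder(tf.float32, shape=(4, 32, 512, 128))  # per-point ROI features
is_training_pl = tf.placeholder(tf.bool, shape=())
logits, probs, bbox_deltas = classification_head(
    pc, pc_fea, num_category=10, mlp_list=[128, 128], mlp_list2=[256, 256],
    is_training=is_training_pl, bn_decay=None, scope='cls_head')
# logits, probs: 4 x 32 x 10; bbox_deltas: 4 x 32 x 10 x 6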
Example 6: get_model
# Required import: import tf_util [as alias]
# Or: from tf_util import conv1d [as alias]
def get_model(point_cloud, is_training, num_class, bn_decay=None):
""" Semantic segmentation PointNet, input is BxNx3, output Bxnum_class """
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
end_points = {}
l0_xyz = point_cloud
l0_points = None
end_points['l0_xyz'] = l0_xyz
# Layer 1
l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=1024, radius=0.1, nsample=32, mlp=[32,32,64], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=256, radius=0.2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=64, radius=0.4, nsample=32, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer3')
l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz, l3_points, npoint=16, radius=0.8, nsample=32, mlp=[256,256,512], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer4')
# Feature Propagation layers
l3_points = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256,256], is_training, bn_decay, scope='fa_layer1')
l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer2')
l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer3')
l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer4')
# FC layers
net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
end_points['feats'] = net
net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
net = tf_util.conv1d(net, num_class, 1, padding='VALID', activation_fn=None, scope='fc2')
return net, end_points
Example 7: get_model
# Required import: import tf_util [as alias]
# Or: from tf_util import conv1d [as alias]
def get_model(point_cloud, is_training, num_class, bn_decay=None):
""" Semantic segmentation PointNet, input is BxNx4, output Bxnum_class """
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
end_points = {}
l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
l0_points = tf.slice(point_cloud, [0,0,3], [-1,-1,1])
end_points['l0_xyz'] = l0_xyz
# Layer 1
l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=1024, radius=0.1, nsample=32, mlp=[32,32,64], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=256, radius=0.2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=64, radius=0.4, nsample=32, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer3')
l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz, l3_points, npoint=16, radius=0.8, nsample=32, mlp=[256,256,512], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer4')
# Feature Propagation layers
l3_points = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256,256], is_training, bn_decay, scope='fa_layer1')
l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer2')
l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer3')
l0_points = pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([l0_xyz,l0_points],axis=-1), l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer4')
# FC layers
net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
end_points['feats'] = net
net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
net = tf_util.conv1d(net, num_class, 1, padding='VALID', activation_fn=None, scope='fc2')
return net, end_points
Example 8: get_model
# Required import: import tf_util [as alias]
# Or: from tf_util import conv1d [as alias]
def get_model(point_cloud, cls_label, is_training, bn_decay=None):
""" Classification PointNet, input is BxNx3, output Bx40 """
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
end_points = {}
l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
l0_points = tf.slice(point_cloud, [0,0,3], [-1,-1,3])
# Set abstraction layers
l1_xyz, l1_points = pointnet_sa_module_msg(l0_xyz, l0_points, 512, [0.1,0.2,0.4], [32,64,128], [[32,32,64], [64,64,128], [64,96,128]], is_training, bn_decay, scope='layer1')
l2_xyz, l2_points = pointnet_sa_module_msg(l1_xyz, l1_points, 128, [0.4,0.8], [64,128], [[128,128,256],[128,196,256]], is_training, bn_decay, scope='layer2')
l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')
# Feature propagation layers
l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer1')
l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer2')
cls_label_one_hot = tf.one_hot(cls_label, depth=NUM_CATEGORIES, on_value=1.0, off_value=0.0)
cls_label_one_hot = tf.reshape(cls_label_one_hot, [batch_size, 1, NUM_CATEGORIES])
cls_label_one_hot = tf.tile(cls_label_one_hot, [1,num_point,1])
l0_points = pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([cls_label_one_hot, l0_xyz, l0_points],axis=-1), l1_points, [128,128], is_training, bn_decay, scope='fp_layer3')
# FC layers
net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
end_points['feats'] = net
net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
net = tf_util.conv1d(net, 50, 1, padding='VALID', activation_fn=None, scope='fc2')
return net, end_points
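A hypothetical usage of Example 8, which additionally takes a per-shape category label that is one-hot encoded and tiled across all points; NUM_CATEGORIES is a module-level constant in the original file, and the batch size and point count here are assumptions.

pc_pl = tf.placeholder(tf.float32, shape=(8, 2048, 6))   # XYZ + normals
cls_pl = tf.placeholder(tf.int32, shape=(8,))            # one shape-category id per cloud
is_training_pl = tf.placeholder(tf.bool, shape=())
pred, end_points = get_model(pc_pl, cls_pl, is_training_pl, bn_decay=0.9)
# pred: 8 x 2048 x 50 per-point part logits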
Example 9: get_model
# Required import: import tf_util [as alias]
# Or: from tf_util import conv1d [as alias]
def get_model(point_cloud, is_training, num_class, bn_decay=None):
""" Semantic segmentation PointNet, input is BxNx5, output Bxnum_class """
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
end_points = {}
l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
l0_points = tf.slice(point_cloud, [0,0,3], [-1,-1,2])
end_points['l0_xyz'] = l0_xyz
# Layer 1
l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=1024, radius=0.1, nsample=32, mlp=[32,32,64], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=256, radius=0.2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=64, radius=0.4, nsample=32, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer3')
l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz, l3_points, npoint=16, radius=0.8, nsample=32, mlp=[256,256,512], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer4')
# Feature Propagation layers
l3_points = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256,256], is_training, bn_decay, scope='fa_layer1')
l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer2')
l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer3')
l0_points = pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([l0_xyz,l0_points],axis=-1), l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer4')
# FC layers
net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
end_points['feats'] = net
net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
net = tf_util.conv1d(net, num_class, 1, padding='VALID', activation_fn=None, scope='fc2')
return net, end_points
Example 10: get_model
# Required import: import tf_util [as alias]
# Or: from tf_util import conv1d [as alias]
def get_model(point_cloud, is_training, bn_decay=None, num_class = NUM_CLASSES):
""" Part segmentation PointNet, input is BxNx3 (XYZ) """
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
end_points = {}
l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
l0_points = None
# Set Abstraction layers
l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=64, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')
###########SEGMENTATION BRANCH
# Feature Propagation layers
l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer1')
l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer2')
l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer3')
# FC layers
net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='seg_fc1', bn_decay=bn_decay)
end_points['feats'] = net
net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='seg_dp1')
seg_pred = tf_util.conv1d(net, num_class, 1, padding='VALID', activation_fn=None, scope='seg_fc2')
return seg_pred
Example 11: corrsfea_extractor
# Required import: import tf_util [as alias]
# Or: from tf_util import conv1d [as alias]
def corrsfea_extractor(xyz, is_training, bn_decay, scopename, reuse, nfea=64):
############################
# input
# xyz: (B x N x 3)
# output
# corrsfea: (B x N x nfea)
############################
num_point = xyz.get_shape()[1].value
l0_xyz = xyz
l0_points = l0_xyz
with tf.variable_scope(scopename) as myscope:
if reuse:
myscope.reuse_variables()
# Set Abstraction layers
l1_xyz, l1_points, l1_indices = pointnet_sa_module_msg(l0_xyz, l0_points, 256, [0.1,0.2], [64,64], [[64,64],[64,64],[64,128]], is_training, bn_decay, scope='corrs_layer1')
l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='corrs_layer2')
l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, use_xyz=False, is_training=is_training, bn_decay=bn_decay, scope='corrs_layer3')
# Feature Propagation layers
l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='corrs_fa_layer1')
l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='corrs_fa_layer2')
l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128,128,64], is_training, bn_decay, scope='corrs_fa_layer3')
# FC layers
net = tf_util.conv1d(l0_points, 64, 1, padding='VALID', bn=True, is_training=is_training, scope='corrs_fc1', bn_decay=bn_decay)
net = tf_util.conv1d(net, nfea, 1, padding='VALID', activation_fn=None, scope='corrs_fc2')
corrsfea = tf.reshape(net, [-1, num_point, nfea])
return corrsfea
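A hypothetical twin-branch usage of corrsfea_extractor: calling it twice with the same scopename and reuse=True on the second call makes both point clouds share one set of weights. The shapes and scope name are assumptions.

xyz_a = tf.placeholder(tf.float32, shape=(8, 1024, 3))
xyz_b = tf.placeholder(tf.float32, shape=(8, 1024, 3))
is_training_pl = tf.placeholder(tf.bool, shape=())
fea_a = corrsfea_extractor(xyz_a, is_training_pl, 0.9, 'corrsfea', reuse=False, nfea=64)
fea_b = corrsfea_extractor(xyz_b, is_training_pl, 0.9, 'corrsfea', reuse=True, nfea=64)
# fea_a, fea_b: 8 x 1024 x 64 per-point descriptors computed with shared weights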
Example 12: trans_pred_net
# Required import: import tf_util [as alias]
# Or: from tf_util import conv1d [as alias]
def trans_pred_net(xyz, flow, scopename, reuse, is_training, bn_decay=None, nfea=12):
#########################
# input
# xyz: (B x N x 3)
# flow: (B x N x 3)
# output
# pred_trans: (B x N x nfea)
#########################
num_point = xyz.get_shape()[1].value
with tf.variable_scope(scopename) as myscope:
if reuse:
myscope.reuse_variables()
l0_xyz = xyz
l0_points = flow
# Set Abstraction layers
l1_xyz, l1_points, l1_indices = pointnet_sa_module_msg(l0_xyz, l0_points, 256, [0.1,0.2], [64,64], [[64,64],[64,64],[64,128]], is_training, bn_decay, scope='trans_layer1', centralize_points=True)
l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='trans_layer2')
l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, use_xyz=True, is_training=is_training, bn_decay=bn_decay, scope='trans_layer3')
# Feature Propagation layers
l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='trans_fa_layer1')
l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='trans_fa_layer2')
l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128,128,64], is_training, bn_decay, scope='trans_fa_layer3')
# FC layers
net = tf_util.conv1d(l0_points, 64, 1, padding='VALID', bn=True, is_training=is_training, scope='trans_fc1', bn_decay=bn_decay)
net = tf_util.conv1d(net, nfea, 1, padding='VALID', activation_fn=None, scope='trans_fc2')
pred_trans = tf.reshape(net, [-1, num_point, nfea])
return pred_trans
Example 13: build_pointnet2_seg
# Required import: import tf_util [as alias]
# Or: from tf_util import conv1d [as alias]
def build_pointnet2_seg(X, out_dim, is_training, bn_decay, scope):
n_points = X.get_shape()[1].value
l0_xyz = tf.slice(X, [0,0,0], [-1,-1,3])
l0_points = tf.slice(X, [0,0,3], [-1,-1,0])
# Set Abstraction layers
l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points,
npoint=512, radius=0.2, nsample=64, mlp=[64,64,128],
mlp2=None, group_all=False, is_training=is_training,
bn_decay=bn_decay, scope='layer1')
l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points,
npoint=128, radius=0.4, nsample=64, mlp=[128,128,256],
mlp2=None, group_all=False, is_training=is_training,
bn_decay=bn_decay, scope='layer2')
l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points,
npoint=None, radius=None, nsample=None, mlp=[256,512,1024],
mlp2=None, group_all=True, is_training=is_training,
bn_decay=bn_decay, scope='layer3')
# Feature Propagation layers
l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points,
[256,256], is_training, bn_decay, scope='fa_layer1')
l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points,
[256,128], is_training, bn_decay, scope='fa_layer2')
l0_points = pointnet_fp_module(l0_xyz, l1_xyz,
tf.concat([l0_xyz,l0_points],axis=-1), l1_points, [128,128,128],
is_training, bn_decay, scope='fa_layer3')
# FC layers
net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True,
is_training=is_training, scope='fc1', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
scope='dp1')
net = tf_util.conv1d(net, out_dim, 1, padding='VALID', activation_fn=None,
scope='fc2')
return net, 0
Example 14: get_model
# Required import: import tf_util [as alias]
# Or: from tf_util import conv1d [as alias]
def get_model(point_cloud, is_training, num_class, bn_decay=None):
""" Semantic segmentation PointNet, input is BxNx3, output Bxnum_class """
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
end_points = {}
l0_xyz = point_cloud[:, :, :3]
l0_points = point_cloud[:, :, 3:]
end_points['l0_xyz'] = l0_xyz
# Layer 1
l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=1024, radius=0.1, nsample=32, mlp=[32,32,64], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=256, radius=0.2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=64, radius=0.4, nsample=32, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer3')
l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz, l3_points, npoint=16, radius=0.8, nsample=32, mlp=[256,256,512], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer4')
# Feature Propagation layers
l3_points_sem = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256,256], is_training, bn_decay, scope='sem_fa_layer1')
l2_points_sem = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points_sem, [256,256], is_training, bn_decay, scope='sem_fa_layer2')
l1_points_sem = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points_sem, [256,128], is_training, bn_decay, scope='sem_fa_layer3')
l0_points_sem = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points_sem, [128,128,128], is_training, bn_decay, scope='sem_fa_layer4')
# FC layers
net_sem = tf_util.conv1d(l0_points_sem, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='sem_fc1', bn_decay=bn_decay)
net_sem_cache = tf_util.conv1d(net_sem, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='sem_cache', bn_decay=bn_decay)
# ins
l3_points_ins = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256,256], is_training, bn_decay, scope='ins_fa_layer1')
l2_points_ins = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points_ins, [256,256], is_training, bn_decay, scope='ins_fa_layer2')
l1_points_ins = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points_ins, [256,128], is_training, bn_decay, scope='ins_fa_layer3')
l0_points_ins = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points_ins, [128,128,128], is_training, bn_decay, scope='ins_fa_layer4')
net_ins = tf_util.conv1d(l0_points_ins, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='ins_fc1', bn_decay=bn_decay)
net_ins = net_ins + net_sem_cache
net_ins = tf_util.dropout(net_ins, keep_prob=0.5, is_training=is_training, scope='ins_dp1')
net_ins = tf_util.conv1d(net_ins, 5, 1, padding='VALID', activation_fn=None, scope='ins_fc4')
k = 30
adj_matrix = tf_util.pairwise_distance_l1(net_ins)
nn_idx = tf_util.knn_thres(adj_matrix, k=k)
nn_idx = tf.stop_gradient(nn_idx)
net_sem = tf_util.get_local_feature(net_sem, nn_idx=nn_idx, k=k)# [b, n, k, c]
net_sem = tf.reduce_max(net_sem, axis=-2, keep_dims=False)
net_sem = tf_util.dropout(net_sem, keep_prob=0.5, is_training=is_training, scope='sem_dp1')
net_sem = tf_util.conv1d(net_sem, num_class, 1, padding='VALID', activation_fn=None, scope='sem_fc4')
return net_sem, net_ins
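A hypothetical usage of the joint semantic/instance model in Example 14; the input shape, number of semantic classes and bn_decay value are assumptions, and the repo-specific tf_util helpers it relies on (pairwise_distance_l1, knn_thres, get_local_feature) are presumed available.

pc_pl = tf.placeholder(tf.float32, shape=(8, 4096, 9))   # XYZ plus 6 extra channels
is_training_pl = tf.placeholder(tf.bool, shape=())
net_sem, net_ins = get_model(pc_pl, is_training_pl, num_class=13, bn_decay=0.9)
# net_sem: 8 x 4096 x 13 semantic logits; net_ins: 8 x 4096 x 5 instance embedding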
Example 15: build_pointnet2_seg
# Required import: import tf_util [as alias]
# Or: from tf_util import conv1d [as alias]
def build_pointnet2_seg(scope, X, out_dims, is_training, bn_decay):
with tf.variable_scope(scope):
l0_xyz = tf.slice(X, [0,0,0], [-1,-1,3])
l0_points = tf.slice(X, [0,0,3], [-1,-1,0])
# Set Abstraction layers
l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points,
npoint=512, radius=0.2, nsample=64, mlp=[64,64,128],
mlp2=None, group_all=False, is_training=is_training,
bn_decay=bn_decay, scope='layer1')
l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points,
npoint=128, radius=0.4, nsample=64, mlp=[128,128,256],
mlp2=None, group_all=False, is_training=is_training,
bn_decay=bn_decay, scope='layer2')
l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points,
npoint=None, radius=None, nsample=None, mlp=[256,512,1024],
mlp2=None, group_all=True, is_training=is_training,
bn_decay=bn_decay, scope='layer3')
# Feature Propagation layers
l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points,
[256,256], is_training, bn_decay, scope='fa_layer1')
l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points,
[256,128], is_training, bn_decay, scope='fa_layer2')
l0_points = pointnet_fp_module(l0_xyz, l1_xyz,
tf.concat([l0_xyz,l0_points],axis=-1), l1_points, [128,128,128],
is_training, bn_decay, scope='fa_layer3')
# FC layers
net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True,
is_training=is_training, scope='fc1', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
scope='dp1')
results = []
for idx, out_dim in enumerate(out_dims):
current_result = tf_util.conv1d(net, out_dim, 1, padding='VALID', activation_fn=None, scope='fc2_{}'.format(idx))
results.append(current_result)
return results
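A hypothetical usage of the multi-head variant in Example 15: one linear 1x1 conv head is created per entry in out_dims. The input shape and head sizes are assumptions.

X_pl = tf.placeholder(tf.float32, shape=(8, 1024, 3))
is_training_pl = tf.placeholder(tf.bool, shape=())
heads = build_pointnet2_seg('seg_net', X_pl, out_dims=[50, 3],
                            is_training=is_training_pl, bn_decay=0.9)
# heads[0]: 8 x 1024 x 50, heads[1]: 8 x 1024 x 3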