This article collects typical usage examples of the Python function pointnet_util.pointnet_sa_module_msg. If you are unsure what pointnet_util.pointnet_sa_module_msg does or how to call it, the curated code examples below may help. You can also explore further usage examples from the pointnet_util module itself.
The following shows 5 code examples of pointnet_util.pointnet_sa_module_msg, sorted by popularity by default.
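Before the examples, here is a minimal sketch of calling pointnet_sa_module_msg on its own. It assumes the TF 1.x PointNet++ reference implementation, where the module takes (xyz, points, npoint, radius_list, nsample_list, mlp_list, is_training, bn_decay, scope, ...) and returns the sampled centroids plus the concatenated multi-scale features; the modified forks used in Examples 3 and 4 below additionally return the grouped point indices.

import tensorflow as tf
from pointnet_util import pointnet_sa_module_msg  # assumes the PointNet++ repo is on PYTHONPATH

# B=8 clouds of N=1024 points, XYZ only, no extra per-point features
xyz = tf.placeholder(tf.float32, shape=(8, 1024, 3))
is_training = tf.placeholder(tf.bool, shape=())

# One multi-scale grouping (MSG) set-abstraction layer: sample 512 centroids,
# group neighbors at three radii, and run a small point-wise MLP per scale.
new_xyz, new_points = pointnet_sa_module_msg(
    xyz, None, 512, [0.1, 0.2, 0.4], [16, 32, 128],
    [[32, 32, 64], [64, 64, 128], [64, 96, 128]],
    is_training, bn_decay=None, scope='sa_msg_demo')
# new_xyz: (8, 512, 3) sampled centroids
# new_points: (8, 512, 64+128+128) concatenated multi-scale features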
Example 1: get_model
# Required import: import pointnet_util [as alias]
# Or: from pointnet_util import pointnet_sa_module_msg [as alias]
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud
    l0_points = None
    # Set abstraction layers
    l1_xyz, l1_points = pointnet_sa_module_msg(l0_xyz, l0_points, 512, [0.1,0.2,0.4], [16,32,128], [[32,32,64], [64,64,128], [64,96,128]], is_training, bn_decay, scope='layer1', use_nchw=True)
    l2_xyz, l2_points = pointnet_sa_module_msg(l1_xyz, l1_points, 128, [0.2,0.4,0.8], [32,64,128], [[64,64,128], [128,128,256], [128,128,256]], is_training, bn_decay, scope='layer2')
    l3_xyz, l3_points, _ = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')
    # Fully connected layers
    net = tf.reshape(l3_points, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.4, is_training=is_training, scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.4, is_training=is_training, scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')
    return net, end_points
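A minimal sketch of wiring Example 1 into a graph, assuming TF 1.x and that pointnet_util and tf_util from the PointNet++ repo are importable; the batch size of 16 and 1024 points are illustrative only.

with tf.Graph().as_default():
    pc_pl = tf.placeholder(tf.float32, shape=(16, 1024, 3))   # BxNx3 input point clouds
    is_training_pl = tf.placeholder(tf.bool, shape=())
    logits, end_points = get_model(pc_pl, is_training_pl, bn_decay=None)
    # logits: (16, 40), one score per ModelNet40 class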
Example 2: get_model
# Required import: import pointnet_util [as alias]
# Or: from pointnet_util import pointnet_sa_module_msg [as alias]
def get_model(point_cloud, cls_label, is_training, bn_decay=None):
    """ Part segmentation PointNet, input is BxNx6 (XYZ and normals) plus a per-shape class label, output BxNx50 per-point part scores """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
    l0_points = tf.slice(point_cloud, [0,0,3], [-1,-1,3])
    # Set abstraction layers
    l1_xyz, l1_points = pointnet_sa_module_msg(l0_xyz, l0_points, 512, [0.1,0.2,0.4], [32,64,128], [[32,32,64], [64,64,128], [64,96,128]], is_training, bn_decay, scope='layer1')
    l2_xyz, l2_points = pointnet_sa_module_msg(l1_xyz, l1_points, 128, [0.4,0.8], [64,128], [[128,128,256],[128,196,256]], is_training, bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')
    # Feature propagation layers
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer1')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer2')
    cls_label_one_hot = tf.one_hot(cls_label, depth=NUM_CATEGORIES, on_value=1.0, off_value=0.0)
    cls_label_one_hot = tf.reshape(cls_label_one_hot, [batch_size, 1, NUM_CATEGORIES])
    cls_label_one_hot = tf.tile(cls_label_one_hot, [1,num_point,1])
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([cls_label_one_hot, l0_xyz, l0_points],axis=-1), l1_points, [128,128], is_training, bn_decay, scope='fp_layer3')
    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    end_points['feats'] = net
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, 50, 1, padding='VALID', activation_fn=None, scope='fc2')
    return net, end_points
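This snippet expects a module-level NUM_CATEGORIES constant in the same file (the number of object categories used for the one-hot label). A hedged usage sketch, assuming it runs in that module with NUM_CATEGORIES = 16 as in the ShapeNet part setting; shapes are illustrative.

NUM_CATEGORIES = 16  # assumption; must match the constant get_model refers to

with tf.Graph().as_default():
    pc_pl = tf.placeholder(tf.float32, shape=(8, 2048, 6))   # XYZ + normals
    cls_pl = tf.placeholder(tf.int32, shape=(8,))            # per-shape object category id
    is_training_pl = tf.placeholder(tf.bool, shape=())
    seg_logits, end_points = get_model(pc_pl, cls_pl, is_training_pl)
    # seg_logits: (8, 2048, 50) per-point part scores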
Example 3: corrsfea_extractor
# Required import: import pointnet_util [as alias]
# Or: from pointnet_util import pointnet_sa_module_msg [as alias]
def corrsfea_extractor(xyz, is_training, bn_decay, scopename, reuse, nfea=64):
    ############################
    # input
    #   xyz: (B x N x 3)
    # output
    #   corrsfea: (B x N x nfea)
    ############################
    num_point = xyz.get_shape()[1].value
    l0_xyz = xyz
    l0_points = l0_xyz
    with tf.variable_scope(scopename) as myscope:
        if reuse:
            myscope.reuse_variables()
        # Set Abstraction layers
        l1_xyz, l1_points, l1_indices = pointnet_sa_module_msg(l0_xyz, l0_points, 256, [0.1,0.2], [64,64], [[64,64],[64,64],[64,128]], is_training, bn_decay, scope='corrs_layer1')
        l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='corrs_layer2')
        l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, use_xyz=False, is_training=is_training, bn_decay=bn_decay, scope='corrs_layer3')
        # Feature Propagation layers
        l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='corrs_fa_layer1')
        l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='corrs_fa_layer2')
        l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128,128,64], is_training, bn_decay, scope='corrs_fa_layer3')
        # FC layers
        net = tf_util.conv1d(l0_points, 64, 1, padding='VALID', bn=True, is_training=is_training, scope='corrs_fc1', bn_decay=bn_decay)
        net = tf_util.conv1d(net, nfea, 1, padding='VALID', activation_fn=None, scope='corrs_fc2')
        corrsfea = tf.reshape(net, [-1, num_point, nfea])
    return corrsfea
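The scopename/reuse arguments exist so that two point clouds can be encoded with shared weights. A minimal sketch of that pattern, assuming the same modified pointnet_util as above (its MSG module returns the grouped indices as a third value); names and shapes are illustrative.

xyz_a = tf.placeholder(tf.float32, shape=(4, 1024, 3))
xyz_b = tf.placeholder(tf.float32, shape=(4, 1024, 3))
is_training_pl = tf.placeholder(tf.bool, shape=())
fea_a = corrsfea_extractor(xyz_a, is_training_pl, None, scopename='corrs', reuse=False)
fea_b = corrsfea_extractor(xyz_b, is_training_pl, None, scopename='corrs', reuse=True)  # second call reuses the weights created by the first
# fea_a, fea_b: (4, 1024, 64) correspondence descriptors from the same network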
Example 4: trans_pred_net
# Required import: import pointnet_util [as alias]
# Or: from pointnet_util import pointnet_sa_module_msg [as alias]
def trans_pred_net(xyz, flow, scopename, reuse, is_training, bn_decay=None, nfea=12):
    #########################
    # input
    #   xyz: (B x N x 3)
    #   flow: (B x N x 3)
    # output
    #   pred_trans: (B x N x nfea)
    #########################
    num_point = xyz.get_shape()[1].value
    with tf.variable_scope(scopename) as myscope:
        if reuse:
            myscope.reuse_variables()
        l0_xyz = xyz
        l0_points = flow
        # Set Abstraction layers
        l1_xyz, l1_points, l1_indices = pointnet_sa_module_msg(l0_xyz, l0_points, 256, [0.1,0.2], [64,64], [[64,64],[64,64],[64,128]], is_training, bn_decay, scope='trans_layer1', centralize_points=True)
        l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='trans_layer2')
        l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, use_xyz=True, is_training=is_training, bn_decay=bn_decay, scope='trans_layer3')
        # Feature Propagation layers
        l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='trans_fa_layer1')
        l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='trans_fa_layer2')
        l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128,128,64], is_training, bn_decay, scope='trans_fa_layer3')
        # FC layers
        net = tf_util.conv1d(l0_points, 64, 1, padding='VALID', bn=True, is_training=is_training, scope='trans_fc1', bn_decay=bn_decay)
        net = tf_util.conv1d(net, nfea, 1, padding='VALID', activation_fn=None, scope='trans_fc2')
        pred_trans = tf.reshape(net, [-1, num_point, nfea])
    return pred_trans
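Example 4 feeds per-point flow vectors in as the feature channel of the first MSG layer; note that centralize_points=True comes from the same modified pointnet_util and is not part of the reference implementation. A brief, hedged call sketch with illustrative shapes:

xyz_pl = tf.placeholder(tf.float32, shape=(4, 1024, 3))
flow_pl = tf.placeholder(tf.float32, shape=(4, 1024, 3))   # per-point motion (flow) vectors used as features
is_training_pl = tf.placeholder(tf.bool, shape=())
pred = trans_pred_net(xyz_pl, flow_pl, scopename='trans', reuse=False, is_training=is_training_pl)
# pred: (4, 1024, 12) per-point transformation parameters (nfea defaults to 12)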
Example 5: get_instance_seg_v2_net
# Required import: import pointnet_util [as alias]
# Or: from pointnet_util import pointnet_sa_module_msg [as alias]
def get_instance_seg_v2_net(point_cloud, one_hot_vec,
                            is_training, bn_decay, end_points):
    ''' 3D instance segmentation PointNet v2 network.
    Input:
        point_cloud: TF tensor in shape (B,N,4)
            frustum point clouds with XYZ and intensity in point channels
            XYZs are in frustum coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
        is_training: TF boolean scalar
        bn_decay: TF float scalar
        end_points: dict
    Output:
        logits: TF tensor in shape (B,N,2), scores for bkg/clutter and object
        end_points: dict
    '''
    l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
    l0_points = tf.slice(point_cloud, [0,0,3], [-1,-1,1])
    # Set abstraction layers
    l1_xyz, l1_points = pointnet_sa_module_msg(l0_xyz, l0_points,
        128, [0.2,0.4,0.8], [32,64,128],
        [[32,32,64], [64,64,128], [64,96,128]],
        is_training, bn_decay, scope='layer1')
    l2_xyz, l2_points = pointnet_sa_module_msg(l1_xyz, l1_points,
        32, [0.4,0.8,1.6], [64,64,128],
        [[64,64,128], [128,128,256], [128,128,256]],
        is_training, bn_decay, scope='layer2')
    l3_xyz, l3_points, _ = pointnet_sa_module(l2_xyz, l2_points,
        npoint=None, radius=None, nsample=None, mlp=[128,256,1024],
        mlp2=None, group_all=True, is_training=is_training,
        bn_decay=bn_decay, scope='layer3')
    # Feature Propagation layers
    l3_points = tf.concat([l3_points, tf.expand_dims(one_hot_vec, 1)], axis=2)
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points,
        [128,128], is_training, bn_decay, scope='fa_layer1')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points,
        [128,128], is_training, bn_decay, scope='fa_layer2')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz,
        tf.concat([l0_xyz,l0_points],axis=-1), l1_points,
        [128,128], is_training, bn_decay, scope='fa_layer3')
    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True,
        is_training=is_training, scope='conv1d-fc1', bn_decay=bn_decay)
    end_points['feats'] = net
    net = tf_util.dropout(net, keep_prob=0.7,
        is_training=is_training, scope='dp1')
    logits = tf_util.conv1d(net, 2, 1,
        padding='VALID', activation_fn=None, scope='conv1d-fc2')
    return logits, end_points
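A hedged sketch of calling Example 5 in the Frustum PointNets setting, where the frustum clouds carry XYZ plus intensity and one_hot_vec encodes the predicted object class; batch size and point count are illustrative only.

with tf.Graph().as_default():
    pc_pl = tf.placeholder(tf.float32, shape=(32, 2048, 4))   # XYZ + intensity in frustum coordinates
    one_hot_pl = tf.placeholder(tf.float32, shape=(32, 3))    # predicted object type, e.g. car / pedestrian / cyclist
    is_training_pl = tf.placeholder(tf.bool, shape=())
    logits, end_points = get_instance_seg_v2_net(pc_pl, one_hot_pl, is_training_pl, None, {})
    # logits: (32, 2048, 2) background/object score per point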