This article collects typical usage examples of the tf_util.max_pool2d method in Python. If you are wondering what tf_util.max_pool2d does, how to call it, or what it looks like in real code, the curated examples below may help. You can also browse further usage examples from the tf_util module the method belongs to.
Listed below are 15 code examples of tf_util.max_pool2d, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
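All of the examples below use max_pool2d as the symmetric aggregation step of a PointNet-style network: the feature map has shape B x N x 1 x C, and the pooling kernel [num_point, 1] collapses the point dimension N. As a minimal sketch of that effect, assuming tf_util follows the usual PointNet convention of thinly wrapping tf.nn.max_pool (this stand-in is illustrative only, not the actual tf_util implementation):

import tensorflow as tf  # TF 1.x graph-mode API, as used throughout the examples

def global_max_pool_sketch(net, num_point):
    # Collapse a BxNx1xC feature map to Bx1x1xC, matching the effect of
    # tf_util.max_pool2d(net, [num_point, 1], padding='VALID', scope='maxpool').
    return tf.nn.max_pool(net,
                          ksize=[1, num_point, 1, 1],
                          strides=[1, num_point, 1, 1],
                          padding='VALID')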
Example 1: _get_dgcnn
# Required module import: import tf_util [as alias]
# Or: from tf_util import max_pool2d [as alias]
def _get_dgcnn(pcs, layer_sizes, scope_name, is_training, bn_decay):
    assert len(layer_sizes) > 0
    num_point = pcs.shape[1]
    k = 20
    with tf.variable_scope(scope_name):
        adj_matrix = tf_util_dgcnn.pairwise_distance(pcs)
        nn_idx = tf_util_dgcnn.knn(adj_matrix, k=k)
        edge_feature = tf_util_dgcnn.get_edge_feature(pcs, nn_idx=nn_idx, k=k)
        net = tf_util_dgcnn.conv2d(edge_feature, layer_sizes[0], [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv1', bn_decay=bn_decay)
        for idx, layer_size in enumerate(layer_sizes[1:-1]):
            net = tf_util_dgcnn.conv2d(net, layer_size, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope=f'conv{idx+2}', bn_decay=bn_decay)
        net = tf.reduce_max(net, axis=-2, keepdims=True)
        net = tf_util_dgcnn.conv2d(net, layer_sizes[-1], [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope=f'conv{len(layer_sizes)}', bn_decay=bn_decay)
        net = tf_util_dgcnn.max_pool2d(net, [num_point, 1], padding='VALID', scope='maxpool')
    return net
Example 2: get_model
# Required module import: import tf_util [as alias]
# Or: from tf_util import max_pool2d [as alias]
def get_model(source_point_cloud, template_point_cloud, is_training, bn_decay=None):
    point_cloud = tf.concat([source_point_cloud, template_point_cloud], 0)
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    input_image = tf.expand_dims(point_cloud, -1)
    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=False, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=False, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=False, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=False, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=False, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)
    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='maxpool')
    net = tf.reshape(net, [batch_size, -1])
    source_global_feature = tf.slice(net, [0,0], [int(batch_size/2),1024])
    template_global_feature = tf.slice(net, [int(batch_size/2),0], [int(batch_size/2),1024])
    return source_global_feature, template_global_feature
Example 3: get_model
# Required module import: import tf_util [as alias]
# Or: from tf_util import max_pool2d [as alias]
def get_model(self, source_point_cloud, template_point_cloud, feature_size, is_training, bn_decay=None):
    point_cloud = tf.concat([source_point_cloud, template_point_cloud], 0)
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    input_image = tf.expand_dims(point_cloud, -1)
    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=False, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=False, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=False, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=False, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, feature_size, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=False, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)
    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='maxpool')
    net = tf.reshape(net, [batch_size, -1])
    # Extract the features from the network.
    source_global_feature = tf.slice(net, [0,0], [int(batch_size/2),feature_size])
    template_global_feature = tf.slice(net, [int(batch_size/2),0], [int(batch_size/2),feature_size])
    return source_global_feature, template_global_feature
Example 4: feature_transform_net
# Required module import: import tf_util [as alias]
# Or: from tf_util import max_pool2d [as alias]
def feature_transform_net(inputs, is_training, bn_decay=None, K=64):
    """ Feature Transform Net, input is BxNx1xK
        Return:
            Transformation matrix of size KxK """
    batch_size = inputs.get_shape()[0].value
    num_point = inputs.get_shape()[1].value
    net = tf_util.conv2d(inputs, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='tconv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='tconv2', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='tconv3', bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='tmaxpool')
    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='tfc1', bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='tfc2', bn_decay=bn_decay)
    with tf.variable_scope('transform_feat') as sc:
        weights = tf.get_variable('weights', [256, K*K],
                                  initializer=tf.constant_initializer(0.0),
                                  dtype=tf.float32)
        biases = tf.get_variable('biases', [K*K],
                                 initializer=tf.constant_initializer(0.0),
                                 dtype=tf.float32)
        biases += tf.constant(np.eye(K).flatten(), dtype=tf.float32)
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)
    transform = tf.reshape(transform, [batch_size, K, K])
    return transform
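In typical PointNet pipelines the K x K matrix returned above is applied to the per-point features with a batched matrix multiply. A hypothetical usage sketch (point_feat and net_transformed are illustrative names, not from the example above):

# point_feat: BxNxK per-point features; transform: BxKxK from feature_transform_net
net_transformed = tf.matmul(point_feat, transform)    # BxNxK, features aligned by the learned transform
net_transformed = tf.expand_dims(net_transformed, 2)  # BxNx1xK, ready for further conv2d layers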
Example 5: _get_pointnet
# Required module import: import tf_util [as alias]
# Or: from tf_util import max_pool2d [as alias]
def _get_pointnet(pcs_extended, layer_sizes, scope_name, is_training, bn_decay):
    assert len(layer_sizes) > 0
    num_point = pcs_extended.shape[1]
    num_channel = pcs_extended.shape[2]
    with tf.variable_scope(scope_name):
        # Point functions (MLP implemented as conv2d)
        net = tf_util.conv2d(pcs_extended, layer_sizes[0], [1, num_channel], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv1', bn_decay=bn_decay)
        for idx, layer_size in enumerate(layer_sizes[1:]):
            net = tf_util.conv2d(net, layer_size, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope=f'conv{idx+2}', bn_decay=bn_decay)
        net = tf_util.max_pool2d(net, [num_point, 1], padding='VALID', scope='maxpool')
    return net
Example 6: get_center_regression_net
# Required module import: import tf_util [as alias]
# Or: from tf_util import max_pool2d [as alias]
def get_center_regression_net(object_point_cloud, one_hot_vec,
                              is_training, bn_decay, end_points):
    ''' Regression network for center delta. a.k.a. T-Net.
    Input:
        object_point_cloud: TF tensor in shape (B,M,C)
            point clouds in 3D mask coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
    Output:
        predicted_center: TF tensor in shape (B,3)
    '''
    num_point = object_point_cloud.get_shape()[1].value
    net = tf.expand_dims(object_point_cloud, 2)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg1-stage1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg2-stage1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg3-stage1', bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='maxpool-stage1')
    net = tf.squeeze(net, axis=[1,2])
    net = tf.concat([net, one_hot_vec], axis=1)
    net = tf_util.fully_connected(net, 256, scope='fc1-stage1', bn=True,
                                  is_training=is_training, bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 128, scope='fc2-stage1', bn=True,
                                  is_training=is_training, bn_decay=bn_decay)
    predicted_center = tf_util.fully_connected(net, 3, activation_fn=None,
                                               scope='fc3-stage1')
    return predicted_center, end_points
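In Frustum PointNets-style pipelines the predicted center delta is then used to re-center the object points before the box estimation stage. A hypothetical usage sketch (variable names are illustrative, not from the example above):

# object_point_cloud: BxMx3 in mask coordinates; predicted_center: Bx3
object_point_cloud_recentered = object_point_cloud - tf.expand_dims(predicted_center, 1)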
Example 7: get_3d_box_estimation_v1_net
# Required module import: import tf_util [as alias]
# Or: from tf_util import max_pool2d [as alias]
def get_3d_box_estimation_v1_net(object_point_cloud, one_hot_vec,
                                 is_training, bn_decay, end_points):
    ''' 3D Box Estimation PointNet v1 network.
    Input:
        object_point_cloud: TF tensor in shape (B,M,C)
            point clouds in object coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
    Output:
        output: TF tensor in shape (B,3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4)
            including box centers, heading bin class scores and residuals,
            and size cluster scores and residuals
    '''
    num_point = object_point_cloud.get_shape()[1].value
    net = tf_util.perceptron(object_point_cloud, 128,
                             bn=True, is_training=is_training,
                             scope='conv-reg1', bn_decay=bn_decay)
    net = tf_util.perceptron(net, 128,
                             bn=True, is_training=is_training,
                             scope='conv-reg2', bn_decay=bn_decay)
    net = tf_util.perceptron(net, 256,
                             bn=True, is_training=is_training,
                             scope='conv-reg3', bn_decay=bn_decay)
    net = tf_util.perceptron(net, 512,
                             bn=True, is_training=is_training,
                             scope='conv-reg4', bn_decay=bn_decay)
    net = tf_util.max_pool2d(tf.expand_dims(net, 3), [num_point,1],
                             padding='VALID', scope='maxpool2')
    net = tf.squeeze(net, axis=[1, 3])
    net = tf.concat([net, one_hot_vec], axis=1)
    net = tf_util.fully_connected(net, 512, scope='fc1', bn=True,
                                  is_training=is_training, bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 256, scope='fc2', bn=True,
                                  is_training=is_training, bn_decay=bn_decay)
    # The first 3 numbers: box center coordinates (cx,cy,cz),
    # the next NUM_HEADING_BIN*2: heading bin class scores and bin residuals
    # next NUM_SIZE_CLUSTER*4: box cluster scores and residuals
    output = tf_util.fully_connected(net,
        3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4, activation_fn=None, scope='fc3')
    return output, end_points
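The flat output vector is usually split back into its components downstream. A hypothetical decoding sketch, assuming the NUM_HEADING_BIN / NUM_SIZE_CLUSTER constants defined elsewhere in the model file and the Frustum PointNets convention of 3 residual values per size cluster:

# output: (B, 3 + NUM_HEADING_BIN*2 + NUM_SIZE_CLUSTER*4) from get_3d_box_estimation_v1_net
center = output[:, 0:3]
heading_scores = output[:, 3:3 + NUM_HEADING_BIN]
heading_residuals = output[:, 3 + NUM_HEADING_BIN:3 + NUM_HEADING_BIN * 2]
size_scores = output[:, 3 + NUM_HEADING_BIN * 2:3 + NUM_HEADING_BIN * 2 + NUM_SIZE_CLUSTER]
size_residuals = tf.reshape(output[:, 3 + NUM_HEADING_BIN * 2 + NUM_SIZE_CLUSTER:],
                            [-1, NUM_SIZE_CLUSTER, 3])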
Example 8: get_model
# Required module import: import tf_util [as alias]
# Or: from tf_util import max_pool2d [as alias]
def get_model(point_cloud, is_training, bn=True, bn_decay=None):
    """ ConvNet baseline, input is BxNx9 gray image """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    input_image = tf.expand_dims(point_cloud, -1)
    # CONV
    net = tf_util.conv2d(input_image, 64, [1, 9], padding='VALID', stride=[1, 1],
                         bn=bn, is_training=is_training, scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1, 1], padding='VALID', stride=[1, 1],
                         bn=bn, is_training=is_training, scope='conv2', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1, 1], padding='VALID', stride=[1, 1],
                         bn=bn, is_training=is_training, scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1, 1], padding='VALID', stride=[1, 1],
                         bn=bn, is_training=is_training, scope='conv4', bn_decay=bn_decay)
    points_feat1 = tf_util.conv2d(net, 1024, [1, 1], padding='VALID', stride=[1, 1],
                                  bn=bn, is_training=is_training, scope='conv5', bn_decay=bn_decay)
    # MAX
    pc_feat1 = tf_util.max_pool2d(points_feat1, [num_point, 1], padding='VALID', scope='maxpool1')
    # FC
    pc_feat1 = tf.reshape(pc_feat1, [batch_size, -1])
    pc_feat1 = tf_util.fully_connected(pc_feat1, 256, bn=bn, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    pc_feat1 = tf_util.fully_connected(pc_feat1, 128, bn=bn, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    # print(pc_feat1)
    # CONCAT
    pc_feat1_expand = tf.tile(tf.reshape(pc_feat1, [batch_size, 1, 1, -1]), [1, num_point, 1, 1])
    points_feat1_concat = tf.concat(axis=3, values=[points_feat1, pc_feat1_expand])
    # CONV
    net = tf_util.conv2d(points_feat1_concat, 512, [1, 1], padding='VALID', stride=[1, 1], bn=bn, is_training=is_training, scope='conv6')
    net = tf_util.conv2d(net, 256, [1, 1], padding='VALID', stride=[1, 1], bn=bn, is_training=is_training, scope='conv7')
    # net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp1')
    # net = tf_util.conv2d(net, 13, [1, 1], padding='VALID', stride=[1, 1],
    #                      activation_fn=None, scope='conv8')
    # net = tf.squeeze(net, [2])
    return net
Example 9: get_center_regression_net
# Required module import: import tf_util [as alias]
# Or: from tf_util import max_pool2d [as alias]
def get_center_regression_net(object_point_cloud, one_hot_vec,
                              is_training, bn_decay, end_points):
    ''' Regression network for center delta. a.k.a. T-Net.
    Input:
        object_point_cloud: TF tensor in shape (B,M,C)
            point clouds in 3D mask coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
    Output:
        predicted_center: TF tensor in shape (B,3)
    '''
    num_point = object_point_cloud.get_shape()[1].value  # M
    net = tf.expand_dims(object_point_cloud, 2)  # (B,M,1,C)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg1-stage1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg2-stage1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg3-stage1', bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='maxpool-stage1')
    net = tf.squeeze(net, axis=[1,2])
    net = tf.concat([net, one_hot_vec], axis=1)
    net = tf_util.fully_connected(net, 256, scope='fc1-stage1', bn=True,
                                  is_training=is_training, bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 128, scope='fc2-stage1', bn=True,
                                  is_training=is_training, bn_decay=bn_decay)
    predicted_center = tf_util.fully_connected(net, 3, activation_fn=None,
                                               scope='fc3-stage1')
    return predicted_center, end_points
Example 10: get_model
# Required module import: import tf_util [as alias]
# Or: from tf_util import max_pool2d [as alias]
def get_model(point_cloud, is_training, bn_decay=None):
    """ ConvNet baseline, input is BxNx9 gray image """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    input_image = tf.expand_dims(point_cloud, -1)
    # CONV
    net = tf_util.conv2d(input_image, 64, [1,9], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv2', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv4', bn_decay=bn_decay)
    points_feat1 = tf_util.conv2d(net, 1024, [1,1], padding='VALID', stride=[1,1],
                                  bn=True, is_training=is_training, scope='conv5', bn_decay=bn_decay)
    # MAX
    pc_feat1 = tf_util.max_pool2d(points_feat1, [num_point,1], padding='VALID', scope='maxpool1')
    # FC
    pc_feat1 = tf.reshape(pc_feat1, [batch_size, -1])
    pc_feat1 = tf_util.fully_connected(pc_feat1, 256, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    pc_feat1 = tf_util.fully_connected(pc_feat1, 128, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    print(pc_feat1)
    # CONCAT
    pc_feat1_expand = tf.tile(tf.reshape(pc_feat1, [batch_size, 1, 1, -1]), [1, num_point, 1, 1])
    points_feat1_concat = tf.concat(axis=3, values=[points_feat1, pc_feat1_expand])
    # CONV
    net = tf_util.conv2d(points_feat1_concat, 512, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv6')
    net = tf_util.conv2d(net, 256, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv7')
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp1')
    net = tf_util.conv2d(net, 13, [1,1], padding='VALID', stride=[1,1],
                         activation_fn=None, scope='conv8')
    net = tf.squeeze(net, [2])
    return net
Example 11: get_transform
# Required module import: import tf_util [as alias]
# Or: from tf_util import max_pool2d [as alias]
def get_transform(point_cloud, is_training, bn_decay=None, K=3):
    """ Transform Net, input is BxNx3 gray image
        Return:
            Transformation matrix of size 3xK """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    input_image = tf.expand_dims(point_cloud, -1)
    net = tf_util.conv2d(input_image, 64, [1,3], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='tconv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='tconv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='tconv4', bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point,1], padding='VALID', scope='tmaxpool')
    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training, scope='tfc1', bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training, scope='tfc2', bn_decay=bn_decay)
    with tf.variable_scope('transform_XYZ') as sc:
        assert(K==3)
        weights = tf.get_variable('weights', [128, 3*K], initializer=tf.constant_initializer(0.0), dtype=tf.float32)
        biases = tf.get_variable('biases', [3*K], initializer=tf.constant_initializer(0.0), dtype=tf.float32) + tf.constant([1,0,0,0,1,0,0,0,1], dtype=tf.float32)
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)
    #transform = tf_util.fully_connected(net, 3*K, activation_fn=None, scope='tfc3')
    transform = tf.reshape(transform, [batch_size, 3, K])
    return transform
Example 12: build_fusion_block
# Required module import: import tf_util [as alias]
# Or: from tf_util import max_pool2d [as alias]
def build_fusion_block(self, graphs, num_vertices):
    out = self.mlp_builder.build(tf.concat(graphs, axis=-1),
                                 1024,
                                 scope='adj_conv_'+'final',
                                 is_training=self.is_training)
    out_max = tf_util.max_pool2d(out, [num_vertices, 1], padding='VALID', scope='maxpool')
    expand = tf.tile(out_max, [1, num_vertices, 1, 1])
    fusion = tf.concat(axis=3, values=[expand]+graphs)
    return fusion
Example 13: get_transform_K
# Required module import: import tf_util [as alias]
# Or: from tf_util import max_pool2d [as alias]
def get_transform_K(inputs, is_training, bn_decay=None, K=3):
    """ Transform Net, input is BxNx1xK gray image
        Return:
            Transformation matrix of size KxK """
    batch_size = inputs.get_shape()[0].value
    num_point = inputs.get_shape()[1].value
    net = tf_util.conv2d(inputs, 256, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='tconv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='tconv2', bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point,1], padding='VALID', scope='tmaxpool')
    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='tfc1', bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='tfc2', bn_decay=bn_decay)
    with tf.variable_scope('transform_feat') as sc:
        weights = tf.get_variable('weights', [256, K*K], initializer=tf.constant_initializer(0.0), dtype=tf.float32)
        biases = tf.get_variable('biases', [K*K], initializer=tf.constant_initializer(0.0), dtype=tf.float32) + tf.constant(np.eye(K).flatten(), dtype=tf.float32)
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)
    #transform = tf_util.fully_connected(net, 3*K, activation_fn=None, scope='tfc3')
    transform = tf.reshape(transform, [batch_size, K, K])
    return transform
Example 14: get_model
# Required module import: import tf_util [as alias]
# Or: from tf_util import max_pool2d [as alias]
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    input_image = tf.expand_dims(point_cloud, -1)
    # Point functions (MLP implemented as conv2d)
    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)
    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='maxpool')
    # MLP on global point cloud vector
    net = tf.reshape(net, [batch_size, -1])
    return net
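Note that this variant stops at the pooled global feature (B x 256 here) even though its docstring mentions a Bx40 output; the classification head has been cut off. For reference, the head in the original PointNet classifier looks roughly like the sketch below (an approximate reconstruction of the standard head, not part of this example):

net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                              scope='fc1', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp1')
net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                              scope='fc2', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp2')
net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')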
Example 15: input_transform_net
# Required module import: import tf_util [as alias]
# Or: from tf_util import max_pool2d [as alias]
def input_transform_net(point_cloud, is_training, bn_decay=None, K=3):
    """ Input (XYZ) Transform Net, input is BxNx3 gray image
        Return:
            Transformation matrix of size 3xK """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    input_image = tf.expand_dims(point_cloud, -1)
    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='tconv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='tconv2', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='tconv3', bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='tmaxpool')
    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='tfc1', bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='tfc2', bn_decay=bn_decay)
    with tf.variable_scope('transform_XYZ') as sc:
        assert(K==3)
        weights = tf.get_variable('weights', [256, 3*K],
                                  initializer=tf.constant_initializer(0.0),
                                  dtype=tf.float32)
        biases = tf.get_variable('biases', [3*K],
                                 initializer=tf.constant_initializer(0.0),
                                 dtype=tf.float32)
        biases += tf.constant([1,0,0,0,1,0,0,0,1], dtype=tf.float32)
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)
    transform = tf.reshape(transform, [batch_size, 3, K])
    return transform
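The 3 x K matrix returned here is typically applied directly to the raw B x N x 3 point cloud before the first convolution. A hypothetical usage sketch (names are illustrative, not from the example above):

# point_cloud: BxNx3; transform: Bx3xK from input_transform_net
point_cloud_transformed = tf.matmul(point_cloud, transform)  # BxNxK
input_image = tf.expand_dims(point_cloud_transformed, -1)    # BxNxKx1, fed to the first conv2d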