This article collects typical usage examples of the Python method tf_util.fully_connected. If you have been wondering what exactly tf_util.fully_connected does, how to call it, or what working code using it looks like, the curated examples below should help. You can also browse further usage examples from the containing module, tf_util.
Fifteen code examples of tf_util.fully_connected are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
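Before the examples, it helps to know the rough shape of the helper they all call. tf_util here is the utility module shipped with PointNet-style repositories; its exact signature varies between forks, so the following is only a minimal sketch of what such a fully_connected wrapper typically does (a dense layer with an optional batch-norm step and activation), not the actual library source.

import tensorflow as tf  # TF 1.x graph-mode API, as used throughout the examples below

def fully_connected_sketch(inputs, num_outputs, scope,
                           activation_fn=tf.nn.relu,
                           bn=False, bn_decay=None, is_training=None):
    """Hypothetical stand-in for tf_util.fully_connected: dense + optional BN + activation."""
    with tf.variable_scope(scope):
        num_inputs = inputs.get_shape()[-1].value
        weights = tf.get_variable('weights', [num_inputs, num_outputs],
                                  initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.get_variable('biases', [num_outputs],
                                 initializer=tf.constant_initializer(0.0))
        outputs = tf.nn.bias_add(tf.matmul(inputs, weights), biases)
        if bn:
            # The real helper drives its moving-average schedule from bn_decay;
            # a plain batch_normalization layer stands in for it here.
            outputs = tf.layers.batch_normalization(outputs, momentum=0.9,
                                                    training=is_training)
        if activation_fn is not None:
            outputs = activation_fn(outputs)
        return outputs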
Example 1: single_encoding_net
# Required module: import tf_util [as alias]
# Or: from tf_util import fully_connected [as alias]
def single_encoding_net(pc, mlp_list, mlp_list2, scope, is_training, bn_decay):
    ''' The encoding network for instance
    Input:
        pc: [B, N, 3]
    Return:
        net: [B, nfea]
    '''
    with tf.variable_scope(scope) as myscope:
        net = tf.expand_dims(pc, 2)
        for i, num_out_channel in enumerate(mlp_list):
            net = tf_util.conv2d(net, num_out_channel, [1,1],
                                 padding='VALID', stride=[1,1],
                                 bn=True, is_training=is_training,
                                 scope='conv%d'%i, bn_decay=bn_decay)
        net = tf.reduce_max(net, axis=[1])
        net = tf.squeeze(net, 1)
        for i, num_out_channel in enumerate(mlp_list2):
            net = tf_util.fully_connected(net, num_out_channel, bn=True, is_training=is_training,
                                          scope='fc%d'%i, bn_decay=bn_decay)
        return net
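Assuming the PointNet-style tf_util module is on the path and tensorflow is imported as tf, a call might look like the sketch below; the placeholder shapes and MLP widths are illustrative only.

# Hypothetical driver for single_encoding_net (shapes and channel lists are made up).
pc_pl = tf.placeholder(tf.float32, shape=(32, 1024, 3))   # B=32 instances, N=1024 points each
is_training_pl = tf.placeholder(tf.bool, shape=())
feat = single_encoding_net(pc_pl, mlp_list=[64, 128, 256], mlp_list2=[256, 128],
                           scope='instance_encoder', is_training=is_training_pl,
                           bn_decay=None)
# feat has shape [32, 128]: one global feature vector per instance.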
Example 2: get_model
# Required module: import tf_util [as alias]
# Or: from tf_util import fully_connected [as alias]
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud
    l0_points = None
    end_points['l0_xyz'] = l0_xyz

    # Set abstraction layers
    # Note: When using NCHW for layer 2, we see increased GPU memory usage (in TF1.4).
    # So we only use NCHW for layer 1 until this issue can be resolved.
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1', use_nchw=True)
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')

    # Fully connected layers
    net = tf.reshape(l3_points, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')
    return net, end_points
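A typical way to drive this classification graph (not part of the excerpt above) is sketched below; the batch size, point count, and training-loop wiring are illustrative, and the loss is written out explicitly so the snippet stands on its own.

# Minimal training-graph sketch around get_model (assumes `import tensorflow as tf`).
pointclouds_pl = tf.placeholder(tf.float32, shape=(16, 1024, 3))
labels_pl = tf.placeholder(tf.int32, shape=(16,))
is_training_pl = tf.placeholder(tf.bool, shape=())

pred, end_points = get_model(pointclouds_pl, is_training_pl, bn_decay=None)  # pred: [16, 40] logits
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=labels_pl))
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)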
Example 3: get_model
# Required module: import tf_util [as alias]
# Or: from tf_util import fully_connected [as alias]
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud
    l0_points = None

    # Set abstraction layers
    l1_xyz, l1_points = pointnet_sa_module_msg(l0_xyz, l0_points, 512, [0.1,0.2,0.4], [16,32,128], [[32,32,64], [64,64,128], [64,96,128]], is_training, bn_decay, scope='layer1', use_nchw=True)
    l2_xyz, l2_points = pointnet_sa_module_msg(l1_xyz, l1_points, 128, [0.2,0.4,0.8], [32,64,128], [[64,64,128], [128,128,256], [128,128,256]], is_training, bn_decay, scope='layer2')
    l3_xyz, l3_points, _ = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')

    # Fully connected layers
    net = tf.reshape(l3_points, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.4, is_training=is_training, scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.4, is_training=is_training, scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')
    return net, end_points
Example 4: get_model
# Required module: import tf_util [as alias]
# Or: from tf_util import fully_connected [as alias]
def get_model(point_cloud, is_training, bn_decay=None, num_class=NUM_CLASSES):
    """ Classification PointNet, input is BxNx3, output Bxnum_class """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud
    l0_points = None
    end_points['l0_xyz'] = l0_xyz

    # Set abstraction layers
    # Note: When using NCHW for layer 2, we see increased GPU memory usage (in TF1.4).
    # So we only use NCHW for layer 1 until this issue can be resolved.
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1', use_nchw=True)
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')

    # Fully connected layers
    net = tf.reshape(l3_points, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp2')
    net = tf_util.fully_connected(net, num_class, activation_fn=None, scope='fc3')
    return net, end_points
Example 5: get_model
# Required module: import tf_util [as alias]
# Or: from tf_util import fully_connected [as alias]
def get_model(net, is_training, add_lstm=False, bn_decay=None, separately=False):
    """ Densenet169 regression model, input is BxWxHx3, output Bx2 """
    net = get_densenet(224, 224)(net)
    if not add_lstm:
        net = tf_util.fully_connected(net, 2, activation_fn=None, scope='fc_final')
    else:
        net = tf_util.fully_connected(net, 784, bn=True,
                                      is_training=is_training,
                                      scope='fc_lstm',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net, keep_prob=0.7,
                              is_training=is_training,
                              scope="dp1")
        net = cnn_lstm_block(net)
    return net
Example 6: get_model
# Required module: import tf_util [as alias]
# Or: from tf_util import fully_connected [as alias]
def get_model(net, is_training, add_lstm=False, bn_decay=None, separately=False):
    """ Inception_V4 regression model, input is BxWxHx3, output Bx2 """
    net = get_inception(299, 299)(net)
    if not add_lstm:
        net = tf_util.fully_connected(net, 2, activation_fn=None, scope='fc_final')
    else:
        net = tf_util.fully_connected(net, 784, bn=True,
                                      is_training=is_training,
                                      scope='fc_lstm',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net, keep_prob=0.7,
                              is_training=is_training,
                              scope="dp1")
        net = cnn_lstm_block(net)
    return net
Example 7: get_model
# Required module: import tf_util [as alias]
# Or: from tf_util import fully_connected [as alias]
def get_model(net, is_training, add_lstm=False, bn_decay=None, separately=False):
    """ ResNet152 regression model, input is BxWxHx3, output Bx2 """
    net = get_resnet(224, 224)(net)
    if not add_lstm:
        net = tf_util.fully_connected(net, 2, activation_fn=None, scope='fc_final')
    else:
        net = tf_util.fully_connected(net, 784, bn=True,
                                      is_training=is_training,
                                      scope='fc_lstm',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net, keep_prob=0.7,
                              is_training=is_training,
                              scope="dp1")
        net = cnn_lstm_block(net)
    return net
Example 8: get_pose
# Required module: import tf_util [as alias]
# Or: from tf_util import fully_connected [as alias]
def get_pose(source_global_feature, template_global_feature, is_training, bn_decay=None):
    net = tf.concat([source_global_feature, template_global_feature], 1)
    net = tf_util.fully_connected(net, 1024, bn=False, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 512, bn=False, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 256, bn=False, is_training=is_training, scope='fc3', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp4')
    predicted_transformation = tf_util.fully_connected(net, 7, activation_fn=None, scope='fc4')
    return predicted_transformation
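The 7-dimensional output follows the usual PCRNet-style convention of a rotation quaternion plus a translation vector. The split below is only a sketch of that interpretation; the ordering of the two parts (quaternion first vs. translation first) depends on the repository and is an assumption here.

# Hypothetical post-processing of the 7-dim prediction (assumes `import tensorflow as tf`).
predicted_quat = predicted_transformation[:, 0:4]
predicted_quat = predicted_quat / tf.norm(predicted_quat, axis=1, keepdims=True)  # normalize to a unit quaternion
predicted_trans = predicted_transformation[:, 4:7]                                # translation (tx, ty, tz)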
Example 9: get_pose
# Required module: import tf_util [as alias]
# Or: from tf_util import fully_connected [as alias]
def get_pose(self, source_global_feature, template_global_feature, is_training, bn_decay=None):
    # with tf.variable_scope('pose_estimation') as pn:
    net = tf.concat([source_global_feature, template_global_feature], 1)
    net = tf_util.fully_connected(net, 1024, bn=False, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 1024, bn=False, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 512, bn=False, is_training=is_training, scope='fc3', bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 512, bn=False, is_training=is_training, scope='fc4', bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 256, bn=False, is_training=is_training, scope='fc5', bn_decay=bn_decay)
    predicted_transformation = tf_util.fully_connected(net, 7, activation_fn=None, scope='fc6')
    return predicted_transformation
Example 10: feature_transform_net
# Required module: import tf_util [as alias]
# Or: from tf_util import fully_connected [as alias]
def feature_transform_net(inputs, is_training, bn_decay=None, K=64):
    """ Feature Transform Net, input is BxNx1xK
        Return:
            Transformation matrix of size KxK """
    batch_size = inputs.get_shape()[0].value
    num_point = inputs.get_shape()[1].value

    net = tf_util.conv2d(inputs, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='tconv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='tconv2', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='tconv3', bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='tmaxpool')
    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='tfc1', bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='tfc2', bn_decay=bn_decay)

    with tf.variable_scope('transform_feat') as sc:
        weights = tf.get_variable('weights', [256, K*K],
                                  initializer=tf.constant_initializer(0.0),
                                  dtype=tf.float32)
        biases = tf.get_variable('biases', [K*K],
                                 initializer=tf.constant_initializer(0.0),
                                 dtype=tf.float32)
        biases += tf.constant(np.eye(K).flatten(), dtype=tf.float32)
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)

    transform = tf.reshape(transform, [batch_size, K, K])
    return transform
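In PointNet this transform is applied to the per-point features with a batched matrix multiply. The sketch below shows that step, assuming tensorflow is imported as tf; the variable names are illustrative.

# Applying the learned KxK alignment to per-point features.
# point_feat: [B, N, 1, 64] feature map coming out of an earlier conv2d stack.
transform = feature_transform_net(point_feat, is_training, bn_decay, K=64)
aligned = tf.matmul(tf.squeeze(point_feat, axis=[2]), transform)   # [B, N, 64]
aligned = tf.expand_dims(aligned, 2)                               # back to [B, N, 1, 64]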
Example 11: get_model
# Required module: import tf_util [as alias]
# Or: from tf_util import fully_connected [as alias]
def get_model(point_cloud, num_frames, is_training, bn_decay=None):
    """ Input:
            point_cloud: [batch_size, num_point * num_frames, 3]
        Output:
            net: [batch_size, num_class] """
    end_points = {}
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value // num_frames

    l0_xyz = point_cloud
    l0_time = tf.concat([tf.ones([batch_size, num_point, 1]) * i for i in range(num_frames)],
                        axis=-2)
    l0_points = tf.concat([point_cloud[:, :, 3:], l0_time], axis=-1)

    RADIUS1 = np.linspace(0.5, 0.6, num_frames, dtype='float32')
    RADIUS2 = RADIUS1 * 2
    RADIUS3 = RADIUS1 * 4
    RADIUS4 = RADIUS1 * 8

    l1_xyz, l1_time, l1_points, l1_indices = meteor_direct_module(l0_xyz, l0_time, l0_points, npoint=1024, radius=RADIUS1, nsample=32, mlp=[32,32,64], mlp2=None, group_all=False, knn=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
    l2_xyz, l2_time, l2_points, l2_indices = meteor_direct_module(l1_xyz, l1_time, l1_points, npoint=512, radius=RADIUS2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, knn=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_time, l3_points, l3_indices = meteor_direct_module(l2_xyz, l2_time, l2_points, npoint=128, radius=RADIUS3, nsample=32, mlp=[128,128,256], mlp2=None, group_all=False, knn=False, is_training=is_training, bn_decay=bn_decay, scope='layer3')
    l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz, l3_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer4')

    # Fully connected layers
    net = tf.reshape(l3_points, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.fully_connected(net, 20, activation_fn=None, scope='fc3')
    return net, end_points
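The model expects all frames to be flattened into a single point dimension, with the points of frame 0 first, then frame 1, and so on (that ordering is what the l0_time construction above assumes). A sketch of building such an input from a per-frame tensor, with illustrative shapes and assuming tensorflow is imported as tf:

# Hypothetical input assembly: stack T frames of N points each along the point axis.
frames = tf.placeholder(tf.float32, shape=(8, 4, 2048, 3))   # [B, num_frames, N, 3]
point_cloud = tf.reshape(frames, [8, 4 * 2048, 3])           # [B, num_frames * N, 3]
is_training_pl = tf.placeholder(tf.bool, shape=())
logits, end_points = get_model(point_cloud, num_frames=4, is_training=is_training_pl)  # logits: [8, 20]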
Example 12: get_mlp
# Required module: import tf_util [as alias]
# Or: from tf_util import fully_connected [as alias]
def get_mlp(net, layer_sizes, scope_name, is_training, bn_decay, dropout=None):
    assert len(layer_sizes) > 0
    with tf.variable_scope(scope_name):
        for idx, layer_size in enumerate(layer_sizes[:-1]):
            net = tf_util.fully_connected(net, layer_size, bn=True, is_training=is_training, scope=f'fc{idx+1}', bn_decay=bn_decay)
        if dropout is not None:
            net = tf_util.dropout(net, keep_prob=dropout, is_training=is_training, scope='dp1')
        return tf_util.fully_connected(net, layer_sizes[-1], activation_fn=None, scope=f'fc{len(layer_sizes)}')
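For instance, a three-layer classification head over a 1024-dim global feature could be built as follows; the widths, scope name, and dropout rate are illustrative, and tensorflow is assumed to be imported as tf.

# Hypothetical call: two hidden FC layers (512, 256) and a 40-way linear output.
global_feat = tf.placeholder(tf.float32, shape=(16, 1024))
is_training_pl = tf.placeholder(tf.bool, shape=())
logits = get_mlp(global_feat, [512, 256, 40], 'cls_head',
                 is_training=is_training_pl, bn_decay=None, dropout=0.5)  # [16, 40]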
Example 13: build_pointnet2_cls
# Required module: import tf_util [as alias]
# Or: from tf_util import fully_connected [as alias]
def build_pointnet2_cls(scope, point_cloud, out_dims, is_training, bn_decay):
    with tf.variable_scope(scope):
        batch_size = tf.shape(point_cloud)[0]
        l0_xyz = point_cloud
        l0_points = None

        # Set abstraction layers
        # Note: When using NCHW for layer 2, we see increased GPU memory usage (in TF1.4).
        # So we only use NCHW for layer 1 until this issue can be resolved.
        l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1', use_nchw=True)
        l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
        l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')

        # Fully connected layers
        net = tf.reshape(l3_points, [batch_size, 1024])
        net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
        net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
        net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
        net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp2')

        results = []
        for idx, out_dim in enumerate(out_dims):
            current_result = tf_util.fully_connected(net, out_dim, activation_fn=None, scope='fc3_{}'.format(idx))
            results.append(current_result)
        return results
Example 14: get_center_regression_net
# Required module: import tf_util [as alias]
# Or: from tf_util import fully_connected [as alias]
def get_center_regression_net(object_point_cloud, one_hot_vec,
                              is_training, bn_decay, end_points):
    ''' Regression network for center delta. a.k.a. T-Net.
    Input:
        object_point_cloud: TF tensor in shape (B,M,C)
            point clouds in 3D mask coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
    Output:
        predicted_center: TF tensor in shape (B,3)
    '''
    num_point = object_point_cloud.get_shape()[1].value
    net = tf.expand_dims(object_point_cloud, 2)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg1-stage1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg2-stage1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg3-stage1', bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='maxpool-stage1')
    net = tf.squeeze(net, axis=[1,2])
    net = tf.concat([net, one_hot_vec], axis=1)
    net = tf_util.fully_connected(net, 256, scope='fc1-stage1', bn=True,
                                  is_training=is_training, bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 128, scope='fc2-stage1', bn=True,
                                  is_training=is_training, bn_decay=bn_decay)
    predicted_center = tf_util.fully_connected(net, 3, activation_fn=None,
                                               scope='fc3-stage1')
    return predicted_center, end_points
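In Frustum PointNets-style pipelines, the regressed delta is used to re-center the object points before the box estimation stage. A sketch of that step, assuming tensorflow is imported as tf; the tensor names (object_point_cloud_xyz, one_hot_vec, end_points) are illustrative inputs assumed to already exist in the calling graph.

# Hypothetical use of the regressed center: shift the object points into a
# coordinate frame centered on the prediction before estimating the box.
center_delta, end_points = get_center_regression_net(
    object_point_cloud_xyz, one_hot_vec, is_training, bn_decay, end_points)
object_point_cloud_xyz_new = object_point_cloud_xyz - tf.expand_dims(center_delta, 1)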
Example 15: get_3d_box_estimation_v1_net
# Required module: import tf_util [as alias]
# Or: from tf_util import fully_connected [as alias]
def get_3d_box_estimation_v1_net(object_point_cloud, one_hot_vec,
                                 is_training, bn_decay, end_points):
    ''' 3D Box Estimation PointNet v1 network.
    Input:
        object_point_cloud: TF tensor in shape (B,M,C)
            point clouds in object coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
    Output:
        output: TF tensor in shape (B,3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4)
            including box centers, heading bin class scores and residuals,
            and size cluster scores and residuals
    '''
    num_point = object_point_cloud.get_shape()[1].value
    net = tf_util.perceptron(object_point_cloud, 128,
                             bn=True, is_training=is_training,
                             scope='conv-reg1', bn_decay=bn_decay)
    net = tf_util.perceptron(net, 128,
                             bn=True, is_training=is_training,
                             scope='conv-reg2', bn_decay=bn_decay)
    net = tf_util.perceptron(net, 256,
                             bn=True, is_training=is_training,
                             scope='conv-reg3', bn_decay=bn_decay)
    net = tf_util.perceptron(net, 512,
                             bn=True, is_training=is_training,
                             scope='conv-reg4', bn_decay=bn_decay)
    net = tf_util.max_pool2d(tf.expand_dims(net, 3), [num_point,1],
                             padding='VALID', scope='maxpool2')
    net = tf.squeeze(net, axis=[1, 3])
    net = tf.concat([net, one_hot_vec], axis=1)
    net = tf_util.fully_connected(net, 512, scope='fc1', bn=True,
                                  is_training=is_training, bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 256, scope='fc2', bn=True,
                                  is_training=is_training, bn_decay=bn_decay)

    # The first 3 numbers: box center coordinates (cx,cy,cz),
    # the next NUM_HEADING_BIN*2: heading bin class scores and bin residuals
    # next NUM_SIZE_CLUSTER*4: box cluster scores and residuals
    output = tf_util.fully_connected(net,
        3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4, activation_fn=None, scope='fc3')
    return output, end_points
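Following the layout described in the comments above, the flat output can be sliced back into its components. The decoding below is a sketch of that layout, assuming tensorflow is imported as tf and NUM_HEADING_BIN / NUM_SIZE_CLUSTER are the same constants used in the network.

# Slice the (B, 3 + NUM_HEADING_BIN*2 + NUM_SIZE_CLUSTER*4) output back into parts.
center = output[:, 0:3]                                                     # box center (cx, cy, cz)
heading_scores = output[:, 3:3 + NUM_HEADING_BIN]                           # heading bin class scores
heading_residuals = output[:, 3 + NUM_HEADING_BIN:3 + NUM_HEADING_BIN * 2]  # per-bin angle residuals
size_start = 3 + NUM_HEADING_BIN * 2
size_scores = output[:, size_start:size_start + NUM_SIZE_CLUSTER]           # size cluster scores
size_residuals = output[:, size_start + NUM_SIZE_CLUSTER:]                  # per-cluster size residuals
size_residuals = tf.reshape(size_residuals, [-1, NUM_SIZE_CLUSTER, 3])      # (B, NUM_SIZE_CLUSTER, 3)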