This article collects typical usage examples of the Python method tf_util.dropout. If you are unsure what tf_util.dropout does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from tf_util, the module the method belongs to.
The following shows 15 code examples of the tf_util.dropout method, sorted by popularity by default.
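Before the examples, a note on context: tf_util is a project-local helper module that ships with the PointNet family of codebases (it is not a pip-installable package), and tf_util.dropout wraps TensorFlow 1.x dropout behind a boolean is_training switch. The minimal sketch below shows the call pattern shared by the examples; the placeholder names and shapes are illustrative assumptions, only the keyword arguments (keep_prob, is_training, scope) come from the examples themselves.

import tensorflow as tf
import tf_util  # local helper module copied into each repository

# Illustrative placeholders; the shapes are assumptions for this sketch only.
is_training_pl = tf.placeholder(tf.bool, shape=())
net = tf.placeholder(tf.float32, shape=(32, 512))
# Dropout is active only when is_training_pl is fed as True.
net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training_pl, scope='dp1')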
Example 1: get_model
# Required import: import tf_util [as alias]
# Or: from tf_util import dropout [as alias]
def get_model(pcs1, pcs2, is_training, bn_decay=None):
    """ Siamese transform-prediction network; inputs are two BxNx3 point clouds,
        outputs predicted centers, translations and angle logits in end_points """
    batch_size = pcs1.get_shape()[0].value
    end_points = {}
    with tf.variable_scope("siamese"):
        embedding_output1, center_mean1, s1_pred_center1, s2_pred_center1, s2_pred_angle_logits1 = get_embedding_net(pcs1, is_training, end_points, bn_decay)
    with tf.variable_scope("siamese", reuse=tf.AUTO_REUSE):
        embedding_output2, center_mean2, s1_pred_center2, s2_pred_center2, s2_pred_angle_logits2 = get_embedding_net(pcs2, is_training, end_points, bn_decay)
    embedding_output_combined = tf.concat([embedding_output1, embedding_output2], axis=3)
    end_points['pred_s1_pc1centers'] = s1_pred_center1
    end_points['pred_s1_pc2centers'] = s1_pred_center2
    end_points['pred_s2_pc1centers'] = s2_pred_center1
    end_points['pred_s2_pc2centers'] = s2_pred_center2
    end_points['pred_pc1angle_logits'] = s2_pred_angle_logits1
    end_points['pred_pc2angle_logits'] = s2_pred_angle_logits2
    net = tf.reshape(embedding_output_combined, [batch_size, -1])
    net = get_mlp(net, [*cfg.model.options.remaining_transform_prediction[0], 3 + cfg.model.angles.num_bins * 2], '', is_training, bn_decay, dropout=cfg.model.options.remaining_transform_prediction[1])
    end_points['pred_translations'] = net[:, :3] + (s2_pred_center2 - s2_pred_center1)
    end_points['pred_remaining_angle_logits'] = net[:, 3:]
    return end_points
Example 2: get_model
# Required import: import tf_util [as alias]
# Or: from tf_util import dropout [as alias]
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud
    l0_points = None
    end_points['l0_xyz'] = l0_xyz
    # Set abstraction layers
    # Note: When using NCHW for layer 2, we see increased GPU memory usage (in TF1.4).
    # So we only use NCHW for layer 1 until this issue can be resolved.
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1', use_nchw=True)
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')
    # Fully connected layers
    net = tf.reshape(l3_points, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')
    return net, end_points
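As a usage note for this example: the dropout layers dp1 and dp2 are gated by the is_training tensor, so a driver feeds True during training and False at evaluation. A minimal, assumed sketch (the placeholder names are illustrative, not taken from the repository):

pointclouds_pl = tf.placeholder(tf.float32, shape=(16, 1024, 3))
is_training_pl = tf.placeholder(tf.bool, shape=())
pred, end_points = get_model(pointclouds_pl, is_training_pl)
# At inference, feed is_training_pl=False so dp1/dp2 pass activations through unchanged.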
Example 3: get_model
# Required import: import tf_util [as alias]
# Or: from tf_util import dropout [as alias]
def get_model(point_cloud, is_training, bn_decay=None):
    """ Part segmentation PointNet, input is BxNx6 (XYZ NormalX NormalY NormalZ), output BxNx50 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
    l0_points = tf.slice(point_cloud, [0,0,3], [-1,-1,3])
    # Set Abstraction layers
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=64, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')
    # Feature Propagation layers
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer1')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer2')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([l0_xyz,l0_points],axis=-1), l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer3')
    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    end_points['feats'] = net
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, 50, 1, padding='VALID', activation_fn=None, scope='fc2')
    return net, end_points
Example 4: get_model
# Required import: import tf_util [as alias]
# Or: from tf_util import dropout [as alias]
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud
    l0_points = None
    # Set abstraction layers
    l1_xyz, l1_points = pointnet_sa_module_msg(l0_xyz, l0_points, 512, [0.1,0.2,0.4], [16,32,128], [[32,32,64], [64,64,128], [64,96,128]], is_training, bn_decay, scope='layer1', use_nchw=True)
    l2_xyz, l2_points = pointnet_sa_module_msg(l1_xyz, l1_points, 128, [0.2,0.4,0.8], [32,64,128], [[64,64,128], [128,128,256], [128,128,256]], is_training, bn_decay, scope='layer2')
    l3_xyz, l3_points, _ = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')
    # Fully connected layers
    net = tf.reshape(l3_points, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.4, is_training=is_training, scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.4, is_training=is_training, scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')
    return net, end_points
Example 5: get_model
# Required import: import tf_util [as alias]
# Or: from tf_util import dropout [as alias]
def get_model(point_cloud, is_training, bn_decay=None, num_class=NUM_CLASSES):
    """ Classification PointNet, input is BxNx3, output B x num_class """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud
    l0_points = None
    end_points['l0_xyz'] = l0_xyz
    # Set abstraction layers
    # Note: When using NCHW for layer 2, we see increased GPU memory usage (in TF1.4).
    # So we only use NCHW for layer 1 until this issue can be resolved.
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1', use_nchw=True)
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')
    # Fully connected layers
    net = tf.reshape(l3_points, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp2')
    net = tf_util.fully_connected(net, num_class, activation_fn=None, scope='fc3')
    return net, end_points
Example 6: build_mlp_pred_block
# Required import: import tf_util [as alias]
# Or: from tf_util import dropout [as alias]
def build_mlp_pred_block(self, fusion, num_classes):
    self.mlp_builder.bn_decay = None
    out = self.mlp_builder.build(fusion,
                                 512,
                                 scope='seg/conv1',
                                 is_training=self.is_training)
    out = self.mlp_builder.build(out,
                                 256,
                                 scope='seg/conv2',
                                 is_training=self.is_training)
    out = tf_util.dropout(out,
                          keep_prob=0.7,
                          scope='dp1',
                          is_training=self.is_training)
    self.mlp_builder.bn = False
    out = self.mlp_builder.build(out,
                                 num_classes,
                                 scope='seg/conv3',
                                 activation_fn=None)
    pred = tf.squeeze(out, [2])
    return pred
Example 7: weight_net
# Required import: import tf_util [as alias]
# Or: from tf_util import dropout [as alias]
def weight_net(xyz, hidden_units, scope, is_training, bn_decay=None, weight_decay=None,
               activation_fn=tf.nn.relu, is_dist=False):
    with tf.variable_scope(scope) as sc:
        net = xyz
        for i, num_hidden_units in enumerate(hidden_units):
            if i != len(hidden_units) - 1:
                net = tf_util.conv2d(net, num_hidden_units, [1, 1],
                                     padding='VALID', stride=[1, 1],
                                     bn=True, is_training=is_training, activation_fn=activation_fn,
                                     scope='wconv{}'.format(i), bn_decay=bn_decay,
                                     weight_decay=weight_decay, is_dist=is_dist)
            else:
                net = tf_util.conv2d(net, num_hidden_units, [1, 1],
                                     padding='VALID', stride=[1, 1],
                                     bn=False, is_training=is_training, activation_fn=None,
                                     scope='wconv{}'.format(i), bn_decay=bn_decay,
                                     weight_decay=weight_decay, is_dist=is_dist)
            # net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='wconv_dp{}'.format(i))
    return net
Example 8: nonlinear_transform
# Required import: import tf_util [as alias]
# Or: from tf_util import dropout [as alias]
def nonlinear_transform(data_in, mlp, scope, is_training, bn_decay=None, weight_decay=None,
                        activation_fn=tf.nn.relu, is_dist=False):
    with tf.variable_scope(scope) as sc:
        net = data_in
        l = len(mlp)
        if l > 1:
            for i, out_ch in enumerate(mlp[0:(l - 1)]):
                net = tf_util.conv2d(net, out_ch, [1, 1],
                                     padding='VALID', stride=[1, 1],
                                     bn=True, is_training=is_training, activation_fn=tf.nn.relu,
                                     scope='nonlinear{}'.format(i), bn_decay=bn_decay,
                                     weight_decay=weight_decay, is_dist=is_dist)
                # net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp_nonlinear{}'.format(i))
        net = tf_util.conv2d(net, mlp[-1], [1, 1],
                             padding='VALID', stride=[1, 1],
                             bn=False, is_training=is_training,
                             scope='nonlinear%d' % (l - 1), bn_decay=bn_decay,
                             activation_fn=tf.nn.sigmoid, weight_decay=weight_decay, is_dist=is_dist)
    return net
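Note that in Examples 7 and 8 the tf_util.dropout calls appear only as commented-out hooks (scopes 'wconv_dp{}' and 'dp_nonlinear{}'), so as written these two helpers apply no dropout at all; the commented lines simply mark where per-layer dropout can be re-enabled without restructuring the loop.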
Example 9: get_model
# Required import: import tf_util [as alias]
# Or: from tf_util import dropout [as alias]
def get_model(net, is_training, add_lstm=False, bn_decay=None, separately=False):
    """ Densenet169 regression model, input is BxWxHx3, output Bx2 """
    net = get_densenet(224, 224)(net)
    if not add_lstm:
        net = tf_util.fully_connected(net, 2, activation_fn=None, scope='fc_final')
    else:
        net = tf_util.fully_connected(net, 784, bn=True,
                                      is_training=is_training,
                                      scope='fc_lstm',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net, keep_prob=0.7,
                              is_training=is_training,
                              scope="dp1")
        net = cnn_lstm_block(net)
    return net
Example 10: dense_block
# Required import: import tf_util [as alias]
# Or: from tf_util import dropout [as alias]
def dense_block(x, stage, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1e-4, grow_nb_filters=True):
    ''' Build a dense_block where the output of each conv_block is fed to subsequent ones
        # Arguments
            x: input tensor
            stage: index for dense block
            nb_layers: the number of layers of conv_block to append to the model
            nb_filter: number of filters
            growth_rate: growth rate
            dropout_rate: dropout rate
            weight_decay: weight decay factor
            grow_nb_filters: flag to decide whether to allow the number of filters to grow
    '''
    eps = 1.1e-5
    concat_feat = x
    for i in range(nb_layers):
        branch = i + 1
        x = conv_block(concat_feat, stage, branch, growth_rate, dropout_rate, weight_decay)
        concat_feat = concatenate([concat_feat, x], axis=3, name='concat_' + str(stage) + '_' + str(branch))
        if grow_nb_filters:
            nb_filter += growth_rate
    return concat_feat, nb_filter
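Unlike the other examples, this dense_block never calls tf_util.dropout itself: the dropout_rate argument is forwarded to conv_block (not shown in this excerpt), so any dropout happens inside each convolutional branch rather than between blocks.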
Example 11: get_model
# Required import: import tf_util [as alias]
# Or: from tf_util import dropout [as alias]
def get_model(net, is_training, add_lstm=False, bn_decay=None, separately=False):
    """ Inception_V4 regression model, input is BxWxHx3, output Bx2 """
    net = get_inception(299, 299)(net)
    if not add_lstm:
        net = tf_util.fully_connected(net, 2, activation_fn=None, scope='fc_final')
    else:
        net = tf_util.fully_connected(net, 784, bn=True,
                                      is_training=is_training,
                                      scope='fc_lstm',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net, keep_prob=0.7,
                              is_training=is_training,
                              scope="dp1")
        net = cnn_lstm_block(net)
    return net
Example 12: get_model
# Required import: import tf_util [as alias]
# Or: from tf_util import dropout [as alias]
def get_model(net, is_training, add_lstm=False, bn_decay=None, separately=False):
    """ ResNet152 regression model, input is BxWxHx3, output Bx2 """
    net = get_resnet(224, 224)(net)
    if not add_lstm:
        net = tf_util.fully_connected(net, 2, activation_fn=None, scope='fc_final')
    else:
        net = tf_util.fully_connected(net, 784, bn=True,
                                      is_training=is_training,
                                      scope='fc_lstm',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net, keep_prob=0.7,
                              is_training=is_training,
                              scope="dp1")
        net = cnn_lstm_block(net)
    return net
Example 13: get_pose
# Required import: import tf_util [as alias]
# Or: from tf_util import dropout [as alias]
def get_pose(source_global_feature, template_global_feature, is_training, bn_decay=None):
    net = tf.concat([source_global_feature, template_global_feature], 1)
    net = tf_util.fully_connected(net, 1024, bn=False, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 512, bn=False, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 256, bn=False, is_training=is_training, scope='fc3', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp4')
    predicted_transformation = tf_util.fully_connected(net, 7, activation_fn=None, scope='fc4')
    return predicted_transformation
Example 14: get_model
# Required import: import tf_util [as alias]
# Or: from tf_util import dropout [as alias]
def get_model(point_cloud, num_frames, is_training, bn_decay=None):
    """ Input:
            point_cloud: [batch_size, num_point * num_frames, 3]
        Output:
            net: [batch_size, num_class] """
    end_points = {}
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value // num_frames
    l0_xyz = point_cloud
    l0_time = tf.concat([tf.ones([batch_size, num_point, 1]) * i for i in range(num_frames)],
                        axis=-2)
    l0_points = tf.concat([point_cloud[:, :, 3:], l0_time], axis=-1)
    RADIUS1 = np.linspace(0.5, 0.6, num_frames, dtype='float32')
    RADIUS2 = RADIUS1 * 2
    RADIUS3 = RADIUS1 * 4
    RADIUS4 = RADIUS1 * 8
    l1_xyz, l1_time, l1_points, l1_indices = meteor_direct_module(l0_xyz, l0_time, l0_points, npoint=1024, radius=RADIUS1, nsample=32, mlp=[32,32,64], mlp2=None, group_all=False, knn=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
    l2_xyz, l2_time, l2_points, l2_indices = meteor_direct_module(l1_xyz, l1_time, l1_points, npoint=512, radius=RADIUS2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, knn=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_time, l3_points, l3_indices = meteor_direct_module(l2_xyz, l2_time, l2_points, npoint=128, radius=RADIUS3, nsample=32, mlp=[128,128,256], mlp2=None, group_all=False, knn=False, is_training=is_training, bn_decay=bn_decay, scope='layer3')
    l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz, l3_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer4')
    # Fully connected layers, fed by the global feature from the final (group_all) layer
    net = tf.reshape(l4_points, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.fully_connected(net, 20, activation_fn=None, scope='fc3')
    return net, end_points
Example 15: get_mlp
# Required import: import tf_util [as alias]
# Or: from tf_util import dropout [as alias]
def get_mlp(net, layer_sizes, scope_name, is_training, bn_decay, dropout=None):
    assert len(layer_sizes) > 0
    with tf.variable_scope(scope_name):
        for idx, layer_size in enumerate(layer_sizes[:-1]):
            net = tf_util.fully_connected(net, layer_size, bn=True, is_training=is_training, scope=f'fc{idx+1}', bn_decay=bn_decay)
            if dropout is not None:
                net = tf_util.dropout(net, keep_prob=dropout, is_training=is_training, scope='dp1')
        return tf_util.fully_connected(net, layer_sizes[-1], activation_fn=None, scope=f'fc{len(layer_sizes)}')
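A hedged usage sketch for this helper (the layer sizes and keep probability below are illustrative assumptions): with dropout=0.6, each hidden fully connected layer is followed by tf_util.dropout with keep_prob=0.6, while the final layer stays linear with no dropout.

features = tf.placeholder(tf.float32, shape=(32, 1024))
is_training_pl = tf.placeholder(tf.bool, shape=())
# Hidden layers of 256 and 128 units, each followed by dropout; 40-way linear output.
logits = get_mlp(features, [256, 128, 40], 'cls_head', is_training_pl, bn_decay=None, dropout=0.6)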