

Python slim.fully_connected Method: Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.slim.fully_connected. If you are unsure what slim.fully_connected does or how to call it, the curated examples below should help. You can also explore further usage examples from the tensorflow.contrib.slim module.


The following 15 code examples of slim.fully_connected are shown below, sorted by popularity by default. All snippets are excerpts from real open-source projects and additionally assume import tensorflow as tf.
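
Before diving into the examples, here is a minimal, self-contained sketch of the method itself (TensorFlow 1.x; the layer names and shapes are illustrative, not taken from any example below):

import tensorflow as tf
import tensorflow.contrib.slim as slim

# A batch of flattened 784-dimensional feature vectors.
inputs = tf.placeholder(tf.float32, [None, 784])

# One fully connected layer with 256 units; slim applies ReLU by default.
net = slim.fully_connected(inputs, 256, scope='fc1')

# A final linear projection (no activation) to 10 logits.
logits = slim.fully_connected(net, 10, activation_fn=None, scope='logits')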

Example 1: E

# Required module: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import fully_connected [as alias]
def E(self, images, is_training=False, reuse=False):

    if images.get_shape()[3] == 3:
        images = tf.image.rgb_to_grayscale(images)

    with tf.variable_scope('encoder', reuse=reuse):
        with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.relu):
            with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu, padding='VALID'):
                net = slim.conv2d(images, 64, 5, scope='conv1')
                net = slim.max_pool2d(net, 2, stride=2, scope='pool1')
                net = slim.conv2d(net, 128, 5, scope='conv2')
                net = slim.max_pool2d(net, 2, stride=2, scope='pool2')
                net = tf.contrib.layers.flatten(net)
                net = slim.fully_connected(net, 1024, activation_fn=tf.nn.relu, scope='fc3')
                net = slim.dropout(net, 0.5, is_training=is_training)
                # The explicit activation_fn overrides the ReLU set by arg_scope above.
                net = slim.fully_connected(net, self.hidden_repr_size, activation_fn=tf.tanh, scope='fc4')
                # dropout here or not?
                #~ net = slim.dropout(net, 0.5, is_training=is_training)
                return net
Author: pmorerio | Project: minimal-entropy-correlation-alignment | Lines: 21 | Source: model.py

Example 2: mobilenetv2_scope

# Required module: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import fully_connected [as alias]
# training_scope() is assumed to come from the surrounding mobilenet_v2 module
def mobilenetv2_scope(is_training=True,
                      trainable=True,
                      weight_decay=0.00004,
                      stddev=0.09,
                      dropout_keep_prob=0.8,
                      bn_decay=0.997):
  """Defines Mobilenet training scope.
  In default. We do not use BN

  ReWrite the scope.
  """
  batch_norm_params = {
      'is_training': False,
      'trainable': False,
      'decay': bn_decay,
  }
  with slim.arg_scope(training_scope(is_training=is_training, weight_decay=weight_decay)):
      with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.separable_conv2d],
                          trainable=trainable):
          with slim.arg_scope([slim.batch_norm], **batch_norm_params) as sc:
              return sc 
Author: DetectionTeamUCAS | Project: R2CNN_Faster-RCNN_Tensorflow | Lines: 23 | Source: mobilenet_v2.py
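
For reference, a scope returned this way is typically re-entered by the caller before building the network. A minimal sketch (the stem layer and the images tensor are hypothetical):

sc = mobilenetv2_scope(is_training=True, trainable=True)
with slim.arg_scope(sc):
    # Layers created here inherit trainable=True and the frozen-BN params.
    net = slim.conv2d(images, 32, [3, 3], scope='stem')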

Example 3: _build_aux_head

# Required module: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import fully_connected [as alias]
# This snippet also assumes: from tensorflow.contrib import layers as contrib_layers
def _build_aux_head(net, end_points, num_classes, hparams, scope):
  """Auxiliary head used for all models across all datasets."""
  with tf.variable_scope(scope):
    aux_logits = tf.identity(net)
    with tf.variable_scope('aux_logits'):
      aux_logits = slim.avg_pool2d(
          aux_logits, [5, 5], stride=3, padding='VALID')
      aux_logits = slim.conv2d(aux_logits, 128, [1, 1], scope='proj')
      aux_logits = slim.batch_norm(aux_logits, scope='aux_bn0')
      aux_logits = tf.nn.relu(aux_logits)
      # Shape of feature map before the final layer.
      shape = aux_logits.shape
      if hparams.data_format == 'NHWC':
        shape = shape[1:3]
      else:
        shape = shape[2:4]
      aux_logits = slim.conv2d(aux_logits, 768, shape, padding='VALID')
      aux_logits = slim.batch_norm(aux_logits, scope='aux_bn1')
      aux_logits = tf.nn.relu(aux_logits)
      aux_logits = contrib_layers.flatten(aux_logits)
      aux_logits = slim.fully_connected(aux_logits, num_classes)
      end_points['AuxLogits'] = aux_logits 
Author: tensorflow | Project: benchmarks | Lines: 24 | Source: nasnet_model.py

Example 4: _extra_conv_arg_scope

# Required module: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import fully_connected [as alias]
def _extra_conv_arg_scope(weight_decay=0.00001, activation_fn=None, normalizer_fn=None):

  with slim.arg_scope(
      [slim.conv2d, slim.conv2d_transpose],
      padding='SAME',
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
      activation_fn=activation_fn,
      normalizer_fn=normalizer_fn,) as arg_sc:
    with slim.arg_scope(
        [slim.fully_connected],
        weights_regularizer=slim.l2_regularizer(weight_decay),
        weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
        activation_fn=activation_fn,
        normalizer_fn=normalizer_fn) as arg_sc:
      return arg_sc
Author: CharlesShang | Project: FastMaskRCNN | Lines: 18 | Source: pyramid_network.py

Example 5: inference

# Required module: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import fully_connected [as alias]
# inception_resnet_v2() is assumed to be defined elsewhere in the same source file
def inference(images, keep_probability, phase_train=True, 
              bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages ends up in the trainable variables collection
        'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
    }
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=slim.initializers.xavier_initializer(), 
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        return inception_resnet_v2(images, is_training=phase_train,
              dropout_keep_prob=keep_probability, bottleneck_layer_size=bottleneck_layer_size, reuse=reuse) 
Author: GaoangW | Project: TNT | Lines: 21 | Source: inception_resnet_v2.py

Example 6: inference

# Required module: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import fully_connected [as alias]
# This snippet also requires: import numpy as np
def inference(images, keep_probability, phase_train=True,  # @UnusedVariable
              bottleneck_layer_size=128, bottleneck_layer_activation=None, weight_decay=0.0, reuse=None):  # @UnusedVariable
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages ends up in the trainable variables collection
        'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
    }
    
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        size = np.prod(images.get_shape()[1:].as_list())
        net = slim.fully_connected(tf.reshape(images, (-1,size)), bottleneck_layer_size, activation_fn=None, 
                scope='Bottleneck', reuse=False)
        return net, None 
Author: GaoangW | Project: TNT | Lines: 24 | Source: dummy.py
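
For concreteness, with 160x160x3 inputs this snippet flattens each image into a 76800-dimensional vector before the bottleneck. A sketch of the shape arithmetic (the input size is an assumption for illustration):

import numpy as np
import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 160, 160, 3])
size = np.prod(images.get_shape()[1:].as_list())   # 160 * 160 * 3 = 76800
flat = tf.reshape(images, (-1, size))              # shape [batch, 76800]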

Example 7: inference

# Required module: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import fully_connected [as alias]
# inception_resnet_v1() is assumed to be defined elsewhere in the same source file
def inference(images, keep_probability, phase_train=True, 
              bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages ends up in the trainable variables collection
        'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
    }
    
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=slim.initializers.xavier_initializer(), 
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        return inception_resnet_v1(images, is_training=phase_train,
              dropout_keep_prob=keep_probability, bottleneck_layer_size=bottleneck_layer_size, reuse=reuse) 
Author: GaoangW | Project: TNT | Lines: 22 | Source: inception_resnet_v1.py

Example 8: encoder

# Required module: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import fully_connected [as alias]
# leaky_relu and self.batch_norm_params are assumed to be defined elsewhere in the source file
def encoder(self, images, is_training):
        activation_fn = leaky_relu  # tf.nn.relu
        weight_decay = 0.0
        with tf.variable_scope('encoder'):
            with slim.arg_scope([slim.batch_norm],
                                is_training=is_training):
                with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                    weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                    weights_regularizer=slim.l2_regularizer(weight_decay),
                                    normalizer_fn=slim.batch_norm,
                                    normalizer_params=self.batch_norm_params):
                    net = slim.conv2d(images, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1')
                    net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2')
                    net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3')
                    net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4')
                    net = slim.conv2d(net, 512, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_5')
                    net = slim.flatten(net)
                    fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                    fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
        return fc1, fc2 
Author: GaoangW | Project: TNT | Lines: 22 | Source: dfc_vae_large.py

Example 9: encoder

# Required module: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import fully_connected [as alias]
# leaky_relu and self.batch_norm_params are assumed to be defined elsewhere in the source file
def encoder(self, images, is_training):
        activation_fn = leaky_relu  # tf.nn.relu
        weight_decay = 0.0
        with tf.variable_scope('encoder'):
            with slim.arg_scope([slim.batch_norm],
                                is_training=is_training):
                with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                    weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                    weights_regularizer=slim.l2_regularizer(weight_decay),
                                    normalizer_fn=slim.batch_norm,
                                    normalizer_params=self.batch_norm_params):
                    net = slim.conv2d(images, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1')
                    net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2')
                    net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3')
                    net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4')
                    net = slim.flatten(net)
                    fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                    fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
        return fc1, fc2 
Author: GaoangW | Project: TNT | Lines: 21 | Source: dfc_vae.py
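
In both VAE encoders above (Examples 8 and 9), the two heads Fc_1 and Fc_2 conventionally produce the latent mean and log-variance. Sampling the latent code would then use the reparameterization trick, roughly as follows (a sketch, not code from the TNT repo):

mean, log_var = fc1, fc2                 # the two heads returned by encoder()
eps = tf.random_normal(tf.shape(mean))
z = mean + tf.exp(0.5 * log_var) * eps   # differentiable sample from N(mean, exp(log_var))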

Example 10: _arg_scope

# Required module: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import fully_connected [as alias]
def _arg_scope(self, is_training, reuse=None):
        weight_decay = 0.0
        keep_probability = 1.0

        batch_norm_params = {
            'is_training': is_training,
            # Decay for the moving averages.
            'decay': 0.995,
            # epsilon to prevent 0s in variance.
            'epsilon': 0.001
        }

        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                            weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
                            weights_regularizer=slim.l2_regularizer(weight_decay),
                            normalizer_fn=slim.batch_norm,
                            normalizer_params=batch_norm_params):
            with tf.variable_scope(self._scope, self._scope, reuse=reuse):
                with slim.arg_scope([slim.batch_norm, slim.dropout],
                                    is_training=is_training) as sc:
                    return sc 
Author: Sanster | Project: tf_ctpn | Lines: 23 | Source: squeezenet.py

Example 11: squeeze_excitation_layer

# Required module: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import fully_connected [as alias]
# cfgs.BBOX_INITIALIZER is assumed to come from the project's configuration module
def squeeze_excitation_layer(input_x, out_dim, ratio, layer_name, is_training):
    with tf.name_scope(layer_name):
        # Global_Average_Pooling
        squeeze = tf.reduce_mean(input_x, [1, 2])

        excitation = slim.fully_connected(inputs=squeeze,
                                          num_outputs=out_dim // ratio,
                                          weights_initializer=cfgs.BBOX_INITIALIZER,
                                          activation_fn=tf.nn.relu,
                                          trainable=is_training,
                                          scope=layer_name+'_fully_connected1')

        excitation = slim.fully_connected(inputs=excitation,
                                          num_outputs=out_dim,
                                          weights_initializer=cfgs.BBOX_INITIALIZER,
                                          activation_fn=tf.nn.sigmoid,
                                          trainable=is_training,
                                          scope=layer_name + '_fully_connected2')

        excitation = tf.reshape(excitation, [-1, 1, 1, out_dim])

        # The caller applies the gate itself: scale = input_x * excitation

        return excitation
Author: DetectionTeamUCAS | Project: R2CNN-Plus-Plus_Tensorflow | Lines: 26 | Source: attention.py
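
As the commented-out scale line indicates, this variant returns only the excitation gate; the caller is expected to apply it to the feature map. A hypothetical usage (feature_map is an assumed NHWC tensor with 256 channels):

se_gate = squeeze_excitation_layer(feature_map, out_dim=256, ratio=16,
                                   layer_name='se_block1', is_training=True)
scaled = feature_map * se_gate   # the gate broadcasts over the spatial dimensions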

Example 12: mobilenetv2_scope

# Required module: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import fully_connected [as alias]
# training_scope() is assumed to come from the surrounding mobilenet_v2 module
def mobilenetv2_scope(is_training=True,
                      trainable=True,
                      weight_decay=0.00004,
                      stddev=0.09,
                      dropout_keep_prob=0.8,
                      bn_decay=0.997):
  """Defines Mobilenet training scope.
  In default. We do not use BN
  ReWrite the scope.
  """
  batch_norm_params = {
      'is_training': False,
      'trainable': False,
      'decay': bn_decay,
  }
  with slim.arg_scope(training_scope(is_training=is_training, weight_decay=weight_decay)):
      with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.separable_conv2d],
                          trainable=trainable):
          with slim.arg_scope([slim.batch_norm], **batch_norm_params) as sc:
              return sc 
Author: Thinklab-SJTU | Project: R3Det_Tensorflow | Lines: 22 | Source: mobilenet_v2.py

Example 13: create_model

# Required module: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import fully_connected [as alias]
def create_model(self,
                   model_input,
                   vocab_size,
                   num_mixtures=None,
                   l2_penalty=1e-8,
                   sub_scope="",
                   original_input=None, 
                   **unused_params):

    num_methods = model_input.get_shape().as_list()[-1]
    num_features = model_input.get_shape().as_list()[-2]

    original_input = tf.nn.l2_normalize(original_input, dim=1)
    gate_activations = slim.fully_connected(
        original_input,
        num_methods,
        activation_fn=tf.nn.softmax,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="gates"+sub_scope)

    output = tf.einsum("ijk,ik->ij", model_input, gate_activations)
    return {"predictions": output} 
Author: wangheda | Project: youtube-8m | Lines: 24 | Source: input_moe_model.py
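
The einsum computes, for each sample i and output j, a gate-weighted sum over the k methods. An equivalent formulation, for readers less familiar with einsum notation (a sketch using the same tensors):

# model_input:      [batch, num_features, num_methods]
# gate_activations: [batch, num_methods]
weighted = model_input * tf.expand_dims(gate_activations, 1)
output = tf.reduce_sum(weighted, axis=2)   # same result as the einsum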

Example 14: create_model

# Required module: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import fully_connected [as alias]
# FLAGS and self.sub_model are assumed to come from the surrounding project; xrange implies Python 2
def create_model(self, model_input, vocab_size, num_mixtures=None,
                   l2_penalty=1e-8, sub_scope="", original_input=None, **unused_params):
    num_supports = FLAGS.num_supports
    num_layers = FLAGS.hidden_chain_layers
    relu_cells = FLAGS.hidden_chain_relu_cells

    next_input = model_input
    support_predictions = []
    for layer in xrange(num_layers):
      sub_relu = slim.fully_connected(
          next_input,
          relu_cells,
          activation_fn=tf.nn.relu,
          weights_regularizer=slim.l2_regularizer(l2_penalty),
          scope=sub_scope+"relu-%d"%layer)
      sub_prediction = self.sub_model(sub_relu, vocab_size, sub_scope=sub_scope+"prediction-%d"%layer)
      relu_norm = tf.nn.l2_normalize(sub_relu, dim=1)
      next_input = tf.concat([next_input, relu_norm], axis=1)
      support_predictions.append(sub_prediction)
    main_predictions = self.sub_model(next_input, vocab_size, sub_scope=sub_scope+"-main")
    support_predictions = tf.concat(support_predictions, axis=1)
    return {"predictions": main_predictions, "support_predictions": support_predictions} 
Author: wangheda | Project: youtube-8m | Lines: 24 | Source: hidden_combine_chain_model.py

Example 15: create_model

# Required module: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import fully_connected [as alias]
# FLAGS and self.sub_model are assumed to come from the surrounding project; xrange implies Python 2
def create_model(self, model_input, vocab_size, num_mixtures=None,
                   l2_penalty=1e-8, sub_scope="", original_input=None, **unused_params):
    num_supports = FLAGS.num_supports
    num_layers = FLAGS.hidden_chain_layers
    relu_cells = FLAGS.hidden_chain_relu_cells

    next_input = model_input
    support_predictions = []
    for layer in xrange(num_layers):
      sub_relu = slim.fully_connected(
          next_input,
          relu_cells,
          activation_fn=tf.nn.relu,
          weights_regularizer=slim.l2_regularizer(l2_penalty),
          scope=sub_scope+"relu-%d"%layer)
      sub_prediction = self.sub_model(sub_relu, vocab_size, sub_scope=sub_scope+"prediction-%d"%layer)
      relu_norm = tf.nn.l2_normalize(sub_relu, dim=1)
      next_input = tf.concat([model_input, relu_norm], axis=1)  # unlike Example 14, concatenates with the original input, so the chain's width stays fixed
      support_predictions.append(sub_prediction)
    main_predictions = self.sub_model(next_input, vocab_size, sub_scope=sub_scope+"-main")
    support_predictions = tf.concat(support_predictions, axis=1)
    return {"predictions": main_predictions, "support_predictions": support_predictions} 
Author: wangheda | Project: youtube-8m | Lines: 24 | Source: hidden_chain_model.py


Note: The tensorflow.contrib.slim.fully_connected examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's License before using or redistributing the code. Do not reproduce this article without permission.