

Python slim.softmax Method Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.slim.softmax. If you are wondering what slim.softmax does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from its parent module, tensorflow.contrib.slim.


The 15 code examples of slim.softmax shown below are sorted by popularity by default.
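
Before the examples, here is a minimal sketch of the basic call (assuming TensorFlow 1.x, where tf.contrib.slim is available): slim.softmax normalizes a tensor of rank 2 or higher over its last dimension.

import tensorflow as tf
import tensorflow.contrib.slim as slim

# A batch of 4 samples, each with 3 class logits.
logits = tf.constant([[1.0, 2.0, 3.0],
                      [1.0, 1.0, 1.0],
                      [0.0, 0.0, 5.0],
                      [2.0, 0.0, 0.0]])

# slim.softmax normalizes over the last dimension; each row sums to 1.
probs = slim.softmax(logits, scope='predictions')

with tf.Session() as sess:
    print(sess.run(probs))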

Example 1: _add_seglink_layers

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import softmax [as alias]
def _add_seglink_layers(self):
        all_seg_scores = []
        all_seg_offsets = []
        all_within_layer_link_scores = []
        all_cross_layer_link_scores  = []
        for layer_name in self.feat_layers:
            with tf.variable_scope(layer_name):
                seg_scores, seg_offsets, within_layer_link_scores, cross_layer_link_scores = self._build_seg_link_layer(layer_name)
            all_seg_scores.append(seg_scores)
            all_seg_offsets.append(seg_offsets)
            all_within_layer_link_scores.append(within_layer_link_scores)
            all_cross_layer_link_scores.append(cross_layer_link_scores)
            
        self.seg_score_logits = reshape_and_concat(all_seg_scores) # (batch_size, N, 2)
        self.seg_scores = slim.softmax(self.seg_score_logits) # (batch_size, N, 2)
        self.seg_offsets = reshape_and_concat(all_seg_offsets) # (batch_size, N, 5)
        self.cross_layer_link_scores = reshape_and_concat(all_cross_layer_link_scores)  # (batch_size, 8N, 2)
        self.within_layer_link_scores = reshape_and_concat(all_within_layer_link_scores)  # (batch_size, 4(N - N_conv4_3), 2)
        self.link_score_logits = tf.concat([self.within_layer_link_scores, self.cross_layer_link_scores], axis = 1)
        self.link_scores = slim.softmax(self.link_score_logits)
        
        tf.summary.histogram('link_scores', self.link_scores)
        tf.summary.histogram('seg_scores', self.seg_scores) 
Developer: dengdan, Project: seglink, Lines of code: 25, Source file: seglink_symbol.py

Example 2: colorized_image_from_softmax

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import softmax [as alias]
def colorized_image_from_softmax(self, targets, decoder_output):
        ''' Regenerate a colorized image from the softmax distribution over all colors

        Notes:
            This is a constant mapping from distribution to actual image

        Args:
            targets: target color distribution with the dimensions specified
                       in the cfg
            decoder_output: predicted softmax distribution over colors with the
                       dimensions specified in the cfg
        '''
        resize_shape = tf.stack([self.input_size[0],self.input_size[1]])
        softmax_to_ab = tf.nn.convolution(decoder_output, self.trans_kernel, 'SAME' )
        resized_output = tf.image.resize_images(softmax_to_ab, 
                resize_shape,
                method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)

        softmax_to_ab = tf.nn.convolution(targets, self.trans_kernel, 'SAME' )
        resized_target = tf.image.resize_images(softmax_to_ab, 
                resize_shape,
                method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    
        return resized_target, resized_output 
Developer: StanfordVL, Project: taskonomy, Lines of code: 24, Source file: encoder_decoder_cgan_softmax.py

Example 3: attention_weights

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import softmax [as alias]
def attention_weights(features, tau=10.0, num_hidden=512):
  """computing attention weights
  Args:
    features: [B,N,F]
  Returns:
    [B,N] tensor with soft attention weights for each sample
  """
  B, N, F = features.get_shape().as_list()

  with tf.variable_scope('attention'):
    x = tf.reshape(features, [-1, F])
    x = slim.fully_connected(x, num_hidden, scope='fc0')
    x = slim.fully_connected(x, 1, activation_fn=None, scope='fc1')
    x = tf.reshape(x, features.get_shape()[:2])
    alpha = tf.reshape(slim.softmax(x / tau), [B,N,])
  return alpha 
Developer: cvlab-epfl, Project: social-scene-understanding, Lines of code: 18, Source file: volleyball_train_stage_a.py
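
In this example tau is a softmax temperature: the logits are divided by tau before normalization, so a larger tau flattens the attention distribution toward uniform and a smaller tau sharpens it toward one-hot. A standalone illustration (the values here are illustrative, not from the original project):

import tensorflow as tf
import tensorflow.contrib.slim as slim

scores = tf.constant([[1.0, 2.0, 4.0]])

sharp = slim.softmax(scores / 0.5)    # low temperature: nearly one-hot
flat = slim.softmax(scores / 10.0)    # high temperature: nearly uniform

with tf.Session() as sess:
    print(sess.run(sharp))  # ~[[0.002, 0.018, 0.980]]
    print(sess.run(flat))   # ~[[0.289, 0.320, 0.391]]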

Example 4: test_vgg

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import softmax [as alias]
def test_vgg(self):
        with slim.arg_scope(vgg.vgg_arg_scope()):
            net, end_points = vgg.vgg_16(self.inputs, self.nbclasses, is_training=False)
            net = slim.softmax(net)
        saver = tf.train.Saver(tf.global_variables())
        check_point = 'test/data/vgg_16.ckpt'

        sess = tf.InteractiveSession()
        saver.restore(sess, check_point)

        self.sess = sess
        self.graph_origin = tf.get_default_graph()
        self.target_op_name = darkon.Gradcam.candidate_featuremap_op_names(sess, self.graph_origin)[-2]
        self.model_name = 'vgg'
        self.assertEqual('vgg_16/conv5/conv5_3/Relu', self.target_op_name) 
Developer: darkonhub, Project: darkon, Lines of code: 17, Source file: test_gradcam.py

Example 5: instantiate_softmax

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import softmax [as alias]
def instantiate_softmax(self, node, tensor, params):
        return slim.softmax(tensor, **params) 
Developer: deep-fry, Project: mayo, Lines of code: 4, Source file: layers.py

Example 6: encoder_multilayers_fc

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import softmax [as alias]
def encoder_multilayers_fc(input_placeholder, is_training, 
                        layer_num, hidden_size, output_size,
                        weight_decay=0.0001, scope="three_layer_fc_network", dropout=0.5, reuse=None):
    ''' An encoder with layer_num FC layers. Every FC layer but the last
        outputs hidden_size; the final FC layer has no activation, while
        the other layers use ReLU. '''

    print('\t building multilayers FC encoder', scope)

    with tf.variable_scope(scope, reuse=reuse) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                activation_fn=tf.nn.relu,
                weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                weights_regularizer=slim.l2_regularizer(weight_decay)):

            print('\t\tinput with size:', input_placeholder.get_shape())
            net = input_placeholder

            # FC layer 1~(i-1)
            for i in range(layer_num - 1):
                net = add_fc_with_dropout_layer(net, is_training, hidden_size, activation_fn=tf.nn.relu, dropout=dropout, scope='fc'+str(i))

            # Last FC layer
            net = add_fc_layer(net, is_training, output_size, activation_fn=None, scope='fc'+str(layer_num)) 
        
            # Softmax Activation
            #net = slim.softmax(net, scope='predictions') 

            end_points = convert_collection_to_dict(end_points_collection)
            return net, end_points 
Developer: StanfordVL, Project: taskonomy, Lines of code: 34, Source file: sample_models.py

Example 7: det_net

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import softmax [as alias]
def det_net(features, num_resnet_blocks, num_resnet_features,
            num_keep, in_size,
            nms_kind='greedy',
            scope=None):

  with tf.variable_scope(scope, 'DetNet'):
    out_size = features.get_shape()[1:3]

    x = nnutil.stack(features,
                     num_resnet_blocks,
                     num_resnet_features,
                     downsample=False)

    with tf.variable_scope('seg'):
      seg_logits = slim.conv2d(x, 2, [1, 1],
                               activation_fn=None,
                               weights_initializer=tf.random_normal_initializer(stddev=1e-1),
                               scope='logits')
      seg_preds = slim.softmax(seg_logits)

    with tf.variable_scope('reg'):
      # TODO: use reg masks instead
      reg_preds = slim.conv2d(x, 4, [1, 1],
                              weights_initializer=tf.random_normal_initializer(stddev=1e-3),
                              activation_fn=tf.nn.relu,
                              scope='reg_preds')

    with tf.variable_scope('boxes'):
      boxes_proposals = reg_to_boxes(reg_preds, in_size, out_size)
      boxes_preds = compute_detections_batch(seg_preds, boxes_proposals,
                                             num_keep, nms_kind=nms_kind)

  return seg_preds, reg_preds, boxes_proposals, boxes_preds 
Developer: cvlab-epfl, Project: social-scene-understanding, Lines of code: 35, Source file: detnet.py

Example 8: build_output

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import softmax [as alias]
def build_output(
        self,
        inputs: tf.Tensor,
        is_training: tf.placeholder,
        output_name: str
    ) -> [tf.Tensor, tf.Tensor, tf.Tensor, Dict]:
        logits, endpoints = self.build_inference(inputs, is_training=is_training)
        output = slim.softmax(logits, scope=output_name + "/softmax")
        output = tf.identity(output, name=output_name)
        return inputs, logits, output, endpoints 
Developer: hyperconnect, Project: TC-ResNet, Lines of code: 12, Source file: audio_nets.py
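
The tf.identity call above pins a stable, human-readable name on the probability tensor, so it can later be fetched by name when the graph is frozen or served. A minimal standalone sketch of that pattern (the tensor names below are illustrative assumptions, not taken from TC-ResNet):

import tensorflow as tf
import tensorflow.contrib.slim as slim

logits = tf.constant([[2.0, 0.5]])
probs = slim.softmax(logits, scope="output/softmax")
probs = tf.identity(probs, name="output")

# Later (e.g., in a separate inference script after freezing the graph),
# the tensor can be recovered by its name alone.
graph = tf.get_default_graph()
fetched = graph.get_tensor_by_name("output:0")

with tf.Session() as sess:
    print(sess.run(fetched))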

Example 9: resnet_v2

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import softmax [as alias]
def resnet_v2(inputs,
              blocks,
              num_classes=None,
              global_pool=True,
              model_type='vanilla',
              scope=None,
              reuse=None,
              end_points=None):
  with tf.variable_scope(scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
    if end_points is None:
      end_points = {}
    end_points['inputs'] = inputs
    end_points['flops'] = end_points.get('flops', 0)
    net = inputs
    # We do not include batch normalization or activation functions in conv1
    # because the first ResNet unit will perform these. Cf. Appendix of [2].
    with slim.arg_scope([slim.conv2d], activation_fn=None, normalizer_fn=None):
      net, current_flops = flopsometer.conv2d_same(
          net, 64, 7, stride=2, scope='conv1')
      end_points['flops'] += current_flops
    net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
    # Early stopping is broken in distributed training.
    net, end_points = resnet_act.stack_blocks(
        net,
        blocks,
        model_type=model_type,
        end_points=end_points)

    if global_pool or num_classes is not None:
      # This is needed because the pre-activation variant does not have batch
      # normalization or activation functions in the residual unit output. See
      # Appendix of [2].
      net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope='postnorm')

    if global_pool:
      # Global average pooling.
      net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)

    if num_classes is not None:
      net, current_flops = flopsometer.conv2d(
          net,
          num_classes, [1, 1],
          activation_fn=None,
          normalizer_fn=None,
          scope='logits')
      end_points['flops'] += current_flops
      end_points['predictions'] = slim.softmax(net, scope='predictions')
    return net, end_points 
Developer: mfigurnov, Project: sact, Lines of code: 50, Source file: imagenet_model.py
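
As this example shows, slim.softmax is not limited to 2-D logits: the 'predictions' end point is computed from the 4-D output of a 1x1 convolution, and the softmax is taken over the last (class) dimension at every spatial position. A small standalone sketch:

import tensorflow as tf
import tensorflow.contrib.slim as slim

# Spatial logits: batch of 2, a 4x4 feature map, 3 classes per position.
logits = tf.random_normal([2, 4, 4, 3])

# slim.softmax reshapes internally and normalizes the last dimension.
preds = slim.softmax(logits, scope='predictions')

with tf.Session() as sess:
    out = sess.run(preds)
    print(out.shape)          # (2, 4, 4, 3)
    print(out.sum(axis=-1))   # ~1.0 at every spatial position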

Example 10: build_model

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import softmax [as alias]
def build_model(self, input_imgs, is_training, targets, masks=None, privileged_input=None):
        '''Builds the model. Assumes that the input is from range [-1, 1].
        Notes:
            Stochasticity is not supplied in this function. If desired, it must
            be defined in the encoder/decoder model method.
        Args:
            input_imgs: list of input images (scaled between -1 and 1) with the
                       dimensions specified in the cfg
            is_training: flag for whether the model is in training mode or not
            masks: mask used for computing sum of squares loss. If None, we assume
                  it is np.ones.
        '''
        print('building model')
        self.input_images = input_imgs
        self.privileged_input = privileged_input
        if self.privileged_input is None:
            self.privileged_input = input_imgs
        self.target_images = targets
        self.targets = targets
        self.masks = masks
        
        # build generator
        if masks is None:
            masks = tf.constant( 1, dtype=tf.float32, shape=[], name='constant_mask' )
        if self.decoder_only:
            self.encoder_output = input_imgs # Assume that the input is the representation
        else:
            self.encoder_output = self.build_encoder(input_imgs, is_training)
        self.decoder_output = self.build_decoder( self.encoder_output, is_training )
        temp = slim.softmax(self.decoder_output * 2.606)
        self.generated_target, self.generated_output = self.colorized_image_from_softmax(self.target_images, temp)
        # build discriminator
        self.augmented_images = []
        self.discriminator_endpoints = []
        self.discriminator_output_real = self.build_discriminator( # run once on real targets
                    self.privileged_input, self.generated_target, is_training ) 
        self.discriminator_output_fake = self.build_discriminator( # run once on the output
                    self.privileged_input, self.generated_output, is_training, reuse=True )

        resized_output = tf.reshape(self.decoder_output, [-1, 313])
        resized_target = tf.reshape(targets, [-1, 313])
        masks = tf.reshape(masks, [-1])
        # set up losses
        _ = self.get_losses( resized_output, resized_target, masks, 
             discriminator_predictions_real=self.discriminator_output_real,
             discriminator_predictions_fake=self.discriminator_output_fake )
        
        # record accuracies
        self._build_metrics( scope='metrics')

        # add summaries
        self._build_summaries()

        # discriminator accuracies
        self.model_built = True 
Developer: StanfordVL, Project: taskonomy, Lines of code: 61, Source file: encoder_decoder_cgan_softmax.py

Example 11: encoder_multilayers_fc_bn

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import softmax [as alias]
def encoder_multilayers_fc_bn(input_placeholder, is_training, 
                        layer_num, hidden_size, output_size,
                        weight_decay=0.0001, scope="three_layer_fc_network", dropout=0.5, reuse=None, batch_norm_decay=0.9,
                        batch_norm_epsilon=1e-5, batch_norm_scale=True,batch_norm_center=True, initial_dropout=False):
    ''' An encoder with layer_num FC layers. Every FC layer but the last
        outputs hidden_size; the final FC layer has no activation, while
        the other layers use ReLU. '''

    print('\t building multilayers FC encoder', scope)
    batch_norm_params = {
        'is_training': is_training,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'center': batch_norm_center,
        'updates_collections': tf.GraphKeys.UPDATE_OPS
    }
    with tf.variable_scope(scope, reuse=reuse) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                activation_fn=tf.nn.relu,
                weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                weights_regularizer=slim.l2_regularizer(weight_decay),
                normalizer_fn=slim.batch_norm,
                normalizer_params=batch_norm_params):

            print('\t\tinput with size:', input_placeholder.get_shape())
            net = input_placeholder
            if initial_dropout:
                net = tf.layers.dropout(
                        net,
                        rate=1.-dropout,
                        training=is_training)
            # FC layer 1~(i-1)
            for i in range(layer_num - 1):
                net = add_fc_with_dropout_layer(net, is_training, hidden_size, activation_fn=tf.nn.relu, dropout=dropout, scope='fc'+str(i))

        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                 activation_fn=None,
                 normalizer_fn=None,
                 weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                 weights_regularizer=slim.l2_regularizer(weight_decay) ):
            # Last FC layer
            net = add_fc_layer(net, is_training, output_size, activation_fn=None, scope='fc'+str(layer_num)) 
        
            # Softmax Activation
            #net = slim.softmax(net, scope='predictions') 

            end_points = convert_collection_to_dict(end_points_collection)
            return net, end_points 
Developer: StanfordVL, Project: taskonomy, Lines of code: 53, Source file: sample_models.py

Example 12: encoder_multilayers_fc_bn_res_no_dropout

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import softmax [as alias]
def encoder_multilayers_fc_bn_res_no_dropout(input_placeholder, is_training, 
                        layer_num, hidden_size, output_size,
                        weight_decay=0.0001, scope="three_layer_fc_network", dropout=0.8,
                         batch_norm_decay=0.9, batch_norm_epsilon=1e-5, 
                         reuse=None):
    ''' An encoder with layer_num FC layers. Every FC layer but the last
        outputs hidden_size; the final FC layer has no activation, while
        the other layers use ReLU. '''
    batch_norm_params = {'center': True,
                         'scale': True,
                        'decay': batch_norm_decay,
                        'epsilon': batch_norm_epsilon,
                         'is_training': is_training}

    print('\t building multilayers FC encoder', scope)
    with tf.variable_scope(scope, reuse=reuse) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                activation_fn=tf.nn.relu,
                normalizer_fn=slim.batch_norm,
                normalizer_params=batch_norm_params,
                weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                weights_regularizer=slim.l2_regularizer(weight_decay) ):

            print('\t\tinput with size:', input_placeholder.get_shape())
            net = input_placeholder

            # FC layer 1~(i-1)
            for i in range(layer_num - 1):
                    net = add_fc_layer(net, is_training, hidden_size, activation_fn=tf.nn.relu, scope='fc'+str(i))

        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                activation_fn=None,
                normalizer_fn=None,
                weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                weights_regularizer=slim.l2_regularizer(weight_decay) ):
            # Last FC layer
            net = add_fc_layer(net, is_training, output_size, activation_fn=None, scope='fc'+str(layer_num-1)) 
            
            # Make residual connection
            net = net + input_placeholder

            # Softmax Activation
            #net = slim.softmax(net, scope='predictions') 

            end_points = convert_collection_to_dict(end_points_collection)
            return net, end_points 
Developer: StanfordVL, Project: taskonomy, Lines of code: 49, Source file: sample_models.py

Example 13: encoder_multilayers_fc_bn_res_no_dropout_normalize_input

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import softmax [as alias]
def encoder_multilayers_fc_bn_res_no_dropout_normalize_input(input_placeholder, is_training, 
                        layer_num, hidden_size, output_size,
                        weight_decay=0.0001, scope="three_layer_fc_network", dropout=0.8,
                         batch_norm_decay=0.9, batch_norm_epsilon=1e-5, 
                         reuse=None):
    ''' An encoder with layer_num FC layers. Every FC layer but the last
        outputs hidden_size; the final FC layer has no activation, while
        the other layers use ReLU. '''
    batch_norm_params = {'center': True,
                         'scale': True,
                        'decay': batch_norm_decay,
                        'epsilon': batch_norm_epsilon,
                         'is_training': is_training}

    print('\t building multilayers FC encoder', scope)
    with tf.variable_scope(scope, reuse=reuse) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                activation_fn=tf.nn.relu,
                normalizer_fn=slim.batch_norm,
                normalizer_params=batch_norm_params,
                weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                weights_regularizer=slim.l2_regularizer(weight_decay) ):

            inputs  = tf.layers.batch_normalization(
                input_placeholder,
                axis=-1,
                momentum=batch_norm_decay,
                epsilon=batch_norm_epsilon,
                training=is_training)

            print('\t\tinput with size:', input_placeholder.get_shape())
            net = inputs

            # FC layer 1~(i-1)
            for i in range(layer_num - 1):
                    net = add_fc_layer(net, is_training, hidden_size, activation_fn=tf.nn.relu, scope='fc'+str(i))

        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                activation_fn=None,
                normalizer_fn=None,
                weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                weights_regularizer=slim.l2_regularizer(weight_decay) ):
            # Last FC layer
            net = add_fc_layer(net, is_training, output_size, activation_fn=None, scope='fc'+str(layer_num-1)) 
            
            # Make residual connection
            net = net + inputs

            # Softmax Activation
            #net = slim.softmax(net, scope='predictions') 

            end_points = convert_collection_to_dict(end_points_collection)
            return net, end_points 
Developer: StanfordVL, Project: taskonomy, Lines of code: 56, Source file: sample_models.py

Example 14: encoder_multilayers_fc_bn_res

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import softmax [as alias]
def encoder_multilayers_fc_bn_res(input_placeholder, is_training, 
                        layer_num, hidden_size, output_size,
                        batch_norm_decay=0.95, batch_norm_epsilon=1e-5, 
                        weight_decay=0.0001, scope="three_layer_fc_network", dropout=0.8, reuse=None):
    ''' An encoder with layer_num FC layers. Every FC layer but the last
        outputs hidden_size; the final FC layer has no activation, while
        the other layers use ReLU. '''
    batch_norm_params = {'center': True,
                         'scale': True,
                         'decay': batch_norm_decay,
                         'epsilon': batch_norm_epsilon,
                         'is_training': is_training}

    print('\t building multilayers FC encoder', scope)
    with tf.variable_scope(scope, reuse=reuse) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                activation_fn=tf.nn.relu,
                normalizer_fn=slim.batch_norm,
                normalizer_params=batch_norm_params,
                weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                weights_regularizer=slim.l2_regularizer(weight_decay) ):

            print('\t\tinput with size:', input_placeholder.get_shape())
            net = input_placeholder

            # FC layer 1~(i-1)
            for i in range(layer_num - 1):
                if dropout < 1.0:
                    is_training_dropout = False
                    net = add_fc_with_dropout_layer(net, is_training_dropout, hidden_size,
                        activation_fn=tf.nn.relu, dropout=dropout, scope='fc'+str(i))
                else:
                    # add_fc_layer takes no dropout argument (cf. its other call sites).
                    net = add_fc_layer(net, is_training, hidden_size, activation_fn=tf.nn.relu, scope='fc'+str(i))

        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                activation_fn=None,
                normalizer_fn=None,
                weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                weights_regularizer=slim.l2_regularizer(weight_decay) ):
            # Last FC layer
            net = add_fc_layer(net, is_training, output_size, activation_fn=None, scope='fc'+str(layer_num)) 
            
            # Make residual connection
            net = net + input_placeholder

            # Softmax Activation
            #net = slim.softmax(net, scope='predictions') 

            end_points = convert_collection_to_dict(end_points_collection)
            return net, end_points 
Developer: StanfordVL, Project: taskonomy, Lines of code: 53, Source file: sample_models.py

Example 15: _construct_sequence

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import softmax [as alias]
def _construct_sequence(batch):
        hidden, boxes = batch
        # initializing the state with features
        states = [hidden[0]]
        # TODO: make this dependent on the data
        # TODO: make it with scan ?
        for t in range(1, T):
          # find the matching boxes. TODO: try with the soft matching function
          if c.match_kind == 'boxes':
            dists = nnutil.cdist(boxes[t], boxes[t-1])
            idxs = tf.argmin(dists, 1, 'idxs')
            state_prev = tf.gather(states[t-1], idxs)
          elif c.match_kind == 'hidden':
            # TODO: actually it makes more sense to compare on states
            dists = nnutil.cdist(hidden[t], hidden[t-1])
            idxs = tf.argmin(dists, 1, 'idxs')
            state_prev = tf.gather(states[t-1], idxs)
          elif c.match_kind == 'hidden-soft':
            dists = nnutil.cdist(hidden[t], hidden[t-1])
            weights = slim.softmax(-dists)
            state_prev = tf.matmul(weights, states[t-1])
          else:
            raise RuntimeError('Unknown match_kind: %s' % c.match_kind)

          def _construct_update(reuse):
            state = tf.concat([state_prev, hidden[t]], 1)
            # TODO: initialize jointly
            reset = slim.fully_connected(state, NFH, tf.nn.sigmoid,
                                         reuse=reuse,
                                         scope='reset')
            step = slim.fully_connected(state, NFH, tf.nn.sigmoid,
                                        reuse=reuse,
                                        scope='step')
            state_r = tf.concat([reset * state_prev, hidden[t]], 1)
            state_up = slim.fully_connected(state_r, NFH, tf.nn.tanh,
                                            reuse=reuse,
                                            scope='state_up')
            return state_up, step
          try:
            state_up, step = _construct_update(reuse=True)
          except ValueError:
            state_up, step = _construct_update(reuse=False)

          state = step * state_up + (1.0 - step) * state_prev
          states.append(state)
        return tf.stack(states)
Developer: cvlab-epfl, Project: social-scene-understanding, Lines of code: 48, Source file: volleyball_train_stage_b.py
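
The 'hidden-soft' branch above is the notable use of slim.softmax here: negating the pairwise distances and applying softmax turns "closer" into "heavier", so each current detection's previous state becomes a convex combination of all previous states. A standalone sketch of that soft-matching step (tensor values are illustrative, not from the original repository):

import tensorflow as tf
import tensorflow.contrib.slim as slim

# Pairwise distances between 2 current and 3 previous detections.
dists = tf.constant([[0.1, 2.0, 3.0],
                     [1.5, 0.2, 4.0]])

# Negate so that a smaller distance yields a larger weight; rows sum to 1.
weights = slim.softmax(-dists)

states_prev = tf.constant([[1.0, 0.0],
                           [0.0, 1.0],
                           [0.5, 0.5]])

# Each row of state_prev is a distance-weighted blend of previous states.
state_prev = tf.matmul(weights, states_prev)

with tf.Session() as sess:
    print(sess.run(state_prev))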


Note: The tensorflow.contrib.slim.softmax examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. For redistribution and use, please refer to the corresponding project's license; do not repost without permission.