

Python slim.flatten Function Code Examples

This article collects typical usage examples of the Python function tensorflow.contrib.slim.flatten, gathered from open-source projects. If you are wondering what flatten does, how to call it, or what real-world usage looks like, the curated examples below should help.


The following presents 15 code examples of the flatten function, drawn from open-source projects and sorted by popularity by default.
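
Before the examples, here is a minimal sketch of what slim.flatten does (assuming TensorFlow 1.x, where tf.contrib.slim is available): it keeps the batch dimension and collapses every remaining dimension into one.

import tensorflow as tf
import tensorflow.contrib.slim as slim

# A 4-D feature map: [batch, height, width, channels].
images = tf.placeholder(tf.float32, [None, 8, 8, 16])

# slim.flatten keeps dim 0 and collapses the rest: 8 * 8 * 16 = 1024.
flat = slim.flatten(images)
print(flat.get_shape())  # (?, 1024)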

Example 1: iter_func

  def iter_func(self, state):
    sc = predictron_arg_scope()

    with tf.variable_scope('value'):
      value_net = slim.fully_connected(slim.flatten(state), 32, scope='fc0')
      value_net = layers.batch_norm(value_net, activation_fn=tf.nn.relu, scope='fc0/preact')
      value_net = slim.fully_connected(value_net, self.maze_size, activation_fn=None, scope='fc1')

    with slim.arg_scope(sc):
      net = slim.conv2d(state, 32, [3, 3], scope='conv1')
      net = layers.batch_norm(net, activation_fn=tf.nn.relu, scope='conv1/preact')
      net_flatten = slim.flatten(net, scope='conv1/flatten')

      with tf.variable_scope('reward'):
        reward_net = slim.fully_connected(net_flatten, 32, scope='fc0')
        reward_net = layers.batch_norm(reward_net, activation_fn=tf.nn.relu, scope='fc0/preact')
        reward_net = slim.fully_connected(reward_net, self.maze_size, activation_fn=None, scope='fc1')

      with tf.variable_scope('gamma'):
        gamma_net = slim.fully_connected(net_flatten, 32, scope='fc0')
        gamma_net = layers.batch_norm(gamma_net, activation_fn=tf.nn.relu, scope='fc0/preact')
        gamma_net = slim.fully_connected(gamma_net, self.maze_size, activation_fn=tf.nn.sigmoid, scope='fc1')

      with tf.variable_scope('lambda'):
        lambda_net = slim.fully_connected(net_flatten, 32, scope='fc0')
        lambda_net = layers.batch_norm(lambda_net, activation_fn=tf.nn.relu, scope='fc0/preact')
        lambda_net = slim.fully_connected(lambda_net, self.maze_size, activation_fn=tf.nn.sigmoid, scope='fc1')

      net = slim.conv2d(net, 32, [3, 3], scope='conv2')
      net = layers.batch_norm(net, activation_fn=tf.nn.relu, scope='conv2/preact')

      net = slim.conv2d(net, 32, [3, 3], scope='conv3')
      net = layers.batch_norm(net, activation_fn=tf.nn.relu, scope='conv3/preact')
    return net, reward_net, gamma_net, lambda_net, value_net
Author: b-kartal, Project: predictron, Lines of code: 34, Source file: predictron.py

Example 2: _build_network

    def _build_network(self, name):
        with tf.variable_scope(name):

            # Weight initializer
            he_init = tf.contrib.layers.variance_scaling_initializer(factor=2.0, mode='FAN_AVG', uniform=False)  # He-style variance-scaling init; 'FAN_AVG' mode worked better here than the default 'FAN_IN'

            # The size of the final layer before splitting it into Advantage and Value streams.
            h_size = 500

            # BNN: Bayesian neural network approximation (via dropout): randomly zeroing out active units during training acts as a form of regularization.
            # Drawing one sample from a network with dropout is analogous to drawing one sample from a BNN.
            # The dropout probability should be decayed over time to reduce noise in the estimates (see the annealing sketch after this example).
            # RESULT: learning performance improves noticeably.
            # Honestly speaking, I'm not sure whether simply adding dropout is the right approach.
            model = tf.layers.dense(inputs=self.input_X, units=250, activation=tf.nn.relu, kernel_initializer=he_init)
            model = tf.layers.dropout(model, rate=0.5)  # E.g. "rate=0.1" would drop out 10% of input units.
            model = tf.layers.dense(model, units=250, activation=tf.nn.relu, kernel_initializer=he_init)
            model = tf.layers.dropout(model, rate=0.5)  # E.g. "rate=0.1" would drop out 10% of input units.
            model = tf.layers.dense(model, units=250, activation=tf.nn.relu, kernel_initializer=he_init)
            model = tf.layers.dropout(model, rate=0.5)  # E.g. "rate=0.1" would drop out 10% of input units.
            model = tf.layers.dense(model, units=250, activation=tf.nn.relu, kernel_initializer=he_init)
            model = tf.layers.dropout(model, rate=0.5)  # E.g. "rate=0.1" would drop out 10% of input units.
            model = tf.layers.dense(model, units=250, activation=tf.nn.relu, kernel_initializer=he_init)
            model = tf.layers.dropout(model, rate=0.5)  # E.g. "rate=0.1" would drop out 10% of input units.
            model = tf.layers.dense(model, units=250, activation=tf.nn.relu, kernel_initializer=he_init)
            model = tf.layers.dropout(model, rate=0.5)  # E.g. "rate=0.1" would drop out 10% of input units.
            model = tf.layers.dense(model, units=250, activation=tf.nn.relu, kernel_initializer=he_init)
            model = tf.layers.dense(model, units=h_size, activation=tf.nn.relu, kernel_initializer=he_init)  # NOTE: the final hidden layer before the split must have h_size units
            # The layer above is the last DQN hidden layer, which is why no dropout follows it.

            # Dueling DQN from here: instead of outputting Q directly, split into advantage (A) and value (V) streams, then recombine them into Q.
            # Take the output from the final hidden layer and split it into separate advantage (A) and value (V) streams.
            streamAC, streamVC = tf.split( model, num_or_size_splits=2, axis=1 )

            # Flatten the advantage and value streams. With no conv layers the tensors are already 2-D, so this is effectively a no-op; it is kept for symmetry with conv-based variants.
            streamA = slim.flatten( streamAC )
            streamV = slim.flatten( streamVC )

            # He-style initializer for the stream weights; good initialization improves training performance - ref. http://hwangpy.tistory.com/153
            he_init = tf.contrib.layers.variance_scaling_initializer(factor=2.0, mode='FAN_AVG', uniform=False)

            # Action_Weight & Value_Weight
            AW = tf.Variable( he_init([h_size // 2, self.n_action]) )  # he_init([row_size, column_size])
            VW = tf.Variable( he_init([h_size // 2, 1]) )

            # Flattened_ones * Weights
            Advantage = tf.matmul(streamA, AW)
            Value = tf.matmul(streamV, VW)

            # Then combine them together to get our final Q-values.
            self.Qout = Value + tf.subtract(Advantage, tf.reduce_mean(Advantage, axis=1, keep_dims=True))
            Q = self.Qout

            ### Double DQN from this line.

            # Take an action according to the greedy policy: decide the next action using the prediction network (= main network)
            predict = tf.argmax( self.Qout, axis=1 )  # -> be careful when applying a Boltzmann exploration approach

        return Q, predict
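
The comments above mention decaying the dropout probability over time, but the snippet hard-codes rate=0.5. Below is a minimal sketch of one way to anneal it (my own illustration, assuming TensorFlow 1.x; the placeholder name and decay schedule are made up):

import tensorflow as tf

# Feed the dropout rate through a placeholder so it can be decayed per step.
drop_rate = tf.placeholder(tf.float32, shape=[], name='drop_rate')
x = tf.placeholder(tf.float32, [None, 250])
h = tf.layers.dense(x, 250, activation=tf.nn.relu)
h = tf.layers.dropout(h, rate=drop_rate, training=True)

# At each training step, feed a decayed rate, e.g.:
#   rate = max(0.1, 0.5 * (0.999 ** step))
#   sess.run(train_op, feed_dict={drop_rate: rate, ...})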
Author: danelee2601, Project: Flappy-Bird-with-DDDQN, Lines of code: 59, Source file: DDDQN.py

Example 3: build_arch_baseline

def build_arch_baseline(input, is_train: bool, num_classes: int):

    bias_initializer = tf.truncated_normal_initializer(
        mean=0.0, stddev=0.01)  # tf.constant_initializer(0.0)
    # The paper did not mention any regularization; a common L2 weight regularizer is added here
    weights_regularizer = tf.contrib.layers.l2_regularizer(5e-04)

    tf.logging.info('input shape: {}'.format(input.get_shape()))

    # weights_initializer=initializer,
    with slim.arg_scope([slim.conv2d, slim.fully_connected], trainable=is_train, biases_initializer=bias_initializer, weights_regularizer=weights_regularizer):
        with tf.variable_scope('relu_conv1') as scope:
            output = slim.conv2d(input, num_outputs=32, kernel_size=[
                                 5, 5], stride=1, padding='SAME', scope=scope, activation_fn=tf.nn.relu)
            output = slim.max_pool2d(output, [2, 2], scope='max_2d_layer1')

            tf.logging.info('output shape: {}'.format(output.get_shape()))

        with tf.variable_scope('relu_conv2') as scope:
            output = slim.conv2d(output, num_outputs=64, kernel_size=[
                                 5, 5], stride=1, padding='SAME', scope=scope, activation_fn=tf.nn.relu)
            output = slim.max_pool2d(output, [2, 2], scope='max_2d_layer2')

            tf.logging.info('output shape: {}'.format(output.get_shape()))

        output = slim.flatten(output)
        output = slim.fully_connected(output, 1024, scope='relu_fc3', activation_fn=tf.nn.relu)
        tf.logging.info('output shape: {}'.format(output.get_shape()))
        output = slim.dropout(output, 0.5, scope='dp')
        output = slim.fully_connected(output, num_classes, scope='final_layer', activation_fn=None)
        tf.logging.info('output shape: {}'.format(output.get_shape()))
        return output
Author: lzqkean, Project: deep_learning, Lines of code: 32, Source file: capsnet_em.py

Example 4: _build_graph

    def _build_graph(self):

        normalized_input = tf.div(self._input, 255.0)

        #d = tf.divide(1.0, tf.sqrt(8. * 8. * 4.))
        conv1 = slim.conv2d(normalized_input, 16, [8, 8], activation_fn=tf.nn.relu,
                            padding='VALID', stride=4, biases_initializer=None)
                            # weights_initializer=tf.random_uniform_initializer(minval=-d, maxval=d))

        #d = tf.divide(1.0, tf.sqrt(4. * 4. * 16.))
        conv2 = slim.conv2d(conv1, 32, [4, 4], activation_fn=tf.nn.relu,
                            padding='VALID', stride=2, biases_initializer=None)
                            #weights_initializer=tf.random_uniform_initializer(minval=-d, maxval=d))

        flattened = slim.flatten(conv2)

        #d = tf.divide(1.0, tf.sqrt(2592.))
        fc1 = slim.fully_connected(flattened, 256, activation_fn=tf.nn.relu, biases_initializer=None)
                                   #weights_initializer=tf.random_uniform_initializer(minval=-d, maxval=d))

        #d = tf.divide(1.0, tf.sqrt(256.))
        # estimate of the value function
        self.value_func_prediction = slim.fully_connected(fc1, 1, activation_fn=None, biases_initializer=None)
                                                          #weights_initializer=tf.random_uniform_initializer(minval=-d, maxval=d))

        # softmax output with one entry per action representing the probability of taking an action
        self.policy_predictions = slim.fully_connected(fc1, self.output_size, activation_fn=tf.nn.softmax,
                                                       biases_initializer=None)
Author: thalles753, Project: machine-learning, Lines of code: 28, Source file: A3C_Network.py

Example 5: resface36

def resface36(images, keep_probability, 
             phase_train=True, bottleneck_layer_size=512, 
             weight_decay=0.0, reuse=None):
    '''
    conv name
    conv[conv_layer]_[block_index]_[block_layer_index]
    '''
    with tf.variable_scope('Conv1'):
        net = resface_pre(images,64,scope='Conv1_pre')
        net = slim.repeat(net,2,resface_block,64,scope='Conv_1')
    with tf.variable_scope('Conv2'):
        net = resface_pre(net,128,scope='Conv2_pre')
        net = slim.repeat(net,4,resface_block,128,scope='Conv_2')
    with tf.variable_scope('Conv3'):
        net = resface_pre(net,256,scope='Conv3_pre')
        net = slim.repeat(net,8,resface_block,256,scope='Conv_3')
    with tf.variable_scope('Conv4'):
        net = resface_pre(net,512,scope='Conv4_pre')
        #net = resface_block(Conv4_pre,512,scope='Conv4_1')
        net = slim.repeat(net,1,resface_block,512,scope='Conv4')

    with tf.variable_scope('Logits'):
        #pylint: disable=no-member
        #net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID',
        #                      scope='AvgPool')
        net = slim.flatten(net)
        net = slim.dropout(net, keep_probability, is_training=phase_train,
                           scope='Dropout')
    net = slim.fully_connected(net, bottleneck_layer_size, activation_fn=None, 
            scope='Bottleneck', reuse=False)    
    return net,''
Author: Joker316701882, Project: Additive-Margin-Softmax, Lines of code: 31, Source file: resface.py

Example 6: LResnet50E_IR

def LResnet50E_IR(images, keep_probability, 
             phase_train=True, bottleneck_layer_size=512, 
             weight_decay=0.0, reuse=None):
    '''
    conv name
    conv[conv_layer]_[block_index]_[block_layer_index]
    
    for resnet50, n_units=[3,4,14,3]; since one unit per stage serves as the dimension-reduction layer,
    the repeated units are n_units=[2,3,13,2]
    '''
    with tf.variable_scope('Conv1'):
        net = slim.conv2d(images,64,scope='Conv1_pre')
        net = slim.batch_norm(net,scope='Conv1_bn')
    with tf.variable_scope('Conv2'):
        net = resface_block(net,64,stride=2,dim_match=False,scope='Conv2_pre')
        net = slim.repeat(net,2,resface_block,64,1,True,scope='Conv2_main')
    with tf.variable_scope('Conv3'):
        net = resface_block(net,128,stride=2,dim_match=False,scope='Conv3_pre')
        net = slim.repeat(net,3,resface_block,128,1,True,scope='Conv3_main')
    with tf.variable_scope('Conv4'):
        net = resface_block(net,256,stride=2,dim_match=False,scope='Conv4_pre')
        net = slim.repeat(net,13,resface_block,256,1,True,scope='Conv4_main')
    with tf.variable_scope('Conv5'):
        net = resface_block(net,512,stride=2,dim_match=False,scope='Conv5_pre')
        net = slim.repeat(net,2,resface_block,512,1,True,scope='Conv5_main')

    with tf.variable_scope('Logits'):
        net = slim.batch_norm(net,activation_fn=None,scope='bn1')
        net = slim.dropout(net, keep_probability, is_training=phase_train,scope='Dropout')        
        net = slim.flatten(net)
    
    net = slim.fully_connected(net, bottleneck_layer_size, biases_initializer=tf.contrib.layers.xavier_initializer(), scope='fc1')
    net = slim.batch_norm(net, activation_fn=None, scope='Bottleneck')

    return net,''
Author: Joker316701882, Project: Additive-Margin-Softmax, Lines of code: 35, Source file: insightface.py

Example 7: encoder

    def encoder(self, images, is_training):
        activation_fn = leaky_relu  # tf.nn.relu
        weight_decay = 0.0
        with tf.variable_scope('encoder'):
            with slim.arg_scope([slim.batch_norm],
                                is_training=is_training):
                with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                    weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                    weights_regularizer=slim.l2_regularizer(weight_decay),
                                    normalizer_fn=slim.batch_norm,
                                    normalizer_params=self.batch_norm_params):
                    net = images
                    
                    net = slim.conv2d(net, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_1b')
                    
                    net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_2b')

                    net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_3b')

                    net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 256, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_4b')
                    
                    net = slim.flatten(net)
                    fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                    fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
        return fc1, fc2
Author: NickyGeorge, Project: facenet, Lines of code: 29, Source file: dfc_vae_resnet.py

Example 8: flatten_fully_connected

def flatten_fully_connected(inputs,
                            num_outputs,
                            activation_fn=tf.nn.relu,
                            normalizer_fn=None,
                            normalizer_params=None,
                            weights_initializer=slim.xavier_initializer(),
                            weights_regularizer=None,
                            biases_initializer=tf.zeros_initializer(),
                            biases_regularizer=None,
                            reuse=None,
                            variables_collections=None,
                            outputs_collections=None,
                            trainable=True,
                            scope=None):
    with tf.variable_scope(scope, 'flatten_fully_connected', [inputs]):
        if inputs.shape.ndims > 2:
            inputs = slim.flatten(inputs)
        return slim.fully_connected(inputs,
                                    num_outputs,
                                    activation_fn,
                                    normalizer_fn,
                                    normalizer_params,
                                    weights_initializer,
                                    weights_regularizer,
                                    biases_initializer,
                                    biases_regularizer,
                                    reuse,
                                    variables_collections,
                                    outputs_collections,
                                    trainable,
                                    scope)
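
A hypothetical usage sketch of the helper above (my own illustration; assumes tf and slim are imported as in the snippet, under TensorFlow 1.x):

# A 4-D conv feature map is flattened internally before the dense layer.
feature_map = tf.placeholder(tf.float32, [None, 7, 7, 64])
logits = flatten_fully_connected(feature_map, 10, activation_fn=None, scope='head')
# logits has shape [None, 10]; the flatten step produced [None, 7*7*64].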
Author: KhanhDinhDuy, Project: gaan, Lines of code: 31, Source file: ops.py

Example 9: content_extractor

 def content_extractor(self, images, reuse=False):
     # images: (batch, 32, 32, 3) or (batch, 32, 32, 1)
     
     if images.get_shape()[3] == 1:
         # For mnist dataset, replicate the gray scale image 3 times.
         images = tf.image.grayscale_to_rgb(images)
     
     with tf.variable_scope('content_extractor', reuse=reuse):
         with slim.arg_scope([slim.conv2d], padding='SAME', activation_fn=None,
                              stride=2,  weights_initializer=tf.contrib.layers.xavier_initializer()):
             with slim.arg_scope([slim.batch_norm], decay=0.95, center=True, scale=True, 
                                 activation_fn=tf.nn.relu, is_training=(self.mode=='train' or self.mode=='pretrain')):
                 
                 net = slim.conv2d(images, 64, [3, 3], scope='conv1')   # (batch_size, 16, 16, 64)
                 net = slim.batch_norm(net, scope='bn1')
                 net = slim.conv2d(net, 128, [3, 3], scope='conv2')     # (batch_size, 8, 8, 128)
                 net = slim.batch_norm(net, scope='bn2')
                 net = slim.conv2d(net, 256, [3, 3], scope='conv3')     # (batch_size, 4, 4, 256)
                 net = slim.batch_norm(net, scope='bn3')
                 net = slim.conv2d(net, 128, [4, 4], padding='VALID', scope='conv4')   # (batch_size, 1, 1, 128)
                 net = slim.batch_norm(net, activation_fn=tf.nn.tanh, scope='bn4')
                 if self.mode == 'pretrain':
                     net = slim.conv2d(net, 10, [1, 1], padding='VALID', scope='out')
                     net = slim.flatten(net)
                 return net
Author: ALISCIFP, Project: domain-transfer-network, Lines of code: 25, Source file: model.py

Example 10: build_single_inceptionv3

def build_single_inceptionv3(train_tfdata, is_train, dropout_keep_prob, reduce_dim = False):
    train_tfdata_resize = tf.image.resize_images(train_tfdata, (299, 299))
    with slim.arg_scope(inception.inception_v3_arg_scope()):
        identity, end_points = inception.inception_v3(train_tfdata_resize, dropout_keep_prob = dropout_keep_prob, is_training=is_train)
        feature = slim.flatten(end_points['Mixed_7c'])
        if reduce_dim:
            feature = slim.fully_connected(feature, 256, scope='feat')
    return identity, feature
Author: seindlut, Project: deep_p2s, Lines of code: 8, Source file: build_subnet.py

Example 11: loss

    def loss(self, x, y):
        with tf.name_scope('loss'):
            z_mu, z_lv = self._encode(x)
            z = GaussianSampleLayer(z_mu, z_lv)
            xh = self._generate(z, y)

            D_KL = tf.reduce_mean(
                GaussianKLD(
                    slim.flatten(z_mu),
                    slim.flatten(z_lv),
                    slim.flatten(tf.zeros_like(z_mu)),
                    slim.flatten(tf.zeros_like(z_lv)),
                )
            )
            logPx = tf.reduce_mean(
                GaussianLogDensity(
                    slim.flatten(x),
                    slim.flatten(xh),
                    tf.zeros_like(slim.flatten(xh))),
            )

        loss = dict()
        loss['G'] = - logPx + D_KL
        loss['D_KL'] = D_KL
        loss['logP'] = logPx

        tf.summary.scalar('KL-div', D_KL)
        tf.summary.scalar('logPx', logPx)

        tf.summary.histogram('xh', xh)
        tf.summary.histogram('x', x)
        return loss
Author: QianQQ, Project: Voice-Conversion, Lines of code: 32, Source file: vae.py

Example 12: generative_network

def generative_network(z):
  """Generative network to parameterize generative model. It takes
  latent variables as input and outputs the likelihood parameters.

  logits = neural_network(z)
  """
  net = slim.fully_connected(z, 28 * 28, activation_fn=None)
  net = slim.flatten(net)
  return net
Author: blei-lab, Project: edward, Lines of code: 9, Source file: factor_analysis.py

Example 13: make_tower

 def make_tower(net):
     net = slim.conv2d(net, 20, [5, 5], padding='VALID', scope='conv1')
     net = slim.max_pool2d(net, [2, 2], padding='VALID', scope='pool1')
     net = slim.conv2d(net, 50, [5, 5], padding='VALID', scope='conv2')
     net = slim.max_pool2d(net, [2, 2], padding='VALID', scope='pool2')
     net = slim.flatten(net)
     net = slim.fully_connected(net, 500, scope='fc1')
     net = slim.fully_connected(net, 2, activation_fn=None, scope='fc2')
     return net
Author: Dasona, Project: DIGITS, Lines of code: 9, Source file: siamese-TF.py

Example 14: build_graph

def build_graph(top_k):
    keep_prob = tf.placeholder(dtype=tf.float32, shape=[], name='keep_prob')
    images = tf.placeholder(dtype=tf.float32, shape=[None, 64, 64, 1], name='image_batch')
    labels = tf.placeholder(dtype=tf.int64, shape=[None], name='label_batch')
    is_training = tf.placeholder(dtype=tf.bool, shape=[], name='train_flag')
    with tf.device('/gpu:0'):
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                            normalizer_fn=slim.batch_norm,
                            normalizer_params={'is_training': is_training}):
            conv3_1 = slim.conv2d(images, 64, [3, 3], 1, padding='SAME', scope='conv3_1')
            max_pool_1 = slim.max_pool2d(conv3_1, [2, 2], [2, 2], padding='SAME', scope='pool1')
            conv3_2 = slim.conv2d(max_pool_1, 128, [3, 3], padding='SAME', scope='conv3_2')
            max_pool_2 = slim.max_pool2d(conv3_2, [2, 2], [2, 2], padding='SAME', scope='pool2')
            conv3_3 = slim.conv2d(max_pool_2, 256, [3, 3], padding='SAME', scope='conv3_3')
            max_pool_3 = slim.max_pool2d(conv3_3, [2, 2], [2, 2], padding='SAME', scope='pool3')
            conv3_4 = slim.conv2d(max_pool_3, 512, [3, 3], padding='SAME', scope='conv3_4')
            conv3_5 = slim.conv2d(conv3_4, 512, [3, 3], padding='SAME', scope='conv3_5')
            max_pool_4 = slim.max_pool2d(conv3_5, [2, 2], [2, 2], padding='SAME', scope='pool4')

            flatten = slim.flatten(max_pool_4)
            fc1 = slim.fully_connected(slim.dropout(flatten, keep_prob), 1024,
                                       activation_fn=tf.nn.relu, scope='fc1')
            logits = slim.fully_connected(slim.dropout(fc1, keep_prob), FLAGS.charset_size, activation_fn=None,
                                          scope='fc2')
        loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))
        accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits, 1), labels), tf.float32))

        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        if update_ops:
            updates = tf.group(*update_ops)
            loss = control_flow_ops.with_dependencies([updates], loss)

        global_step = tf.get_variable("step", [], initializer=tf.constant_initializer(0.0), trainable=False)
        optimizer = tf.train.AdamOptimizer(learning_rate=0.1)
        train_op = slim.learning.create_train_op(loss, optimizer, global_step=global_step)
        probabilities = tf.nn.softmax(logits)

        tf.summary.scalar('loss', loss)
        tf.summary.scalar('accuracy', accuracy)
        merged_summary_op = tf.summary.merge_all()
        predicted_val_top_k, predicted_index_top_k = tf.nn.top_k(probabilities, k=top_k)
        accuracy_in_top_k = tf.reduce_mean(tf.cast(tf.nn.in_top_k(probabilities, labels, top_k), tf.float32))

    return {'images': images,
            'labels': labels,
            'keep_prob': keep_prob,
            'top_k': top_k,
            'global_step': global_step,
            'train_op': train_op,
            'loss': loss,
            'is_training': is_training,
            'accuracy': accuracy,
            'accuracy_top_k': accuracy_in_top_k,
            'merged_summary_op': merged_summary_op,
            'predicted_distribution': probabilities,
            'predicted_index_top_k': predicted_index_top_k,
            'predicted_val_top_k': predicted_val_top_k}
Author: oraSC, Project: Chinese-Character-Recognition, Lines of code: 57, Source file: chinese_character_recognition_bn.py

Example 15: _add_single_ssd_head

 def _add_single_ssd_head(self, blob, num_classes, num_anchors, prefix, suffix=''):
   with slim.arg_scope([slim.conv2d], activation_fn=None, normalizer_fn=None, padding='SAME', normalizer_params=None):
     if len(blob.shape) == 4:
       locs = slim.conv2d(blob, num_anchors * 4, (3, 3),
                          scope='{}_mbox_loc{}'.format(prefix, suffix), data_format=self.data_format)
       locs = channel_to_last(locs, data_format=self.data_format)
       locs = slim.flatten(locs)
       conf = slim.conv2d(blob, num_anchors * num_classes, (3, 3), biases_initializer=tf.constant_initializer(0.0),
                          scope='{}_mbox_conf{}'.format(prefix, suffix), data_format=self.data_format)
       conf = channel_to_last(conf, data_format=self.data_format)
       conf = slim.flatten(conf)
       self.flattens_for_tfmo.extend([locs, conf])
     elif len(blob.shape) == 2:
       locs = slim.fully_connected(blob, num_anchors * 4, activation_fn=None,
                                   scope='{}_mbox_loc{}'.format(prefix, suffix))
       conf = slim.fully_connected(blob, num_anchors * num_classes, activation_fn=None,
                                   scope='{}_mbox_conf{}'.format(prefix, suffix))
     else:
       raise Exception('Unsupported input blob shape for SSD.')
     return conf, locs
Author: undeadinu, Project: training_toolbox_tensorflow, Lines of code: 20, Source file: ssd_base.py


Note: The tensorflow.contrib.slim.flatten examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects by various contributors, and copyright remains with the original authors. Consult the corresponding project's license before using or redistributing the code; do not reproduce without permission.