

Python slim.flatten Method Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.slim.flatten. If you are wondering what slim.flatten does in practice or how to use it, the curated code examples below may help. You can also explore further usage examples of tensorflow.contrib.slim, the module this method belongs to.


The following presents 15 code examples of slim.flatten, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
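
Before the individual examples, here is a minimal sketch of the typical pattern (TF 1.x; the shapes and scope names are illustrative assumptions, not taken from the examples below): slim.flatten keeps the batch dimension and collapses all remaining dimensions, which is why it usually appears between convolutional layers and slim.fully_connected.

import tensorflow as tf
from tensorflow.contrib import slim

images = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
net = slim.conv2d(images, 64, [3, 3], stride=2, scope='conv1')   # [batch, 16, 16, 64]
flat = slim.flatten(net)                                         # [batch, 16 * 16 * 64]
logits = slim.fully_connected(flat, 10, activation_fn=None, scope='logits')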

Example 1: combine_setup

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def combine_setup(name, combine_type, embed_img, embed_goal, num_img_neuorons=None,
                  num_goal_neurons=None):
  with tf.name_scope(name + '_' + combine_type):
    if combine_type == 'add':
      # Simply add the goal and image features element-wise
      out = embed_img + embed_goal

    elif combine_type == 'multiply':
      # Multiply things together
      re_embed_img = tf.reshape(
          embed_img, shape=[-1, num_img_neuorons // num_goal_neurons,
                            num_goal_neurons])  # integer division so the dimension is an int
      re_embed_goal = tf.reshape(embed_goal, shape=[-1, num_goal_neurons, 1])
      x = tf.matmul(re_embed_img, re_embed_goal, transpose_a=False, transpose_b=False)
      out = slim.flatten(x)
    elif combine_type == 'none' or combine_type == 'imgonly':
      out = embed_img
    elif combine_type == 'goalonly':
      out = embed_goal
    else:
      logging.fatal('Undefined combine_type: %s', combine_type)
  return out 
Author: ringringyi, Project: DOTA_models, Lines: 24, Source file: vision_baseline_lstm.py
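
A hypothetical call sketch (the unit counts 2048 and 8 are assumptions for illustration): with the 'multiply' combine type, embed_img is reshaped to [-1, 256, 8], multiplied against embed_goal reshaped to [-1, 8, 1], and slim.flatten turns the [-1, 256, 1] result into a [-1, 256] tensor.

embed_img = tf.placeholder(tf.float32, [None, 2048])
embed_goal = tf.placeholder(tf.float32, [None, 8])
out = combine_setup('fuse', 'multiply', embed_img, embed_goal,
                    num_img_neuorons=2048, num_goal_neurons=8)  # -> [None, 256]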

Example 2: encoder

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def encoder(self, images, is_training):
        activation_fn = leaky_relu  # tf.nn.relu
        weight_decay = 0.0
        with tf.variable_scope('encoder'):
            with slim.arg_scope([slim.batch_norm],
                                is_training=is_training):
                with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                    weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                    weights_regularizer=slim.l2_regularizer(weight_decay),
                                    normalizer_fn=slim.batch_norm,
                                    normalizer_params=self.batch_norm_params):
                    net = slim.conv2d(images, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1')
                    net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2')
                    net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3')
                    net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4')
                    net = slim.conv2d(net, 512, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_5')
                    net = slim.flatten(net)
                    fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                    fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
        return fc1, fc2 
Author: GaoangW, Project: TNT, Lines: 22, Source file: dfc_vae_large.py

Example 3: encoder

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def encoder(self, images, is_training):
        activation_fn = leaky_relu  # tf.nn.relu
        weight_decay = 0.0
        with tf.variable_scope('encoder'):
            with slim.arg_scope([slim.batch_norm],
                                is_training=is_training):
                with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                    weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                    weights_regularizer=slim.l2_regularizer(weight_decay),
                                    normalizer_fn=slim.batch_norm,
                                    normalizer_params=self.batch_norm_params):
                    net = slim.conv2d(images, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1')
                    net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2')
                    net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3')
                    net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4')
                    net = slim.flatten(net)
                    fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                    fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
        return fc1, fc2 
Author: GaoangW, Project: TNT, Lines: 21, Source file: dfc_vae.py

Example 4: gradient_penalty

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def gradient_penalty(f, real, fake=None):
    def _interpolate(a, b=None):
        with tf.name_scope('interpolate'):
            if b is None:   # interpolation in DRAGAN
                beta = tf.random_uniform(shape=tf.shape(a), minval=0., maxval=1.)
                _, variance = tf.nn.moments(a, list(range(a.shape.ndims)))
                b = a + 0.5 * tf.sqrt(variance) * beta
            shape = [tf.shape(a)[0]] + [1] * (a.shape.ndims - 1)
            alpha = tf.random_uniform(shape=shape, minval=0., maxval=1.)
            inter = a + alpha * (b - a)
            inter.set_shape(a.get_shape().as_list())
            return inter

    with tf.name_scope('gradient_penalty'):
        x = _interpolate(real, fake)
        pred = f(x)
        if isinstance(pred, tuple):
            pred = pred[0]
        grad = tf.gradients(pred, x)[0]
        norm = tf.norm(slim.flatten(grad), axis=1)
        gp = tf.reduce_mean((norm - 1.)**2)
        return gp 
Author: csmliu, Project: STGAN, Lines: 24, Source file: models.py
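
A hedged usage sketch (the critic below is a stand-in discriminator, not the STGAN model; the penalty weight 10.0 follows the common WGAN-GP setting):

def critic(x):
    # Toy critic: flatten the input and project to a single score.
    return slim.fully_connected(slim.flatten(x), 1, activation_fn=None,
                                scope='critic', reuse=tf.AUTO_REUSE)

x_real = tf.placeholder(tf.float32, [None, 64, 64, 3])
x_fake = tf.placeholder(tf.float32, [None, 64, 64, 3])
gp_term = 10.0 * gradient_penalty(critic, x_real, x_fake)   # WGAN-GP style
# gp_term = 10.0 * gradient_penalty(critic, x_real)         # DRAGAN style (fake=None)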

Example 5: se_module

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def se_module(input_net, ratio=16, reuse = None, scope = None):
    with tf.variable_scope(scope, 'SE', [input_net], reuse=reuse):
        h,w,c = tuple([dim.value for dim in input_net.shape[1:4]])
        assert c % ratio == 0
        hidden_units = int(c / ratio)
        squeeze = slim.avg_pool2d(input_net, [h,w], padding='VALID')
        excitation = slim.flatten(squeeze)
        excitation = slim.fully_connected(excitation, hidden_units, scope='se_fc1',
                                weights_regularizer=None,
                                weights_initializer=slim.xavier_initializer(), 
                                activation_fn=tf.nn.relu)
        excitation = slim.fully_connected(excitation, c, scope='se_fc2',
                                weights_regularizer=None,
                                weights_initializer=slim.xavier_initializer(), 
                                activation_fn=tf.nn.sigmoid)        
        excitation = tf.reshape(excitation, [-1,1,1,c])
        output_net = input_net * excitation

        return output_net 
Author: seasonSH, Project: Probabilistic-Face-Embeddings, Lines: 21, Source file: sphere_net_PFE.py
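
A hypothetical usage sketch (the feature-map shape is an assumption): applied to a [batch, 7, 7, 64] tensor with ratio=16, the squeeze-and-excitation block uses a 4-unit bottleneck and returns a tensor of the same shape, with each channel rescaled by its learned gate.

feat = tf.placeholder(tf.float32, [None, 7, 7, 64])
recalibrated = se_module(feat, ratio=16, scope='SE_block')   # shape: [None, 7, 7, 64]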

Example 6: content_extractor

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def content_extractor(self, images, reuse=False):
        # images: (batch, 32, 32, 3) or (batch, 32, 32, 1)
        
        if images.get_shape()[3] == 1:
            # For mnist dataset, replicate the gray scale image 3 times.
            images = tf.image.grayscale_to_rgb(images)
        
        with tf.variable_scope('content_extractor', reuse=reuse):
            with slim.arg_scope([slim.conv2d], padding='SAME', activation_fn=None,
                                 stride=2,  weights_initializer=tf.contrib.layers.xavier_initializer()):
                with slim.arg_scope([slim.batch_norm], decay=0.95, center=True, scale=True, 
                                    activation_fn=tf.nn.relu, is_training=(self.mode=='train' or self.mode=='pretrain')):
                    
                    net = slim.conv2d(images, 64, [3, 3], scope='conv1')   # (batch_size, 16, 16, 64)
                    net = slim.batch_norm(net, scope='bn1')
                    net = slim.conv2d(net, 128, [3, 3], scope='conv2')     # (batch_size, 8, 8, 128)
                    net = slim.batch_norm(net, scope='bn2')
                    net = slim.conv2d(net, 256, [3, 3], scope='conv3')     # (batch_size, 4, 4, 256)
                    net = slim.batch_norm(net, scope='bn3')
                    net = slim.conv2d(net, 128, [4, 4], padding='VALID', scope='conv4')   # (batch_size, 1, 1, 128)
                    net = slim.batch_norm(net, activation_fn=tf.nn.tanh, scope='bn4')
                    if self.mode == 'pretrain':
                        net = slim.conv2d(net, 10, [1, 1], padding='VALID', scope='out')
                        net = slim.flatten(net)
                    return net 
Author: yunjey, Project: domain-transfer-network, Lines: 27, Source file: model.py

Example 7: discriminator

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def discriminator(self, images, reuse=False):
        # images: (batch, 32, 32, 1)
        with tf.variable_scope('discriminator', reuse=reuse):
            with slim.arg_scope([slim.conv2d], padding='SAME', activation_fn=None,
                                 stride=2,  weights_initializer=tf.contrib.layers.xavier_initializer()):
                with slim.arg_scope([slim.batch_norm], decay=0.95, center=True, scale=True, 
                                    activation_fn=tf.nn.relu, is_training=(self.mode=='train')):
                    
                    net = slim.conv2d(images, 128, [3, 3], activation_fn=tf.nn.relu, scope='conv1')   # (batch_size, 16, 16, 128)
                    net = slim.batch_norm(net, scope='bn1')
                    net = slim.conv2d(net, 256, [3, 3], scope='conv2')   # (batch_size, 8, 8, 256)
                    net = slim.batch_norm(net, scope='bn2')
                    net = slim.conv2d(net, 512, [3, 3], scope='conv3')   # (batch_size, 4, 4, 512)
                    net = slim.batch_norm(net, scope='bn3')
                    net = slim.conv2d(net, 1, [4, 4], padding='VALID', scope='conv4')   # (batch_size, 1, 1, 1)
                    net = slim.flatten(net)
                    return net 
Author: yunjey, Project: domain-transfer-network, Lines: 19, Source file: model.py

Example 8: _fc_iter

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def _fc_iter(self, mem_pool5, is_training, name, iter):
    xavier = tf.contrib.layers.variance_scaling_initializer()
    
    with tf.variable_scope(name):
      mem_fc7 = slim.flatten(mem_pool5, scope='flatten')
      with slim.arg_scope([slim.fully_connected], 
                          activation_fn=tf.nn.relu, 
                          trainable=is_training,
                          weights_initializer=xavier,
                          biases_initializer=tf.constant_initializer(0.0)):
        for i in xrange(cfg.MEM.FC_L):
          mem_fc7 = slim.fully_connected(mem_fc7, 
                                        cfg.MEM.FC_C, 
                                        scope="mem_fc%d" % (i+6))
          self._act_summaries.append(mem_fc7)
          self._score_summaries[iter].append(mem_fc7)

    return mem_fc7 
Author: endernewton, Project: iter-reason, Lines: 20, Source file: base_memory.py

Example 9: build_predictions

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def build_predictions(self, net, rois, is_training, initializer, initializer_bbox):

        # Crop image ROIs
        pool5 = self._crop_pool_layer(net, rois, "pool5")
        pool5_flat = slim.flatten(pool5, scope='flatten')

        # Fully connected layers
        fc6 = slim.fully_connected(pool5_flat, 4096, scope='fc6')
        if is_training:
            fc6 = slim.dropout(fc6, keep_prob=0.5, is_training=True, scope='dropout6')

        fc7 = slim.fully_connected(fc6, 4096, scope='fc7')
        if is_training:
            fc7 = slim.dropout(fc7, keep_prob=0.5, is_training=True, scope='dropout7')

        # Scores and predictions
        cls_score = slim.fully_connected(fc7, self._num_classes, weights_initializer=initializer, trainable=is_training, activation_fn=None, scope='cls_score')
        cls_prob = self._softmax_layer(cls_score, "cls_prob")
        bbox_prediction = slim.fully_connected(fc7, self._num_classes * 4, weights_initializer=initializer_bbox, trainable=is_training, activation_fn=None, scope='bbox_pred')

        return cls_score, cls_prob, bbox_prediction 
Author: dBeker, Project: Faster-RCNN-TensorFlow-Python3, Lines: 23, Source file: vgg16.py

Example 10: __call__

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def __call__(self, inputs, state, scope=None):
        batch_size = tf.shape(inputs)[0]

        if self._apply_to == 'input':
            inputs = slim.flatten(inputs) if self._shape == -1 else tf.reshape(inputs, [batch_size] + self._shape)
            return self._cell(inputs, state)
        elif self._apply_to == 'output':
            output, res_state = self._cell(inputs, state)
            output = slim.flatten(output) if self._shape == -1 else tf.reshape(output, [batch_size] + self._shape)
            return output, res_state
        elif self._apply_to == 'state':
            output, res_state = self._cell(inputs, state)
            res_state = slim.flatten(res_state) if self._shape == -1 else tf.reshape(res_state, [batch_size] + self._shape)
            return output, res_state
        else:
            raise ValueError('Unknown apply_to: "{}"'.format(self._apply_to)) 
Author: sjoerdvansteenkiste, Project: Neural-EM, Lines: 18, Source file: network.py

Example 11: forward

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def forward(self):
		temp = tf.transpose(
			self.inp.out, [0,3,1,2])
		self.out = slim.flatten(
			temp, scope = self.scope) 
Author: AmeyaWagh, Project: Traffic_sign_detection_YOLO, Lines: 7, Source file: simple.py

Example 12: encoder

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def encoder(self, x):
        """ Convolutional variational encoder to encode image into a low-dimensional latent code
        If config.conv == False it is a MLP VAE. If config.use_vae == False, it is a normal encoder
        :param x: sequence of images
        :return: a, a_mu, a_var
        """
        with tf.variable_scope('vae/encoder'):
            if self.config.conv:
                x_flat_conv = tf.reshape(x, (-1, self.d1, self.d2, 1))
                enc_hidden = slim.stack(x_flat_conv,
                                        slim.conv2d,
                                        self.num_filters,
                                        kernel_size=self.config.filter_size,
                                        stride=2,
                                        activation_fn=self.activation_fn,
                                        padding='SAME')
                enc_flat = slim.flatten(enc_hidden)
                self.enc_shape = enc_hidden.get_shape().as_list()[1:]

            else:
                x_flat = tf.reshape(x, (-1, self.d1 * self.d2))
                enc_flat = slim.repeat(x_flat, self.config.num_layers, slim.fully_connected,
                                       self.config.vae_num_units, self.activation_fn)

            a_mu = slim.fully_connected(enc_flat, self.config.dim_a, activation_fn=None)
            if self.config.use_vae:
                a_var = slim.fully_connected(enc_flat, self.config.dim_a, activation_fn=tf.nn.sigmoid)
                a_var = self.config.noise_emission * a_var
                a = simple_sample(a_mu, a_var)
            else:
                a_var = tf.constant(1., dtype=tf.float32, shape=())
                a = a_mu
            a_seq = tf.reshape(a, tf.stack((-1, self.ph_steps, self.config.dim_a)))
        return a_seq, a_mu, a_var 
Author: simonkamronn, Project: kvae, Lines: 36, Source file: KalmanVariationalAutoencoder.py

Example 13: encoder

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def encoder(self, images, is_training):
        activation_fn = leaky_relu  # tf.nn.relu
        weight_decay = 0.0
        with tf.variable_scope('encoder'):
            with slim.arg_scope([slim.batch_norm],
                                is_training=is_training):
                with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                    weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                    weights_regularizer=slim.l2_regularizer(weight_decay),
                                    normalizer_fn=slim.batch_norm,
                                    normalizer_params=self.batch_norm_params):
                    net = images
                    
                    net = slim.conv2d(net, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_1b')
                    
                    net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_2b')

                    net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_3b')

                    net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 256, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_4b')
                    
                    net = slim.flatten(net)
                    fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                    fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
        return fc1, fc2 
Author: GaoangW, Project: TNT, Lines: 31, Source file: dfc_vae_resnet.py

Example 14: _head_to_tail

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def _head_to_tail(self, pool5, is_training, reuse=None):
        with tf.variable_scope(self._scope, self._scope, reuse=reuse):
            pool5_flat = slim.flatten(pool5, scope='flatten')
            fc6 = slim.fully_connected(pool5_flat, 4096, scope='fc6')
            if is_training:
                fc6 = slim.dropout(fc6, keep_prob=0.5, is_training=True,
                                   scope='dropout6')
            fc7 = slim.fully_connected(fc6, 4096, scope='fc7')
            if is_training:
                fc7 = slim.dropout(fc7, keep_prob=0.5, is_training=True,
                                   scope='dropout7')

        return fc7 
Author: wanjinchang, Project: SSH-TensorFlow, Lines: 15, Source file: vgg16.py

Example 15: l2_batch_normalize

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def l2_batch_normalize(x, epsilon=1e-12, scope=None):
    """
    Helper function to normalize a batch of vectors.
    :param x: the input placeholder
    :param epsilon: stabilizes division
    :return: the batch of l2 normalized vector
    """
    with tf.name_scope(scope, "l2_batch_normalize") as scope:
        x_shape = tf.shape(x)
        x = slim.flatten(x)
        x /= (epsilon + tf.reduce_max(tf.abs(x), 1, keep_dims=True))
        square_sum = tf.reduce_sum(tf.square(x), 1, keep_dims=True)
        x_inv_norm = tf.rsqrt(np.sqrt(epsilon) + square_sum)
        x_norm = tf.multiply(x, x_inv_norm)
        return tf.reshape(x_norm, x_shape, scope) 
Author: evtimovi, Project: robust_physical_perturbations, Lines: 17, Source file: utils_tf.py
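
A hedged usage sketch (the input shape is an assumption; besides slim, the helper also expects tensorflow as tf and numpy as np to be imported): the function flattens each example, rescales it to roughly unit L2 norm, and restores the original shape.

perturbation = tf.placeholder(tf.float32, [None, 28, 28, 1])
unit_perturbation = l2_batch_normalize(perturbation)   # same shape, per-example L2 norm ~ 1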


Note: The tensorflow.contrib.slim.flatten method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.