

Python slim.conv2d_transpose Method Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.slim.conv2d_transpose. If you are wondering how slim.conv2d_transpose is used in Python, what it does, or what a working call looks like, the curated code examples below may help. You can also browse further usage examples for the containing module, tensorflow.contrib.slim.


The following 15 code examples of slim.conv2d_transpose are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
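Before the examples, here is a minimal sketch of a typical slim.conv2d_transpose call, assuming TensorFlow 1.x with tf.contrib.slim; the tensor shape and scope name are hypothetical and not taken from any example below:

import tensorflow as tf
import tensorflow.contrib.slim as slim

# Hypothetical input: a batch of 8x8 feature maps with 64 channels.
x = tf.placeholder(tf.float32, [None, 8, 8, 64])

# Transposed convolution: 32 output channels, 3x3 kernel, stride 2.
# With the default 'SAME' padding this doubles the spatial size: 8x8 -> 16x16.
y = slim.conv2d_transpose(x, 32, [3, 3], stride=2, scope='upsample_demo')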

Example 1: _extra_conv_arg_scope

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import conv2d_transpose [as alias]
def _extra_conv_arg_scope(weight_decay=0.00001, activation_fn=None, normalizer_fn=None):

  with slim.arg_scope(
      [slim.conv2d, slim.conv2d_transpose],
      padding='SAME',
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
      activation_fn=activation_fn,
      normalizer_fn=normalizer_fn,) as arg_sc:
    with slim.arg_scope(
        [slim.fully_connected],
        weights_regularizer=slim.l2_regularizer(weight_decay),
        weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
        activation_fn=activation_fn,
        normalizer_fn=normalizer_fn) as arg_sc:
      return arg_sc
Author: CharlesShang, Project: FastMaskRCNN, Lines: 18, Source: pyramid_network.py

Example 2: upsample

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import conv2d_transpose [as alias]
def upsample(x,scale=2,features=64,activation=tf.nn.relu):
	assert scale in [2,3,4]
	x = slim.conv2d(x,features,[3,3],activation_fn=activation)
	if scale == 2:
		ps_features = 3*(scale**2)
		x = slim.conv2d(x,ps_features,[3,3],activation_fn=activation)
		#x = slim.conv2d_transpose(x,ps_features,6,stride=1,activation_fn=activation)
		x = PS(x,2,color=True)
	elif scale == 3:
		ps_features =3*(scale**2)
		x = slim.conv2d(x,ps_features,[3,3],activation_fn=activation)
		#x = slim.conv2d_transpose(x,ps_features,9,stride=1,activation_fn=activation)
		x = PS(x,3,color=True)
	elif scale == 4:
		ps_features = 3*(2**2)
		for i in range(2):
			x = slim.conv2d(x,ps_features,[3,3],activation_fn=activation)
			#x = slim.conv2d_transpose(x,ps_features,6,stride=1,activation_fn=activation)
			x = PS(x,2,color=True)
	return x 
Author: jmiller656, Project: EDSR-Tensorflow, Lines: 22, Source: utils.py

Example 3: build_model

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import conv2d_transpose [as alias]
def build_model(self):
        with slim.arg_scope([slim.conv2d, slim.conv2d_transpose], activation_fn=tf.nn.elu):
            with tf.variable_scope('model', reuse=self.reuse_variables):

                self.left_pyramid  = self.scale_pyramid(self.left,  4)
                if self.mode == 'train':
                    self.right_pyramid = self.scale_pyramid(self.right, 4)

                self.model_input = self.left

                #build model
                if self.params.encoder == 'vgg':
                    self.build_vgg(self.model_input)
                elif self.params.encoder == 'resnet50':
                    self.build_resnet50()
                else:
                    return None 
Author: CVLAB-Unibo, Project: Semantic-Mono-Depth, Lines: 19, Source: monodepth_model.py

Example 4: generator

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import conv2d_transpose [as alias]
def generator(self, inputs, reuse=False):
        # inputs: (batch, 1, 1, 128)
        with tf.variable_scope('generator', reuse=reuse):
            with slim.arg_scope([slim.conv2d_transpose], padding='SAME', activation_fn=None,           
                                 stride=2, weights_initializer=tf.contrib.layers.xavier_initializer()):
                with slim.arg_scope([slim.batch_norm], decay=0.95, center=True, scale=True, 
                                     activation_fn=tf.nn.relu, is_training=(self.mode=='train')):

                    net = slim.conv2d_transpose(inputs, 512, [4, 4], padding='VALID', scope='conv_transpose1')   # (batch_size, 4, 4, 512)
                    net = slim.batch_norm(net, scope='bn1')
                    net = slim.conv2d_transpose(net, 256, [3, 3], scope='conv_transpose2')  # (batch_size, 8, 8, 256)
                    net = slim.batch_norm(net, scope='bn2')
                    net = slim.conv2d_transpose(net, 128, [3, 3], scope='conv_transpose3')  # (batch_size, 16, 16, 128)
                    net = slim.batch_norm(net, scope='bn3')
                    net = slim.conv2d_transpose(net, 1, [3, 3], activation_fn=tf.nn.tanh, scope='conv_transpose4')   # (batch_size, 32, 32, 1)
                    return net 
Author: yunjey, Project: domain-transfer-network, Lines: 18, Source: model.py
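The shape comments in Example 4 follow the usual output-size rules for a transposed convolution: with 'VALID' padding the output side length is (in - 1) * stride + kernel, and with 'SAME' padding it is in * stride. The short sketch below (not part of the original project) reproduces the 1 -> 4 -> 8 -> 16 -> 32 progression annotated above:

def deconv_out_size(in_size, kernel, stride, padding):
    # Output side length of a 2-D transposed convolution.
    if padding == 'VALID':
        return (in_size - 1) * stride + kernel
    return in_size * stride  # 'SAME'

assert deconv_out_size(1, 4, 2, 'VALID') == 4   # conv_transpose1: 1x1 -> 4x4
assert deconv_out_size(4, 3, 2, 'SAME') == 8    # conv_transpose2: 4x4 -> 8x8
assert deconv_out_size(8, 3, 2, 'SAME') == 16   # conv_transpose3: 8x8 -> 16x16
assert deconv_out_size(16, 3, 2, 'SAME') == 32  # conv_transpose4: 16x16 -> 32x32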

Example 5: generator

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import conv2d_transpose [as alias]
def generator(z, f_dim, output_size, c_dim, is_training=True):
    bn_kwargs = {
        'is_training': is_training, 'updates_collections': None
    }

    # Network
    net = slim.fully_connected(z, output_size//16 * output_size//16 * 8*f_dim,
        activation_fn=None, normalizer_fn=None
        )
    net = tf.reshape(net, [-1, output_size//16, output_size//16, 8*f_dim])
    net = lrelu(slim.batch_norm(net, **bn_kwargs))

    conv2d_trp_argscope =  slim.arg_scope([slim.conv2d_transpose],
        kernel_size=[5,5], stride=[2,2], activation_fn=lrelu, normalizer_params=bn_kwargs,
    )
    with conv2d_trp_argscope:
        net = slim.conv2d_transpose(net, 4*f_dim, normalizer_fn=slim.batch_norm)
        net = slim.conv2d_transpose(net, 2*f_dim, normalizer_fn=slim.batch_norm)
        net = slim.conv2d_transpose(net, f_dim, normalizer_fn=slim.batch_norm)
        net = slim.conv2d_transpose(net, c_dim, activation_fn=None)

    out = tf.nn.tanh(net)

    return out 
Author: LMescheder, Project: TheNumericsOfGANs, Lines: 26, Source: dcgan4.py

Example 6: generator

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import conv2d_transpose [as alias]
def generator(z, f_dim, output_size, c_dim, is_training=True):
    # Network
    net = slim.fully_connected(z, output_size//8 * output_size//8 * f_dim, activation_fn=None)
    net = tf.reshape(net, [-1, output_size//8, output_size//8, f_dim])
    net = lrelu(net)

    conv2d_trp_argscope = slim.arg_scope(
        [slim.conv2d_transpose], kernel_size=[5, 5], stride=[2, 2], activation_fn=lrelu
    )

    with conv2d_trp_argscope:
        net = slim.conv2d_transpose(net, f_dim)
        net = slim.conv2d_transpose(net, f_dim)
        net = slim.conv2d_transpose(net, c_dim, activation_fn=None)

    out = tf.nn.tanh(net)

    return out 
Author: LMescheder, Project: TheNumericsOfGANs, Lines: 20, Source: dcgan3_nobn_cf.py

Example 7: generator

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import conv2d_transpose [as alias]
def generator(z, f_dim, output_size, c_dim, is_training=True):
    # Network
    net = slim.fully_connected(z, output_size//16 * output_size//16 * f_dim, activation_fn=None)
    net = tf.reshape(net, [-1, output_size//16, output_size//16, f_dim])
    net = lrelu(net)

    conv2d_trp_argscope = slim.arg_scope(
        [slim.conv2d_transpose], kernel_size=[5, 5], stride=[2, 2], activation_fn=lrelu
    )

    with conv2d_trp_argscope:
        net = slim.conv2d_transpose(net, f_dim)
        net = slim.conv2d_transpose(net, f_dim)
        net = slim.conv2d_transpose(net, f_dim)
        net = slim.conv2d_transpose(net, c_dim, activation_fn=None)

    out = tf.nn.tanh(net)

    return out 
Author: LMescheder, Project: TheNumericsOfGANs, Lines: 21, Source: dcgan4_nobn_cf.py

Example 8: generator

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import conv2d_transpose [as alias]
def generator(z, f_dim, output_size, c_dim, is_training=True):
    # Network
    net = slim.fully_connected(z, output_size//8 * output_size//8 * 4*f_dim, activation_fn=tf.nn.relu)
    net = tf.reshape(net, [-1, output_size//8, output_size//8, 4*f_dim])

    conv2d_trp_argscope =  slim.arg_scope([slim.conv2d_transpose],
        kernel_size=[5,5], stride=[2,2], activation_fn=tf.nn.relu,
    )
    with conv2d_trp_argscope:
        net = slim.conv2d_transpose(net, 2*f_dim)
        net = slim.conv2d_transpose(net, f_dim)
        net = slim.conv2d_transpose(net, c_dim, activation_fn=None)

    out = tf.nn.tanh(net)

    return out 
Author: LMescheder, Project: TheNumericsOfGANs, Lines: 18, Source: dcgan3_nobn.py

Example 9: generator

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import conv2d_transpose [as alias]
def generator(z, f_dim, output_size, c_dim, is_training=True):
    # Network
    net = slim.fully_connected(z, 512, activation_fn=lrelu)
    net = slim.fully_connected(net, output_size//16 * output_size//16 * f_dim, activation_fn=lrelu)
    net = tf.reshape(net, [-1, output_size//16, output_size//16, f_dim])

    conv2dtrp_argscope = slim.arg_scope(
        [slim.conv2d_transpose], kernel_size=[5, 5], stride=[2, 2], activation_fn=lrelu)

    with conv2dtrp_argscope:
        net = slim.conv2d_transpose(net, f_dim)
        net = slim.conv2d_transpose(net, f_dim)
        net = slim.conv2d_transpose(net, f_dim)
        net = slim.conv2d_transpose(net, c_dim, activation_fn=None)

    out = tf.nn.tanh(net)

    return out 
Author: LMescheder, Project: TheNumericsOfGANs, Lines: 20, Source: conv4.py

Example 10: generator

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import conv2d_transpose [as alias]
def generator(z, f_dim, output_size, c_dim, is_training=True):
    bn_kwargs = {
        'is_training': is_training, 'updates_collections': None
    }

    # Network
    net = slim.fully_connected(z, output_size//8 * output_size//8 * 4*f_dim,
        activation_fn=tf.nn.relu, normalizer_fn=slim.batch_norm, normalizer_params=bn_kwargs
        )
    net = tf.reshape(net, [-1, output_size//8, output_size//8, 4*f_dim])

    conv2d_trp_argscope =  slim.arg_scope([slim.conv2d_transpose],
        kernel_size=[5,5], stride=[2,2], activation_fn=tf.nn.relu, normalizer_params=bn_kwargs,
    )
    with conv2d_trp_argscope:
        net = slim.conv2d_transpose(net, 2*f_dim, normalizer_fn=slim.batch_norm)
        net = slim.conv2d_transpose(net, f_dim, normalizer_fn=slim.batch_norm)
        net = slim.conv2d_transpose(net, c_dim, activation_fn=None)

    out = tf.nn.tanh(net)

    return out 
Author: LMescheder, Project: TheNumericsOfGANs, Lines: 24, Source: dcgan3.py

Example 11: readout_general

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import conv2d_transpose [as alias]
def readout_general(multi_scale_belief, num_neurons, strides, layers_per_block,
                    kernel_size, batch_norm_is_training_op, wt_decay):
  multi_scale_belief = tf.stop_gradient(multi_scale_belief)
  with tf.variable_scope('readout_maps_deconv'):
    x, outs = deconv(multi_scale_belief, batch_norm_is_training_op,
                     wt_decay=wt_decay, neurons=num_neurons, strides=strides,
                     layers_per_block=layers_per_block, kernel_size=kernel_size,
                     conv_fn=slim.conv2d_transpose, offset=0,
                     name='readout_maps_deconv')
    probs = tf.sigmoid(x)
  return x, probs 
Author: ringringyi, Project: DOTA_models, Lines: 13, Source: cmp.py

Example 12: _building_ctx

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import conv2d_transpose [as alias]
def _building_ctx(self, scope_name, reuse):
        with tf.variable_scope(scope_name, reuse=reuse):
            with slim.arg_scope([slim.conv2d, slim.conv2d_transpose, residual_block],
                                weights_regularizer=slim.l2_regularizer(self.config.regularization_factor),
                                data_format='NCHW'):
                yield 
Author: fab-jul, Project: imgcomp-cvpr, Lines: 8, Source: autoencoder.py

Example 13: _batch_norm_scope

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import conv2d_transpose [as alias]
def _batch_norm_scope(self, is_training):
        batch_norm_params = self._batch_norm_params(is_training)
        with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],
                            normalizer_fn=slim.batch_norm,
                            normalizer_params=batch_norm_params):
            with slim.arg_scope([slim.batch_norm], **batch_norm_params):
                yield 
Author: fab-jul, Project: imgcomp-cvpr, Lines: 9, Source: autoencoder.py

Example 14: _decode

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import conv2d_transpose [as alias]
def _decode(self, q, is_training):
        with self._batch_norm_scope(is_training):
            n = arch_param_n
            fa = 3
            fb = 5
            net = slim.conv2d_transpose(q, n, [fa, fa], stride=2, scope='from_bn')
            residual_input_0 = net
            for b in range(self.config.arch_param_B):
                residual_input_b = net
                with tf.variable_scope('res_block_dec_{}'.format(b)):
                    net = residual_block(net, n, num_conv2d=2, kernel_size=[3, 3], scope='dec_{}_1'.format(b))
                    net = residual_block(net, n, num_conv2d=2, kernel_size=[3, 3], scope='dec_{}_2'.format(b))
                    net = residual_block(net, n, num_conv2d=2, kernel_size=[3, 3], scope='dec_{}_3'.format(b))
                net = net + residual_input_b
            net = residual_block(net, n, num_conv2d=2, kernel_size=[3, 3], scope='dec_after_res',
                                 activation_fn=None)
            net = net + residual_input_0

            net = slim.conv2d_transpose(net, n // 2, [fb, fb], stride=2, scope='h12')
            net = slim.conv2d_transpose(net, 3, [fb, fb], stride=2, scope='h13', activation_fn=None)
            net = self._denormalize(net)
            net = self._clip_to_image_range(net)
            return net


# ------------------------------------------------------------------------------ 
Author: fab-jul, Project: imgcomp-cvpr, Lines: 28, Source: autoencoder.py

Example 15: deconv_bn_relu

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import conv2d_transpose [as alias]
def deconv_bn_relu(inputs, filters, kernel_size=4, strides=2):
    """Paramaters for Deconvolution were chosen to avoid artifacts, following
        link https://distill.pub/2016/deconv-checkerboard/
        """
    with tf.variable_scope(None, 'deconv_bn_relu'):
        output=slim.conv2d_transpose(inputs,filters,kernel_size=kernel_size,stride=strides,biases_initializer=None,activation_fn=None)
        output = slim.batch_norm(output, activation_fn=tf.nn.relu, fused=False)
        return output 
Author: xggIoU, Project: centernet_tensorflow_wilderface_voc, Lines: 10, Source: layer_utils.py
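The docstring in Example 15 points to the distill.pub analysis of checkerboard artifacts; the defaults kernel_size=4, strides=2 keep the kernel size divisible by the stride, which that article recommends. A possible call site, sketched under the assumption of TensorFlow 1.x with tf.contrib.slim (the input tensor and filter count are hypothetical):

import tensorflow as tf
import tensorflow.contrib.slim as slim

# Hypothetical 16x16 feature map with 128 channels.
feat = tf.placeholder(tf.float32, [None, 16, 16, 128])

# Upsamples to 32x32 with 64 filters; batch norm and ReLU are applied inside.
up = deconv_bn_relu(feat, filters=64)  # shape: (None, 32, 32, 64)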


Note: The tensorflow.contrib.slim.conv2d_transpose method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.