Python ops.lrelu Method Code Examples

This article collects typical usage examples of the ops.lrelu method in Python. If you are wondering what ops.lrelu does, how to call it, or what real-world usage looks like, the hand-picked code examples below may help. You can also explore further usage examples from the ops module in which this method is defined.


The sections below present 15 code examples of the ops.lrelu method, sorted by popularity by default.
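
A brief note on the op itself: lrelu is a leaky ReLU activation that each of the projects below defines in its own ops module rather than taking from TensorFlow directly (recent TensorFlow versions expose the same activation as tf.nn.leaky_relu). The snippet below is a minimal sketch of a typical DCGAN-style definition, shown only for orientation; argument names and the default slope vary between the repositories featured here.

import tensorflow as tf

def lrelu(x, leak=0.2, name="lrelu"):
    """Leaky ReLU activation: x where x > 0, leak * x otherwise."""
    with tf.variable_scope(name):
        # For 0 < leak < 1, tf.maximum(x, leak * x) is exactly the leaky ReLU.
        return tf.maximum(x, leak * x)

With such a definition, a call like ops.lrelu(h0) in the examples below simply applies this element-wise activation to the output of the preceding convolution or linear layer.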

Example 1: discriminator

# Required module: import ops [as alias]
# Or: from ops import lrelu [as alias]
def discriminator(self, opts, input_, is_training,
                      prefix='DISCRIMINATOR', reuse=False):
        """Discriminator function, suitable for simple toy experiments.

        """
        num_filters = opts['d_num_filters']

        with tf.variable_scope(prefix, reuse=reuse):
            h0 = ops.conv2d(opts, input_, num_filters, scope='h0_conv')
            h0 = ops.batch_norm(opts, h0, is_training, reuse, scope='bn_layer1')
            h0 = ops.lrelu(h0)
            h1 = ops.conv2d(opts, h0, num_filters * 2, scope='h1_conv')
            h1 = ops.batch_norm(opts, h1, is_training, reuse, scope='bn_layer2')
            h1 = ops.lrelu(h1)
            h2 = ops.conv2d(opts, h1, num_filters * 4, scope='h2_conv')
            h2 = ops.batch_norm(opts, h2, is_training, reuse, scope='bn_layer3')
            h2 = ops.lrelu(h2)
            h3 = ops.linear(opts, h2, 1, scope='h3_lin')

        return h3 
Author: tolstikhin | Project: adagan | Lines: 22 | Source: gan.py

Example 2: discriminator

# Required module: import ops [as alias]
# Or: from ops import lrelu [as alias]
def discriminator(self, image, y=None, reuse=False):
        if reuse:
            tf.get_variable_scope().reuse_variables()

        s = self.output_size
        if np.mod(s, 16) == 0:
            h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
            h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
            h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv')))
            h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, name='d_h3_conv')))
            h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h3_lin')

            return tf.nn.sigmoid(h4), h4
        else:
            h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
            h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
            h2 = linear(tf.reshape(h1, [self.batch_size, -1]), 1, 'd_h2_lin')
            if not self.config.use_kernel:
              return tf.nn.sigmoid(h2), h2
            else:
              return tf.nn.sigmoid(h2), h2, h1, h0 
Author: djsutherland | Project: opt-mmd | Lines: 23 | Source: model_mmd.py

Example 3: discriminator_labeler

# Required module: import ops [as alias]
# Or: from ops import lrelu [as alias]
def discriminator_labeler(image, output_dim, config, reuse=None):
    batch_size=tf.shape(image)[0]
    with tf.variable_scope("disc_labeler",reuse=reuse) as vs:
        dl_bn1 = batch_norm(name='dl_bn1')
        dl_bn2 = batch_norm(name='dl_bn2')
        dl_bn3 = batch_norm(name='dl_bn3')

        h0 = lrelu(conv2d(image, config.df_dim, name='dl_h0_conv'))#16,32,32,64
        h1 = lrelu(dl_bn1(conv2d(h0, config.df_dim*2, name='dl_h1_conv')))#16,16,16,128
        h2 = lrelu(dl_bn2(conv2d(h1, config.df_dim*4, name='dl_h2_conv')))#16,8,8,256
        h3 = lrelu(dl_bn3(conv2d(h2, config.df_dim*8, name='dl_h3_conv')))
        dim3=np.prod(h3.get_shape().as_list()[1:])
        h3_flat=tf.reshape(h3, [-1,dim3])
        D_labels_logits = linear(h3_flat, output_dim, 'dl_h3_Label')
        D_labels = tf.nn.sigmoid(D_labels_logits)
        variables = tf.contrib.framework.get_variables(vs)
    return D_labels, D_labels_logits, variables 
Author: mkocaoglu | Project: CausalGAN | Lines: 19 | Source: models.py

Example 4: discriminator_gen_labeler

# Required module: import ops [as alias]
# Or: from ops import lrelu [as alias]
def discriminator_gen_labeler(image, output_dim, config, reuse=None):
    batch_size=tf.shape(image)[0]
    with tf.variable_scope("disc_gen_labeler",reuse=reuse) as vs:
        dl_bn1 = batch_norm(name='dl_bn1')
        dl_bn2 = batch_norm(name='dl_bn2')
        dl_bn3 = batch_norm(name='dl_bn3')

        h0 = lrelu(conv2d(image, config.df_dim, name='dgl_h0_conv'))#16,32,32,64
        h1 = lrelu(dl_bn1(conv2d(h0, config.df_dim*2, name='dgl_h1_conv')))#16,16,16,128
        h2 = lrelu(dl_bn2(conv2d(h1, config.df_dim*4, name='dgl_h2_conv')))#16,8,8,256
        h3 = lrelu(dl_bn3(conv2d(h2, config.df_dim*8, name='dgl_h3_conv')))
        dim3=np.prod(h3.get_shape().as_list()[1:])
        h3_flat=tf.reshape(h3, [-1,dim3])
        D_labels_logits = linear(h3_flat, output_dim, 'dgl_h3_Label')
        D_labels = tf.nn.sigmoid(D_labels_logits)
        variables = tf.contrib.framework.get_variables(vs)
    return D_labels, D_labels_logits,variables 
Author: mkocaoglu | Project: CausalGAN | Lines: 19 | Source: models.py

Example 5: discriminator_on_z

# Required module: import ops [as alias]
# Or: from ops import lrelu [as alias]
def discriminator_on_z(image, config, reuse=None):
    batch_size=tf.shape(image)[0]
    with tf.variable_scope("disc_z_labeler",reuse=reuse) as vs:
        dl_bn1 = batch_norm(name='dl_bn1')
        dl_bn2 = batch_norm(name='dl_bn2')
        dl_bn3 = batch_norm(name='dl_bn3')

        h0 = lrelu(conv2d(image, config.df_dim, name='dzl_h0_conv'))#16,32,32,64
        h1 = lrelu(dl_bn1(conv2d(h0, config.df_dim*2, name='dzl_h1_conv')))#16,16,16,128
        h2 = lrelu(dl_bn2(conv2d(h1, config.df_dim*4, name='dzl_h2_conv')))#16,8,8,256
        h3 = lrelu(dl_bn3(conv2d(h2, config.df_dim*8, name='dzl_h3_conv')))
        dim3=np.prod(h3.get_shape().as_list()[1:])
        h3_flat=tf.reshape(h3, [-1,dim3])
        D_labels_logits = linear(h3_flat, config.z_dim, 'dzl_h3_Label')
        D_labels = tf.nn.tanh(D_labels_logits)
        variables = tf.contrib.framework.get_variables(vs)
    return D_labels,variables 
Author: mkocaoglu | Project: CausalGAN | Lines: 19 | Source: models.py

Example 6: discriminate

# Required module: import ops [as alias]
# Or: from ops import lrelu [as alias]
def discriminate(self, x_var, reuse=False):

        with tf.variable_scope("discriminator") as scope:
            if reuse:
                scope.reuse_variables()

            conv1 = lrelu(conv2d(x_var, output_dim=64, name='dis_conv1'))
            conv2 = lrelu(instance_norm(conv2d(conv1, output_dim=128, name='dis_conv2'), scope='dis_bn1'))
            conv3 = lrelu(instance_norm(conv2d(conv2, output_dim=256, name='dis_conv3'), scope='dis_bn2'))
            conv4 = conv2d(conv3, output_dim=512, name='dis_conv4')
            middle_conv = conv4
            conv4 = lrelu(instance_norm(conv4, scope='dis_bn3'))
            conv5 = lrelu(instance_norm(conv2d(conv4, output_dim=1024, name='dis_conv5'), scope='dis_bn4'))

            conv6 = conv2d(conv5, output_dim=2, k_w=4, k_h=4, d_h=1, d_w=1, padding='VALID', name='dis_conv6')

            return conv6, middle_conv 
Author: zhangqianhui | Project: Residual_Image_Learning_GAN | Lines: 19 | Source: ResidualGAN.py

Example 7: encode_decode_1

# Required module: import ops [as alias]
# Or: from ops import lrelu [as alias]
def encode_decode_1(self, x, reuse=False):

        with tf.variable_scope("encode_decode_1") as scope:
            if reuse:
                scope.reuse_variables()

            conv1 = lrelu(instance_norm(conv2d(x, output_dim=64, k_w=5, k_h=5, d_w=1, d_h=1, name='e_c1'), scope='e_in1'))
            conv2 = lrelu(instance_norm(conv2d(conv1, output_dim=128, name='e_c2'), scope='e_in2'))
            conv3 = lrelu(instance_norm(conv2d(conv2, output_dim=256, name='e_c3'), scope='e_in3'))
            # for x_{1}
            de_conv1 = lrelu(instance_norm(de_conv(conv3, output_shape=[self.batch_size, 64, 64, 128]
                                                  , name='e_d1', k_h=3, k_w=3), scope='e_in4'))
            de_conv2 = lrelu(instance_norm(de_conv(de_conv1, output_shape=[self.batch_size, 128, 128, 64]
                                                  , name='e_d2', k_w=3, k_h=3), scope='e_in5'))
            x_tilde1 = conv2d(de_conv2, output_dim=3, d_h=1, d_w=1, name='e_c4')

            return x_tilde1 
Author: zhangqianhui | Project: Residual_Image_Learning_GAN | Lines: 19 | Source: ResidualGAN.py

Example 8: encode_decode_2

# Required module: import ops [as alias]
# Or: from ops import lrelu [as alias]
def encode_decode_2(self, x, reuse=False):

        with tf.variable_scope("encode_decode_2") as scope:
            if reuse:
                scope.reuse_variables()

            conv1 = lrelu(instance_norm(conv2d(x, output_dim=64, k_w=5, k_h=5, d_w=1, d_h=1, name='e_c1'), scope='e_in1'))
            conv2 = lrelu(instance_norm(conv2d(conv1, output_dim=128, name='e_c2'), scope='e_in2'))

            conv3 = lrelu(instance_norm(conv2d(conv2, output_dim=256, name='e_c3'), scope='e_in3'))
            # for x_{1}
            de_conv1 = lrelu(instance_norm(de_conv(conv3, output_shape=[self.batch_size, 64, 64, 128],
                                                   name='e_d1', k_h=3, k_w=3), scope='e_in4'))
            de_conv2 = lrelu(instance_norm(de_conv(de_conv1, output_shape=[self.batch_size, 128, 128, 64],
                                                   name='e_d2', k_w=3, k_h=3), scope='e_in5'))
            x_tilde = conv2d(de_conv2, output_dim=3, d_h=1, d_w=1, name='e_c4')

            return x_tilde 
Author: zhangqianhui | Project: Residual_Image_Learning_GAN | Lines: 23 | Source: ResidualGAN.py

Example 9: _create_discriminator

# Required module: import ops [as alias]
# Or: from ops import lrelu [as alias]
def _create_discriminator(self, x, train=True, reuse=False, name="discriminator"):
        with tf.variable_scope(name) as scope:
            if reuse:
                scope.reuse_variables()

            h = x
            for i in range(self.num_conv_layers):
                h = lrelu(batch_norm(conv2d(h, self.num_dis_feature_maps * (2 ** i),
                                            stddev=0.02, name="d_h{}_conv".format(i)),
                                     is_training=train,
                                     scope="d_bn{}".format(i)))

            dim = h.get_shape()[1:].num_elements()
            h = tf.reshape(h, [-1, dim])
            d_bin_logits = linear(h, 1, scope='d_bin_logits')
            d_mul_logits = linear(h, self.num_gens, scope='d_mul_logits')
        return d_bin_logits, d_mul_logits 
Author: qhoangdl | Project: MGAN | Lines: 19 | Source: models.py

Example 10: discriminator

# Required module: import ops [as alias]
# Or: from ops import lrelu [as alias]
def discriminator(hparams, x, scope_name, train, reuse):

    with tf.variable_scope(scope_name) as scope:
        if reuse:
            scope.reuse_variables()

        d_bn1 = ops.batch_norm(name='d_bn1')
        d_bn2 = ops.batch_norm(name='d_bn2')
        d_bn3 = ops.batch_norm(name='d_bn3')

        h0 = ops.lrelu(ops.conv2d(x, hparams.df_dim, name='d_h0_conv'))

        h1 = ops.conv2d(h0, hparams.df_dim*2, name='d_h1_conv')
        h1 = ops.lrelu(d_bn1(h1, train=train))

        h2 = ops.conv2d(h1, hparams.df_dim*4, name='d_h2_conv')
        h2 = ops.lrelu(d_bn2(h2, train=train))

        h3 = ops.conv2d(h2, hparams.df_dim*8, name='d_h3_conv')
        h3 = ops.lrelu(d_bn3(h3, train=train))

        h4 = ops.linear(tf.reshape(h3, [hparams.batch_size, -1]), 1, 'd_h3_lin')

        d_logit = h4
        d = tf.nn.sigmoid(d_logit)

    return d, d_logit 
Author: AshishBora | Project: csgm | Lines: 29 | Source: model_def_new.py

Example 11: discriminator

# Required module: import ops [as alias]
# Or: from ops import lrelu [as alias]
def discriminator(hparams, x, train, reuse):

    if reuse:
        tf.get_variable_scope().reuse_variables()

    d_bn1 = ops.batch_norm(name='d_bn1')
    d_bn2 = ops.batch_norm(name='d_bn2')
    d_bn3 = ops.batch_norm(name='d_bn3')

    h0 = ops.lrelu(ops.conv2d(x, hparams.df_dim, name='d_h0_conv'))

    h1 = ops.conv2d(h0, hparams.df_dim*2, name='d_h1_conv')
    h1 = ops.lrelu(d_bn1(h1, train=train))

    h2 = ops.conv2d(h1, hparams.df_dim*4, name='d_h2_conv')
    h2 = ops.lrelu(d_bn2(h2, train=train))

    h3 = ops.conv2d(h2, hparams.df_dim*8, name='d_h3_conv')
    h3 = ops.lrelu(d_bn3(h3, train=train))

    h4 = ops.linear(tf.reshape(h3, [hparams.batch_size, -1]), 1, 'd_h3_lin')

    d_logit = h4
    d = tf.nn.sigmoid(d_logit)

    return d, d_logit 
Author: AshishBora | Project: csgm | Lines: 28 | Source: model_def.py

Example 12: forward

# Required module: import ops [as alias]
# Or: from ops import lrelu [as alias]
def forward(self, h, is_training):
        print(" [Build] Spatial Predictor ; is_training: {}".format(is_training))
        update_collection = self._get_update_collection(is_training)
        with tf.variable_scope("Q_content_prediction_head", reuse=tf.AUTO_REUSE):
            h = snlinear(h, self.aux_dim, 'fc1', update_collection=update_collection)
            h = batch_norm(name='bn1')(h, is_training=is_training)
            h = lrelu(h)
            h = snlinear(h, self.z_dim, 'fc2', update_collection=update_collection)
            return tf.nn.tanh(h) 
Author: hubert0527 | Project: COCO-GAN | Lines: 11 | Source: content_predictor.py

Example 13: forward

# Required module: import ops [as alias]
# Or: from ops import lrelu [as alias]
def forward(self, h, is_training):
        print(" [Build] Spatial Predictor ; is_training: {}".format(is_training))
        update_collection = self._get_update_collection(is_training)
        with tf.variable_scope("GD_spatial_prediction_head", reuse=tf.AUTO_REUSE):
            h = snlinear(h, self.aux_dim, 'fc1', update_collection=update_collection)
            h = batch_norm(name='bn1')(h, is_training=is_training)
            h = lrelu(h)
            h = snlinear(h, self.spatial_dim, 'fc2', update_collection=update_collection)
            return tf.nn.tanh(h) 
Author: hubert0527 | Project: COCO-GAN | Lines: 11 | Source: spatial_prediction.py

Example 14: __init__

# Required module: import ops [as alias]
# Or: from ops import lrelu [as alias]
def __init__(self, config,
                 debug_information=False,
                 is_train=True):
        self.debug = debug_information

        self.config = config
        self.batch_size = self.config.batch_size
        self.input_height = self.config.data_info[0]
        self.input_width = self.config.data_info[1]
        self.num_class = self.config.data_info[2]
        self.c_dim = self.config.data_info[3]
        self.visualize_shape = self.config.visualize_shape
        self.conv_info = self.config.conv_info
        self.activation_fn = {
            'selu': selu,
            'relu': tf.nn.relu,
            'lrelu': lrelu,
        }[self.config.activation]

        # create placeholders for the input
        self.image = tf.placeholder(
            name='image', dtype=tf.float32,
            shape=[self.batch_size, self.input_height, self.input_width, self.c_dim],
        )
        self.label = tf.placeholder(
            name='label', dtype=tf.float32, shape=[self.batch_size, self.num_class],
        )

        self.is_training = tf.placeholder_with_default(bool(is_train), [], name='is_training')

        self.build(is_train=is_train) 
Author: shaohua0116 | Project: Activation-Visualization-Histogram | Lines: 33 | Source: model.py

Example 15: discriminator

# Required module: import ops [as alias]
# Or: from ops import lrelu [as alias]
def discriminator(input, is_train, reuse=False):
    c2, c4, c8 = 16, 32, 64  # channel num,32, 64, 128
    with tf.variable_scope('dis') as scope:
        if reuse:
            scope.reuse_variables()
        # 16*16*32
        conv1 = tf.layers.conv2d(input, c2, kernel_size=[4, 4], strides=[2, 2], padding="SAME",
                                 kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
                                 name='conv1')
        act1 = lrelu(conv1, n='act1')
        # 8*8*64
        conv2 = tf.layers.conv2d(act1, c4, kernel_size=[4, 4], strides=[2, 2], padding="SAME",
                                 kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
                                 name='conv2')
        bn2 = tf.layers.batch_normalization(conv2, training=is_train, name='bn2')
        act2 = lrelu(bn2, n='act2')
        # 4*4*128
        conv3 = tf.layers.conv2d(act2, c8, kernel_size=[4, 4], strides=[2, 2], padding="SAME",
                                 kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
                                 name='conv3')
        bn3 = tf.layers.batch_normalization(conv3, training=is_train, name='bn3')
        act3 = lrelu(bn3, n='act3')

        shape = act3.get_shape().as_list()
        dim = shape[1] * shape[2] * shape[3]
        fc1 = tf.reshape(act3, shape=[-1, dim], name='fc1')
        w1 = tf.get_variable('w1', shape=[fc1.shape[1], 1], dtype=tf.float32,
                             initializer=tf.truncated_normal_initializer(stddev=0.02))
        b1 = tf.get_variable('b1', shape=[1], dtype=tf.float32,
                             initializer=tf.constant_initializer(0.0))

        # wgan just get rid of the sigmoid
        output = tf.add(tf.matmul(fc1, w1), b1, name='output')
        return output 
Author: handspeaker | Project: gan_practice | Lines: 36 | Source: wgan.py


Note: The ops.lrelu examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and distribution or use should follow each project's License. Do not reproduce without permission.