

Python TensorflowUtils.conv2d_strided Method Code Examples

This article collects typical usage examples of the Python method TensorflowUtils.conv2d_strided. If you are wondering what TensorflowUtils.conv2d_strided does, how to call it, or what it looks like in real code, the selected examples below should help. You can also explore further usage examples of the TensorflowUtils module that the method belongs to.


The following presents 6 code examples of the TensorflowUtils.conv2d_strided method, sorted by popularity by default.
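TensorflowUtils is a project-local helper module from the TensorflowProjects repository, not part of the official TensorFlow API, so its implementation is not shown on this page. Purely as a point of reference, here is a minimal sketch of what the helpers used in these examples plausibly look like, written against the same TensorFlow 1.x-era graph API the examples use. The stride-2, SAME-padding assumption for conv2d_strided is consistent with the spatial-size arithmetic in the examples below (for instance IMAGE_SIZE // 16 after four strided convolutions); the real module may differ in details such as initialization.

import tensorflow as tf

# Assumed reconstructions of the TensorflowUtils helpers; not the original source.

def weight_variable(shape, name=None):
    # Small truncated-normal initialization (an assumption).
    return tf.Variable(tf.truncated_normal(shape, stddev=0.02), name=name)

def bias_variable(shape, name=None):
    return tf.Variable(tf.constant(0.0, shape=shape), name=name)

def conv2d_basic(x, W, b):
    # Plain convolution: stride 1, SAME padding, plus bias.
    conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME")
    return tf.nn.bias_add(conv, b)

def conv2d_strided(x, W, b):
    # Downsampling convolution: stride 2 halves the height and width.
    conv = tf.nn.conv2d(x, W, strides=[1, 2, 2, 1], padding="SAME")
    return tf.nn.bias_add(conv, b)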

Example 1: inference_conv

# Required module: import TensorflowUtils [as alias; the example uses `utils`]
# Or: from TensorflowUtils import conv2d_strided [as alias]
def inference_conv(image):
    # incomplete :/
    image_reshaped = tf.reshape(image, [-1, IMAGE_SIZE, IMAGE_SIZE, 1])
    with tf.name_scope("conv1") as scope:
        W_conv1 = utils.weight_variable([3, 3, 1, 32], name="W_conv1")
        b_conv1 = utils.bias_variable([32], name="b_conv1")
        add_to_reg_loss_and_summary(W_conv1, b_conv1)
        h_conv1 = tf.nn.tanh(utils.conv2d_basic(image_reshaped, W_conv1, b_conv1))

    with tf.name_scope("conv2") as scope:
        W_conv2 = utils.weight_variable([3, 3, 32, 64], name="W_conv2")
        b_conv2 = utils.bias_variable([64], name="b_conv2")
        add_to_reg_loss_and_summary(W_conv2, b_conv2)
        h_conv2 = tf.nn.tanh(utils.conv2d_strided(h_conv1, W_conv2, b_conv2))

    with tf.name_scope("conv3") as scope:
        W_conv3 = utils.weight_variable([3, 3, 64, 128], name="W_conv3")
        b_conv3 = utils.bias_variable([128], name="b_conv3")
        add_to_reg_loss_and_summary(W_conv3, b_conv3)
        h_conv3 = tf.nn.tanh(utils.conv2d_strided(h_conv2, W_conv3, b_conv3))

    with tf.name_scope("conv4") as scope:
        W_conv4 = utils.weight_variable([3, 3, 128, 256], name="W_conv4")
        b_conv4 = utils.bias_variable([256], name="b_conv4")
        add_to_reg_loss_and_summary(W_conv4, b_conv4)
        h_conv4 = tf.nn.tanh(utils.conv2d_strided(h_conv3, W_conv4, b_conv4))
Author: RosieCampbell | Project: TensorflowProjects | Lines: 28 | Source: MNISTAutoEncoder.py
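Note that, as published, inference_conv builds the graph but does not return a value, in line with its "# incomplete" comment. The helper add_to_reg_loss_and_summary is defined elsewhere in MNISTAutoEncoder.py and is not shown here; purely as an assumption, it presumably adds L2 regularization terms and summaries for the given parameters, along the lines of:

import tensorflow as tf

# Hypothetical sketch only; the real helper in MNISTAutoEncoder.py may differ.
def add_to_reg_loss_and_summary(W, b):
    tf.add_to_collection("reg_loss", tf.nn.l2_loss(W))
    tf.add_to_collection("reg_loss", tf.nn.l2_loss(b))
    # tf.histogram_summary is the pre-1.0 summary API (later tf.summary.histogram),
    # matching the API era used in Example 4 below.
    tf.histogram_summary(W.op.name, W)
    tf.histogram_summary(b.op.name, b)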

Example 2: encoder_conv

# Required module: import TensorflowUtils [as alias; the example uses `utils`]
# Or: from TensorflowUtils import conv2d_strided [as alias]
def encoder_conv(image):
    with tf.name_scope("enc_conv1") as scope:
        W_conv1 = utils.weight_variable([3, 3, 3, 32], name="W_conv1")
        b_conv1 = utils.bias_variable([32], name="b_conv1")
        h_conv1 = tf.nn.tanh(utils.conv2d_strided(image, W_conv1, b_conv1))

    with tf.name_scope("enc_conv2") as scope:
        W_conv2 = utils.weight_variable([3, 3, 32, 64], name="W_conv2")
        b_conv2 = utils.bias_variable([64], name="b_conv2")
        h_conv2 = tf.nn.tanh(utils.conv2d_strided(h_conv1, W_conv2, b_conv2))

    with tf.name_scope("enc_conv3") as scope:
        W_conv3 = utils.weight_variable([3, 3, 64, 128], name="W_conv3")
        b_conv3 = utils.bias_variable([128], name="b_conv3")
        h_conv3 = tf.nn.tanh(utils.conv2d_strided(h_conv2, W_conv3, b_conv3))

    with tf.name_scope("enc_conv4") as scope:
        W_conv4 = utils.weight_variable([3, 3, 128, 256], name="W_conv4")
        b_conv4 = utils.bias_variable([256], name="b_conv4")
        h_conv4 = tf.nn.tanh(utils.conv2d_strided(h_conv3, W_conv4, b_conv4))

    with tf.name_scope("enc_fc") as scope:
        image_size = IMAGE_SIZE // 16
        h_conv4_flatten = tf.reshape(h_conv4, [-1, image_size * image_size * 256])
        W_fc5 = utils.weight_variable([image_size * image_size * 256, 512], name="W_fc5")
        b_fc5 = utils.bias_variable([512], name="b_fc5")
        encoder_val = tf.matmul(h_conv4_flatten, W_fc5) + b_fc5

    return encoder_val
Author: RosieCampbell | Project: TensorflowProjects | Lines: 31 | Source: ImageAnalogy.py
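A note on the flatten size in enc_fc: assuming conv2d_strided uses stride 2 with SAME padding, the four strided convolutions each halve the spatial dimensions, so an IMAGE_SIZE x IMAGE_SIZE input leaves enc_conv4 at (IMAGE_SIZE // 16) x (IMAGE_SIZE // 16) with 256 channels, which matches the image_size * image_size * 256 reshape above. A hypothetical invocation (the placeholder and the IMAGE_SIZE value are illustrative assumptions, not from the original project):

import tensorflow as tf

IMAGE_SIZE = 128  # assumed; it must be divisible by 16 for the reshape in enc_fc

# Batch of RGB images in, one 512-dimensional code per image out.
images = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, 3], name="images")
code = encoder_conv(images)  # shape: [batch_size, 512]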

Example 3: encoder

# Required module: import TensorflowUtils [as alias; the example uses `utils`]
# Or: from TensorflowUtils import conv2d_strided [as alias]
def encoder(dataset, train_mode):
    with tf.variable_scope("Encoder"):
        with tf.name_scope("enc_conv1") as scope:
            W_conv1 = utils.weight_variable_xavier_initialized([3, 3, 3, 32], name="W_conv1")
            b_conv1 = utils.bias_variable([32], name="b_conv1")
            h_conv1 = utils.conv2d_strided(dataset, W_conv1, b_conv1)
            h_bn1 = utils.batch_norm(h_conv1, 32, train_mode, scope="conv1_bn")
            h_relu1 = tf.nn.relu(h_bn1)

        with tf.name_scope("enc_conv2") as scope:
            W_conv2 = utils.weight_variable_xavier_initialized([3, 3, 32, 64], name="W_conv2")
            b_conv2 = utils.bias_variable([64], name="b_conv2")
            h_conv2 = utils.conv2d_strided(h_relu1, W_conv2, b_conv2)
            h_bn2 = utils.batch_norm(h_conv2, 64, train_mode, scope="conv2_bn")
            h_relu2 = tf.nn.relu(h_bn2)

        with tf.name_scope("enc_conv3") as scope:
            W_conv3 = utils.weight_variable_xavier_initialized([3, 3, 64, 128], name="W_conv3")
            b_conv3 = utils.bias_variable([128], name="b_conv3")
            h_conv3 = utils.conv2d_strided(h_relu2, W_conv3, b_conv3)
            h_bn3 = utils.batch_norm(h_conv3, 128, train_mode, scope="conv3_bn")
            h_relu3 = tf.nn.relu(h_bn3)

        with tf.name_scope("enc_conv4") as scope:
            W_conv4 = utils.weight_variable_xavier_initialized([3, 3, 128, 256], name="W_conv4")
            b_conv4 = utils.bias_variable([256], name="b_conv4")
            h_conv4 = utils.conv2d_strided(h_relu3, W_conv4, b_conv4)
            h_bn4 = utils.batch_norm(h_conv4, 256, train_mode, scope="conv4_bn")
            h_relu4 = tf.nn.relu(h_bn4)

        with tf.name_scope("enc_conv5") as scope:
            W_conv5 = utils.weight_variable_xavier_initialized([3, 3, 256, 512], name="W_conv5")
            b_conv5 = utils.bias_variable([512], name="b_conv5")
            h_conv5 = utils.conv2d_strided(h_relu4, W_conv5, b_conv5)
            h_bn5 = utils.batch_norm(h_conv5, 512, train_mode, scope="conv5_bn")
            h_relu5 = tf.nn.relu(h_bn5)

        with tf.name_scope("enc_fc") as scope:
            image_size = IMAGE_SIZE // 32
            h_relu5_flatten = tf.reshape(h_relu5, [-1, image_size * image_size * 512])
            W_fc = utils.weight_variable([image_size * image_size * 512, 1024], name="W_fc")
            b_fc = utils.bias_variable([1024], name="b_fc")
            encoder_val = tf.matmul(h_relu5_flatten, W_fc) + b_fc

    return encoder_val
Author: shekkizh | Project: TensorflowProjects | Lines: 47 | Source: ContextInpainting.py
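Compared with Example 2, this encoder inserts batch normalization between each convolution and its ReLU, forwarding a train_mode flag to utils.batch_norm, and it goes one stride-2 layer deeper, which is why the flatten uses IMAGE_SIZE // 32 rather than // 16. utils.batch_norm is again project-local; a deliberately simplified stand-in with the same call signature is sketched below. This is an assumption: it normalizes with the current batch statistics only, whereas the real helper presumably uses train_mode to switch to moving averages at inference time.

import tensorflow as tf

def batch_norm(x, n_out, train_mode, scope="bn"):
    # Simplified stand-in: always uses the statistics of the current batch.
    # train_mode is accepted for signature compatibility but ignored here.
    with tf.variable_scope(scope):
        beta = tf.get_variable("beta", [n_out], initializer=tf.constant_initializer(0.0))
        gamma = tf.get_variable("gamma", [n_out], initializer=tf.constant_initializer(1.0))
        mean, var = tf.nn.moments(x, axes=[0, 1, 2])
        return tf.nn.batch_normalization(x, mean, var, beta, gamma, variance_epsilon=1e-5)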

Example 4: inference_strided

# Required module: import TensorflowUtils [as alias; the example uses `utils`]
# Or: from TensorflowUtils import conv2d_strided [as alias]
def inference_strided(input_image):
    W1 = utils.weight_variable([9, 9, 3, 32])
    b1 = utils.bias_variable([32])
    tf.histogram_summary("W1", W1)
    tf.histogram_summary("b1", b1)
    h_conv1 = tf.nn.relu(utils.conv2d_basic(input_image, W1, b1))

    W2 = utils.weight_variable([3, 3, 32, 64])
    b2 = utils.bias_variable([64])
    tf.histogram_summary("W2", W2)
    tf.histogram_summary("b2", b2)
    h_conv2 = tf.nn.relu(utils.conv2d_strided(h_conv1, W2, b2))

    W3 = utils.weight_variable([3, 3, 64, 128])
    b3 = utils.bias_variable([128])
    tf.histogram_summary("W3", W3)
    tf.histogram_summary("b3", b3)
    h_conv3 = tf.nn.relu(utils.conv2d_strided(h_conv2, W3, b3))

    # upstrides: transposed convolutions that restore the spatial resolution
    W4 = utils.weight_variable([3, 3, 64, 128])
    b4 = utils.bias_variable([64])
    tf.histogram_summary("W4", W4)
    tf.histogram_summary("b4", b4)
    # print h_conv3.get_shape()
    # print W4.get_shape()
    h_conv4 = tf.nn.relu(utils.conv2d_transpose_strided(h_conv3, W4, b4))

    W5 = utils.weight_variable([3, 3, 32, 64])
    b5 = utils.bias_variable([32])
    tf.histogram_summary("W5", W5)
    tf.histogram_summary("b5", b5)
    h_conv5 = tf.nn.relu(utils.conv2d_transpose_strided(h_conv4, W5, b5))

    W6 = utils.weight_variable([9, 9, 32, 3])
    b6 = utils.bias_variable([3])
    tf.histogram_summary("W6", W6)
    tf.histogram_summary("b6", b6)
    pred_image = tf.nn.tanh(utils.conv2d_basic(h_conv5, W6, b6))

    return pred_image
Author: RosieCampbell | Project: TensorflowProjects | Lines: 43 | Source: GenerativeNeuralStyle.py
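This network downsamples with two strided convolutions and then upsamples back with utils.conv2d_transpose_strided. One detail worth noting: tf.nn.conv2d_transpose expects its filter in [height, width, output_channels, in_channels] order, which explains why W4 is [3, 3, 64, 128] while b4 has only 64 entries. (The tf.histogram_summary calls are the pre-1.0 summary API; from TensorFlow 1.0 onward the equivalent is tf.summary.histogram.) Below is a minimal sketch of the transposed helper, assuming stride 2 and SAME padding and targeting the TF 1.x graph API; the real TensorflowUtils version may compute output_shape differently.

import tensorflow as tf

def conv2d_transpose_strided(x, W, b, output_shape=None):
    # Transposed ("deconvolution") layer with stride 2: doubles height and width.
    # Filter layout is [height, width, output_channels, in_channels].
    if output_shape is None:
        in_shape = tf.shape(x)
        out_channels = W.get_shape().as_list()[2]
        output_shape = tf.stack([in_shape[0], in_shape[1] * 2, in_shape[2] * 2, out_channels])
    conv = tf.nn.conv2d_transpose(x, W, output_shape, strides=[1, 2, 2, 1], padding="SAME")
    return tf.nn.bias_add(conv, b)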

Example 5: inference_fully_convolutional

# Required module: import TensorflowUtils [as alias; the example uses `utils`]
# Or: from TensorflowUtils import conv2d_strided [as alias]
def inference_fully_convolutional(dataset):
    '''
    Fully convolutional inference on notMNIST dataset
    :param dataset: [batch_size, 28*28*1] tensor
    :return: logits
    '''
    dataset_reshaped = tf.reshape(dataset, [-1, 28, 28, 1])
    with tf.name_scope("conv1") as scope:
        W_conv1 = utils.weight_variable_xavier_initialized([3, 3, 1, 32], name="W_conv1")
        b_conv1 = utils.bias_variable([32], name="b_conv1")
        h_conv1 = tf.nn.relu(utils.conv2d_strided(dataset_reshaped, W_conv1, b_conv1))

    with tf.name_scope("conv2") as scope:
        W_conv2 = utils.weight_variable_xavier_initialized([3, 3, 32, 64], name="W_conv2")
        b_conv2 = utils.bias_variable([64], name="b_conv2")
        h_conv2 = tf.nn.relu(utils.conv2d_strided(h_conv1, W_conv2, b_conv2))

    with tf.name_scope("conv3") as scope:
        W_conv3 = utils.weight_variable_xavier_initialized([3, 3, 64, 128], name="W_conv3")
        b_conv3 = utils.bias_variable([128], name="b_conv3")
        h_conv3 = tf.nn.relu(utils.conv2d_strided(h_conv2, W_conv3, b_conv3))

    with tf.name_scope("conv4") as scope:
        W_conv4 = utils.weight_variable_xavier_initialized([3, 3, 128, 256], name="W_conv4")
        b_conv4 = utils.bias_variable([256], name="b_conv4")
        h_conv4 = tf.nn.relu(utils.conv2d_strided(h_conv3, W_conv4, b_conv4))

    with tf.name_scope("conv5") as scope:
        # W_conv5 = utils.weight_variable_xavier_initialized([2, 2, 256, 512], name="W_conv5")
        # b_conv5 = utils.bias_variable([512], name="b_conv5")
        # h_conv5 = tf.nn.relu(utils.conv2d_strided(h_conv4, W_conv5, b_conv5))
        h_conv5 = utils.avg_pool_2x2(h_conv4)

    with tf.name_scope("conv6") as scope:
        W_conv6 = utils.weight_variable_xavier_initialized([1, 1, 256, 10], name="W_conv6")
        b_conv6 = utils.bias_variable([10], name="b_conv6")
        logits = tf.nn.relu(utils.conv2d_basic(h_conv5, W_conv6, b_conv6))
        print(logits.get_shape())
        logits = tf.reshape(logits, [-1, 10])
    return logits
Author: RosieCampbell | Project: TensorflowProjects | Lines: 42 | Source: notMNISTFullyConvultional.py
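Tracing the shapes (again assuming stride-2, SAME-padding convolutions): the 28x28 input shrinks to 14, 7, 4, and 2 through the four strided layers, the 2x2 average pool brings it to 1x1, and the 1x1 convolution in conv6 yields a [batch, 1, 1, 10] tensor that reshapes cleanly to [batch, 10] logits. A hypothetical way to wire these logits into a training step is shown below; the placeholders and optimizer choice are illustrative, not taken from the original file.

import tensorflow as tf

# Illustrative training hook for the fully convolutional classifier above.
dataset = tf.placeholder(tf.float32, [None, 28 * 28 * 1], name="dataset")
labels = tf.placeholder(tf.int64, [None], name="labels")

logits = inference_fully_convolutional(dataset)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)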

Example 6: discriminator

# Required module: import TensorflowUtils [as alias; the example uses `utils`]
# Or: from TensorflowUtils import conv2d_strided [as alias]
def discriminator(input_images, train_mode):
    # dropout_prob = 1.0
    # if train_mode:
    #     dropout_prob = 0.5
    W_conv0 = utils.weight_variable([5, 5, NUM_OF_CHANNELS, 64 * 1], name="W_conv0")
    b_conv0 = utils.bias_variable([64 * 1], name="b_conv0")
    h_conv0 = utils.conv2d_strided(input_images, W_conv0, b_conv0)
    h_bn0 = h_conv0  # utils.batch_norm(h_conv0, 64 * 1, train_mode, scope="disc_bn0")
    h_relu0 = utils.leaky_relu(h_bn0, 0.2, name="h_relu0")
    utils.add_activation_summary(h_relu0)

    W_conv1 = utils.weight_variable([5, 5, 64 * 1, 64 * 2], name="W_conv1")
    b_conv1 = utils.bias_variable([64 * 2], name="b_conv1")
    h_conv1 = utils.conv2d_strided(h_relu0, W_conv1, b_conv1)
    h_bn1 = utils.batch_norm(h_conv1, 64 * 2, train_mode, scope="disc_bn1")
    h_relu1 = utils.leaky_relu(h_bn1, 0.2, name="h_relu1")
    utils.add_activation_summary(h_relu1)

    W_conv2 = utils.weight_variable([5, 5, 64 * 2, 64 * 4], name="W_conv2")
    b_conv2 = utils.bias_variable([64 * 4], name="b_conv2")
    h_conv2 = utils.conv2d_strided(h_relu1, W_conv2, b_conv2)
    h_bn2 = utils.batch_norm(h_conv2, 64 * 4, train_mode, scope="disc_bn2")
    h_relu2 = utils.leaky_relu(h_bn2, 0.2, name="h_relu2")
    utils.add_activation_summary(h_relu2)

    W_conv3 = utils.weight_variable([5, 5, 64 * 4, 64 * 8], name="W_conv3")
    b_conv3 = utils.bias_variable([64 * 8], name="b_conv3")
    h_conv3 = utils.conv2d_strided(h_relu2, W_conv3, b_conv3)
    h_bn3 = utils.batch_norm(h_conv3, 64 * 8, train_mode, scope="disc_bn3")
    h_relu3 = utils.leaky_relu(h_bn3, 0.2, name="h_relu3")
    utils.add_activation_summary(h_relu3)

    shape = h_relu3.get_shape().as_list()
    h_3 = tf.reshape(h_relu3, [FLAGS.batch_size, (IMAGE_SIZE // 16) * (IMAGE_SIZE // 16) * shape[3]])
    W_4 = utils.weight_variable([h_3.get_shape().as_list()[1], 1], name="W_4")
    b_4 = utils.bias_variable([1], name="b_4")
    h_4 = tf.matmul(h_3, W_4) + b_4

    return tf.nn.sigmoid(h_4), h_4, h_relu3
Author: shekkizh | Project: TensorflowProjects | Lines: 41 | Source: Flowers_GAN.py
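The discriminator follows the usual DCGAN pattern: four stride-2 5x5 convolutions with leaky ReLU (batch normalization skipped on the first layer, as the commented-out line suggests), followed by a single linear unit. It returns the sigmoid probability, the raw logit, and the last feature map. A hypothetical use of the logits for the standard GAN losses is sketched below; the placeholders and wiring are illustrative, and note that the second discriminator call would need to reuse the first call's variables (for example via tf.get_variable-based helpers with a reusable variable scope), which the weight_variable-style helpers above do not do on their own.

import tensorflow as tf

# Illustrative only: real and fake batches for the discriminator above.
# IMAGE_SIZE and NUM_OF_CHANNELS are the same project-level constants the example uses.
real_images = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, NUM_OF_CHANNELS])
fake_images = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, NUM_OF_CHANNELS])
# In a full GAN, fake_images would be the generator's output rather than a placeholder.

_, real_logits, _ = discriminator(real_images, train_mode=True)
_, fake_logits, _ = discriminator(fake_images, train_mode=True)  # must reuse variables in practice

d_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=real_logits, labels=tf.ones_like(real_logits)) +
    tf.nn.sigmoid_cross_entropy_with_logits(logits=fake_logits, labels=tf.zeros_like(fake_logits)))
g_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=fake_logits, labels=tf.ones_like(fake_logits)))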


Note: The TensorflowUtils.conv2d_strided examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and the copyright of the source code remains with the original authors; please consult each project's license before redistributing or using the code. Do not reproduce this article without permission.