

Python TensorflowUtils.conv2d_basic Method Code Examples

This article collects typical usage examples of the Python method TensorflowUtils.conv2d_basic. If you are wondering what TensorflowUtils.conv2d_basic does, how to call it, or what working uses of it look like, the curated code examples below should help. You can also explore further usage examples from the TensorflowUtils module.


The following presents 10 code examples of the TensorflowUtils.conv2d_basic method, sorted by popularity by default. Note that these examples target the pre-1.0 TensorFlow API (e.g. tf.histogram_summary, which later became tf.summary.histogram).
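TensorflowUtils itself is not reproduced on this page. Judging from the call sites below, conv2d_basic is presumably a stride-1, SAME-padded convolution followed by a bias add, with sibling helpers for variable creation and pooling. A minimal sketch under that assumption (not the repository's exact code):

import tensorflow as tf

def conv2d_basic(x, W, bias):
    # assumed: stride-1 convolution, SAME padding, then bias add
    conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME")
    return tf.nn.bias_add(conv, bias)

def weight_variable(shape, name=None):
    # assumed: small truncated-normal initialization
    return tf.Variable(tf.truncated_normal(shape, stddev=0.02), name=name)

def bias_variable(shape, name=None):
    # assumed: constant-zero initialization
    return tf.Variable(tf.constant(0.0, shape=shape), name=name)

def max_pool_2x2(x):
    # assumed: 2x2 max pooling with stride 2, halving height and width
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")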

Example 1: inference_simple

# Required import: import TensorflowUtils as utils
# Alternatively: from TensorflowUtils import conv2d_basic
def inference_simple(dataset):
    with tf.name_scope("conv1") as scope:
        W1 = utils.weight_variable([5, 5, 1, 32], name="W1")
        b1 = utils.bias_variable([32], name="b1")
        tf.histogram_summary("W1", W1)
        tf.histogram_summary("b1", b1)
        h_conv1 = tf.nn.relu(utils.conv2d_basic(dataset, W1, b1), name="h_conv1")
        h_pool1 = utils.max_pool_2x2(h_conv1)

    with tf.name_scope("conv2") as scope:
        W2 = utils.weight_variable([3, 3, 32, 64], name="W2")
        b2 = utils.bias_variable([64], name="b2")
        tf.histogram_summary("W2", W2)
        tf.histogram_summary("b2", b2)
        h_conv2 = tf.nn.relu(utils.conv2d_basic(h_pool1, W2, b2), name="h_conv2")
        h_pool2 = utils.max_pool_2x2(h_conv2)

    with tf.name_scope("fc") as scope:
        image_size = IMAGE_SIZE // 4
        h_flat = tf.reshape(h_pool2, [-1, image_size * image_size * 64])
        W_fc = utils.weight_variable([image_size * image_size * 64, NUM_LABELS], name="W_fc")
        b_fc = utils.bias_variable([NUM_LABELS], name="b_fc")
        tf.histogram_summary("W_fc", W_fc)
        tf.histogram_summary("b_fc", b_fc)
        pred = tf.matmul(h_flat, W_fc) + b_fc

    return pred
Author: RosieCampbell, Project: TensorflowProjects, Lines: 29, Source: FaceDetection.py
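A hypothetical invocation of inference_simple; IMAGE_SIZE and NUM_LABELS are constants defined elsewhere in FaceDetection.py, and the values used here are placeholders:

IMAGE_SIZE = 128   # assumed value for illustration
NUM_LABELS = 2     # assumed value for illustration
images = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, 1], name="images")
logits = inference_simple(images)   # -> [batch_size, NUM_LABELS]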

Example 2: inference

# Required import: import TensorflowUtils as utils
# Alternatively: from TensorflowUtils import conv2d_basic
def inference(dataset):
    with tf.name_scope("conv1") as scope:
        W1 = utils.weight_variable([5, 5, 1, 32], name="W1")
        b1 = utils.bias_variable([32], name="b1")
        tf.histogram_summary("W1", W1)
        tf.histogram_summary("b1", b1)
        h_conv1 = utils.conv2d_basic(dataset, W1, b1)
        h_norm1 = utils.local_response_norm(h_conv1)
        h_1 = tf.nn.relu(h_norm1, name="conv1")
        h_pool1 = utils.max_pool_2x2(h_1)

    with tf.name_scope("conv2") as scope:
        W2 = utils.weight_variable([3, 3, 32, 64], name="W2")
        b2 = utils.bias_variable([64], name="b2")
        tf.histogram_summary("W2", W2)
        tf.histogram_summary("b2", b2)
        h_conv2 = utils.conv2d_basic(h_pool1, W2, b2)
        h_norm2 = utils.local_response_norm(h_conv2)
        h_2 = tf.nn.relu(h_norm2, name="conv2")
        h_pool2 = utils.max_pool_2x2(h_2)

    with tf.name_scope("conv3") as scope:
        W3 = utils.weight_variable([3, 3, 64, 128], name="W3")
        b3 = utils.bias_variable([128], name="b3")
        tf.histogram_summary("W3", W3)
        tf.histogram_summary("b3", b3)
        h_conv3 = utils.conv2d_basic(h_pool2, W3, b3)
        h_norm3 = utils.local_response_norm(h_conv3)
        h_3 = tf.nn.relu(h_norm3, name="conv3")
        h_pool3 = utils.max_pool_2x2(h_3)

    with tf.name_scope("conv4") as scope:
        W4 = utils.weight_variable([3, 3, 128, 256], name="W4")
        b4 = utils.bias_variable([256], name="b4")
        tf.histogram_summary("W4", W4)
        tf.histogram_summary("b4", b4)
        h_conv4 = utils.conv2d_basic(h_pool3, W4, b4)
        h_norm4 = utils.local_response_norm(h_conv4)
        h_4 = tf.nn.relu(h_norm4, name="conv4")

    with tf.name_scope("fc1") as scope:
        image_size = IMAGE_SIZE // 8
        h_flat = tf.reshape(h_4, [-1, image_size * image_size * 256])
        W_fc1 = utils.weight_variable([image_size * image_size * 256, 512], name="W_fc1")
        b_fc1 = utils.bias_variable([512], name="b_fc1")
        tf.histogram_summary("W_fc1", W_fc1)
        tf.histogram_summary("b_fc1", b_fc1)
        h_fc1 = tf.nn.relu(tf.matmul(h_flat, W_fc1) + b_fc1)

    with tf.name_scope("fc2") as scope:
        W_fc2 = utils.weight_variable([512, NUM_LABELS], name="W_fc2")
        b_fc2 = utils.bias_variable([NUM_LABELS], name="b_fc2")
        tf.histogram_summary("W_fc2", W_fc2)
        tf.histogram_summary("b_fc2", b_fc2)
        pred = tf.matmul(h_fc1, W_fc2) + b_fc2

    return pred
Author: RosieCampbell, Project: TensorflowProjects, Lines: 59, Source: FaceDetection.py
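Example 2 differs from Example 1 mainly by applying local response normalization before each ReLU and by adding two more convolutional stages. utils.local_response_norm is presumably a thin wrapper over TensorFlow's LRN op; a sketch with assumed (AlexNet-style) parameters:

def local_response_norm(x):
    # assumed parameters; the real helper may use different values
    return tf.nn.local_response_normalization(x, depth_radius=5, bias=2, alpha=1e-4, beta=0.75)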

Example 3: inference_res

# Required import: import TensorflowUtils as utils
# Alternatively: from TensorflowUtils import conv2d_basic
def inference_res(input_image):
    W1 = utils.weight_variable([3, 3, 3, 32])
    b1 = utils.bias_variable([32])
    hconv_1 = tf.nn.relu(utils.conv2d_basic(input_image, W1, b1))
    h_norm = utils.local_response_norm(hconv_1)
    bottleneck_1 = utils.bottleneck_unit(h_norm, 16, 16, down_stride=True, name="res_1")
    bottleneck_2 = utils.bottleneck_unit(bottleneck_1, 8, 8, down_stride=True, name="res_2")
    bottleneck_3 = utils.bottleneck_unit(bottleneck_2, 16, 16, up_stride=True, name="res_3")
    bottleneck_4 = utils.bottleneck_unit(bottleneck_3, 32, 32, up_stride=True, name="res_4")
    W5 = utils.weight_variable([3, 3, 32, 3])
    b5 = utils.bias_variable([3])
    out = tf.nn.tanh(utils.conv2d_basic(bottleneck_4, W5, b5))
    return out
Author: RosieCampbell, Project: TensorflowProjects, Lines: 15, Source: GenerativeNeuralStyle.py
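utils.bottleneck_unit is not shown here. A ResNet-style bottleneck typically reduces channels with a 1x1 convolution, applies a 3x3 convolution (strided when down-sampling), expands back with another 1x1 convolution, and adds a shortcut before the final activation. A generic sketch of that pattern (not the repository's exact implementation):

def bottleneck_sketch(x, in_ch, mid_ch, out_ch, stride=1):
    # 1x1 reduce -> 3x3 (possibly strided) -> 1x1 expand, plus a shortcut
    w1 = tf.Variable(tf.truncated_normal([1, 1, in_ch, mid_ch], stddev=0.02))
    w2 = tf.Variable(tf.truncated_normal([3, 3, mid_ch, mid_ch], stddev=0.02))
    w3 = tf.Variable(tf.truncated_normal([1, 1, mid_ch, out_ch], stddev=0.02))
    h = tf.nn.relu(tf.nn.conv2d(x, w1, [1, 1, 1, 1], "SAME"))
    h = tf.nn.relu(tf.nn.conv2d(h, w2, [1, stride, stride, 1], "SAME"))
    h = tf.nn.conv2d(h, w3, [1, 1, 1, 1], "SAME")
    if stride != 1 or in_ch != out_ch:
        # projection shortcut when input and output shapes differ
        ws = tf.Variable(tf.truncated_normal([1, 1, in_ch, out_ch], stddev=0.02))
        x = tf.nn.conv2d(x, ws, [1, stride, stride, 1], "SAME")
    return tf.nn.relu(h + x)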

Example 4: vgg_net

# Required import: import TensorflowUtils as utils
# Alternatively: from TensorflowUtils import conv2d_basic
def vgg_net(weights, image):
    layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',

        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',

        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
        'relu3_3'  # 'conv3_4', 'relu3_4', 'pool3',

        # 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
        # 'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
        #
        # 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
        # 'relu5_3', 'conv5_4', 'relu5_4'
    )

    net = {}
    current = image
    for i, name in enumerate(layers):
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = np.transpose(kernels, (1, 0, 2, 3))
            bias = bias.reshape(-1)
            current = utils.conv2d_basic(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current)
        elif kind == 'pool':
            current = utils.avg_pool_2x2(current)
        net[name] = current

    assert len(net) == len(layers)
    return net
Author: RosieCampbell, Project: TensorflowProjects, Lines: 37, Source: GenerativeNeuralStyle.py
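The np.transpose call above swaps the first two kernel axes so matconvnet's width-major layout becomes TensorFlow's height-major layout; a standalone check:

import numpy as np

kernels = np.zeros((3, 5, 64, 128))              # matconvnet: [width, height, in, out]
converted = np.transpose(kernels, (1, 0, 2, 3))  # tensorflow: [height, width, in, out]
print(converted.shape)                           # (5, 3, 64, 128)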

Example 5: inference_conv

# Required import: import TensorflowUtils as utils
# Alternatively: from TensorflowUtils import conv2d_basic
def inference_conv(image):
    # incomplete :/
    image_reshaped = tf.reshape(image, [-1, IMAGE_SIZE, IMAGE_SIZE, 1])
    with tf.name_scope("conv1") as scope:
        W_conv1 = utils.weight_variable([3, 3, 1, 32], name="W_conv1")
        b_conv1 = utils.bias_variable([32], name="b_conv1")
        add_to_reg_loss_and_summary(W_conv1, b_conv1)
        h_conv1 = tf.nn.tanh(utils.conv2d_basic(image_reshaped, W_conv1, b_conv1))

    with tf.name_scope("conv2") as scope:
        W_conv2 = utils.weight_variable([3, 3, 32, 64], name="W_conv2")
        b_conv2 = utils.bias_variable([64], name="b_conv2")
        add_to_reg_loss_and_summary(W_conv2, b_conv2)
        h_conv2 = tf.nn.tanh(utils.conv2d_strided(h_conv1, W_conv2, b_conv2))

    with tf.name_scope("conv3") as scope:
        W_conv3 = utils.weight_variable([3, 3, 64, 128], name="W_conv3")
        b_conv3 = utils.bias_variable([128], name="b_conv3")
        add_to_reg_loss_and_summary(W_conv3, b_conv3)
        h_conv3 = tf.nn.tanh(utils.conv2d_strided(h_conv2, W_conv3, b_conv3))

    with tf.name_scope("conv4") as scope:
        W_conv4 = utils.weight_variable([3, 3, 128, 256], name="W_conv4")
        b_conv4 = utils.bias_variable([256], name="b_conv4")
        add_to_reg_loss_and_summary(W_conv4, b_conv4)
        h_conv4 = tf.nn.tanh(utils.conv2d_strided(h_conv3, W_conv4, b_conv4))
Author: RosieCampbell, Project: TensorflowProjects, Lines: 28, Source: MNISTAutoEncoder.py
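Examples 5, 6, and 8 also call utils.conv2d_strided, which from its use as a down-sampling layer is presumably a stride-2, SAME-padded convolution that halves the spatial dimensions; a sketch under that assumption:

def conv2d_strided(x, W, b):
    # assumed: stride-2 convolution, SAME padding, halving height and width
    conv = tf.nn.conv2d(x, W, strides=[1, 2, 2, 1], padding="SAME")
    return tf.nn.bias_add(conv, b)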

Example 6: inference_strided

# Required import: import TensorflowUtils as utils
# Alternatively: from TensorflowUtils import conv2d_basic
def inference_strided(input_image):
    W1 = utils.weight_variable([9, 9, 3, 32])
    b1 = utils.bias_variable([32])
    tf.histogram_summary("W1", W1)
    tf.histogram_summary("b1", b1)
    h_conv1 = tf.nn.relu(utils.conv2d_basic(input_image, W1, b1))

    W2 = utils.weight_variable([3, 3, 32, 64])
    b2 = utils.bias_variable([64])
    tf.histogram_summary("W2", W2)
    tf.histogram_summary("b2", b2)
    h_conv2 = tf.nn.relu(utils.conv2d_strided(h_conv1, W2, b2))

    W3 = utils.weight_variable([3, 3, 64, 128])
    b3 = utils.bias_variable([128])
    tf.histogram_summary("W3", W3)
    tf.histogram_summary("b3", b3)
    h_conv3 = tf.nn.relu(utils.conv2d_strided(h_conv2, W3, b3))

    # upstrides
    W4 = utils.weight_variable([3, 3, 64, 128])
    b4 = utils.bias_variable([64])
    tf.histogram_summary("W4", W4)
    tf.histogram_summary("b4", b4)
    # print h_conv3.get_shape()
    # print W4.get_shape()
    h_conv4 = tf.nn.relu(utils.conv2d_transpose_strided(h_conv3, W4, b4))

    W5 = utils.weight_variable([3, 3, 32, 64])
    b5 = utils.bias_variable([32])
    tf.histogram_summary("W5", W5)
    tf.histogram_summary("b5", b5)
    h_conv5 = tf.nn.relu(utils.conv2d_transpose_strided(h_conv4, W5, b5))

    W6 = utils.weight_variable([9, 9, 32, 3])
    b6 = utils.bias_variable([3])
    tf.histogram_summary("W6", W6)
    tf.histogram_summary("b6", b6)
    pred_image = tf.nn.tanh(utils.conv2d_basic(h_conv5, W6, b6))

    return pred_image
Author: RosieCampbell, Project: TensorflowProjects, Lines: 43, Source: GenerativeNeuralStyle.py
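Note the weight shapes of the up-striding layers above: tf.nn.conv2d_transpose takes a filter of shape [height, width, output_channels, input_channels], which is presumably what utils.conv2d_transpose_strided wraps, so W4 = [3, 3, 64, 128] consumes 128 channels and produces 64. A sketch under that assumption:

def conv2d_transpose_strided(x, W, b, output_shape=None, stride=2):
    # assumed: stride-2 transposed convolution, SAME padding, doubling height and width
    # W has shape [height, width, out_channels, in_channels]
    if output_shape is None:
        output_shape = x.get_shape().as_list()
        output_shape[1] *= 2
        output_shape[2] *= 2
        output_shape[3] = W.get_shape().as_list()[2]
    conv = tf.nn.conv2d_transpose(x, W, output_shape, strides=[1, stride, stride, 1], padding="SAME")
    return tf.nn.bias_add(conv, b)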

Example 7: inferece

# Required import: import TensorflowUtils as utils
# Alternatively: from TensorflowUtils import conv2d_basic
def inferece(dataset, prob):
    with tf.name_scope("conv1") as scope:
        W_conv1 = utils.weight_variable([5, 5, 1, 32])
        b_conv1 = utils.bias_variable([32])
        tf.histogram_summary("W_conv1", W_conv1)
        tf.histogram_summary("b_conv1", b_conv1)
        h_conv1 = utils.conv2d_basic(dataset, W_conv1, b_conv1)
        h_1 = tf.nn.relu(h_conv1)
        h_pool1 = utils.max_pool_2x2(h_1)
        add_to_regularization_loss(W_conv1, b_conv1)

    with tf.name_scope("conv2") as scope:
        W_conv2 = utils.weight_variable([3, 3, 32, 64])
        b_conv2 = utils.bias_variable([64])
        tf.histogram_summary("W_conv2", W_conv2)
        tf.histogram_summary("b_conv2", b_conv2)
        h_conv2 = utils.conv2d_basic(h_pool1, W_conv2, b_conv2)
        h_2 = tf.nn.relu(h_conv2)
        h_pool2 = utils.max_pool_2x2(h_2)
        add_to_regularization_loss(W_conv2, b_conv2)

    with tf.name_scope("fc_1") as scope:
        image_size = IMAGE_SIZE // 4  # integer division: the two 2x2 max-pools quarter each spatial dimension
        h_flat = tf.reshape(h_pool2, [-1, image_size * image_size * 64])
        W_fc1 = utils.weight_variable([image_size * image_size * 64, 256])
        b_fc1 = utils.bias_variable([256])
        tf.histogram_summary("W_fc1", W_fc1)
        tf.histogram_summary("b_fc1", b_fc1)
        h_fc1 = tf.nn.relu(tf.matmul(h_flat, W_fc1) + b_fc1)
        h_fc1_dropout = tf.nn.dropout(h_fc1, prob)
    with tf.name_scope("fc_2") as scope:
        W_fc2 = utils.weight_variable([256, NUM_LABELS])
        b_fc2 = utils.bias_variable([NUM_LABELS])
        tf.histogram_summary("W_fc2", W_fc2)
        tf.histogram_summary("b_fc2", b_fc2)
        pred = tf.matmul(h_fc1_dropout, W_fc2) + b_fc2  # use the dropout output so the keep probability takes effect

    return pred
Author: RosieCampbell, Project: TensorflowProjects, Lines: 40, Source: EmotionDetector.py
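Since the dropout strength enters through the prob argument, it is presumably fed via a placeholder so it can be set to 1.0 at evaluation time; a hypothetical wiring:

prob = tf.placeholder(tf.float32, name="keep_prob")
logits = inferece(images, prob)
# feed_dict={prob: 0.5} while training, feed_dict={prob: 1.0} while evaluating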

Example 8: inference_fully_convolutional

# Required import: import TensorflowUtils as utils
# Alternatively: from TensorflowUtils import conv2d_basic
def inference_fully_convolutional(dataset):
    '''
    Fully convolutional inference on notMNIST dataset
    :param dataset: [batch_size, 28*28*1] tensor
    :return: logits
    '''
    dataset_reshaped = tf.reshape(dataset, [-1, 28, 28, 1])
    with tf.name_scope("conv1") as scope:
        W_conv1 = utils.weight_variable_xavier_initialized([3, 3, 1, 32], name="W_conv1")
        b_conv1 = utils.bias_variable([32], name="b_conv1")
        h_conv1 = tf.nn.relu(utils.conv2d_strided(dataset_reshaped, W_conv1, b_conv1))

    with tf.name_scope("conv2") as scope:
        W_conv2 = utils.weight_variable_xavier_initialized([3, 3, 32, 64], name="W_conv2")
        b_conv2 = utils.bias_variable([64], name="b_conv2")
        h_conv2 = tf.nn.relu(utils.conv2d_strided(h_conv1, W_conv2, b_conv2))

    with tf.name_scope("conv3") as scope:
        W_conv3 = utils.weight_variable_xavier_initialized([3, 3, 64, 128], name="W_conv3")
        b_conv3 = utils.bias_variable([128], name="b_conv3")
        h_conv3 = tf.nn.relu(utils.conv2d_strided(h_conv2, W_conv3, b_conv3))

    with tf.name_scope("conv4") as scope:
        W_conv4 = utils.weight_variable_xavier_initialized([3, 3, 128, 256], name="W_conv4")
        b_conv4 = utils.bias_variable([256], name="b_conv4")
        h_conv4 = tf.nn.relu(utils.conv2d_strided(h_conv3, W_conv4, b_conv4))

    with tf.name_scope("conv5") as scope:
        # W_conv5 = utils.weight_variable_xavier_initialized([2, 2, 256, 512], name="W_conv5")
        # b_conv5 = utils.bias_variable([512], name="b_conv5")
        # h_conv5 = tf.nn.relu(utils.conv2d_strided(h_conv4, W_conv5, b_conv5))
        h_conv5 = utils.avg_pool_2x2(h_conv4)

    with tf.name_scope("conv6") as scope:
        W_conv6 = utils.weight_variable_xavier_initialized([1, 1, 256, 10], name="W_conv6")
        b_conv6 = utils.bias_variable([10], name="b_conv6")
        logits = tf.nn.relu(utils.conv2d_basic(h_conv5, W_conv6, b_conv6))
        print(logits.get_shape())
        logits = tf.reshape(logits, [-1, 10])
    return logits
Author: RosieCampbell, Project: TensorflowProjects, Lines: 42, Source: notMNISTFullyConvultional.py
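The shape arithmetic behind Example 8, assuming conv2d_strided halves each spatial dimension (SAME padding rounds up):

n = 28
for _ in range(4):            # four stride-2 convolutions
    n = (n + 1) // 2          # 28 -> 14 -> 7 -> 4 -> 2
n //= 2                       # avg_pool_2x2: 2 -> 1
print(n)                      # 1: the 1x1 "conv6" now acts as a per-image classifier

With a 1x1 spatial extent left, the final reshape to [-1, 10] yields one logit vector per image.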

Example 9: vgg_net

# Required import: import TensorflowUtils as utils
# Alternatively: from TensorflowUtils import conv2d_basic
def vgg_net(weights, image):
    layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',

        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',

        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
        'relu3_3', 'conv3_4', 'relu3_4', 'pool3',

        'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
        'relu4_3', 'conv4_4', 'relu4_4', 'pool4',

        'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
        'relu5_3', 'conv5_4', 'relu5_4'
    )

    net = {}
    current = image
    for i, name in enumerate(layers):
        if name in ['conv3_4', 'relu3_4', 'conv4_4', 'relu4_4', 'conv5_4', 'relu5_4']:
            continue
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)), name=name + "_w")
            bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
            current = utils.conv2d_basic(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)
            if FLAGS.debug:
                utils.add_activation_summary(current)
        elif kind == 'pool':
            current = utils.avg_pool_2x2(current)
        net[name] = current

    return net
Author: Selimam, Project: AutoPortraitMatting, Lines: 40, Source: FCN.py

Example 10: inference

# Required import: import TensorflowUtils as utils
# Alternatively: from TensorflowUtils import conv2d_basic
def inference(image, keep_prob):
    """
    Semantic segmentation network definition
    :param image: input image. Should have values in range 0-255
    :param keep_prob:
    :return:
    """
    print("setting up vgg initialized conv layers ...")
    model_data = utils.get_model_data(FLAGS.model_dir, MODEL_URL)

    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))

    weights = np.squeeze(model_data['layers'])

    #processed_image = utils.process_image(image, mean_pixel)

    with tf.variable_scope("inference"):
        image_net = vgg_net(weights, image)
        conv_final_layer = image_net["conv5_3"]

        pool5 = utils.max_pool_2x2(conv_final_layer)

        W6 = utils.weight_variable([7, 7, 512, 4096], name="W6")
        b6 = utils.bias_variable([4096], name="b6")
        conv6 = utils.conv2d_basic(pool5, W6, b6)
        relu6 = tf.nn.relu(conv6, name="relu6")
        if FLAGS.debug:
            utils.add_activation_summary(relu6)
        relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)

        W7 = utils.weight_variable([1, 1, 4096, 4096], name="W7")
        b7 = utils.bias_variable([4096], name="b7")
        conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)
        relu7 = tf.nn.relu(conv7, name="relu7")
        if FLAGS.debug:
            utils.add_activation_summary(relu7)
        relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)

        W8 = utils.weight_variable([1, 1, 4096, NUM_OF_CLASSESS], name="W8")
        b8 = utils.bias_variable([NUM_OF_CLASSESS], name="b8")
        conv8 = utils.conv2d_basic(relu_dropout7, W8, b8)
        # annotation_pred1 = tf.argmax(conv8, dimension=3, name="prediction1")

        # now to upscale to actual image size
        deconv_shape1 = image_net["pool4"].get_shape()
        W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, NUM_OF_CLASSESS], name="W_t1")
        b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
        conv_t1 = utils.conv2d_transpose_strided(conv8, W_t1, b_t1, output_shape=tf.shape(image_net["pool4"]))
        fuse_1 = tf.add(conv_t1, image_net["pool4"], name="fuse_1")

        deconv_shape2 = image_net["pool3"].get_shape()
        W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name="W_t2")
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(image_net["pool3"]))
        fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")

        shape = tf.shape(image)
        deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSESS])
        W_t3 = utils.weight_variable([16, 16, NUM_OF_CLASSESS, deconv_shape2[3].value], name="W_t3")
        b_t3 = utils.bias_variable([NUM_OF_CLASSESS], name="b_t3")
        conv_t3 = utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8)

        annotation_pred = tf.argmax(conv_t3, dimension=3, name="prediction")

    return tf.expand_dims(annotation_pred, dim=3), conv_t3
Author: Selimam, Project: AutoPortraitMatting, Lines: 68, Source: FCN.py
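A hypothetical training wiring for the FCN above; the placeholder shapes, learning rate, and loss construction are assumptions following common FCN setups of that era, not code from the repository:

image = tf.placeholder(tf.float32, shape=[None, 224, 224, 3], name="input_image")
annotation = tf.placeholder(tf.int32, shape=[None, 224, 224, 1], name="annotation")
keep_prob = tf.placeholder(tf.float32, name="keep_probability")

pred_annotation, logits = inference(image, keep_prob)
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits, labels=tf.squeeze(annotation, squeeze_dims=[3])))
train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)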


Note: The TensorflowUtils.conv2d_basic examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. Consult the corresponding project's license before distributing or using the code, and do not repost without permission.