This page collects typical usage examples of the Python method TensorflowUtils.weight_variable. If you have been wondering what exactly TensorflowUtils.weight_variable does and how to use it, the curated examples below should help. You can also browse further usage examples from the TensorflowUtils module.
The following 15 code examples of TensorflowUtils.weight_variable are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python examples.
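All of the examples below target the pre-1.0 TensorFlow API: tf.histogram_summary, tf.scalar_summary, tf.merge_all_summaries, tf.train.SummaryWriter, tf.pack, and tf.initialize_all_variables were renamed in TensorFlow 1.0 to tf.summary.histogram, tf.summary.scalar, tf.summary.merge_all, tf.summary.FileWriter, tf.stack, and tf.global_variables_initializer. The examples also assume import TensorflowUtils as utils. For orientation, here is a minimal sketch of what the two helpers used throughout are assumed to look like (truncated-normal weights, zero biases; the default stddev is a guess, and the actual TensorflowUtils implementation may differ):

import tensorflow as tf

def weight_variable(shape, stddev=0.02, name=None):
    # Assumed implementation: truncated-normal initialization.
    initial = tf.truncated_normal(shape, stddev=stddev)
    if name is None:
        return tf.Variable(initial)
    return tf.get_variable(name, initializer=initial)

def bias_variable(shape, name=None):
    # Assumed implementation: biases start at zero.
    initial = tf.constant(0.0, shape=shape)
    if name is None:
        return tf.Variable(initial)
    return tf.get_variable(name, initializer=initial)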
Example 1: inference_simple
# Required import: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import weight_variable [as alias]
def inference_simple(dataset):
    with tf.name_scope("conv1") as scope:
        W1 = utils.weight_variable([5, 5, 1, 32], name="W1")
        b1 = utils.bias_variable([32], name="b1")
        tf.histogram_summary("W1", W1)
        tf.histogram_summary("b1", b1)
        h_conv1 = tf.nn.relu(utils.conv2d_basic(dataset, W1, b1), name="h_conv1")
        h_pool1 = utils.max_pool_2x2(h_conv1)
    with tf.name_scope("conv2") as scope:
        W2 = utils.weight_variable([3, 3, 32, 64], name="W2")
        b2 = utils.bias_variable([64], name="b2")
        tf.histogram_summary("W2", W2)
        tf.histogram_summary("b2", b2)
        h_conv2 = tf.nn.relu(utils.conv2d_basic(h_pool1, W2, b2), name="h_conv2")
        h_pool2 = utils.max_pool_2x2(h_conv2)
    with tf.name_scope("fc") as scope:
        image_size = IMAGE_SIZE // 4
        h_flat = tf.reshape(h_pool2, [-1, image_size * image_size * 64])
        W_fc = utils.weight_variable([image_size * image_size * 64, NUM_LABELS], name="W_fc")
        b_fc = utils.bias_variable([NUM_LABELS], name="b_fc")
        tf.histogram_summary("W_fc", W_fc)
        tf.histogram_summary("b_fc", b_fc)
        pred = tf.matmul(h_flat, W_fc) + b_fc
    return pred
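Example 1 leans on two small TensorflowUtils wrappers. A plausible sketch, assuming a stride-1 SAME convolution and 2x2 max pooling (which is why image_size is IMAGE_SIZE // 4 after two pooling layers):

def conv2d_basic(x, W, bias):
    # Stride-1 convolution with SAME padding, plus bias.
    conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME")
    return tf.nn.bias_add(conv, bias)

def max_pool_2x2(x):
    # Halves both spatial dimensions.
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")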
Example 2: inference
# Required import: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import weight_variable [as alias]
def inference(data):
    with tf.variable_scope("inference") as scope:
        W_1 = utils.weight_variable([IMAGE_SIZE * IMAGE_SIZE * 50], name="W_1")
        b_1 = utils.bias_variable([50], name="b_1")
        h_1 = tf.nn.relu(tf.matmul(data, tf.reshape(W_1, [IMAGE_SIZE * IMAGE_SIZE, 50])) + b_1, name='h_1')
        utils.add_activation_summary(h_1)
        W_2 = utils.weight_variable([50 * 50], name="W_2")
        b_2 = utils.bias_variable([50], name="b_2")
        h_2 = tf.nn.relu(tf.matmul(h_1, tf.reshape(W_2, [50, 50])) + b_2, name='h_2')
        utils.add_activation_summary(h_2)
        W_3 = utils.weight_variable([50 * 50], name="W_3")
        b_3 = utils.bias_variable([50], name="b_3")
        h_3 = tf.nn.relu(tf.matmul(h_2, tf.reshape(W_3, [50, 50])) + b_3, name='h_3')
        utils.add_activation_summary(h_3)
        W_4 = utils.weight_variable([50 * 50], name="W_4")
        b_4 = utils.bias_variable([50], name="b_4")
        h_4 = tf.nn.relu(tf.matmul(h_3, tf.reshape(W_4, [50, 50])) + b_4, name='h_4')
        utils.add_activation_summary(h_4)
        W_final = utils.weight_variable([50 * 10], name="W_final")
        b_final = utils.bias_variable([10], name="b_final")
        pred = tf.nn.softmax(tf.matmul(h_4, tf.reshape(W_final, [50, 10])) + b_final, name='h_final')
        # utils.add_activation_summary(pred)
    return pred
Example 3: encoder_conv
# Required import: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import weight_variable [as alias]
def encoder_conv(image):
    with tf.name_scope("enc_conv1") as scope:
        W_conv1 = utils.weight_variable([3, 3, 3, 32], name="W_conv1")
        b_conv1 = utils.bias_variable([32], name="b_conv1")
        h_conv1 = tf.nn.tanh(utils.conv2d_strided(image, W_conv1, b_conv1))
    with tf.name_scope("enc_conv2") as scope:
        W_conv2 = utils.weight_variable([3, 3, 32, 64], name="W_conv2")
        b_conv2 = utils.bias_variable([64], name="b_conv2")
        h_conv2 = tf.nn.tanh(utils.conv2d_strided(h_conv1, W_conv2, b_conv2))
    with tf.name_scope("enc_conv3") as scope:
        W_conv3 = utils.weight_variable([3, 3, 64, 128], name="W_conv3")
        b_conv3 = utils.bias_variable([128], name="b_conv3")
        h_conv3 = tf.nn.tanh(utils.conv2d_strided(h_conv2, W_conv3, b_conv3))
    with tf.name_scope("enc_conv4") as scope:
        W_conv4 = utils.weight_variable([3, 3, 128, 256], name="W_conv4")
        b_conv4 = utils.bias_variable([256], name="b_conv4")
        h_conv4 = tf.nn.tanh(utils.conv2d_strided(h_conv3, W_conv4, b_conv4))
    with tf.name_scope("enc_fc") as scope:
        image_size = IMAGE_SIZE // 16
        h_conv4_flatten = tf.reshape(h_conv4, [-1, image_size * image_size * 256])
        W_fc5 = utils.weight_variable([image_size * image_size * 256, 512], name="W_fc5")
        b_fc5 = utils.bias_variable([512], name="b_fc5")
        encoder_val = tf.matmul(h_conv4_flatten, W_fc5) + b_fc5
    return encoder_val
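encoder_conv reaches IMAGE_SIZE // 16 after four convolutions because conv2d_strided presumably downsamples by 2 at each layer; a minimal sketch under that assumption:

def conv2d_strided(x, W, b):
    # Stride-2 convolution: each call halves height and width,
    # so four layers shrink IMAGE_SIZE down to IMAGE_SIZE // 16.
    conv = tf.nn.conv2d(x, W, strides=[1, 2, 2, 1], padding="SAME")
    return tf.nn.bias_add(conv, b)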
Example 4: inference_conv
# Required import: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import weight_variable [as alias]
def inference_conv(image):
    # incomplete :/
    image_reshaped = tf.reshape(image, [-1, IMAGE_SIZE, IMAGE_SIZE, 1])
    with tf.name_scope("conv1") as scope:
        W_conv1 = utils.weight_variable([3, 3, 1, 32], name="W_conv1")
        b_conv1 = utils.bias_variable([32], name="b_conv1")
        add_to_reg_loss_and_summary(W_conv1, b_conv1)
        h_conv1 = tf.nn.tanh(utils.conv2d_basic(image_reshaped, W_conv1, b_conv1))
    with tf.name_scope("conv2") as scope:
        W_conv2 = utils.weight_variable([3, 3, 32, 64], name="W_conv2")
        b_conv2 = utils.bias_variable([64], name="b_conv2")
        add_to_reg_loss_and_summary(W_conv2, b_conv2)
        h_conv2 = tf.nn.tanh(utils.conv2d_strided(h_conv1, W_conv2, b_conv2))
    with tf.name_scope("conv3") as scope:
        W_conv3 = utils.weight_variable([3, 3, 64, 128], name="W_conv3")
        b_conv3 = utils.bias_variable([128], name="b_conv3")
        add_to_reg_loss_and_summary(W_conv3, b_conv3)
        h_conv3 = tf.nn.tanh(utils.conv2d_strided(h_conv2, W_conv3, b_conv3))
    with tf.name_scope("conv4") as scope:
        W_conv4 = utils.weight_variable([3, 3, 128, 256], name="W_conv4")
        b_conv4 = utils.bias_variable([256], name="b_conv4")
        add_to_reg_loss_and_summary(W_conv4, b_conv4)
        h_conv4 = tf.nn.tanh(utils.conv2d_strided(h_conv3, W_conv4, b_conv4))
Example 5: generator
# Required import: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import weight_variable [as alias]
def generator(z, train_mode):
    with tf.variable_scope("generator") as scope:
        W_0 = utils.weight_variable([FLAGS.z_dim, 64 * GEN_DIMENSION // 2 * IMAGE_SIZE // 16 * IMAGE_SIZE // 16], name="W_0")
        b_0 = utils.bias_variable([64 * GEN_DIMENSION // 2 * IMAGE_SIZE // 16 * IMAGE_SIZE // 16], name="b_0")
        z_0 = tf.matmul(z, W_0) + b_0
        h_0 = tf.reshape(z_0, [-1, IMAGE_SIZE // 16, IMAGE_SIZE // 16, 64 * GEN_DIMENSION // 2])
        h_bn0 = utils.batch_norm(h_0, 64 * GEN_DIMENSION // 2, train_mode, scope="gen_bn0")
        h_relu0 = tf.nn.relu(h_bn0, name='relu0')
        utils.add_activation_summary(h_relu0)
        # W_1 = utils.weight_variable([5, 5, 64 * GEN_DIMENSION // 2, 64 * GEN_DIMENSION], name="W_1")
        # b_1 = utils.bias_variable([64 * GEN_DIMENSION // 2], name="b_1")
        # deconv_shape = tf.pack([tf.shape(h_relu0)[0], IMAGE_SIZE // 16, IMAGE_SIZE // 16, 64 * GEN_DIMENSION // 2])
        # h_conv_t1 = utils.conv2d_transpose_strided(h_relu0, W_1, b_1, output_shape=deconv_shape)
        # h_bn1 = utils.batch_norm(h_conv_t1, 64 * GEN_DIMENSION // 2, train_mode, scope="gen_bn1")
        # h_relu1 = tf.nn.relu(h_bn1, name='relu1')
        # utils.add_activation_summary(h_relu1)
        W_2 = utils.weight_variable([5, 5, 64 * GEN_DIMENSION // 4, 64 * GEN_DIMENSION // 2], name="W_2")
        b_2 = utils.bias_variable([64 * GEN_DIMENSION // 4], name="b_2")
        deconv_shape = tf.pack([tf.shape(h_relu0)[0], IMAGE_SIZE // 8, IMAGE_SIZE // 8, 64 * GEN_DIMENSION // 4])
        h_conv_t2 = utils.conv2d_transpose_strided(h_relu0, W_2, b_2, output_shape=deconv_shape)
        h_bn2 = utils.batch_norm(h_conv_t2, 64 * GEN_DIMENSION // 4, train_mode, scope="gen_bn2")
        h_relu2 = tf.nn.relu(h_bn2, name='relu2')
        utils.add_activation_summary(h_relu2)
        W_3 = utils.weight_variable([5, 5, 64 * GEN_DIMENSION // 8, 64 * GEN_DIMENSION // 4], name="W_3")
        b_3 = utils.bias_variable([64 * GEN_DIMENSION // 8], name="b_3")
        deconv_shape = tf.pack([tf.shape(h_relu2)[0], IMAGE_SIZE // 4, IMAGE_SIZE // 4, 64 * GEN_DIMENSION // 8])
        h_conv_t3 = utils.conv2d_transpose_strided(h_relu2, W_3, b_3, output_shape=deconv_shape)
        h_bn3 = utils.batch_norm(h_conv_t3, 64 * GEN_DIMENSION // 8, train_mode, scope="gen_bn3")
        h_relu3 = tf.nn.relu(h_bn3, name='relu3')
        utils.add_activation_summary(h_relu3)
        W_4 = utils.weight_variable([5, 5, 64 * GEN_DIMENSION // 16, 64 * GEN_DIMENSION // 8], name="W_4")
        b_4 = utils.bias_variable([64 * GEN_DIMENSION // 16], name="b_4")
        deconv_shape = tf.pack([tf.shape(h_relu3)[0], IMAGE_SIZE // 2, IMAGE_SIZE // 2, 64 * GEN_DIMENSION // 16])
        h_conv_t4 = utils.conv2d_transpose_strided(h_relu3, W_4, b_4, output_shape=deconv_shape)
        h_bn4 = utils.batch_norm(h_conv_t4, 64 * GEN_DIMENSION // 16, train_mode, scope="gen_bn4")
        h_relu4 = tf.nn.relu(h_bn4, name='relu4')
        utils.add_activation_summary(h_relu4)
        W_5 = utils.weight_variable([5, 5, NUM_OF_CHANNELS, 64 * GEN_DIMENSION // 16], name="W_5")
        b_5 = utils.bias_variable([NUM_OF_CHANNELS], name="b_5")
        deconv_shape = tf.pack([tf.shape(h_relu4)[0], IMAGE_SIZE, IMAGE_SIZE, NUM_OF_CHANNELS])
        h_conv_t5 = utils.conv2d_transpose_strided(h_relu4, W_5, b_5, output_shape=deconv_shape)
        pred_image = tf.nn.tanh(h_conv_t5, name='pred_image')
        utils.add_activation_summary(pred_image)
    return pred_image
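The shape arithmetic above is written with // so the sizes stay integers under Python 3 as well as Python 2 (the original used /). The upsampling mirror of conv2d_strided is conv2d_transpose_strided; a sketch assuming a fixed stride of 2, which matches the doubling from IMAGE_SIZE // 16 up to IMAGE_SIZE across the layers, and which explains the transposed filter layout [height, width, out_channels, in_channels] in the W_* shapes:

def conv2d_transpose_strided(x, W, b, output_shape=None, stride=2):
    # Transposed convolution that doubles height and width per call.
    conv = tf.nn.conv2d_transpose(x, W, output_shape, strides=[1, stride, stride, 1], padding="SAME")
    return tf.nn.bias_add(conv, b)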
Example 6: inference_res
# Required import: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import weight_variable [as alias]
def inference_res(input_image):
    W1 = utils.weight_variable([3, 3, 3, 32])
    b1 = utils.bias_variable([32])
    hconv_1 = tf.nn.relu(utils.conv2d_basic(input_image, W1, b1))
    h_norm = utils.local_response_norm(hconv_1)
    bottleneck_1 = utils.bottleneck_unit(h_norm, 16, 16, down_stride=True, name="res_1")
    bottleneck_2 = utils.bottleneck_unit(bottleneck_1, 8, 8, down_stride=True, name="res_2")
    bottleneck_3 = utils.bottleneck_unit(bottleneck_2, 16, 16, up_stride=True, name="res_3")
    bottleneck_4 = utils.bottleneck_unit(bottleneck_3, 32, 32, up_stride=True, name="res_4")
    W5 = utils.weight_variable([3, 3, 32, 3])
    b5 = utils.bias_variable([3])
    out = tf.nn.tanh(utils.conv2d_basic(bottleneck_4, W5, b5))
    return out
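local_response_norm here is presumably a thin wrapper over tf.nn.lrn; a guess at its body, with AlexNet-style parameters (the radius and coefficients are assumptions, not taken from the source):

def local_response_norm(x):
    # Hypothetical parameters for local response normalization.
    return tf.nn.lrn(x, depth_radius=5, bias=2, alpha=1e-4, beta=0.75)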
Example 7: decoder_fc
# Required import: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import weight_variable [as alias]
def decoder_fc(z):
    with tf.variable_scope("decoder") as scope:
        Wd_fc1 = utils.weight_variable([FLAGS.z_dim, 50], name="Wd_fc1")
        bd_fc1 = utils.bias_variable([50], name="bd_fc1")
        hd_relu1 = activation_function(tf.matmul(z, Wd_fc1) + bd_fc1, name="hdfc_1")
        Wd_fc2 = utils.weight_variable([50, 50], name="Wd_fc2")
        bd_fc2 = utils.bias_variable([50], name="bd_fc2")
        hd_relu2 = activation_function(tf.matmul(hd_relu1, Wd_fc2) + bd_fc2, name="hdfc_2")
        Wd_fc3 = utils.weight_variable([50, IMAGE_SIZE * IMAGE_SIZE], name="Wd_fc3")
        bd_fc3 = utils.bias_variable([IMAGE_SIZE * IMAGE_SIZE], name="bd_fc3")
        pred_image = tf.matmul(hd_relu2, Wd_fc3) + bd_fc3
    return pred_image
Example 8: encoder_fc
# Required import: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import weight_variable [as alias]
def encoder_fc(images):
    with tf.variable_scope("encoder") as scope:
        W_fc1 = utils.weight_variable([IMAGE_SIZE * IMAGE_SIZE, 50], name="W_fc1")
        b_fc1 = utils.bias_variable([50], name="b_fc1")
        h_relu1 = activation_function(tf.matmul(images, W_fc1) + b_fc1, name="hfc_1")
        W_fc2 = utils.weight_variable([50, 50], name="W_fc2")
        b_fc2 = utils.bias_variable([50], name="b_fc2")
        h_relu2 = activation_function(tf.matmul(h_relu1, W_fc2) + b_fc2, name="hfc_2")
        W_fc3 = utils.weight_variable([50, FLAGS.z_dim], name="W_fc3")
        b_fc3 = utils.bias_variable([FLAGS.z_dim], name="b_fc3")
        mu = tf.add(tf.matmul(h_relu2, W_fc3), b_fc3, name="mu")
        utils.add_activation_summary(mu)
        W_fc4 = utils.weight_variable([50, FLAGS.z_dim], name="W_fc4")
        b_fc4 = utils.bias_variable([FLAGS.z_dim], name="b_fc4")
        log_var = tf.add(tf.matmul(h_relu2, W_fc4), b_fc4, name="log_var")
        utils.add_activation_summary(log_var)
    return mu, log_var
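The mu / log_var pair returned here is the hallmark of a variational autoencoder. Sampling a latent code from them would use the standard reparameterization trick, sketched below (not part of the original example):

eps = tf.random_normal(tf.shape(mu))   # noise drawn from N(0, I)
z = mu + tf.exp(0.5 * log_var) * eps   # z ~ N(mu, exp(log_var))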
Example 9: discriminator
# Required import: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import weight_variable [as alias]
def discriminator(input_images, train_mode):
    # dropout_prob = 1.0
    # if train_mode:
    #     dropout_prob = 0.5
    W_conv0 = utils.weight_variable([5, 5, NUM_OF_CHANNELS, 64 * 1], name="W_conv0")
    b_conv0 = utils.bias_variable([64 * 1], name="b_conv0")
    h_conv0 = utils.conv2d_strided(input_images, W_conv0, b_conv0)
    h_bn0 = h_conv0  # utils.batch_norm(h_conv0, 64 * 1, train_mode, scope="disc_bn0")
    h_relu0 = utils.leaky_relu(h_bn0, 0.2, name="h_relu0")
    utils.add_activation_summary(h_relu0)
    W_conv1 = utils.weight_variable([5, 5, 64 * 1, 64 * 2], name="W_conv1")
    b_conv1 = utils.bias_variable([64 * 2], name="b_conv1")
    h_conv1 = utils.conv2d_strided(h_relu0, W_conv1, b_conv1)
    h_bn1 = utils.batch_norm(h_conv1, 64 * 2, train_mode, scope="disc_bn1")
    h_relu1 = utils.leaky_relu(h_bn1, 0.2, name="h_relu1")
    utils.add_activation_summary(h_relu1)
    W_conv2 = utils.weight_variable([5, 5, 64 * 2, 64 * 4], name="W_conv2")
    b_conv2 = utils.bias_variable([64 * 4], name="b_conv2")
    h_conv2 = utils.conv2d_strided(h_relu1, W_conv2, b_conv2)
    h_bn2 = utils.batch_norm(h_conv2, 64 * 4, train_mode, scope="disc_bn2")
    h_relu2 = utils.leaky_relu(h_bn2, 0.2, name="h_relu2")
    utils.add_activation_summary(h_relu2)
    W_conv3 = utils.weight_variable([5, 5, 64 * 4, 64 * 8], name="W_conv3")
    b_conv3 = utils.bias_variable([64 * 8], name="b_conv3")
    h_conv3 = utils.conv2d_strided(h_relu2, W_conv3, b_conv3)
    h_bn3 = utils.batch_norm(h_conv3, 64 * 8, train_mode, scope="disc_bn3")
    h_relu3 = utils.leaky_relu(h_bn3, 0.2, name="h_relu3")
    utils.add_activation_summary(h_relu3)
    shape = h_relu3.get_shape().as_list()
    h_3 = tf.reshape(h_relu3, [FLAGS.batch_size, (IMAGE_SIZE // 16) * (IMAGE_SIZE // 16) * shape[3]])
    W_4 = utils.weight_variable([h_3.get_shape().as_list()[1], 1], name="W_4")
    b_4 = utils.bias_variable([1], name="b_4")
    h_4 = tf.matmul(h_3, W_4) + b_4
    return tf.nn.sigmoid(h_4), h_4, h_relu3
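utils.leaky_relu with slope 0.2 is presumably the usual elementwise maximum; a one-line sketch under that assumption:

def leaky_relu(x, alpha=0.2, name=None):
    # max(x, alpha * x): identity for positive x, slope alpha for negative x.
    return tf.maximum(alpha * x, x, name=name)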
Example 10: inference
# Required import: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import weight_variable [as alias]
def inference(data, keep_prob):
    with tf.variable_scope("inference") as scope:
        weight_variable_size = IMAGE_SIZE * IMAGE_SIZE * 50 + 50 * 50 * 3 + 50 * 10
        bias_variable_size = 4 * 50 + 10
        print(weight_variable_size + bias_variable_size)
        variable = utils.weight_variable([weight_variable_size + bias_variable_size], name="variables")
        weight_variable = tf.slice(variable, [0], [weight_variable_size], name="weights")
        bias_variable = tf.slice(variable, [weight_variable_size], [bias_variable_size], name="biases")
        weight_offset = 0
        bias_offset = 0
        W_1 = tf.slice(weight_variable, [weight_offset], [IMAGE_SIZE * IMAGE_SIZE * 50], name="W_1")
        b_1 = tf.slice(bias_variable, [bias_offset], [50], name="b_1")
        h_1_relu = tf.nn.relu(tf.matmul(data, tf.reshape(W_1, [IMAGE_SIZE * IMAGE_SIZE, 50])) + b_1, name='h_1')
        h_1 = tf.nn.dropout(h_1_relu, keep_prob)
        utils.add_activation_summary(h_1)
        weight_offset += IMAGE_SIZE * IMAGE_SIZE * 50
        bias_offset += 50
        W_2 = tf.slice(weight_variable, [weight_offset], [50 * 50], name="W_2")
        b_2 = tf.slice(bias_variable, [bias_offset], [50], name="b_2")
        h_2_relu = tf.nn.relu(tf.matmul(h_1, tf.reshape(W_2, [50, 50])) + b_2, name='h_2')
        h_2 = tf.nn.dropout(h_2_relu, keep_prob)
        utils.add_activation_summary(h_2)
        weight_offset += 50 * 50
        bias_offset += 50
        W_3 = tf.slice(weight_variable, [weight_offset], [50 * 50], name="W_3")
        b_3 = tf.slice(bias_variable, [bias_offset], [50], name="b_3")
        h_3_relu = tf.nn.relu(tf.matmul(h_2, tf.reshape(W_3, [50, 50])) + b_3, name='h_3')
        h_3 = tf.nn.dropout(h_3_relu, keep_prob)
        utils.add_activation_summary(h_3)
        weight_offset += 50 * 50
        bias_offset += 50
        W_4 = tf.slice(weight_variable, [weight_offset], [50 * 50], name="W_4")
        b_4 = tf.slice(bias_variable, [bias_offset], [50], name="b_4")
        h_4_relu = tf.nn.relu(tf.matmul(h_3, tf.reshape(W_4, [50, 50])) + b_4, name='h_4')
        h_4 = tf.nn.dropout(h_4_relu, keep_prob)
        utils.add_activation_summary(h_4)
        weight_offset += 50 * 50
        bias_offset += 50
        W_final = tf.slice(weight_variable, [weight_offset], [50 * 10], name="W_final")
        b_final = tf.slice(bias_variable, [bias_offset], [10], name="b_final")
        pred = tf.nn.softmax(tf.matmul(h_4, tf.reshape(W_final, [50, 10])) + b_final, name='h_final')
        # utils.add_activation_summary(pred)
    return pred
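The offset bookkeeping is easiest to verify with a concrete size. Taking IMAGE_SIZE = 28 (a hypothetical MNIST-sized input), the single flat variable packs:

# weights: 28*28*50 + 3 * (50*50) + 50*10 = 39200 + 7500 + 500 = 47200
# biases:  4*50 + 10 = 210
# total:   47410 values, sliced apart with tf.slice as the offsets advance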
Example 11: inferece
# Required import: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import weight_variable [as alias]
def inferece(dataset, prob):
    with tf.name_scope("conv1") as scope:
        W_conv1 = utils.weight_variable([5, 5, 1, 32])
        b_conv1 = utils.bias_variable([32])
        tf.histogram_summary("W_conv1", W_conv1)
        tf.histogram_summary("b_conv1", b_conv1)
        h_conv1 = utils.conv2d_basic(dataset, W_conv1, b_conv1)
        h_1 = tf.nn.relu(h_conv1)
        h_pool1 = utils.max_pool_2x2(h_1)
        add_to_regularization_loss(W_conv1, b_conv1)
    with tf.name_scope("conv2") as scope:
        W_conv2 = utils.weight_variable([3, 3, 32, 64])
        b_conv2 = utils.bias_variable([64])
        tf.histogram_summary("W_conv2", W_conv2)
        tf.histogram_summary("b_conv2", b_conv2)
        h_conv2 = utils.conv2d_basic(h_pool1, W_conv2, b_conv2)
        h_2 = tf.nn.relu(h_conv2)
        h_pool2 = utils.max_pool_2x2(h_2)
        add_to_regularization_loss(W_conv2, b_conv2)
    with tf.name_scope("fc_1") as scope:
        image_size = IMAGE_SIZE // 4
        h_flat = tf.reshape(h_pool2, [-1, image_size * image_size * 64])
        W_fc1 = utils.weight_variable([image_size * image_size * 64, 256])
        b_fc1 = utils.bias_variable([256])
        tf.histogram_summary("W_fc1", W_fc1)
        tf.histogram_summary("b_fc1", b_fc1)
        h_fc1 = tf.nn.relu(tf.matmul(h_flat, W_fc1) + b_fc1)
        h_fc1_dropout = tf.nn.dropout(h_fc1, prob)
    with tf.name_scope("fc_2") as scope:
        W_fc2 = utils.weight_variable([256, NUM_LABELS])
        b_fc2 = utils.bias_variable([NUM_LABELS])
        tf.histogram_summary("W_fc2", W_fc2)
        tf.histogram_summary("b_fc2", b_fc2)
        # Feed the dropout output forward; the original snippet used h_fc1 here,
        # which silently bypassed the dropout layer.
        pred = tf.matmul(h_fc1_dropout, W_fc2) + b_fc2
    return pred
Example 12: inference
# Required import: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import weight_variable [as alias]
def inference(dataset):
    with tf.name_scope("conv1") as scope:
        W1 = utils.weight_variable([5, 5, 1, 32], name="W1")
        b1 = utils.bias_variable([32], name="b1")
        tf.histogram_summary("W1", W1)
        tf.histogram_summary("b1", b1)
        h_conv1 = utils.conv2d_basic(dataset, W1, b1)
        h_norm1 = utils.local_response_norm(h_conv1)
        h_1 = tf.nn.relu(h_norm1, name="conv1")
        h_pool1 = utils.max_pool_2x2(h_1)
    with tf.name_scope("conv2") as scope:
        W2 = utils.weight_variable([3, 3, 32, 64], name="W2")
        b2 = utils.bias_variable([64], name="b2")
        tf.histogram_summary("W2", W2)
        tf.histogram_summary("b2", b2)
        h_conv2 = utils.conv2d_basic(h_pool1, W2, b2)
        h_norm2 = utils.local_response_norm(h_conv2)
        h_2 = tf.nn.relu(h_norm2, name="conv2")
        h_pool2 = utils.max_pool_2x2(h_2)
    with tf.name_scope("conv3") as scope:
        W3 = utils.weight_variable([3, 3, 64, 128], name="W3")
        b3 = utils.bias_variable([128], name="b3")
        tf.histogram_summary("W3", W3)
        tf.histogram_summary("b3", b3)
        h_conv3 = utils.conv2d_basic(h_pool2, W3, b3)
        h_norm3 = utils.local_response_norm(h_conv3)
        h_3 = tf.nn.relu(h_norm3, name="conv3")
        h_pool3 = utils.max_pool_2x2(h_3)
    with tf.name_scope("conv4") as scope:
        W4 = utils.weight_variable([3, 3, 128, 256], name="W4")
        b4 = utils.bias_variable([256], name="b4")
        tf.histogram_summary("W4", W4)
        tf.histogram_summary("b4", b4)
        h_conv4 = utils.conv2d_basic(h_pool3, W4, b4)
        h_norm4 = utils.local_response_norm(h_conv4)
        h_4 = tf.nn.relu(h_norm4, name="conv4")
    with tf.name_scope("fc1") as scope:
        image_size = IMAGE_SIZE // 8
        h_flat = tf.reshape(h_4, [-1, image_size * image_size * 256])
        W_fc1 = utils.weight_variable([image_size * image_size * 256, 512], name="W_fc1")
        b_fc1 = utils.bias_variable([512], name="b_fc1")
        tf.histogram_summary("W_fc1", W_fc1)
        tf.histogram_summary("b_fc1", b_fc1)
        h_fc1 = tf.nn.relu(tf.matmul(h_flat, W_fc1) + b_fc1)
    with tf.name_scope("fc2") as scope:
        W_fc2 = utils.weight_variable([512, NUM_LABELS], name="W_fc2")
        b_fc2 = utils.bias_variable([NUM_LABELS], name="b_fc2")
        tf.histogram_summary("W_fc2", W_fc2)
        tf.histogram_summary("b_fc2", b_fc2)
        pred = tf.matmul(h_fc1, W_fc2) + b_fc2
    return pred
Example 13: decoder_conv
# Required import: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import weight_variable [as alias]
def decoder_conv(embedding):
    image_size = IMAGE_SIZE // 16
    with tf.name_scope("dec_fc") as scope:
        W_fc1 = utils.weight_variable([512, image_size * image_size * 256], name="W_fc1")
        b_fc1 = utils.bias_variable([image_size * image_size * 256], name="b_fc1")
        h_fc1 = tf.nn.relu(tf.matmul(embedding, W_fc1) + b_fc1)
    with tf.name_scope("dec_conv1") as scope:
        h_reshaped = tf.reshape(h_fc1, tf.pack([tf.shape(h_fc1)[0], image_size, image_size, 256]))
        W_conv_t1 = utils.weight_variable([3, 3, 128, 256], name="W_conv_t1")
        b_conv_t1 = utils.bias_variable([128], name="b_conv_t1")
        deconv_shape = tf.pack([tf.shape(h_fc1)[0], 2 * image_size, 2 * image_size, 128])
        h_conv_t1 = tf.nn.relu(
            utils.conv2d_transpose_strided(h_reshaped, W_conv_t1, b_conv_t1, output_shape=deconv_shape))
    with tf.name_scope("dec_conv2") as scope:
        W_conv_t2 = utils.weight_variable([3, 3, 64, 128], name="W_conv_t2")
        b_conv_t2 = utils.bias_variable([64], name="b_conv_t2")
        deconv_shape = tf.pack([tf.shape(h_conv_t1)[0], 4 * image_size, 4 * image_size, 64])
        h_conv_t2 = tf.nn.relu(
            utils.conv2d_transpose_strided(h_conv_t1, W_conv_t2, b_conv_t2, output_shape=deconv_shape))
    with tf.name_scope("dec_conv3") as scope:
        W_conv_t3 = utils.weight_variable([3, 3, 32, 64], name="W_conv_t3")
        b_conv_t3 = utils.bias_variable([32], name="b_conv_t3")
        deconv_shape = tf.pack([tf.shape(h_conv_t2)[0], 8 * image_size, 8 * image_size, 32])
        h_conv_t3 = tf.nn.relu(
            utils.conv2d_transpose_strided(h_conv_t2, W_conv_t3, b_conv_t3, output_shape=deconv_shape))
    with tf.name_scope("dec_conv4") as scope:
        W_conv_t4 = utils.weight_variable([3, 3, 3, 32], name="W_conv_t4")
        b_conv_t4 = utils.bias_variable([3], name="b_conv_t4")
        deconv_shape = tf.pack([tf.shape(h_conv_t3)[0], IMAGE_SIZE, IMAGE_SIZE, 3])
        pred_image = utils.conv2d_transpose_strided(h_conv_t3, W_conv_t4, b_conv_t4, output_shape=deconv_shape)
    return pred_image
Example 14: main
# Required import: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import weight_variable [as alias]
def main(argv=None):
    utils.maybe_download_and_extract(FLAGS.model_dir, DATA_URL)
    model_data = get_model_data()
    invert_image = get_image(FLAGS.image_path)
    print(invert_image.shape)
    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))
    processed_image = utils.process_image(invert_image, mean_pixel).astype(np.float32)
    weights = np.squeeze(model_data['layers'])
    invert_net = vgg_net(weights, processed_image)
    dummy_image = utils.weight_variable(invert_image.shape, stddev=np.std(invert_image) * 0.1)
    tf.histogram_summary("Image Output", dummy_image)
    image_net = vgg_net(weights, dummy_image)
    with tf.Session() as sess:
        invert_layer_features = invert_net[INVERT_LAYER].eval()
        loss = 2 * tf.nn.l2_loss(image_net[INVERT_LAYER] - invert_layer_features) / invert_layer_features.size
        tf.scalar_summary("Loss", loss)
        summary_op = tf.merge_all_summaries()
        train_op = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)
        best_loss = float('inf')
        best = None
        summary_writer = tf.train.SummaryWriter(FLAGS.log_dir)
        sess.run(tf.initialize_all_variables())
        for i in range(1, MAX_ITERATIONS):
            train_op.run()
            if i % 10 == 0 or i == MAX_ITERATIONS - 1:
                this_loss = loss.eval()
                print('Step %d, total loss: %g' % (i, this_loss))
                summary_writer.add_summary(summary_op.eval(), global_step=i)
                if this_loss < best_loss:
                    best_loss = this_loss
                    best = dummy_image.eval()
                    output = utils.unprocess_image(best.reshape(invert_image.shape[1:]), mean_pixel)
                    scipy.misc.imsave("invert_check.png", output)
        output = utils.unprocess_image(best.reshape(invert_image.shape[1:]), mean_pixel)
        scipy.misc.imsave("output.png", output)
Example 15: encoder
# Required import: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import weight_variable [as alias]
def encoder(dataset, train_mode):
    with tf.variable_scope("Encoder"):
        with tf.name_scope("enc_conv1") as scope:
            W_conv1 = utils.weight_variable_xavier_initialized([3, 3, 3, 32], name="W_conv1")
            b_conv1 = utils.bias_variable([32], name="b_conv1")
            h_conv1 = utils.conv2d_strided(dataset, W_conv1, b_conv1)
            h_bn1 = utils.batch_norm(h_conv1, 32, train_mode, scope="conv1_bn")
            h_relu1 = tf.nn.relu(h_bn1)
        with tf.name_scope("enc_conv2") as scope:
            W_conv2 = utils.weight_variable_xavier_initialized([3, 3, 32, 64], name="W_conv2")
            b_conv2 = utils.bias_variable([64], name="b_conv2")
            h_conv2 = utils.conv2d_strided(h_relu1, W_conv2, b_conv2)
            h_bn2 = utils.batch_norm(h_conv2, 64, train_mode, scope="conv2_bn")
            h_relu2 = tf.nn.relu(h_bn2)
        with tf.name_scope("enc_conv3") as scope:
            W_conv3 = utils.weight_variable_xavier_initialized([3, 3, 64, 128], name="W_conv3")
            b_conv3 = utils.bias_variable([128], name="b_conv3")
            h_conv3 = utils.conv2d_strided(h_relu2, W_conv3, b_conv3)
            h_bn3 = utils.batch_norm(h_conv3, 128, train_mode, scope="conv3_bn")
            h_relu3 = tf.nn.relu(h_bn3)
        with tf.name_scope("enc_conv4") as scope:
            W_conv4 = utils.weight_variable_xavier_initialized([3, 3, 128, 256], name="W_conv4")
            b_conv4 = utils.bias_variable([256], name="b_conv4")
            h_conv4 = utils.conv2d_strided(h_relu3, W_conv4, b_conv4)
            h_bn4 = utils.batch_norm(h_conv4, 256, train_mode, scope="conv4_bn")
            h_relu4 = tf.nn.relu(h_bn4)
        with tf.name_scope("enc_conv5") as scope:
            W_conv5 = utils.weight_variable_xavier_initialized([3, 3, 256, 512], name="W_conv5")
            b_conv5 = utils.bias_variable([512], name="b_conv5")
            h_conv5 = utils.conv2d_strided(h_relu4, W_conv5, b_conv5)
            h_bn5 = utils.batch_norm(h_conv5, 512, train_mode, scope="conv5_bn")
            h_relu5 = tf.nn.relu(h_bn5)
        with tf.name_scope("enc_fc") as scope:
            image_size = IMAGE_SIZE // 32
            h_relu5_flatten = tf.reshape(h_relu5, [-1, image_size * image_size * 512])
            W_fc = utils.weight_variable([image_size * image_size * 512, 1024], name="W_fc")
            b_fc = utils.bias_variable([1024], name="b_fc")
            encoder_val = tf.matmul(h_relu5_flatten, W_fc) + b_fc
    return encoder_val
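weight_variable_xavier_initialized is presumably Glorot/Xavier initialization driven by the filter's fan-in and fan-out; a sketch under that assumption, reusing the weight_variable helper sketched at the top of this page:

import numpy as np

def weight_variable_xavier_initialized(shape, name=None):
    # Glorot-style stddev from a [height, width, in_channels, out_channels] filter.
    stddev = np.sqrt(2.0 / (shape[2] + shape[3]))
    return weight_variable(shape, stddev=stddev, name=name)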