This page collects typical usage examples of the Python method tensorflow.contrib.layers.conv2d_transpose. If you are wondering what layers.conv2d_transpose does, how to call it, or what idiomatic usage looks like, the curated examples below should help. You can also explore the other members of the tensorflow.contrib.layers module.
The following 15 code examples of layers.conv2d_transpose are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
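Before the collected examples, a minimal self-contained sketch of the call (assuming TensorFlow 1.x, where tf.contrib is still available). The key shape rule: with padding='SAME' the output spatial size is input_size * stride; with padding='VALID' it is (input_size - 1) * stride + kernel_size.

import tensorflow as tf
from tensorflow.contrib import layers

x = tf.zeros([8, 7, 7, 16])  # NHWC: a batch of 7x7 feature maps

# 'SAME' padding: 7 * 2 = 14 -> output shape [8, 14, 14, 32]
y = layers.conv2d_transpose(x, num_outputs=32, kernel_size=3, stride=2,
                            padding='SAME')

# 'VALID' padding: (7 - 1) * 1 + 5 = 11 -> output shape [8, 11, 11, 32]
z = layers.conv2d_transpose(x, num_outputs=32, kernel_size=5, stride=1,
                            padding='VALID')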
Example 1: decoder
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d_transpose [as alias]
def decoder(input_tensor):
    '''Create decoder network.

    If input tensor is provided then decodes it, otherwise samples from
    a sampled vector.
    Args:
        input_tensor: a batch of vectors to decode
    Returns:
        A tensor that expresses the decoder network
    '''
    net = tf.expand_dims(input_tensor, 1)
    net = tf.expand_dims(net, 1)
    net = layers.conv2d_transpose(net, 128, 3, padding='VALID')
    net = layers.conv2d_transpose(net, 64, 5, padding='VALID')
    net = layers.conv2d_transpose(net, 32, 5, stride=2)
    net = layers.conv2d_transpose(
        net, 1, 5, stride=2, activation_fn=tf.nn.sigmoid)
    net = layers.flatten(net)
    return net
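Shape walkthrough (my reading, not stated in the original): a [batch, hidden] input becomes [batch, 1, 1, hidden] after the two expand_dims calls; the VALID 3x3 layer produces 3x3, the VALID 5x5 layer 7x7, and the two stride-2 SAME layers 14x14 and then 28x28, so flatten returns [batch, 784], i.e. flattened 28x28 MNIST-sized images. A hypothetical call (the 100-dim latent size is an assumption):

latent = tf.random_normal([32, 100])  # hypothetical 100-dim latent batch
images = decoder(latent)              # -> [32, 784], flattened 28x28 images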
Example 2: deconv2d
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d_transpose [as alias]
def deconv2d(input, deconv_info, is_train, name="deconv2d", info=False,
stddev=0.01, activation_fn=tf.nn.relu, norm='batch'):
with tf.variable_scope(name):
output_shape = deconv_info[0]
k = deconv_info[1]
s = deconv_info[2]
_ = layers.conv2d_transpose(
input,
num_outputs=output_shape,
weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
biases_initializer=tf.zeros_initializer(),
kernel_size=[k, k], stride=[s, s], padding='SAME'
)
_ = bn_act(_, is_train, norm=norm, activation_fn=activation_fn)
if info: log.info('{} {}'.format(name, _.get_shape().as_list()))
return _
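A hypothetical call, reading deconv_info as an (output_channels, kernel_size, stride) triple; this is inferred from the indexing above and assumes the repo's bn_act and log helpers are in scope:

x = tf.zeros([16, 8, 8, 64])
# stride 2 with 'SAME' padding doubles the spatial size: -> [16, 16, 16, 32]
y = deconv2d(x, (32, 4, 2), is_train=True, name='up1')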
Example 3: __call__
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d_transpose [as alias]
def __call__(self, i):
    with tf.variable_scope(self.name):
        if self.reuse:
            tf.get_variable_scope().reuse_variables()
        else:
            assert tf.get_variable_scope().reuse is False
            self.reuse = True
        g = tcl.fully_connected(i, self.size * self.size * 1024,
                                activation_fn=tf.nn.relu,
                                normalizer_fn=tcl.batch_norm)
        g = tf.reshape(g, (-1, self.size, self.size, 1024))  # size
        g = tcl.conv2d_transpose(g, 512, 3, stride=2,  # size*2
                                 activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm,
                                 padding='SAME',
                                 weights_initializer=tf.random_normal_initializer(0, 0.02))
        g = tcl.conv2d_transpose(g, 256, 3, stride=2,  # size*4
                                 activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm,
                                 padding='SAME',
                                 weights_initializer=tf.random_normal_initializer(0, 0.02))
        g = tcl.conv2d_transpose(g, 128, 3, stride=2,  # size*8
                                 activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm,
                                 padding='SAME',
                                 weights_initializer=tf.random_normal_initializer(0, 0.02))
        g = tcl.conv2d_transpose(g, self.channel, 3, stride=2,  # size*16
                                 activation_fn=tf.nn.sigmoid, padding='SAME',
                                 weights_initializer=tf.random_normal_initializer(0, 0.02))
        return g
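Output size note (my reading): the four stride-2 transposed convolutions upsample the size x size seed by a factor of 16, so with self.size = 4 the generator emits 64x64 images with self.channel channels, squashed into (0, 1) by the final sigmoid.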
Example 4: deconv2d
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d_transpose [as alias]
def deconv2d(input, deconv_info, is_train, name="deconv2d", info=False,
stddev=0.01, activation_fn=tf.nn.relu, batch_norm=True):
with tf.variable_scope(name):
output_shape = deconv_info[0]
k = deconv_info[1]
s = deconv_info[2]
_ = layers.conv2d_transpose(
input,
num_outputs=output_shape,
weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
biases_initializer=tf.zeros_initializer(),
kernel_size=[k, k], stride=[s, s], padding='SAME'
)
_ = bn_act(_, is_train, batch_norm=batch_norm, activation_fn=activation_fn)
if info: log.info('{} {}'.format(name, _))
return _
Example 5: deconv2d
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d_transpose [as alias]
def deconv2d(input, deconv_info, is_train, name="deconv2d", stddev=0.02,activation_fn='relu'):
with tf.variable_scope(name):
output_shape = deconv_info[0]
k = deconv_info[1]
s = deconv_info[2]
deconv = layers.conv2d_transpose(input,
num_outputs=output_shape,
weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
biases_initializer=tf.zeros_initializer(),
kernel_size=[k, k], stride=[s, s], padding='VALID')
if activation_fn == 'relu':
deconv = tf.nn.relu(deconv)
bn = tf.contrib.layers.batch_norm(deconv, center=True, scale=True,
decay=0.9, is_training=is_train, updates_collections=None)
elif activation_fn == 'tanh':
deconv = tf.nn.tanh(deconv)
return deconv
Example 6: autoencoder
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d_transpose [as alias]
def autoencoder(inputs):
    # encoder
    # 32 x 32 x 1   -> 16 x 16 x 32
    # 16 x 16 x 32  -> 8 x 8 x 16
    # 8 x 8 x 16    -> 2 x 2 x 8
    net = lays.conv2d(inputs, 32, [5, 5], stride=2, padding='SAME')
    net = lays.conv2d(net, 16, [5, 5], stride=2, padding='SAME')
    net = lays.conv2d(net, 8, [5, 5], stride=4, padding='SAME')
    # decoder
    # 2 x 2 x 8     -> 8 x 8 x 16
    # 8 x 8 x 16    -> 16 x 16 x 32
    # 16 x 16 x 32  -> 32 x 32 x 1
    net = lays.conv2d_transpose(net, 16, [5, 5], stride=4, padding='SAME')
    net = lays.conv2d_transpose(net, 32, [5, 5], stride=2, padding='SAME')
    net = lays.conv2d_transpose(net, 1, [5, 5], stride=2, padding='SAME',
                                activation_fn=tf.nn.tanh)
    return net

# read MNIST dataset
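The trailing comment suggests the original script goes on to load MNIST. A hedged completion sketch; the loss, optimizer, and zero-padding below are illustrative guesses, not the original code:

import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data')
inputs = tf.placeholder(tf.float32, [None, 32, 32, 1])
recon = autoencoder(inputs)
# plain MSE reconstruction loss; the original may rescale inputs to [-1, 1]
# to match the tanh output range
loss = tf.reduce_mean(tf.square(recon - inputs))
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch, _ = mnist.train.next_batch(64)
    batch = batch.reshape(-1, 28, 28, 1)
    # zero-pad the 28x28 digits to the 32x32 the encoder strides expect
    batch = np.pad(batch, ((0, 0), (2, 2), (2, 2), (0, 0)), mode='constant')
    sess.run(train_op, feed_dict={inputs: batch})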
Example 7: deconv2d
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d_transpose [as alias]
def deconv2d(input, output_shape, is_train, info=False, k=3, s=2, stddev=0.01,
             activation_fn=tf.nn.relu, norm='batch', name='deconv2d'):
    with tf.variable_scope(name):
        _ = layers.conv2d_transpose(
            input,
            num_outputs=output_shape,
            weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
            biases_initializer=tf.zeros_initializer(),
            activation_fn=None,
            kernel_size=[k, k], stride=[s, s], padding='SAME'
        )
        _ = norm_and_act(_, is_train, norm=norm, activation_fn=activation_fn)
        if info: print_info(name, _.get_shape().as_list(), activation_fn)
        return _
Example 8: deconv2d
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d_transpose [as alias]
def deconv2d(input, deconv_info, is_train, name="deconv2d",
             stddev=0.02, activation_fn=tf.nn.relu, batch_norm=True):
    with tf.variable_scope(name):
        output_shape = deconv_info[0]
        k = deconv_info[1]
        s = deconv_info[2]
        _ = layers.conv2d_transpose(
            input,
            num_outputs=output_shape,
            weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
            biases_initializer=tf.zeros_initializer(),
            kernel_size=[k, k], stride=[s, s], padding='SAME'
        )
        return bn_act(_, is_train, batch_norm=batch_norm, activation_fn=activation_fn)
Example 9: test_get_input_activation2
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d_transpose [as alias]
def test_get_input_activation2(self, rank, fn, op_name):
    g = tf.get_default_graph()
    inputs = tf.zeros([6] * rank)
    with arg_scope(
        [layers.conv2d, layers.conv2d_transpose, layers.separable_conv2d,
         layers.conv3d],
        scope='test_layer'):
        _ = fn(inputs)
    for op in g.get_operations():
        print(op.name)
    self.assertEqual(
        inputs,
        cc.get_input_activation(
            g.get_operation_by_name('test_layer/' + op_name)))
Example 10: testOpAssumptions
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d_transpose [as alias]
def testOpAssumptions(self):
    # Verify that op assumptions are true. For example, verify that specific
    # inputs are at expected indices.
    conv_transpose = layers.conv2d_transpose(
        self.batch_norm_op.outputs[0], num_outputs=8, kernel_size=3,
        scope='conv_transpose')
    layers.separable_conv2d(
        conv_transpose, num_outputs=9, kernel_size=3, scope='dwise_conv')
    layers.fully_connected(tf.zeros([1, 7]), 10, scope='fc')
    g = tf.get_default_graph()

    # Verify that FusedBatchNormV3 has gamma as inputs[1].
    self.assertEqual('conv1/BatchNorm/gamma/read:0',
                     self.batch_norm_op.inputs[1].name)

    # Verify that Conv2D has weights at expected index.
    index = op_handler_util.WEIGHTS_INDEX_DICT[self.conv_op.type]
    self.assertEqual('conv1/weights/read:0',
                     self.conv_op.inputs[index].name)

    # Verify that Conv2DBackpropInput has weights at expected index.
    conv_transpose_op = g.get_operation_by_name(
        'conv_transpose/conv2d_transpose')
    index = op_handler_util.WEIGHTS_INDEX_DICT[conv_transpose_op.type]
    self.assertEqual('conv_transpose/weights/read:0',
                     conv_transpose_op.inputs[index].name)

    # Verify that DepthwiseConv2dNative has weights at expected index.
    depthwise_conv_op = g.get_operation_by_name(
        'dwise_conv/separable_conv2d/depthwise')
    index = op_handler_util.WEIGHTS_INDEX_DICT[depthwise_conv_op.type]
    self.assertEqual('dwise_conv/depthwise_weights/read:0',
                     depthwise_conv_op.inputs[index].name)

    # Verify that MatMul has weights at expected index.
    matmul_op = g.get_operation_by_name('fc/MatMul')
    index = op_handler_util.WEIGHTS_INDEX_DICT[matmul_op.type]
    self.assertEqual('fc/weights/read:0',
                     matmul_op.inputs[index].name)
Example 11: __init__
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d_transpose [as alias]
def __init__(self, hidden_size, batch_size, learning_rate):
    self.input_tensor = tf.placeholder(tf.float32, [None, 28 * 28])

    with arg_scope([layers.conv2d, layers.conv2d_transpose],
                   activation_fn=concat_elu,
                   normalizer_fn=layers.batch_norm,
                   normalizer_params={'scale': True}):
        with tf.variable_scope("model"):
            D1 = discriminator(self.input_tensor)  # positive examples
            D_params_num = len(tf.trainable_variables())
            G = decoder(tf.random_normal([batch_size, hidden_size]))
            self.sampled_tensor = G

        with tf.variable_scope("model", reuse=True):
            D2 = discriminator(G)  # generated examples

    D_loss = self.__get_discrinator_loss(D1, D2)
    G_loss = self.__get_generator_loss(D2)

    params = tf.trainable_variables()
    D_params = params[:D_params_num]
    G_params = params[D_params_num:]
    # train_discrimator = optimizer.minimize(loss=D_loss, var_list=D_params)
    # train_generator = optimizer.minimize(loss=G_loss, var_list=G_params)
    global_step = tf.contrib.framework.get_or_create_global_step()
    self.train_discrimator = layers.optimize_loss(
        D_loss, global_step, learning_rate / 10, 'Adam', variables=D_params,
        update_ops=[])
    self.train_generator = layers.optimize_loss(
        G_loss, global_step, learning_rate, 'Adam', variables=G_params,
        update_ops=[])

    self.sess = tf.Session()
    self.sess.run(tf.global_variables_initializer())
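Design note: both optimize_loss calls share a single global_step, and the discriminator is trained with a learning rate ten times smaller than the generator's. Splitting tf.trainable_variables() at D_params_num relies on the discriminator's variables being created before the generator's.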
Example 12: __init__
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d_transpose [as alias]
def __init__(self, hidden_size, batch_size, learning_rate):
    self.input_tensor = tf.placeholder(
        tf.float32, [None, 28 * 28])

    with arg_scope([layers.conv2d, layers.conv2d_transpose],
                   activation_fn=tf.nn.elu,
                   normalizer_fn=layers.batch_norm,
                   normalizer_params={'scale': True}):
        with tf.variable_scope("model") as scope:
            encoded = encoder(self.input_tensor, hidden_size * 2)

            mean = encoded[:, :hidden_size]
            stddev = tf.sqrt(tf.exp(encoded[:, hidden_size:]))

            epsilon = tf.random_normal([tf.shape(mean)[0], hidden_size])
            input_sample = mean + epsilon * stddev

            output_tensor = decoder(input_sample)

        with tf.variable_scope("model", reuse=True) as scope:
            self.sampled_tensor = decoder(tf.random_normal(
                [batch_size, hidden_size]))

    vae_loss = self.__get_vae_cost(mean, stddev)
    rec_loss = self.__get_reconstruction_cost(
        output_tensor, self.input_tensor)

    loss = vae_loss + rec_loss
    self.train = layers.optimize_loss(
        loss, tf.contrib.framework.get_or_create_global_step(),
        learning_rate=learning_rate, optimizer='Adam', update_ops=[])

    self.sess = tf.Session()
    self.sess.run(tf.global_variables_initializer())
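The two private cost helpers are not shown in this snippet. Under the standard Gaussian VAE formulation, __get_vae_cost would be the KL divergence between N(mean, stddev^2) and N(0, I); the sketch below is a typical implementation and an assumption, not the original code:

def __get_vae_cost(self, mean, stddev, epsilon=1e-8):
    '''KL(N(mean, stddev^2) || N(0, I)), summed over batch and latent dims.'''
    return tf.reduce_sum(0.5 * (tf.square(mean) + tf.square(stddev) -
                                2.0 * tf.log(stddev + epsilon) - 1.0))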
Example 13: test_default_arg_scope_has_conv2d_transpose_op
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d_transpose [as alias]
def test_default_arg_scope_has_conv2d_transpose_op(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    self.assertTrue(self._get_scope_key(layers.conv2d_transpose) in scope)
Example 14: deconv2d
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d_transpose [as alias]
def deconv2d(input, output_shape, is_train, info=False, k=3, s=2, stddev=0.01,
             activation_fn=tf.nn.relu, norm='batch', name='deconv2d'):
    with tf.variable_scope(name):
        _ = layers.conv2d_transpose(
            input,
            num_outputs=output_shape,
            weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
            biases_initializer=tf.zeros_initializer(),
            activation_fn=None,
            kernel_size=[k, k], stride=[s, s], padding='SAME'
        )
        _ = norm_and_act(_, is_train, norm=norm, activation_fn=activation_fn)
        if info: print_info(name, _.get_shape().as_list(), activation_fn)
        return _
Example 15: generator
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import conv2d_transpose [as alias]
def generator(z):
    with tf.variable_scope('generator'):
        z = layers.fully_connected(z, num_outputs=4096)
        z = tf.reshape(z, [-1, 4, 4, 256])
        z = layers.conv2d_transpose(z, num_outputs=128, kernel_size=5, stride=2)
        z = layers.conv2d_transpose(z, num_outputs=64, kernel_size=5, stride=2)
        z = layers.conv2d_transpose(z, num_outputs=1, kernel_size=5, stride=2,
                                    activation_fn=tf.nn.sigmoid)
        return z[:, 2:-2, 2:-2, :]
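Shape check (my reading): the 4096-unit dense layer reshapes to [batch, 4, 4, 256]; the three stride-2 SAME layers give 8x8, 16x16, then 32x32, and the final crop z[:, 2:-2, 2:-2, :] trims 32x32 down to 28x28, i.e. MNIST-sized. A hypothetical call, with the 100-dim noise size assumed:

noise = tf.random_normal([64, 100])  # hypothetical noise dimension
fake = generator(noise)              # -> [64, 28, 28, 1], values in (0, 1)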