This article collects typical usage examples of the pix2pix.pix2pix_generator method from Python's nets.pix2pix module. If you are wondering how to call pix2pix.pix2pix_generator or what working examples of it look like, the curated snippets here may help. You can also explore further usage examples from the containing module, nets.pix2pix.
Three code examples of the pix2pix.pix2pix_generator method are shown below.
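The snippets are test methods from a tf.test.TestCase subclass, so they assume TensorFlow 1.x (tf.contrib must be available), the TF-Slim models repository (which provides the nets package) on the Python path, and a _reduced_default_blocks() helper defined on the test class. A minimal sketch of that surrounding scaffolding follows; the class name and the helper's block sizes are assumptions for illustration, not the repository's exact implementation:

import tensorflow as tf  # TensorFlow 1.x: the examples rely on tf.contrib

from nets import pix2pix  # TF-Slim models repository on the Python path


class Pix2PixGeneratorTest(tf.test.TestCase):
  # Hypothetical scaffolding for the test methods shown in the examples below.

  def _reduced_default_blocks(self):
    # Assumed stand-in for the helper the real test class defines: a short
    # stack of encoder/decoder blocks so the test graph stays small.
    return [pix2pix.Block(32, 0.5), pix2pix.Block(64, 0.5)]


if __name__ == '__main__':
  tf.test.main()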
Example 1: test_output_size_nn_upsample_conv
# Required import: from nets import pix2pix
# Or: from nets.pix2pix import pix2pix_generator
def test_output_size_nn_upsample_conv(self):
  batch_size = 2
  height, width = 256, 256
  num_outputs = 4

  images = tf.ones((batch_size, height, width, 3))
  with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
    logits, _ = pix2pix.pix2pix_generator(
        images, num_outputs, blocks=self._reduced_default_blocks(),
        upsample_method='nn_upsample_conv')

  with self.test_session() as session:
    session.run(tf.global_variables_initializer())
    np_outputs = session.run(logits)
    self.assertListEqual([batch_size, height, width, num_outputs],
                         list(np_outputs.shape))
Example 2: test_output_size_conv2d_transpose
# Required import: from nets import pix2pix
# Or: from nets.pix2pix import pix2pix_generator
def test_output_size_conv2d_transpose(self):
  batch_size = 2
  height, width = 256, 256
  num_outputs = 4

  images = tf.ones((batch_size, height, width, 3))
  with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
    logits, _ = pix2pix.pix2pix_generator(
        images, num_outputs, blocks=self._reduced_default_blocks(),
        upsample_method='conv2d_transpose')

  with self.test_session() as session:
    session.run(tf.global_variables_initializer())
    np_outputs = session.run(logits)
    self.assertListEqual([batch_size, height, width, num_outputs],
                         list(np_outputs.shape))
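Examples 1 and 2 differ only in the upsample_method argument. In the TF-Slim pix2pix implementation, 'nn_upsample_conv' upsamples with a nearest-neighbor resize followed by a convolution, while 'conv2d_transpose' uses a transposed convolution; in both cases the generator is expected to return logits with the same spatial size as the input images.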
Example 3: test_block_number_dictates_number_of_layers
# Required import: from nets import pix2pix
# Or: from nets.pix2pix import pix2pix_generator
def test_block_number_dictates_number_of_layers(self):
  batch_size = 2
  height, width = 256, 256
  num_outputs = 4

  images = tf.ones((batch_size, height, width, 3))
  blocks = [
      pix2pix.Block(64, 0.5),
      pix2pix.Block(128, 0),
  ]
  with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()):
    _, end_points = pix2pix.pix2pix_generator(
        images, num_outputs, blocks)

  num_encoder_layers = 0
  num_decoder_layers = 0
  for end_point in end_points:
    if end_point.startswith('encoder'):
      num_encoder_layers += 1
    elif end_point.startswith('decoder'):
      num_decoder_layers += 1

  self.assertEqual(num_encoder_layers, len(blocks))
  self.assertEqual(num_decoder_layers, len(blocks))
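The second value returned by pix2pix_generator is a dictionary of end points keyed by layer name; this test counts the keys that start with 'encoder' and 'decoder' to confirm that the generator builds exactly one encoder layer and one decoder layer per Block passed in.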