This article collects typical usage examples of the ops.conv2d method in Python. If you are unsure what ops.conv2d does or how to use it, the curated examples below should help; you can also explore the ops module that this method belongs to for related usage examples.
The following shows 15 code examples of ops.conv2d, sorted by popularity by default.
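Note that each of these projects defines its own conv2d helper in its ops.py, so signatures vary from example to example. As a point of reference, a minimal DCGAN-style helper might look like the sketch below; it illustrates the common pattern, not the implementation behind any particular example.

import tensorflow as tf

def conv2d(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2,
           stddev=0.02, name='conv2d'):
    # 5x5 kernel, stride-2 convolution with truncated-normal initialization,
    # the pattern most DCGAN-era ops.py files follow.
    with tf.variable_scope(name):
        w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
        b = tf.get_variable('biases', [output_dim],
                            initializer=tf.constant_initializer(0.0))
        return tf.nn.bias_add(conv, b)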
Example 1: __call__
# Required module: import ops [as alias]
# Or: from ops import conv2d [as alias]
def __call__(self, input):
    with tf.variable_scope(self.name, reuse=self._reuse):
        if not self._reuse:
            print('\033[93m'+self.name+'\033[0m')
        _ = input
        num_channel = [32, 64, 128, 256, 256, 512]
        num_layer = np.ceil(np.log2(min(_.shape.as_list()[1:3]))).astype(np.int)
        for i in range(num_layer):
            ch = num_channel[i] if i < len(num_channel) else 512
            _ = conv2d(_, ch, self._is_train, info=not self._reuse,
                       norm=self._norm_type, name='conv{}'.format(i+1))
        _ = conv2d(_, int(num_channel[i]/4), self._is_train, k=1, s=1,
                   info=not self._reuse, norm='None', name='conv{}'.format(i+2))
        _ = conv2d(_, self._num_class+1, self._is_train, k=1, s=1, info=not self._reuse,
                   activation_fn=None, norm='None',
                   name='conv{}'.format(i+3))
        _ = tf.squeeze(_)
        if not self._reuse:
            log.info('discriminator output {}'.format(_.shape.as_list()))
        self._reuse = True
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.name)
        return tf.nn.sigmoid(_), _
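A hypothetical usage of such a reusable module follows; the class name and constructor arguments are assumptions for illustration, not from the source.

# Hypothetical: the constructor signature is assumed.
D = Discriminator('Discriminator', num_class=10, norm_type='batch', is_train=True)
real_prob, real_logits = D(real_images)   # first call builds the variables
fake_prob, fake_logits = D(fake_images)   # later calls reuse them via self._reuse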
Example 2: discriminator
# Required module: import ops [as alias]
# Or: from ops import conv2d [as alias]
def discriminator(self, opts, input_, is_training,
                  prefix='DISCRIMINATOR', reuse=False):
    """Discriminator function, suitable for simple toy experiments."""
    num_filters = opts['d_num_filters']
    with tf.variable_scope(prefix, reuse=reuse):
        h0 = ops.conv2d(opts, input_, num_filters, scope='h0_conv')
        h0 = ops.batch_norm(opts, h0, is_training, reuse, scope='bn_layer1')
        h0 = ops.lrelu(h0)
        h1 = ops.conv2d(opts, h0, num_filters * 2, scope='h1_conv')
        h1 = ops.batch_norm(opts, h1, is_training, reuse, scope='bn_layer2')
        h1 = ops.lrelu(h1)
        h2 = ops.conv2d(opts, h1, num_filters * 4, scope='h2_conv')
        h2 = ops.batch_norm(opts, h2, is_training, reuse, scope='bn_layer3')
        h2 = ops.lrelu(h2)
        h3 = ops.linear(opts, h2, 1, scope='h3_lin')
        return h3
Example 3: discriminator
# Required module: import ops [as alias]
# Or: from ops import conv2d [as alias]
def discriminator(self, opts, input_, is_training,
                  prefix='DISCRIMINATOR', reuse=False):
    """Encoder function, suitable for simple toy experiments."""
    num_filters = opts['d_num_filters']
    with tf.variable_scope(prefix, reuse=reuse):
        h0 = ops.conv2d(opts, input_, num_filters // 8, scope='h0_conv')
        h0 = ops.batch_norm(opts, h0, is_training, reuse, scope='bn_layer1')
        h0 = tf.nn.relu(h0)
        h1 = ops.conv2d(opts, h0, num_filters // 4, scope='h1_conv')
        h1 = ops.batch_norm(opts, h1, is_training, reuse, scope='bn_layer2')
        h1 = tf.nn.relu(h1)
        h2 = ops.conv2d(opts, h1, num_filters // 2, scope='h2_conv')
        h2 = ops.batch_norm(opts, h2, is_training, reuse, scope='bn_layer3')
        h2 = tf.nn.relu(h2)
        h3 = ops.conv2d(opts, h2, num_filters, scope='h3_conv')
        h3 = ops.batch_norm(opts, h3, is_training, reuse, scope='bn_layer4')
        h3 = tf.nn.relu(h3)
        # Already has NaNs!!
        latent_mean = ops.linear(opts, h3, opts['latent_space_dim'], scope='h3_lin')
        log_latent_sigmas = ops.linear(opts, h3, opts['latent_space_dim'], scope='h3_lin_sigma')
        return latent_mean, log_latent_sigmas
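Because this variant returns a mean and a log-sigma vector, it is normally paired with the reparameterization trick when sampling latent codes. A minimal sketch, assuming log_latent_sigmas holds log-variances:

# Reparameterization sketch (assumed usage, not from the source):
mean, log_sigmas = model.discriminator(opts, images, is_training=True)
eps = tf.random_normal(tf.shape(mean))
z = mean + tf.exp(log_sigmas / 2.) * eps  # z ~ N(mean, exp(log_sigmas))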
Example 4: discriminator
# Required module: import ops [as alias]
# Or: from ops import conv2d [as alias]
def discriminator(self, image, y=None, reuse=False):
    if reuse:
        tf.get_variable_scope().reuse_variables()
    s = self.output_size
    if np.mod(s, 16) == 0:
        h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
        h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
        h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv')))
        h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, name='d_h3_conv')))
        h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h3_lin')
        return tf.nn.sigmoid(h4), h4
    else:
        h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
        h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
        h2 = linear(tf.reshape(h1, [self.batch_size, -1]), 1, 'd_h2_lin')
        if not self.config.use_kernel:
            return tf.nn.sigmoid(h2), h2
        else:
            return tf.nn.sigmoid(h2), h2, h1, h0
Example 5: discriminator_labeler
# Required module: import ops [as alias]
# Or: from ops import conv2d [as alias]
def discriminator_labeler(image, output_dim, config, reuse=None):
    batch_size = tf.shape(image)[0]
    with tf.variable_scope("disc_labeler", reuse=reuse) as vs:
        dl_bn1 = batch_norm(name='dl_bn1')
        dl_bn2 = batch_norm(name='dl_bn2')
        dl_bn3 = batch_norm(name='dl_bn3')
        h0 = lrelu(conv2d(image, config.df_dim, name='dl_h0_conv'))         # 16,32,32,64
        h1 = lrelu(dl_bn1(conv2d(h0, config.df_dim*2, name='dl_h1_conv')))  # 16,16,16,128
        h2 = lrelu(dl_bn2(conv2d(h1, config.df_dim*4, name='dl_h2_conv')))  # 16,8,8,256
        h3 = lrelu(dl_bn3(conv2d(h2, config.df_dim*8, name='dl_h3_conv')))
        dim3 = np.prod(h3.get_shape().as_list()[1:])
        h3_flat = tf.reshape(h3, [-1, dim3])
        D_labels_logits = linear(h3_flat, output_dim, 'dl_h3_Label')
        D_labels = tf.nn.sigmoid(D_labels_logits)
        variables = tf.contrib.framework.get_variables(vs)
        return D_labels, D_labels_logits, variables
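A plausible way to train the labeler is a per-attribute sigmoid cross-entropy against ground-truth labels. In this sketch, true_labels and num_attributes are assumptions, as are the optimizer settings:

D_labels, D_labels_logits, d_vars = discriminator_labeler(
    images, output_dim=num_attributes, config=config)
label_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        labels=true_labels, logits=D_labels_logits))
train_op = tf.train.AdamOptimizer(1e-4).minimize(label_loss, var_list=d_vars)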
Example 6: discriminator_gen_labeler
# Required module: import ops [as alias]
# Or: from ops import conv2d [as alias]
def discriminator_gen_labeler(image, output_dim, config, reuse=None):
    batch_size = tf.shape(image)[0]
    with tf.variable_scope("disc_gen_labeler", reuse=reuse) as vs:
        dl_bn1 = batch_norm(name='dl_bn1')
        dl_bn2 = batch_norm(name='dl_bn2')
        dl_bn3 = batch_norm(name='dl_bn3')
        h0 = lrelu(conv2d(image, config.df_dim, name='dgl_h0_conv'))         # 16,32,32,64
        h1 = lrelu(dl_bn1(conv2d(h0, config.df_dim*2, name='dgl_h1_conv')))  # 16,16,16,128
        h2 = lrelu(dl_bn2(conv2d(h1, config.df_dim*4, name='dgl_h2_conv')))  # 16,8,8,256
        h3 = lrelu(dl_bn3(conv2d(h2, config.df_dim*8, name='dgl_h3_conv')))
        dim3 = np.prod(h3.get_shape().as_list()[1:])
        h3_flat = tf.reshape(h3, [-1, dim3])
        D_labels_logits = linear(h3_flat, output_dim, 'dgl_h3_Label')
        D_labels = tf.nn.sigmoid(D_labels_logits)
        variables = tf.contrib.framework.get_variables(vs)
        return D_labels, D_labels_logits, variables
Example 7: discriminator_on_z
# Required module: import ops [as alias]
# Or: from ops import conv2d [as alias]
def discriminator_on_z(image, config, reuse=None):
    batch_size = tf.shape(image)[0]
    with tf.variable_scope("disc_z_labeler", reuse=reuse) as vs:
        dl_bn1 = batch_norm(name='dl_bn1')
        dl_bn2 = batch_norm(name='dl_bn2')
        dl_bn3 = batch_norm(name='dl_bn3')
        h0 = lrelu(conv2d(image, config.df_dim, name='dzl_h0_conv'))         # 16,32,32,64
        h1 = lrelu(dl_bn1(conv2d(h0, config.df_dim*2, name='dzl_h1_conv')))  # 16,16,16,128
        h2 = lrelu(dl_bn2(conv2d(h1, config.df_dim*4, name='dzl_h2_conv')))  # 16,8,8,256
        h3 = lrelu(dl_bn3(conv2d(h2, config.df_dim*8, name='dzl_h3_conv')))
        dim3 = np.prod(h3.get_shape().as_list()[1:])
        h3_flat = tf.reshape(h3, [-1, dim3])
        D_labels_logits = linear(h3_flat, config.z_dim, 'dzl_h3_Label')
        D_labels = tf.nn.tanh(D_labels_logits)
        variables = tf.contrib.framework.get_variables(vs)
        return D_labels, variables
Example 8: discriminate
# Required module: import ops [as alias]
# Or: from ops import conv2d [as alias]
def discriminate(self, x_var, reuse=False):
    with tf.variable_scope("discriminator") as scope:
        if reuse:
            scope.reuse_variables()
        conv1 = lrelu(conv2d(x_var, output_dim=64, name='dis_conv1'))
        conv2 = lrelu(instance_norm(conv2d(conv1, output_dim=128, name='dis_conv2'), scope='dis_bn1'))
        conv3 = lrelu(instance_norm(conv2d(conv2, output_dim=256, name='dis_conv3'), scope='dis_bn2'))
        conv4 = conv2d(conv3, output_dim=512, name='dis_conv4')
        middle_conv = conv4
        conv4 = lrelu(instance_norm(conv4, scope='dis_bn3'))
        conv5 = lrelu(instance_norm(conv2d(conv4, output_dim=1024, name='dis_conv5'), scope='dis_bn4'))
        conv6 = conv2d(conv5, output_dim=2, k_w=4, k_h=4, d_h=1, d_w=1, padding='VALID', name='dis_conv6')
        return conv6, middle_conv
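The returned middle_conv activation lends itself to a feature-matching term alongside the adversarial output; a hypothetical sketch:

# Hypothetical feature-matching loss on the middle layer:
logits_real, feat_real = model.discriminate(real_images)
logits_fake, feat_fake = model.discriminate(fake_images, reuse=True)
fm_loss = tf.reduce_mean(tf.abs(feat_real - feat_fake))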
Example 9: encode_decode_1
# Required module: import ops [as alias]
# Or: from ops import conv2d [as alias]
def encode_decode_1(self, x, reuse=False):
    with tf.variable_scope("encode_decode_1") as scope:
        if reuse:
            scope.reuse_variables()
        conv1 = lrelu(instance_norm(conv2d(x, output_dim=64, k_w=5, k_h=5, d_w=1, d_h=1,
                                           name='e_c1'), scope='e_in1'))
        conv2 = lrelu(instance_norm(conv2d(conv1, output_dim=128, name='e_c2'), scope='e_in2'))
        conv3 = lrelu(instance_norm(conv2d(conv2, output_dim=256, name='e_c3'), scope='e_in3'))
        # for x_{1}
        de_conv1 = lrelu(instance_norm(de_conv(conv3, output_shape=[self.batch_size, 64, 64, 128],
                                               k_h=3, k_w=3, name='e_d1'), scope='e_in4'))
        de_conv2 = lrelu(instance_norm(de_conv(de_conv1, output_shape=[self.batch_size, 128, 128, 64],
                                               k_w=3, k_h=3, name='e_d2'), scope='e_in5'))
        x_tilde1 = conv2d(de_conv2, output_dim=3, d_h=1, d_w=1, name='e_c4')
        return x_tilde1
Example 10: encode_decode_2
# Required module: import ops [as alias]
# Or: from ops import conv2d [as alias]
def encode_decode_2(self, x, reuse=False):
    with tf.variable_scope("encode_decode_2") as scope:
        if reuse:
            scope.reuse_variables()
        conv1 = lrelu(instance_norm(conv2d(x, output_dim=64, k_w=5, k_h=5, d_w=1, d_h=1,
                                           name='e_c1'), scope='e_in1'))
        conv2 = lrelu(instance_norm(conv2d(conv1, output_dim=128, name='e_c2'), scope='e_in2'))
        conv3 = lrelu(instance_norm(conv2d(conv2, output_dim=256, name='e_c3'), scope='e_in3'))
        # for x_{1}
        de_conv1 = lrelu(instance_norm(de_conv(conv3, output_shape=[self.batch_size, 64, 64, 128],
                                               k_h=3, k_w=3, name='e_d1'), scope='e_in4'))
        de_conv2 = lrelu(instance_norm(de_conv(de_conv1, output_shape=[self.batch_size, 128, 128, 64],
                                               k_w=3, k_h=3, name='e_d2'), scope='e_in5'))
        x_tilde = conv2d(de_conv2, output_dim=3, d_h=1, d_w=1, name='e_c4')
        return x_tilde
Example 11: _create_discriminator
# Required module: import ops [as alias]
# Or: from ops import conv2d [as alias]
def _create_discriminator(self, x, train=True, reuse=False, name="discriminator"):
    with tf.variable_scope(name) as scope:
        if reuse:
            scope.reuse_variables()
        h = x
        for i in range(self.num_conv_layers):
            h = lrelu(batch_norm(conv2d(h, self.num_dis_feature_maps * (2 ** i),
                                        stddev=0.02, name="d_h{}_conv".format(i)),
                                 is_training=train,
                                 scope="d_bn{}".format(i)))
        dim = h.get_shape()[1:].num_elements()
        h = tf.reshape(h, [-1, dim])
        d_bin_logits = linear(h, 1, scope='d_bin_logits')
        d_mul_logits = linear(h, self.num_gens, scope='d_mul_logits')
        return d_bin_logits, d_mul_logits
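The two heads suggest a binary real/fake loss plus a classification loss over which generator produced a sample. A sketch of one way they might be combined (assumed, not from the source; gen_ids is hypothetical):

bin_logits, mul_logits = model._create_discriminator(x_fake, train=True, reuse=True)
adv_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.zeros_like(bin_logits), logits=bin_logits))
cls_loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=gen_ids, logits=mul_logits))  # gen_ids: index of the source generator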
Example 12: dcgan_encoder
# Required module: import ops [as alias]
# Or: from ops import conv2d [as alias]
def dcgan_encoder(opts, inputs, is_training=False, reuse=False):
    num_units = opts['e_num_filters']
    num_layers = opts['e_num_layers']
    layer_x = inputs
    for i in range(num_layers):
        scale = 2 ** (num_layers - i - 1)
        layer_x = ops.conv2d(opts, layer_x, num_units // scale,
                             scope='h%d_conv' % i)
        if opts['batch_norm']:
            layer_x = ops.batch_norm(opts, layer_x, is_training,
                                     reuse, scope='h%d_bn' % i)
        layer_x = tf.nn.relu(layer_x)
    if opts['e_noise'] != 'gaussian':
        res = ops.linear(opts, layer_x, opts['zdim'], scope='hfinal_lin')
        return res
    else:
        mean = ops.linear(opts, layer_x, opts['zdim'], scope='mean_lin')
        log_sigmas = ops.linear(opts, layer_x,
                                opts['zdim'], scope='log_sigmas_lin')
        return mean, log_sigmas
Example 13: discriminator
# Required module: import ops [as alias]
# Or: from ops import conv2d [as alias]
def discriminator(self, x, reuse=False):
    with tf.variable_scope("discriminator", reuse=reuse):
        def residual_block(x, filters, name='residual_block'):
            # conv + leaky ReLU; the normalization call in the original
            # snippet was unclear and is omitted, and the ops.conv2d
            # signature is assumed.
            x = ops.conv2d(x, filters, name=name)
            x = tf.nn.leaky_relu(x)
            return x
        # a 2-D waveform batch [batch, samples] gets a channel axis appended
        if x.get_shape().ndims == 2:
            x = tf.expand_dims(x, axis=-1)
        elif x.get_shape().ndims != 3:
            raise ValueError("[-] disc: waveform must be 2-D or 3-D")
        for idx, f in enumerate(self.num_blocks):
            x = residual_block(x, f, name='residual_block_%d' % idx)
        return x
Example 14: inception_v3_parameters
# Required module: import ops [as alias]
# Or: from ops import conv2d [as alias]
def inception_v3_parameters(weight_decay=0.00004, stddev=0.1,
                            batch_norm_decay=0.9997, batch_norm_epsilon=0.001):
    """Yields the scope with the default parameters for inception_v3.

    Args:
      weight_decay: the weight decay for weights variables.
      stddev: standard deviation of the truncated Gaussian weight distribution.
      batch_norm_decay: decay for the moving average of batch_norm momentums.
      batch_norm_epsilon: small float added to variance to avoid dividing by zero.

    Yields:
      an arg_scope with the parameters needed for inception_v3.
    """
    # Set weight_decay for weights in Conv and FC layers.
    with scopes.arg_scope([ops.conv2d, ops.fc],
                          weight_decay=weight_decay):
        # Set stddev, activation and parameters for batch_norm.
        with scopes.arg_scope([ops.conv2d],
                              stddev=stddev,
                              activation=tf.nn.relu,
                              batch_norm_params={
                                  'decay': batch_norm_decay,
                                  'epsilon': batch_norm_epsilon}) as arg_scope:
            yield arg_scope
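Because the function yields, it is meant to be used as a context manager (which assumes it is wrapped with contextlib.contextmanager in the full source); inside the block, every ops.conv2d call picks up the shared parameters:

# Usage sketch, assuming the contextlib.contextmanager wrapper:
with inception_v3_parameters(weight_decay=0.00004, stddev=0.1):
    net = ops.conv2d(images, 32, [3, 3], stride=2, scope='conv0')
    net = ops.conv2d(net, 32, [3, 3], scope='conv1')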
Example 15: __call__
# Required module: import ops [as alias]
# Or: from ops import conv2d [as alias]
def __call__(self, input):
    with tf.variable_scope(self.name, reuse=self._reuse):
        if not self._reuse:
            print('\033[93m'+self.name+'\033[0m')
        _ = input
        num_channel = [32, 64, 128, 256, 256, 512, 512, 512, 512]
        assert 0 < self._num_conv <= 10
        for i in range(self._num_conv):
            _ = conv2d(_, num_channel[i], self._is_train, info=not self._reuse,
                       norm=self._norm_type, name='conv{}'.format(i+1))
            if self._num_conv - i <= self._num_res_block:
                _ = conv2d_res(
                    _, self._is_train, info=not self._reuse,
                    norm=self._norm_type,
                    name='res_block{}'.format(self._num_res_block - self._num_conv + i + 1))
        _ = conv2d(_, int(num_channel[i]/4), self._is_train, k=1, s=1,
                   info=not self._reuse, norm='none', name='conv{}'.format(i+2))
        _ = conv2d(_, 1, self._is_train, k=1, s=1, info=not self._reuse,
                   activation_fn=None, norm='none',
                   name='conv{}'.format(i+3))
        self._reuse = True
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.name)
        return _