This article collects typical usage examples of the Python method prettytensor.wrap. If you have been wondering what prettytensor.wrap does, how to call it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples of the prettytensor module itself.
The following 15 code examples of prettytensor.wrap are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
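Before the examples, here is a minimal, self-contained sketch of the core pattern they all share: pt.wrap converts a plain TensorFlow tensor into a chainable PrettyTensor object. The placeholder shape and layer sizes below are illustrative assumptions, not taken from any of the examples.

import prettytensor as pt
import tensorflow as tf

# Wrap a plain TF1-style placeholder so layer methods can be chained onto it.
inputs = tf.placeholder(tf.float32, [None, 784])
logits = (pt.wrap(inputs)
          .fully_connected(128, activation_fn=tf.nn.relu)
          .fully_connected(10, activation_fn=None))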
Example 1: _build_decoder
# Required import: import prettytensor [as alias]
# Or: from prettytensor import wrap [as alias]
def _build_decoder(self, weight_init=tf.truncated_normal):
    """Construct decoder network: placeholders, operations, optimizer,
    extract gradient back-prop for encoding layer"""
    self._clamped = tf.placeholder(tf.float32, (FLAGS.batch_size, self.layer_narrow))
    self._reconstruction = tf.placeholder(tf.float32, self._batch_shape)

    clamped_init = np.zeros((FLAGS.batch_size, self.layer_narrow), dtype=np.float32)
    self._clamped_variable = tf.Variable(clamped_init, name='clamped')
    self._assign_clamped = tf.assign(self._clamped_variable, self._clamped)

    # http://stackoverflow.com/questions/40194389/how-to-propagate-gradient-into-a-variable-after-assign-operation
    self._decode = (
        pt.wrap(self._clamped_variable)
        .fully_connected(self.layer_decoder, name='decoder_1')
        .fully_connected(np.prod(self._image_shape), init=weight_init, name='output')
        .reshape(self._batch_shape))

    # variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.decoder_scope)
    self._decoder_loss = self._build_reco_loss(self._reconstruction)
    self._opt_decoder = self._optimizer(learning_rate=FLAGS.learning_rate)
    self._train_decoder = self._opt_decoder.minimize(self._decoder_loss)
    self._clamped_grad, = tf.gradients(self._decoder_loss, [self._clamped_variable])

# DATA
Example 2: encoder
# Required import: import prettytensor [as alias]
# Or: from prettytensor import wrap [as alias]
def encoder(self, inputs, latent_size, activ=tf.nn.elu, phase=pt.Phase.train):
    with pt.defaults_scope(activation_fn=activ,
                           batch_normalize=True,
                           learned_moments_update_rate=0.0003,
                           variance_epsilon=0.001,
                           scale_after_normalization=True,
                           phase=phase):
        params = (pt.wrap(inputs).
                  reshape([-1, self.input_shape[0], self.input_shape[1], 1]).
                  conv2d(5, 32, stride=2).
                  conv2d(5, 64, stride=2).
                  conv2d(5, 128, edges='VALID').
                  # dropout(0.9).
                  flatten().
                  fully_connected(self.latent_size * 2, activation_fn=None)).tensor

    mean = params[:, :self.latent_size]
    stddev = params[:, self.latent_size:]
    return [mean, stddev]
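In a VAE-style model, the [mean, stddev] pair returned above is typically consumed via the reparameterization trick. A hedged sketch of that downstream step (`model` is a hypothetical instance; whether the second half is a raw or log standard deviation depends on the surrounding model, and here it is assumed to be a log standard deviation):

mean, log_stddev = model.encoder(inputs, latent_size)
eps = tf.random_normal(tf.shape(mean))
z = mean + tf.exp(log_stddev) * eps  # sampled latent code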
Example 3: __init__
# Required import: import prettytensor [as alias]
# Or: from prettytensor import wrap [as alias]
def __init__(self, scope):
    with tf.variable_scope("%s_shared" % scope):
        self.obs = obs = tf.placeholder(
            tf.float32, shape=[None] + pms.obs_shape, name="%s_obs" % scope)
        self.action_n = tf.placeholder(tf.float32, shape=[None, pms.action_shape],
                                       name="%s_action" % scope)
        self.advant = tf.placeholder(tf.float32, shape=[None], name="%s_advant" % scope)
        self.old_dist_means_n = tf.placeholder(tf.float32, shape=[None, pms.action_shape],
                                               name="%s_oldaction_dist_means" % scope)
        self.old_dist_logstds_n = tf.placeholder(tf.float32, shape=[None, pms.action_shape],
                                                 name="%s_oldaction_dist_logstds" % scope)
        self.action_dist_means_n = (pt.wrap(self.obs).
                                    fully_connected(64, activation_fn=tf.nn.relu,
                                                    init=tf.random_normal_initializer(-0.05, 0.05),
                                                    bias_init=tf.constant_initializer(0),
                                                    name="%s_fc1" % scope).
                                    fully_connected(64, activation_fn=tf.nn.relu,
                                                    init=tf.random_normal_initializer(-0.05, 0.05),
                                                    bias_init=tf.constant_initializer(0),
                                                    name="%s_fc2" % scope).
                                    fully_connected(pms.action_shape,
                                                    init=tf.random_normal_initializer(-0.05, 0.05),
                                                    bias_init=tf.constant_initializer(0),
                                                    name="%s_fc3" % scope))
        self.N = tf.shape(obs)[0]
        Nf = tf.cast(self.N, tf.float32)
        self.action_dist_logstd_param = tf.Variable(
            (.01 * np.random.randn(1, pms.action_shape)).astype(np.float32),
            name="%spolicy_logstd" % scope)
        # Note: tf.pack was renamed tf.stack in TensorFlow >= 1.0.
        self.action_dist_logstds_n = tf.tile(self.action_dist_logstd_param,
                                             tf.pack((tf.shape(self.action_dist_means_n)[0], 1)))
        self.var_list = [v for v in tf.trainable_variables() if v.name.startswith(scope)]
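The class above only builds the mean and log-std tensors of a diagonal Gaussian policy; action sampling happens elsewhere. A hedged sketch of how an action could be drawn from these outputs (`policy` is a hypothetical instance, not part of the original code):

noise = tf.random_normal(tf.shape(policy.action_dist_means_n))
sampled_action = (policy.action_dist_means_n +
                  tf.exp(policy.action_dist_logstds_n) * noise)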
Example 4: __init__
# Required import: import prettytensor [as alias]
# Or: from prettytensor import wrap [as alias]
def __init__(self, scope):
    with tf.variable_scope("%s_shared" % scope):
        self.obs = obs = tf.placeholder(
            tf.float32, shape=[None, pms.obs_shape], name="%s_obs" % scope)
        self.action_n = tf.placeholder(tf.float32, shape=[None, pms.action_shape],
                                       name="%s_action" % scope)
        self.advant = tf.placeholder(tf.float32, shape=[None], name="%s_advant" % scope)
        self.old_dist_means_n = tf.placeholder(tf.float32, shape=[None, pms.action_shape],
                                               name="%s_oldaction_dist_means" % scope)
        self.old_dist_logstds_n = tf.placeholder(tf.float32, shape=[None, pms.action_shape],
                                                 name="%s_oldaction_dist_logstds" % scope)
        self.action_dist_means_n = (pt.wrap(self.obs).
                                    fully_connected(64, activation_fn=tf.nn.relu,
                                                    init=tf.random_normal_initializer(-0.05, 0.05),
                                                    bias_init=tf.constant_initializer(0),
                                                    name="%s_fc1" % scope).
                                    fully_connected(64, activation_fn=tf.nn.relu,
                                                    init=tf.random_normal_initializer(-0.05, 0.05),
                                                    bias_init=tf.constant_initializer(0),
                                                    name="%s_fc2" % scope).
                                    fully_connected(pms.action_shape,
                                                    init=tf.random_normal_initializer(-0.05, 0.05),
                                                    bias_init=tf.constant_initializer(0),
                                                    name="%s_fc3" % scope))
        self.N = tf.shape(obs)[0]
        Nf = tf.cast(self.N, tf.float32)
        self.action_dist_logstd_param = tf.Variable(
            (.01 * np.random.randn(1, pms.action_shape)).astype(np.float32),
            name="%spolicy_logstd" % scope)
        # Note: tf.pack was renamed tf.stack in TensorFlow >= 1.0.
        self.action_dist_logstds_n = tf.tile(self.action_dist_logstd_param,
                                             tf.pack((tf.shape(self.action_dist_means_n)[0], 1)))
        self.var_list = [v for v in tf.trainable_variables() if v.name.startswith(scope)]
Example 5: main_network
# Required import: import prettytensor [as alias]
# Or: from prettytensor import wrap [as alias]
def main_network(images, training):
    # Wrap the input images so layers can be chained with PrettyTensor.
    x_pretty = pt.wrap(images)

    if training:
        phase = pt.Phase.train
    else:
        phase = pt.Phase.infer

    with pt.defaults_scope(activation_fn=tf.nn.relu, phase=phase):
        # num_classes and y_true are free variables defined elsewhere in the
        # original source (not shown in this snippet).
        y_pred, loss = x_pretty.\
            conv2d(kernel=5, depth=64, name="layer_conv1", batch_normalize=True).\
            max_pool(kernel=2, stride=2).\
            conv2d(kernel=5, depth=64, name="layer_conv2").\
            max_pool(kernel=2, stride=2).\
            flatten().\
            fully_connected(size=256, name="layer_fc1").\
            fully_connected(size=128, name="layer_fc2").\
            softmax_classifier(num_classes, labels=y_true)

    return y_pred, loss
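In TF1 code of this style, such a function is often called twice, once for training and once for inference, with shared weights. A hedged sketch of that pattern (the scope name and reuse logic are assumptions, not from the original source):

def create_network(images, training):
    # Build the training graph first; the inference graph then reuses its variables.
    with tf.variable_scope('network', reuse=not training):
        y_pred, loss = main_network(images, training)
    return y_pred, loss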
Example 6: generate_condition
# Required import: import prettytensor [as alias]
# Or: from prettytensor import wrap [as alias]
def generate_condition(self, c_var):
    conditions = \
        (pt.wrap(c_var).
         flatten().
         custom_fully_connected(self.ef_dim * 2).
         apply(leaky_rectify, leakiness=0.2))
    mean = conditions[:, :self.ef_dim]
    log_sigma = conditions[:, self.ef_dim:]
    return [mean, log_sigma]
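In conditional GANs of this style, the returned mean/log-sigma pair is typically reparameterized into a sampled conditioning vector. A hedged sketch of that downstream step (`model` is a hypothetical instance, not part of the original code):

mean, log_sigma = model.generate_condition(c_var)
eps = tf.random_normal(tf.shape(mean))
c_code = mean + tf.exp(log_sigma) * eps  # sampled condition embedding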
Example 7: generator_simple
# Required import: import prettytensor [as alias]
# Or: from prettytensor import wrap [as alias]
def generator_simple(self, z_var):
    output_tensor = \
        (pt.wrap(z_var).
         flatten().
         custom_fully_connected(self.s16 * self.s16 * self.gf_dim * 8).
         reshape([-1, self.s16, self.s16, self.gf_dim * 8]).
         conv_batch_norm().
         apply(tf.nn.relu).
         custom_deconv2d([0, self.s8, self.s8, self.gf_dim * 4], k_h=4, k_w=4).
         # apply(tf.image.resize_nearest_neighbor, [self.s8, self.s8]).
         # custom_conv2d(self.gf_dim * 4, k_h=3, k_w=3, d_h=1, d_w=1).
         conv_batch_norm().
         apply(tf.nn.relu).
         custom_deconv2d([0, self.s4, self.s4, self.gf_dim * 2], k_h=4, k_w=4).
         # apply(tf.image.resize_nearest_neighbor, [self.s4, self.s4]).
         # custom_conv2d(self.gf_dim * 2, k_h=3, k_w=3, d_h=1, d_w=1).
         conv_batch_norm().
         apply(tf.nn.relu).
         custom_deconv2d([0, self.s2, self.s2, self.gf_dim], k_h=4, k_w=4).
         # apply(tf.image.resize_nearest_neighbor, [self.s2, self.s2]).
         # custom_conv2d(self.gf_dim, k_h=3, k_w=3, d_h=1, d_w=1).
         conv_batch_norm().
         apply(tf.nn.relu).
         custom_deconv2d([0] + list(self.image_shape), k_h=4, k_w=4).
         # apply(tf.image.resize_nearest_neighbor, [self.s, self.s]).
         # custom_conv2d(3, k_h=3, k_w=3, d_h=1, d_w=1).
         apply(tf.nn.tanh))
    return output_tensor
Example 8: generate_condition
# Required import: import prettytensor [as alias]
# Or: from prettytensor import wrap [as alias]
def generate_condition(self, c_var):
    conditions = \
        (pt.wrap(c_var).
         flatten().
         custom_fully_connected(self.ef_dim * 2).
         apply(leaky_rectify, leakiness=0.2))
    mean = conditions[:, :self.ef_dim]
    log_sigma = conditions[:, self.ef_dim:]
    return [mean, log_sigma]

# stage I generator (g)
Example 9: hr_g_encode_image
# Required import: import prettytensor [as alias]
# Or: from prettytensor import wrap [as alias]
def hr_g_encode_image(self, x_var):
    output_tensor = \
        (pt.wrap(x_var).                                          # --> s * s * 3
         custom_conv2d(self.gf_dim, k_h=3, k_w=3, d_h=1, d_w=1).  # s * s * gf_dim
         apply(tf.nn.relu).
         custom_conv2d(self.gf_dim * 2, k_h=4, k_w=4).            # s2 * s2 * gf_dim * 2
         conv_batch_norm().
         apply(tf.nn.relu).
         custom_conv2d(self.gf_dim * 4, k_h=4, k_w=4).            # s4 * s4 * gf_dim * 4
         conv_batch_norm().
         apply(tf.nn.relu))
    return output_tensor
Example 10: hr_g_joint_img_text
# Required import: import prettytensor [as alias]
# Or: from prettytensor import wrap [as alias]
def hr_g_joint_img_text(self, x_c_code):
    output_tensor = \
        (pt.wrap(x_c_code).                                           # --> s4 * s4 * (ef_dim+gf_dim*4)
         custom_conv2d(self.gf_dim * 4, k_h=3, k_w=3, d_h=1, d_w=1).  # s4 * s4 * gf_dim * 4
         conv_batch_norm().
         apply(tf.nn.relu))
    return output_tensor
Example 11: hr_generator
# Required import: import prettytensor [as alias]
# Or: from prettytensor import wrap [as alias]
def hr_generator(self, x_c_code):
    # NOTE: nearest-neighbor resize followed by a stride-1 conv is used in
    # place of the commented-out transposed convolutions; this is a common
    # way to reduce checkerboard artifacts (interpretation, not stated in
    # the original source).
    output_tensor = \
        (pt.wrap(x_c_code).  # --> s4 * s4 * gf_dim*4
         # custom_deconv2d([0, self.s2, self.s2, self.gf_dim * 2], k_h=4, k_w=4).  # --> s2 * s2 * gf_dim*2
         apply(tf.image.resize_nearest_neighbor, [self.s2, self.s2]).
         custom_conv2d(self.gf_dim * 2, k_h=3, k_w=3, d_h=1, d_w=1).
         conv_batch_norm().
         apply(tf.nn.relu).
         # custom_deconv2d([0, self.s, self.s, self.gf_dim], k_h=4, k_w=4).  # --> s * s * gf_dim
         apply(tf.image.resize_nearest_neighbor, [self.s, self.s]).
         custom_conv2d(self.gf_dim, k_h=3, k_w=3, d_h=1, d_w=1).
         conv_batch_norm().
         apply(tf.nn.relu).
         # custom_deconv2d([0, self.s * 2, self.s * 2, self.gf_dim // 2], k_h=4, k_w=4).  # --> 2s * 2s * gf_dim/2
         apply(tf.image.resize_nearest_neighbor, [self.s * 2, self.s * 2]).
         custom_conv2d(self.gf_dim // 2, k_h=3, k_w=3, d_h=1, d_w=1).
         conv_batch_norm().
         apply(tf.nn.relu).
         # custom_deconv2d([0, self.s * 4, self.s * 4, self.gf_dim // 4], k_h=4, k_w=4).  # --> 4s * 4s * gf_dim//4
         apply(tf.image.resize_nearest_neighbor, [self.s * 4, self.s * 4]).
         custom_conv2d(self.gf_dim // 4, k_h=3, k_w=3, d_h=1, d_w=1).
         conv_batch_norm().
         apply(tf.nn.relu).
         custom_conv2d(3, k_h=3, k_w=3, d_h=1, d_w=1).  # --> 4s * 4s * 3
         apply(tf.nn.tanh))
    return output_tensor
Example 12: multilayer_fully_connected
# Required import: import prettytensor [as alias]
# Or: from prettytensor import wrap [as alias]
def multilayer_fully_connected(images, labels):
    images = pt.wrap(images)
    with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
        return (images.flatten().
                fully_connected(100).
                fully_connected(100).
                softmax_classifier(10, labels))
Developer: PacktPublishing, Project: Deep-Learning-with-TensorFlow-Second-Edition, Lines: 9, Source file: pretty_tensor_digit.py
Example 13: lenet5
# Required import: import prettytensor [as alias]
# Or: from prettytensor import wrap [as alias]
def lenet5(images, labels):
    images = pt.wrap(images)
    with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
        return (images.conv2d(5, 20).
                max_pool(2, 2).
                conv2d(5, 50).
                max_pool(2, 2).
                flatten().
                fully_connected(500).
                softmax_classifier(10, labels))
Developer: PacktPublishing, Project: Deep-Learning-with-TensorFlow-Second-Edition, Lines: 13, Source file: pretty_tensor_digit.py
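Both book examples above return a PrettyTensor softmax result. A hedged sketch of how such a model is typically trained with PrettyTensor's optimizer helper (the placeholder shapes and names are assumptions, not from the original source):

image_placeholder = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels_placeholder = tf.placeholder(tf.float32, [None, 10])
result = lenet5(image_placeholder, labels_placeholder)
# softmax_classifier returns a result exposing .softmax (predictions) and .loss.
train_op = pt.apply_optimizer(tf.train.GradientDescentOptimizer(0.01),
                              losses=[result.loss])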
Example 14: _build_encoder
# Required import: import prettytensor [as alias]
# Or: from prettytensor import wrap [as alias]
def _build_encoder(self):
    """Construct encoder network: placeholders, operations, optimizer"""
    self._input = tf.placeholder(tf.float32, self._batch_shape, name='input')
    self._encoding = tf.placeholder(tf.float32, (FLAGS.batch_size, self.layer_narrow), name='encoding')

    self._encode = (pt.wrap(self._input)
                    .flatten()
                    .fully_connected(self.layer_encoder, name='enc_hidden')
                    .fully_connected(self.layer_narrow, name='narrow'))

    # variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.encoder_scope)
    self._encoder_loss = self._encode.l1_regression(pt.wrap(self._encoding))
    ut.print_info('new learning rate: %.8f (%f)' % (FLAGS.learning_rate / FLAGS.batch_size, FLAGS.learning_rate))
    self._opt_encoder = self._optimizer(learning_rate=FLAGS.learning_rate / FLAGS.batch_size)
    self._train_encoder = self._opt_encoder.minimize(self._encoder_loss)
Example 15: _build_encoder
# Required import: import prettytensor [as alias]
# Or: from prettytensor import wrap [as alias]
def _build_encoder(self):
    """Construct encoder network: placeholders, operations, optimizer"""
    self._input = tf.placeholder(tf.float32, self._batch_shape, name='input')
    self._encoding = tf.placeholder(tf.float32, (FLAGS.batch_size, self.layer_narrow), name='encoding')

    # NOTE: this fully connected encoder is built but immediately overwritten
    # by the convolutional variant below; only the conv path is actually used.
    self._encode = (pt.wrap(self._input)
                    .flatten()
                    .fully_connected(self.layer_encoder, name='enc_hidden')
                    .fully_connected(self.layer_narrow, name='narrow'))

    self._encode = pt.wrap(self._input)
    self._encode = self._encode.conv2d(5, 32, stride=2)
    print(self._encode.get_shape())
    self._encode = self._encode.conv2d(5, 64, stride=2)
    print(self._encode.get_shape())
    self._encode = self._encode.conv2d(5, 128, stride=2)
    print(self._encode.get_shape())
    self._encode = (self._encode.dropout(0.9).
                    flatten().
                    fully_connected(self.layer_narrow, activation_fn=None))

    # variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.encoder_scope)
    self._encoder_loss = self._encode.l1_regression(pt.wrap(self._encoding))
    ut.print_info('new learning rate: %.8f (%f)' % (FLAGS.learning_rate / FLAGS.batch_size, FLAGS.learning_rate))
    self._opt_encoder = self._optimizer(learning_rate=FLAGS.learning_rate / FLAGS.batch_size)
    self._train_encoder = self._opt_encoder.minimize(self._encoder_loss)
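Finally, a hedged sketch of how the encoder training op built above might be driven in a TF1 session (`model`, `batch`, and `target_encoding` are assumed names, not from the original source):

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # One gradient step on the encoder, feeding inputs and target encodings.
    _, loss = sess.run([model._train_encoder, model._encoder_loss],
                       feed_dict={model._input: batch,
                                  model._encoding: target_encoding})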