This article collects typical usage examples of the tensorflow.make_template function in Python. If you have been wondering what make_template does, how to call it, or what real-world usage looks like, the curated examples here should help.
Fifteen code examples of make_template are shown below, ordered by popularity by default.
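Before looking at the project code, here is a minimal, self-contained sketch of what tf.make_template does (this snippet is illustrative and not taken from any of the projects below; the names scale_by_y and scale are made up). The wrapped function creates its variables on the first call and silently reuses them on every later call, with no manual reuse=True bookkeeping:

import tensorflow as tf  # TF 1.x API, matching the examples below

def scale_by_y(x):
    # 'y' is created on the first call to the template and reused afterwards.
    y = tf.get_variable('y', shape=[], initializer=tf.constant_initializer(2.0))
    return x * y

scale = tf.make_template('scale', scale_by_y)
a = scale(tf.constant(3.0))  # first call: creates the variable scale/y
b = scale(tf.constant(5.0))  # second call: reuses the same scale/y

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run([a, b]))  # [6.0, 10.0]

Both calls operate on the same scale/y variable; this sharing behaviour is what every example below relies on.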
Example 1: __init__
def __init__(self, arch, is_training=False):
'''
Variational auto-encoder implemented in 2D convolutional neural nets
Input:
`arch`: network architecture (`dict`)
`is_training`: (unused now) it was kept for historical reasons (for `BatchNorm`)
'''
self.arch = arch
self._sanity_check()
self.is_training = is_training
with tf.name_scope('SpeakerRepr'):
self.y_emb = self._l2_regularized_embedding(
self.arch['y_dim'],
self.arch['z_dim'],
'y_embedding')
self._generate = tf.make_template(
'Generator',
self._generator)
self._encode = tf.make_template(
'Encoder',
self._encoder)
self.generate = self.decode # for VAE-GAN extension
Example 2: _build_networks
def _build_networks(self):
"""Builds the Q-value network computations needed for acting and training.
These are:
self.online_convnet: For computing the current state's Q-values.
self.target_convnet: For computing the next state's target Q-values.
self._net_outputs: The actual Q-values.
self._q_argmax: The action maximizing the current state's Q-values.
self._replay_net_outputs: The replayed states' Q-values.
self._replay_next_target_net_outputs: The replayed next states' target
Q-values (see Mnih et al., 2015 for details).
"""
# Calling online_convnet will generate a new graph as defined in
# self._get_network_template using whatever input is passed, but will always
# share the same weights.
self.online_convnet = tf.make_template('Online', self._network_template)
self.target_convnet = tf.make_template('Target', self._network_template)
self._net_outputs = self.online_convnet(self.state_ph)
# TODO(bellemare): Ties should be broken. They are unlikely to happen when
# using a deep network, but may affect performance with a linear
# approximation scheme.
self._q_argmax = tf.argmax(self._net_outputs.q_values, axis=1)[0]
self._replay_net_outputs = self.online_convnet(self._replay.states)
self._replay_next_target_net_outputs = self.target_convnet(
self._replay.next_states)
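As the comment above points out, repeated calls to online_convnet build new ops but always reuse one set of weights, while the separately named Target template owns its own copy. The following simplified sketch (a hypothetical stand-in, not Dopamine's actual _network_template) shows the variable layout this produces:

import tensorflow as tf  # TF 1.x; q_network is a toy stand-in for _network_template

def q_network(state):
    return tf.layers.dense(state, units=4, name='q_values')

online = tf.make_template('Online', q_network)
target = tf.make_template('Target', q_network)

state_ph = tf.placeholder(tf.float32, [None, 8])
replay_states = tf.placeholder(tf.float32, [None, 8])

q_online = online(state_ph)       # creates Online/q_values/{kernel,bias}
q_replay = online(replay_states)  # reuses the same Online/* variables
q_target = target(state_ph)       # creates a separate Target/* variable set

print([v.name for v in tf.trainable_variables()])
# ['Online/q_values/kernel:0', 'Online/q_values/bias:0',
#  'Target/q_values/kernel:0', 'Target/q_values/bias:0']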
Example 3: __init__
def __init__(self, trainable=False,
state_preprocess_net=lambda states: states,
action_embed_net=lambda actions, *args, **kwargs: actions,
ndims=None):
self.trainable = trainable
self._scope = tf.get_variable_scope().name
self._ndims = ndims
self._state_preprocess_net = tf.make_template(
self.STATE_PREPROCESS_NET_SCOPE, state_preprocess_net,
create_scope_now_=True)
self._action_embed_net = tf.make_template(
self.ACTION_EMBED_NET_SCOPE, action_embed_net,
create_scope_now_=True)
Example 4: __init__
def __init__(self, corpus, **opts):
self.corpus = corpus
self.opts = opts
self.global_step = get_or_create_global_step()
self.increment_global_step_op = tf.assign(self.global_step, self.global_step + 1, name="increment_global_step")
self.corpus_size = get_corpus_size(self.corpus["train"])
self.corpus_size_valid = get_corpus_size(self.corpus["valid"])
self.word2idx, self.idx2word = build_vocab(self.corpus["train"])
self.vocab_size = len(self.word2idx)
self.generator_template = tf.make_template(GENERATOR_PREFIX, generator)
self.discriminator_template = tf.make_template(DISCRIMINATOR_PREFIX, discriminator)
self.enqueue_data, _, source, target, sequence_length = \
prepare_data(self.corpus["train"], self.word2idx, num_threads=7, **self.opts)
# TODO: option to either do pretrain or just generate?
self.g_tensors_pretrain = self.generator_template(
source, target, sequence_length, self.vocab_size, **self.opts)
self.enqueue_data_valid, self.input_ph, source_valid, target_valid, sequence_length_valid = \
prepare_data(self.corpus["valid"], self.word2idx, num_threads=1, **self.opts)
self.g_tensors_pretrain_valid = self.generator_template(
source_valid, target_valid, sequence_length_valid, self.vocab_size, **self.opts)
self.decoder_fn = prepare_custom_decoder(
sequence_length, self.g_tensors_pretrain.embedding_matrix, self.g_tensors_pretrain.output_projections)
self.g_tensors_fake = self.generator_template(
source, target, sequence_length, self.vocab_size, decoder_fn=self.decoder_fn, **self.opts)
self.g_tensors_fake_valid = self.generator_template(
source_valid, target_valid, sequence_length_valid, self.vocab_size, decoder_fn=self.decoder_fn, **self.opts)
# TODO: using the rnn outputs from pretraining as "real" instead of target embeddings (aka professor forcing)
self.d_tensors_real = self.discriminator_template(
self.g_tensors_pretrain.rnn_outputs, sequence_length, is_real=True, **self.opts)
# TODO: check to see if sequence_length is correct
self.d_tensors_fake = self.discriminator_template(
self.g_tensors_fake.rnn_outputs, None, is_real=False, **self.opts)
self.g_tvars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=GENERATOR_PREFIX)
self.d_tvars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=DISCRIMINATOR_PREFIX)
Example 5: _initialize_policy
def _initialize_policy(self):
"""Initialize the policy.
Run the policy network on dummy data to initialize its parameters for later
reuse and to analyze the policy distribution. Initializes the attributes
`self._network` and `self._policy_type`.
Raises:
ValueError: Invalid policy distribution.
Returns:
Parameters of the policy distribution and policy state.
"""
with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
network = functools.partial(
self._config.network, self._config, self._batch_env.action_space)
self._network = tf.make_template('network', network)
output = self._network(
tf.zeros_like(self._batch_env.observ)[:, None],
tf.ones(len(self._batch_env)))
if output.policy.event_shape != self._batch_env.action.shape[1:]:
message = 'Policy event shape {} does not match action shape {}.'
message = message.format(
output.policy.event_shape, self._batch_env.action.shape[1:])
raise ValueError(message)
self._policy_type = type(output.policy)
is_tensor = lambda x: isinstance(x, tf.Tensor)
policy_params = tools.nested.filter(is_tensor, output.policy.parameters)
set_batch_dim = lambda x: utility.set_dimension(x, 0, len(self._batch_env))
tools.nested.map(set_batch_dim, policy_params)
if output.state is not None:
tools.nested.map(set_batch_dim, output.state)
return policy_params, output.state
Example 6: __init__
def __init__(self, *args, **kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
self.name = self.kwargs.get("name", self.func.__name__)
self._template = tf.make_template(self.name, self.func, create_scope_now_=True)
self._unique_name = self._template.variable_scope.name.split("/")[-1]
self._summary_added = False
Example 7: test_variable_reuse_with_template
def test_variable_reuse_with_template(self):
tmpl1 = tf.make_template("test", tf.contrib.layers.legacy_fully_connected, num_output_units=8)
output1 = tmpl1(self.input)
output2 = tmpl1(self.input)
with tf.Session() as sess:
tf.initialize_all_variables().run()
out_value1, out_value2 = sess.run([output1, output2])
self.assertAllClose(out_value1, out_value2)
Example 8: build_model
def build_model(self):
sc = predictron_arg_scope()
with tf.variable_scope('state'):
with slim.arg_scope(sc):
state = slim.conv2d(self.inputs, 32, [3, 3], scope='conv1')
state = layers.batch_norm(state, activation_fn=tf.nn.relu, scope='conv1/preact')
state = slim.conv2d(state, 32, [3, 3], scope='conv2')
state = layers.batch_norm(state, activation_fn=tf.nn.relu, scope='conv2/preact')
iter_template = tf.make_template('iter', self.iter_func, unique_name_='iter')
rewards_arr = []
gammas_arr = []
lambdas_arr = []
values_arr = []
for k in range(self.max_depth):
state, reward, gamma, lambda_, value = iter_template(state)
rewards_arr.append(reward)
gammas_arr.append(gamma)
lambdas_arr.append(lambda_)
values_arr.append(value)
_, _, _, _, value = iter_template(state)
# K + 1 elements
values_arr.append(value)
bs = tf.shape(self.inputs)[0]
# [batch_size, K * maze_size]
self.rewards = tf.pack(rewards_arr, axis=1)
# [batch_size, K, maze_size]
self.rewards = tf.reshape(self.rewards, [bs, self.max_depth, self.maze_size])
# [batch_size, K + 1, maze_size]
self.rewards = tf.concat_v2(values=[tf.zeros(shape=[bs, 1, self.maze_size], dtype=tf.float32), self.rewards],
axis=1, name='rewards')
# [batch_size, K * maze_size]
self.gammas = tf.pack(gammas_arr, axis=1)
# [batch_size, K, maze_size]
self.gammas = tf.reshape(self.gammas, [bs, self.max_depth, self.maze_size])
# [batch_size, K + 1, maze_size]
self.gammas = tf.concat_v2(values=[tf.ones(shape=[bs, 1, self.maze_size], dtype=tf.float32), self.gammas],
axis=1, name='gammas')
# [batch_size, K * maze_size]
self.lambdas = tf.pack(lambdas_arr, axis=1)
# [batch_size, K, maze_size]
self.lambdas = tf.reshape(self.lambdas, [-1, self.max_depth, self.maze_size])
# [batch_size, (K + 1) * maze_size]
self.values = tf.pack(values_arr, axis=1)
# [batch_size, K + 1, maze_size]
self.values = tf.reshape(self.values, [-1, (self.max_depth + 1), self.maze_size])
self.build_preturns()
self.build_lambda_preturns()
Example 9: test_variable_reuse_with_template
def test_variable_reuse_with_template(self):
tmpl1 = tf.make_template("test", tf.learn.fully_connected, num_output_nodes=8)
output1 = tmpl1(self.input)
output2 = tmpl1(self.input)
with tf.Session() as sess:
tf.initialize_all_variables().run()
out_value1, out_value2 = sess.run([output1, output2])
self.assertAllClose(out_value1, out_value2)
assert_summary_scope(r"test(_\d)?/fully_connected")
Example 10: testBijectorConditionKwargs
def testBijectorConditionKwargs(self):
batch_size = 3
x_ = np.linspace(-1.0, 1.0, (batch_size * 4 * 2)).astype(
np.float32).reshape((batch_size, 4 * 2))
conditions = {
"a": tf.random_normal((batch_size, 4), dtype=tf.float32),
"b": tf.random_normal((batch_size, 2), dtype=tf.float32),
}
def _condition_shift_and_log_scale_fn(x0, output_units, a, b):
x = tf.concat((x0, a, b), axis=-1)
out = tf.layers.dense(
inputs=x,
units=2 * output_units)
shift, log_scale = tf.split(out, 2, axis=-1)
return shift, log_scale
condition_shift_and_log_scale_fn = tf.make_template(
"real_nvp_condition_template", _condition_shift_and_log_scale_fn)
nvp = tfb.RealNVP(
num_masked=4,
validate_args=True,
is_constant_jacobian=False,
shift_and_log_scale_fn=condition_shift_and_log_scale_fn)
x = tf.constant(x_)
forward_x = nvp.forward(x, **conditions)
# Use identity to invalidate cache.
inverse_y = nvp.inverse(tf.identity(forward_x), **conditions)
forward_inverse_y = nvp.forward(inverse_y, **conditions)
fldj = nvp.forward_log_det_jacobian(x, event_ndims=1, **conditions)
# Use identity to invalidate cache.
ildj = nvp.inverse_log_det_jacobian(
tf.identity(forward_x), event_ndims=1, **conditions)
self.evaluate(tf.global_variables_initializer())
[
forward_x_,
inverse_y_,
forward_inverse_y_,
ildj_,
fldj_,
] = self.evaluate([
forward_x,
inverse_y,
forward_inverse_y,
ildj,
fldj,
])
self.assertEqual("real_nvp", nvp.name)
self.assertAllClose(forward_x_, forward_inverse_y_, rtol=1e-6, atol=0.)
self.assertAllClose(x_, inverse_y_, rtol=1e-6, atol=0.)
self.assertAllClose(ildj_, -fldj_, rtol=1e-6, atol=0.)
Example 11: initialize_graph
def initialize_graph(self, input_statistics):
"""Save templates for components, which can then be used repeatedly.
This method is called every time a new graph is created. It's safe to start
adding ops to the current default graph here, but the graph should be
constructed from scratch.
Args:
input_statistics: A math_utils.InputStatistics object.
"""
super(_LSTMModel, self).initialize_graph(input_statistics=input_statistics)
self._lstm_cell = tf.nn.rnn_cell.LSTMCell(num_units=self._num_units)
# Create templates so we don't have to worry about variable reuse.
self._lstm_cell_run = tf.make_template(
name_="lstm_cell",
func_=self._lstm_cell,
create_scope_now_=True)
# Transforms LSTM output into mean predictions.
self._predict_from_lstm_output = tf.make_template(
name_="predict_from_lstm_output",
func_=lambda inputs: tf.layers.dense(inputs=inputs, units=self.num_features),
create_scope_now_=True)
Example 12: __init__
def __init__(self, mode=None, batch_size=hp_default.batch_size, queue=True):
self.mode = mode
self.batch_size = batch_size
self.queue = queue
self.is_training = self.get_is_training(mode)
# Input
self.x_mfcc, self.y_ppgs, self.y_spec, self.y_mel, self.num_batch = self.get_input(mode, batch_size, queue)
# Networks
self.net_template = tf.make_template('net', self._net2)
self.ppgs, self.pred_ppg, self.logits_ppg, self.pred_spec, self.pred_mel = self.net_template()
Example 13: __init__
def __init__(self,
f,
g,
num_layers=1,
f_side_input=None,
g_side_input=None,
use_efficient_backprop=True):
if isinstance(f, list):
assert len(f) == num_layers
else:
f = [f] * num_layers
if isinstance(g, list):
assert len(g) == num_layers
else:
g = [g] * num_layers
scope_prefix = "revblock/revlayer_%d/"
f_scope = scope_prefix + "f"
g_scope = scope_prefix + "g"
f = [
tf.make_template(f_scope % i, fn, create_scope_now_=True)
for i, fn in enumerate(f)
]
g = [
tf.make_template(g_scope % i, fn, create_scope_now_=True)
for i, fn in enumerate(g)
]
self.f = f
self.g = g
self.num_layers = num_layers
self.f_side_input = f_side_input or []
self.g_side_input = g_side_input or []
self._use_efficient_backprop = use_efficient_backprop
Example 14: __init__
def __init__(self, name):
"""
Initialize the module. Each subclass must call this constructor with a name.
Args:
name: Name of this module. Used for `tf.make_template`.
"""
self.name = name
self._template = tf.make_template(name, self._build, create_scope_now_=True)
# Docstrings for the class should be the docstring for the _build method
self.__doc__ = self._build.__doc__
# pylint: disable=E1101
self.__call__.__func__.__doc__ = self._build.__doc__
Example 15: test_all_ckpt
def test_all_ckpt(modelPath, fileOrDir,flags):
tf.reset_default_graph()
tf.logging.warning(modelPath)
tem = [f for f in os.listdir(modelPath) if 'data' in f]
ckptFiles = sorted([r.split('.data')[0] for r in tem])
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
input_tensor = tf.placeholder(tf.float32, shape=(1, None, None, 1))
shared_model = tf.make_template('shared_model', model)
output_tensor, weights = shared_model(input_tensor)
output_tensor = tf.clip_by_value(output_tensor, 0., 1.)
output_tensor = output_tensor * 255
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
original_ycbcr, gt_y, fileName_list = prepare_test_data(fileOrDir)
for ckpt in ckptFiles:
epoch = int(ckpt.split('_')[-1].split('.')[0])
if flags==0:
if epoch != 555:
continue
elif flags==1:
if epoch!= 555:
continue
else:
if epoch != 555:
continue
tf.logging.warning("epoch:%d\t"%epoch)
saver.restore(sess,os.path.join(modelPath,ckpt))
total_imgs = len(fileName_list)
for i in range(total_imgs):
imgY = original_ycbcr[i][0]
out = sess.run(output_tensor, feed_dict={input_tensor: imgY})
out = np.reshape(out, (out.shape[1], out.shape[2]))
out = np.around(out)
out = out.astype('int')
out = out.tolist()
return out