This article collects typical usage examples of the Python method tensorflow.make_template. If you are puzzling over how exactly to use tensorflow.make_template, or what it can be used for, the curated method examples here may help. You can also explore further usage examples of the tensorflow module in which this method is defined.
The following shows 15 code examples of tensorflow.make_template, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
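Before the examples, here is a minimal sketch (TensorFlow 1.x; the function and variable names are illustrative and not taken from any example below) of the core behaviour of tf.make_template: the first call of a template creates its variables, and every later call reuses them.

import tensorflow as tf

def linear(x):
    # Variables created with tf.get_variable inside a template are shared across calls.
    w = tf.get_variable('w', shape=[int(x.shape[-1]), 1])
    return tf.matmul(x, w)

linear_tpl = tf.make_template('linear', linear)
y1 = linear_tpl(tf.zeros([4, 3]))  # first call: creates 'linear/w'
y2 = linear_tpl(tf.ones([4, 3]))   # second call: reuses 'linear/w'
assert len(tf.trainable_variables()) == 1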
Example 1: reuse_variables
# Required import: import tensorflow [as alias]
# Or: from tensorflow import make_template [as alias]
def reuse_variables(scope):
    """
    A decorator for transparent reuse of tensorflow
    `Variables <https://www.tensorflow.org/api_docs/python/tf/Variable>`_ in a
    function. The decorated function will automatically create variables the
    first time it is called and reuse them thereafter.

    .. note::

        This decorator is internally implemented by tensorflow's
        :func:`make_template` function. See `its doc
        <https://www.tensorflow.org/api_docs/python/tf/make_template>`_
        for requirements on the target function.

    :param scope: A string. The scope name passed to tensorflow
        `variable_scope()
        <https://www.tensorflow.org/api_docs/python/tf/variable_scope>`_.
    """
    return lambda f: tf.make_template(scope, f)
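A hypothetical use of the decorator above (the dense function and its shapes are made up for illustration): both calls go through the same template, so they share a single weight variable.

@reuse_variables('dense')
def dense(x):
    w = tf.get_variable('w', shape=[10, 5])
    return tf.matmul(x, w)

h1 = dense(tf.zeros([2, 10]))  # creates 'dense/w'
h2 = dense(tf.ones([2, 10]))   # reuses 'dense/w'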
Example 2: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import make_template [as alias]
def __init__(self, summaries=None, summary_labels=None):
    """
    Creates a new optimizer instance.
    """
    self.variables = dict()
    self.summaries = summaries
    if summary_labels is None:
        self.summary_labels = dict()
    else:
        self.summary_labels = summary_labels

    def custom_getter(getter, name, registered=False, **kwargs):
        variable = getter(name=name, registered=True, **kwargs)
        if not registered:
            assert kwargs.get('trainable', False)
            self.variables[name] = variable
        return variable

    # TensorFlow function
    self.step = tf.make_template(
        name_='step',
        func_=self.tf_step,
        custom_getter_=custom_getter
    )
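The custom getter above routes every tf.get_variable call made while the template runs through a hook, so the optimizer instance can keep its own registry of the variables it creates. A simplified, self-contained sketch of that pattern (the ToyOptimizer class is invented, and the registered/trainable bookkeeping is omitted):

class ToyOptimizer(object):

    def __init__(self):
        self.variables = dict()

        def custom_getter(getter, name, **kwargs):
            # Record every variable created inside the template.
            variable = getter(name=name, **kwargs)
            self.variables[name] = variable
            return variable

        self.step = tf.make_template(
            name_='step', func_=self.tf_step, custom_getter_=custom_getter)

    def tf_step(self, x):
        scale = tf.get_variable('scale', shape=[], initializer=tf.ones_initializer())
        return x * scale

After the first call of self.step, self.variables maps the created variable's scoped name to the variable object.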
Example 3: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import make_template [as alias]
def __init__(self, scope='preprocessor', summary_labels=None):
    self.summary_labels = set(summary_labels or ())
    self.variables = dict()
    self.summaries = list()

    def custom_getter(getter, name, registered=False, **kwargs):
        variable = getter(name=name, registered=True, **kwargs)
        if not registered:
            self.variables[name] = variable
        return variable

    self.process = tf.make_template(
        name_=(scope + '/process'),
        func_=self.tf_process,
        custom_getter_=custom_getter
    )
Example 4: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import make_template [as alias]
def __init__(self, scope='exploration', summary_labels=None):
    self.summary_labels = set(summary_labels or ())
    self.variables = dict()
    self.summaries = list()

    def custom_getter(getter, name, registered=False, **kwargs):
        variable = getter(name=name, registered=True, **kwargs)
        if not registered:
            self.variables[name] = variable
        return variable

    self.explore = tf.make_template(
        name_=(scope + '/explore'),
        func_=self.tf_explore,
        custom_getter_=custom_getter
    )
Example 5: init_fprop
# Required import: import tensorflow [as alias]
# Or: from tensorflow import make_template [as alias]
def init_fprop(self):
    """Initializes self.fprop. This should be called from subclasses' ctors.

    This function will construct all of the variables defined with
    tf.get_variable in the subclass's fprop method and make a template
    out of the fprop method. In this way, instead of using variable scopes
    for variable reuse, the instantiation of the subclass will construct all
    of the model variables, and subsequent calls of the object's fprop method
    will add the fprop ops to the tensorflow graph using the tf variables
    which were defined when init_fprop was first called. This way variable
    reuse is trivial: simply call model.fprop on different tensors. If you
    don't want to reuse variables, you will instead define a different model
    object.
    """
    scope_name = self.__class__.__name__
    self.fprop = tf.make_template(
        scope_name, self._fprop, create_scope_now_=True)
    if getattr(self.hparams, "use_placeholders", True):
        # Call self.fprop() to initialize variables in a dummy name scope
        # to manage the pollution.
        with tf.name_scope("UNUSED"):
            args, kwargs = self.get_fprop_placeholders()
            self.fprop(*args, **kwargs)
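A rough, self-contained illustration of the init_fprop idea (the TinyModel class below is invented, not the original model): the template is built with create_scope_now_=True so its variable scope is fixed at construction time, and a throwaway call under an 'UNUSED' name scope forces the variables into existence immediately.

class TinyModel(object):

    def __init__(self):
        self.fprop = tf.make_template(
            self.__class__.__name__, self._fprop, create_scope_now_=True)
        # Build the variables once, in a dummy name scope.
        with tf.name_scope("UNUSED"):
            self.fprop(tf.placeholder(tf.float32, [None, 8]))

    def _fprop(self, x):
        w = tf.get_variable('w', shape=[8, 2])
        return tf.matmul(x, w)

model = TinyModel()
logits_clean = model.fprop(tf.zeros([1, 8]))  # reuses TinyModel/w
logits_other = model.fprop(tf.ones([1, 8]))   # reuses TinyModel/w again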
Example 6: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import make_template [as alias]
def __init__(
        self, state_size, belief_size, embed_size,
        mean_only=False, min_stddev=1e-1, activation=tf.nn.elu,
        encoder_to_decoder=False, sample_to_sample=True,
        sample_to_encoder=True, decoder_to_encoder=False,
        decoder_to_sample=True, action_to_decoder=False):
    self._state_size = state_size
    self._belief_size = belief_size
    self._embed_size = embed_size
    self._encoder_cell = tf.contrib.rnn.GRUBlockCell(self._belief_size)
    self._decoder_cell = tf.contrib.rnn.GRUBlockCell(self._belief_size)
    self._kwargs = dict(units=self._embed_size, activation=tf.nn.relu)
    self._mean_only = mean_only
    self._min_stddev = min_stddev
    self._encoder_to_decoder = encoder_to_decoder
    self._sample_to_sample = sample_to_sample
    self._sample_to_encoder = sample_to_encoder
    self._decoder_to_encoder = decoder_to_encoder
    self._decoder_to_sample = decoder_to_sample
    self._action_to_decoder = action_to_decoder
    posterior_tpl = tf.make_template('posterior', self._posterior)
    super(DRNN, self).__init__(posterior_tpl, posterior_tpl)
Example 7: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import make_template [as alias]
def __init__(
        self, state_size, belief_size, embed_size,
        future_rnn=True, mean_only=False, min_stddev=0.1, activation=tf.nn.elu,
        num_layers=1):
    self._state_size = state_size
    self._belief_size = belief_size
    self._embed_size = embed_size
    self._future_rnn = future_rnn
    self._cell = tf.contrib.rnn.GRUBlockCell(self._belief_size)
    self._kwargs = dict(units=self._embed_size, activation=activation)
    self._mean_only = mean_only
    self._min_stddev = min_stddev
    self._num_layers = num_layers
    super(RSSM, self).__init__(
        tf.make_template('transition', self._transition),
        tf.make_template('posterior', self._posterior))
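One detail worth noticing in the two constructors above: the DRNN passes the same 'posterior' template for both arguments of the parent constructor, so both roles share one set of weights, while the RSSM builds separate 'transition' and 'posterior' templates. A toy contrast (toy_net is a placeholder, not the real model function):

def toy_net(x):
    w = tf.get_variable('w', shape=[4, 4])
    return tf.matmul(x, w)

shared_tpl = tf.make_template('posterior', toy_net)
drnn_style = (shared_tpl, shared_tpl)          # one variable set serves both roles

transition_tpl = tf.make_template('transition', toy_net)
posterior_tpl = tf.make_template('posterior', toy_net)
rssm_style = (transition_tpl, posterior_tpl)   # two independent variable sets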
Example 8: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import make_template [as alias]
def __init__(self, arch, normalizers=None):
    '''
    Variational Auto Encoder (VAE)

    Arguments:
        `arch`: network architecture (`dict`)
    '''
    self.arch = arch
    self.normalizers = normalizers
    self.feat_type = arch['feat_type']

    with tf.name_scope('SpeakerCode'):
        self.y_emb = self._l2_regularized_embedding(
            self.arch['y_dim'],
            self.arch['z_dim'],
            'y_embedding')

    self.enc = tf.make_template(
        'Encoder',
        self.encoder)

    self.dec = tf.make_template(
        'Decoder',
        self.decoder)
Example 9: templatemethod
# Required import: import tensorflow [as alias]
# Or: from tensorflow import make_template [as alias]
def templatemethod(name_):
    """This decorator wraps a method with `tf.make_template`. For example,

    @templatemethod("my_method")
    def my_method():
        # Create variables
    """
    def template_decorator(func):
        """Inner decorator function"""
        def func_wrapper(*args, **kwargs):
            """Inner wrapper function"""
            templated_func = tf.make_template(name_, func)
            return templated_func(*args, **kwargs)
        return func_wrapper
    return template_decorator
Author: akanimax, Project: natural-language-summary-generation-from-structured-data, Lines: 21, Source file: graph_utils.py
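Hypothetical usage of templatemethod (the ToyEncoder class is invented): the name passed to the decorator becomes the variable scope of the template. Note that, as written, func_wrapper builds a fresh template on every call, so separate calls of the decorated method end up in separate variable scopes; the wrapper is mainly a convenient way to scope variables for a method that is invoked once per graph.

class ToyEncoder(object):

    @templatemethod("encode")
    def encode(self, x):
        w = tf.get_variable('w', shape=[8, 8])
        return tf.matmul(x, w)

enc = ToyEncoder()
h = enc.encode(tf.zeros([2, 8]))   # variables live under the 'encode' scope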
Example 10: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import make_template [as alias]
def __init__(self, hparams=None):
    if not hasattr(self, '_hparams'):
        self._hparams = HParams(hparams, self.default_hparams())
    else:
        # Probably already parsed by subclasses. We rely on subclass
        # implementations to get this right.
        # As a sanity check, we require `hparams` to be `None` in this case.
        if hparams is not None:
            raise ValueError(
                "`self._hparams` already exists. Argument `hparams` "
                "must be set to `None` in this case.")
    self._template = tf.make_template(self._hparams.name, self._build,
                                      create_scope_now_=True)
    self._unique_name = self.variable_scope.name.split("/")[-1]
    self._trainable_variables = []
    self._built = False
Example 11: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import make_template [as alias]
def __init__(self, dequant_flow):
    super().__init__()
    assert isinstance(dequant_flow, Flow)
    self.dequant_flow = dequant_flow

    def deep_processor(x, *, init, ema, dropout_p):
        (this, that), _ = CheckerboardSplit().forward(x)
        processed_context = conv2d(tf.concat([this, that], 3), name='proj', num_units=32, init=init, ema=ema)
        for i in range(5):
            processed_context = gated_resnet(
                processed_context, name='c{}'.format(i),
                a=None, dropout_p=dropout_p, ema=ema, init=init,
                use_nin=False
            )
            processed_context = norm(processed_context, name='dqln{}'.format(i), ema=ema)
        return processed_context

    self.context_proc = tf.make_template("context_proc", deep_processor)
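deep_processor takes keyword-only switches (init, ema, dropout_p) in addition to its input, and a template forwards whatever arguments it is called with to the wrapped function, so those switches can differ between calls while the 'proj', 'c0'..'c4' and 'dqln*' variables stay shared. A minimal, self-contained sketch of that forwarding with a toy function (not the real processor):

def toy_processor(x, *, init, dropout_p):
    w = tf.get_variable('w', shape=[4, 4])
    h = tf.matmul(x, w)
    if not init:
        h = tf.nn.dropout(h, keep_prob=1.0 - dropout_p)
    return h

proc_tpl = tf.make_template('context_proc', toy_processor)
h_init = proc_tpl(tf.zeros([2, 4]), init=True, dropout_p=0.0)   # creates 'context_proc/w'
h_run = proc_tpl(tf.zeros([2, 4]), init=False, dropout_p=0.2)   # reuses it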
Example 12: define_graph
# Required import: import tensorflow [as alias]
# Or: from tensorflow import make_template [as alias]
def define_graph(config):
    network_tpl = tf.make_template('network', network, config=config)
    inputs = tf.placeholder(tf.float32, [None, config.num_inputs])
    targets = tf.placeholder(tf.float32, [None, 1])
    num_visible = tf.placeholder(tf.int32, [])
    batch_size = tf.shape(inputs)[0]
    data_dist = network_tpl(inputs)
    losses = [
        -data_dist.log_prob(targets),
    ]
    loss = sum(tf.reduce_sum(loss) for loss in losses) / tf.to_float(batch_size)
    optimizer = tf.train.AdamOptimizer(config.learning_rate)
    gradients, variables = zip(*optimizer.compute_gradients(
        loss, colocate_gradients_with_ops=True))
    if config.clip_gradient:
        gradients, _ = tf.clip_by_global_norm(gradients, config.clip_gradient)
    optimize = optimizer.apply_gradients(zip(gradients, variables))
    data_mean = data_dist.mean()
    data_noise = data_dist.stddev()
    data_uncertainty = data_dist.stddev()
    return tools.AttrDict(locals())
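A hedged sketch of how a graph built by define_graph would typically be driven (the network function, the config fields, and the batch arrays are assumed and not part of the excerpt): since define_graph returns tools.AttrDict(locals()), the placeholders and ops are reachable as attributes of the returned object.

graph = define_graph(config)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, loss_value = sess.run(
        [graph.optimize, graph.loss],
        feed_dict={graph.inputs: batch_inputs, graph.targets: batch_targets})
    mean, noise = sess.run(
        [graph.data_mean, graph.data_noise],
        feed_dict={graph.inputs: batch_inputs})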
Example 13: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import make_template [as alias]
def __init__(self,
             f,
             g,
             num_layers=1,
             f_side_input=None,
             g_side_input=None,
             use_efficient_backprop=True):

    if isinstance(f, list):
        assert len(f) == num_layers
    else:
        f = [f] * num_layers
    if isinstance(g, list):
        assert len(g) == num_layers
    else:
        g = [g] * num_layers

    scope_prefix = "revblock/revlayer_%d/"
    f_scope = scope_prefix + "f"
    g_scope = scope_prefix + "g"

    f = [
        tf.make_template(f_scope % i, fn, create_scope_now_=True)
        for i, fn in enumerate(f)
    ]
    g = [
        tf.make_template(g_scope % i, fn, create_scope_now_=True)
        for i, fn in enumerate(g)
    ]

    self.f = f
    self.g = g
    self.num_layers = num_layers
    self.f_side_input = f_side_input or []
    self.g_side_input = g_side_input or []
    self._use_efficient_backprop = use_efficient_backprop
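Building one template per layer gives each layer's residual functions a stable, per-index variable scope ('revblock/revlayer_0/f', 'revblock/revlayer_1/f', ...), so the functions can be re-executed during a memory-efficient backward pass and still resolve to the same variables. A toy version of the per-layer naming (f_fn stands in for a real residual function):

def f_fn(x):
    w = tf.get_variable('w', shape=[4, 4])
    return tf.matmul(x, w)

num_layers = 2
f_templates = [
    tf.make_template("revblock/revlayer_%d/f" % i, f_fn, create_scope_now_=True)
    for i in range(num_layers)
]
# f_templates[0] and f_templates[1] share the code of f_fn but own disjoint variables.
y0 = f_templates[0](tf.zeros([2, 4]))
y1 = f_templates[1](tf.zeros([2, 4]))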
Example 14: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import make_template [as alias]
def __init__(self, hparams=None):
    self._hparams = HParams(hparams, self.default_hparams())
    self._template = tf.make_template(self._hparams.name, self._build,
                                      create_scope_now_=True)
    self._unique_name = self.variable_scope.name.split("/")[-1]
    self._trainable_variables = []
    self._built = False
Example 15: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import make_template [as alias]
def __init__(self, env_config, hparams=None):
    AgentBase.__init__(self, hparams)
    self._env_config = env_config
    self._reset_tmplt_fn = tf.make_template(
        "{}_reset".format(self.name), self._reset)
    self._observe_tmplt_fn = tf.make_template(
        "{}_observe".format(self.name), self._observe)
    self._get_action_tmplt_fn = tf.make_template(
        "{}_get_action".format(self.name), self._get_action)