This article collects typical usage examples of the tensorflow.Variables method in Python. If you are wondering what tensorflow.Variables does, how to use it, or would like concrete examples, the curated code samples below may help. You can also explore further usage examples from the module it belongs to, tensorflow.
Fifteen code examples of the tensorflow.Variables method are listed below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
Example 1: create_session
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Variables [as alias]
def create_session(config_dict=dict(), force_as_default=False):
    config = tf.ConfigProto()
    for key, value in config_dict.items():
        fields = key.split('.')
        obj = config
        for field in fields[:-1]:
            obj = getattr(obj, field)
        setattr(obj, fields[-1], value)
    session = tf.Session(config=config)
    if force_as_default:
        session._default_session = session.as_default()
        session._default_session.enforce_nesting = False
        session._default_session.__enter__()
    return session
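The dotted keys in config_dict map onto nested tf.ConfigProto fields via the attribute-walking loop above. A minimal usage sketch (the key shown is a real ConfigProto field; the helper above is assumed to be in scope):

# Hypothetical call: 'gpu_options.allow_growth' resolves to
# config.gpu_options.allow_growth = True before the session is built.
sess = create_session({'gpu_options.allow_growth': True}, force_as_default=True)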
#----------------------------------------------------------------------------
# Initialize all tf.Variables that have not already been initialized.
# Equivalent to the following, but more efficient and does not bloat the tf graph:
# tf.variables_initializer(tf.report_uninitialized_variables()).run()
Example 2: init_uninited_vars
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Variables [as alias]
def init_uninited_vars(vars=None):
    if vars is None: vars = tf.global_variables()
    test_vars = []; test_ops = []
    with tf.control_dependencies(None):  # ignore surrounding control_dependencies
        for var in vars:
            assert is_tf_expression(var)
            try:
                tf.get_default_graph().get_tensor_by_name(var.name.replace(':0', '/IsVariableInitialized:0'))
            except KeyError:
                # Op does not exist => variable may be uninitialized.
                test_vars.append(var)
                with absolute_name_scope(var.name.split(':')[0]):
                    test_ops.append(tf.is_variable_initialized(var))
    init_vars = [var for var, inited in zip(test_vars, run(test_ops)) if not inited]
    run([var.initializer for var in init_vars])
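A hedged usage sketch: after adding new ops to a graph whose other variables are already initialized, this helper runs only the missing initializers (the variable below is hypothetical):

# Previously initialized variables keep their values; only w_new is initialized.
w_new = tf.Variable(tf.zeros([10]), name='w_new')
init_uninited_vars()           # scan all global variables
init_uninited_vars([w_new])    # or restrict the check to specific variables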
#----------------------------------------------------------------------------
# Set the values of given tf.Variables.
# Equivalent to the following, but more efficient and does not bloat the tf graph:
# tfutil.run([tf.assign(var, value) for var, value in var_to_value_dict.items()])
Example 3: set_vars
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Variables [as alias]
def set_vars(var_to_value_dict: dict) -> None:
    """Set the values of given tf.Variables.

    Equivalent to the following, but more efficient and does not bloat the tf graph:
    tflib.run([tf.assign(var, value) for var, value in var_to_value_dict.items()])
    """
    assert_tf_initialized()
    ops = []
    feed_dict = {}
    for var, value in var_to_value_dict.items():
        assert is_tf_expression(var)
        try:
            setter = tf.get_default_graph().get_tensor_by_name(var.name.replace(":0", "/setter:0"))  # look for existing op
        except KeyError:
            with absolute_name_scope(var.name.split(":")[0]):
                with tf.control_dependencies(None):  # ignore surrounding control_dependencies
                    setter = tf.assign(var, tf.placeholder(var.dtype, var.shape, "new_value"), name="setter")  # create new setter
        ops.append(setter)
        feed_dict[setter.op.inputs[1]] = value
    run(ops, feed_dict)
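A usage sketch under the same assumptions (an active default session plus the helpers above); w and b are hypothetical tf.Variables, and the values are numpy arrays matching their shapes:

# Repeat calls reuse the cached '<var>/setter' op, so the graph does not grow.
import numpy as np
set_vars({w: np.zeros((10, 5), dtype=np.float32),
          b: np.zeros(5, dtype=np.float32)})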
Example 4: assert_no_new_variables
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Variables [as alias]
@contextlib.contextmanager  # requires `import contextlib`; the body yields, so it is meant to be used as a context manager
def assert_no_new_variables():
    """Ensure that no tf.Variables are constructed inside the context.

    Yields:
        None

    Raises:
        ValueError: if there is a variable created.
    """
    num_vars = len(tf.global_variables())
    old_variables = tf.global_variables()
    yield
    if len(tf.global_variables()) != num_vars:
        new_vars = set(tf.global_variables()) - set(old_variables)
        tf.logging.error("NEW VARIABLES CREATED")
        tf.logging.error(10 * "=")
        for v in new_vars:
            tf.logging.error(v)
        raise ValueError("Variables created inside an "
                         "assert_no_new_variables context")
    if old_variables != tf.global_variables():
        raise ValueError("Variables somehow changed inside an "
                         "assert_no_new_variables context. "
                         "This means something modified the tf.global_variables()")
Example 5: create_session
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Variables [as alias]
def create_session(config_dict=dict(), force_as_default=False):
    config = tf.ConfigProto(allow_soft_placement=True)
    for key, value in config_dict.items():
        fields = key.split('.')
        obj = config
        for field in fields[:-1]:
            obj = getattr(obj, field)
        setattr(obj, fields[-1], value)
    session = tf.Session(config=config)
    if force_as_default:
        session._default_session = session.as_default()
        session._default_session.enforce_nesting = False
        session._default_session.__enter__()
    return session
#----------------------------------------------------------------------------
# Initialize all tf.Variables that have not already been initialized.
# Equivalent to the following, but more efficient and does not bloat the tf graph:
# tf.variables_initializer(tf.report_uninitialized_variables()).run()
Example 6: add_model
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Variables [as alias]
def add_model(self, input_data):
    """Adds a linear layer plus a softmax transformation

    The core transformation for this model which transforms a batch of input
    data into a batch of predictions. In this case, the mathematical
    transformation effected is

        y = softmax(xW + b)

    Hint: Make sure to create tf.Variables as needed. Also, make sure to use
    tf.name_scope to ensure that your name spaces are clean.
    Hint: For this simple use-case, it's sufficient to initialize both weights W
    and biases b with zeros.

    Args:
        input_data: A tensor of shape (batch_size, n_features).
    Returns:
        out: A tensor of shape (batch_size, n_classes)
    """
    with tf.variable_scope("softmax_model"):
        self.W = tf.Variable(tf.zeros([self.config.n_features, self.config.n_classes]), name="weights")
        self.b = tf.Variable(tf.zeros([self.config.n_classes]), name="biases")
    return softmax(tf.matmul(input_data, self.W) + self.b)
Example 7: add_prediction_op
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Variables [as alias]
def add_prediction_op(self):
    """Adds the core transformation for this model which transforms a batch of input
    data into a batch of predictions. In this case, the transformation is a linear
    layer plus a softmax transformation:

        y = softmax(xW + b)

    Hint: Make sure to create tf.Variables as needed.
    Hint: For this simple use-case, it's sufficient to initialize both weights W
    and biases b with zeros.

    Returns:
        pred: A tensor of shape (batch_size, n_classes)
    """
    ### YOUR CODE HERE
    with tf.variable_scope("transformation"):
        bias = tf.Variable(tf.random_uniform([self.config.n_classes]))
        W = tf.Variable(tf.random_uniform([self.config.n_features, self.config.n_classes]))
        z = tf.matmul(self.input_placeholder, W) + bias
    pred = softmax(z)
    ### END YOUR CODE
    return pred
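Both of the last two examples reduce to y = softmax(xW + b). As a quick numpy sanity check of the softmax step itself (a sketch, independent of the models above), each output row must be a probability distribution that sums to 1:

import numpy as np
z = np.array([[1.0, 2.0, 3.0]])               # one batch row of logits
e = np.exp(z - z.max(axis=1, keepdims=True))  # subtract the row max for numerical stability
y = e / e.sum(axis=1, keepdims=True)
assert np.isclose(y.sum(axis=1), 1.0).all()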
Example 8: get_tf_params
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Variables [as alias]
def get_tf_params(scope):
    """
    Makes a list of all tf.Variables under this scope

    args
        scope (str)
    returns
        params (list)
    """
    # find scope parameters
    params = [p for p in tf.trainable_variables()
              if p.name.startswith(scope)]
    # sort parameters list by the variable name
    return sorted(params, key=lambda var: var.name)
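A hypothetical usage sketch, assuming the variables were created under a 'policy' variable scope:

with tf.variable_scope('policy'):
    w = tf.get_variable('w', shape=[4, 2])
params = get_tf_params('policy')  # [<tf.Variable 'policy/w:0' ...>], sorted by name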
Example 9: var_list
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Variables [as alias]
def var_list(self, mode=VlMode.RAW):
    """
    Get the chunks that define this variable.

    :param mode: (optional, default VlMode.RAW)
                 VlMode.RAW: returns var_list as-is, which may contain tf.Variables or MergedVariables;
                 VlMode.BASE: returns a list of the tf.Variables that are the "base" variables for this MergedVariable;
                 VlMode.TENSOR: returns a list of tf.Variables or tf.Tensors from the MergedVariables
    :return: A list that may contain tf.Tensors, tf.Variables and/or MergedVariables
    """
    if mode == VlMode.RAW:
        return self._var_list
    elif mode == VlMode.BASE:
        return self._get_base_variable_list()
    elif mode == VlMode.TENSOR:
        return self._var_list_as_tensors()  # return the single merged tensor + augmented copies
    else:
        raise NotImplementedError('mode %d does not exist' % mode)
Example 10: _fisher_vector_product
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Variables [as alias]
def _fisher_vector_product(self, mean_kl: tf.Tensor, var_list: list) -> Callable:
    """Get a function that computes the product of the KL Hessian and some vector v.

    Uses the fact that Hv = d^2 L / dt^2 v = d/dt ((dL/dt) v) = d/dt (g v)

    Args:
        mean_kl: tf.Tensor. The KL divergence between the old and the new policy
        var_list: list of tf.Variables for which to compute gradients
    Returns:
        A function that takes a vector v as input and computes the product Hv
    """
    # Compute the gradients of the KL divergence w.r.t. var_list and flatten them
    grads = tf.gradients(mean_kl, var_list)
    grad = tf_cg.flatten_tensors(grads)  # shape: [None]

    def compute_hvp(v):
        # Compute the dot product between grad and v
        v = tf.stop_gradient(v)
        gvp = tf.reduce_sum(grad * v)
        # Compute the matrix-vector product `Hv` between the Hessian and v, and flatten it
        hvps = tf.gradients(gvp, var_list)
        hvp = tf_cg.flatten_tensors(hvps)
        hvp = tf.check_numerics(hvp, message="Invalid Fisher-vector product")
        return hvp

    return compute_hvp
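The returned closure is typically handed to a conjugate-gradient solver so that Hx = g can be solved without ever materializing the Hessian. A hedged sketch (the solver name and its signature are hypothetical):

compute_hvp = self._fisher_vector_product(mean_kl, var_list)
hvp = compute_hvp(v)  # symbolic Hessian-vector product for some vector v
# step = conjugate_gradient(compute_hvp, g)  # e.g. iteratively solve H * step = g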
Example 11: variable_map
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Variables [as alias]
@property  # the docstring describes this as a property, so the decorator is assumed here
def variable_map(self):
    """Map from original variable names into tf.Variables (or lists of them).

    This map translates between variable names relative to the module and the
    corresponding Variable objects that have been created by instantiating it
    in the current graph (with the applicable scoping added). Each key in the
    map is a variable name as created by running the module's defining
    `module_fn` in the root scope of an empty graph. Each value in the map is
    a Variable object, or in case of partitioned variables a list of Variable
    objects.

    This property can be used with `tf.train.init_from_checkpoint` as
    `assignment_map` in order to restore a pre-trained checkpoint into a
    Module before calling `Module.export()`.

    Returns:
        A dict from the variable names in the Module to the instantiated
        tf.Variables or list of tf.Variables (if partitioned). The keys of this
        map are the same regardless of the scope of where the Module was
        instantiated.
    """
    return self._impl.variable_map
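A sketch of the warm-start use case mentioned in the docstring (the module spec and checkpoint path are hypothetical; tensorflow_hub is assumed to be imported as hub):

module = hub.Module(module_spec, trainable=True)
tf.train.init_from_checkpoint('/path/to/pretrained.ckpt', module.variable_map)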
Example 12: norm_posterior
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Variables [as alias]
def norm_posterior(dim, std0, suffix=None):
    """Initialise a posterior (diagonal) Normal distribution.

    Parameters
    ----------
    dim : tuple or list
        the dimension of this distribution.
    std0 : float, np.array
        the initial (unoptimized) standard deviation of this distribution.
        Must be a scalar or have the same shape as dim.
    suffix : str
        suffix to add to the names of the variables of the parameters of this
        distribution.

    Returns
    -------
    Q : tf.distributions.Normal
        the initialised posterior Normal object.

    Note
    ----
    This will make tf.Variables for the mean and standard deviation of the
    posterior. The initialisation of the mean is zero and the initialisation of
    the standard deviation is simply ``std0`` for each element.
    """
    assert (np.ndim(std0) == 0) or (np.shape(std0) == dim)
    mu_0 = tf.zeros(dim)
    mu = tf.Variable(mu_0, name=_add_suffix("W_mu_q", suffix))
    if np.ndim(std0) == 0:
        std0 = tf.ones(dim) * std0
    std = pos_variable(std0, name=_add_suffix("W_std_q", suffix))
    summary_histogram(mu)
    summary_histogram(std)
    Q = tf.distributions.Normal(loc=mu, scale=std)
    return Q
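A usage sketch (the shape and suffix are illustrative; assumes the helpers above are in scope):

Q = norm_posterior(dim=(5, 3), std0=1.0, suffix='layer0')
sample = Q.sample()  # a (5, 3) draw; Q supports the full tf.distributions.Normal API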
Example 13: init_uninitialized_vars
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Variables [as alias]
def init_uninitialized_vars(target_vars: List[tf.Variable] = None) -> None:
    """Initialize all tf.Variables that have not already been initialized.

    Equivalent to the following, but more efficient and does not bloat the tf graph:
    tf.variables_initializer(tf.report_uninitialized_variables()).run()
    """
    assert_tf_initialized()
    if target_vars is None:
        target_vars = tf.global_variables()
    test_vars = []
    test_ops = []
    with tf.control_dependencies(None):  # ignore surrounding control_dependencies
        for var in target_vars:
            assert is_tf_expression(var)
            try:
                tf.get_default_graph().get_tensor_by_name(var.name.replace(":0", "/IsVariableInitialized:0"))
            except KeyError:
                # Op does not exist => variable may be uninitialized.
                test_vars.append(var)
                with absolute_name_scope(var.name.split(":")[0]):
                    test_ops.append(tf.is_variable_initialized(var))
    init_vars = [var for var, inited in zip(test_vars, run(test_ops)) if not inited]
    run([var.initializer for var in init_vars])
Example 14: count_trainable_parameters
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Variables [as alias]
def count_trainable_parameters():
    """
    Counts the number of trainable parameters (e.g. tf.Variables) to get a rough idea of how complex
    our Model is.

    Returns:
        int: The number of trainable parameters in the graph.
    """
    num_trainable_parameters = 0
    if get_backend() == "tf":
        for variable in tf.trainable_variables():
            num_trainable_parameters += get_shape(variable, flat=True)
    return num_trainable_parameters
Example 15: get_perturbable_vars
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Variables [as alias]
def get_perturbable_vars(scope):
    """
    Get the trainable variables that can be perturbed when using
    parameter noise.

    :param scope: (str) tensorflow scope of the variables
    :return: ([tf.Variable]) the perturbable variables
    """
    return [var for var in tf_util.get_trainable_vars(scope) if 'LayerNorm' not in var.name]
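A hedged sketch of the parameter-noise use case: build ops that add Gaussian noise to every perturbable variable (the scope name and noise scale below are hypothetical):

perturb_ops = [tf.assign_add(var, tf.random_normal(tf.shape(var), stddev=0.01))
               for var in get_perturbable_vars('model/pi')]
# LayerNorm parameters are excluded on purpose so normalization statistics stay intact.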