This article collects typical usage examples of tensorflow.Tensors in Python. If you have been wondering what tensorflow.Tensors is, how it is used, or where to find concrete examples of it, the curated code samples below may help. You can also explore further usage examples from the containing module, tensorflow.
The following shows 15 code examples of tensorflow.Tensors, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: call
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Tensors [as alias]
def call(self, states, test=False):
    """
    Compute actions and log probabilities of the selected actions

    :return action (tf.Tensors): Tensor of actions
    :return log_probs (tf.Tensor): Tensors of log probabilities of selected actions
    """
    param = self._compute_dist(states)
    if test:
        action = tf.math.argmax(param["prob"], axis=1)  # (size,)
    else:
        action = tf.squeeze(self.dist.sample(param), axis=1)  # (size,)
    log_prob = self.dist.log_likelihood(
        tf.one_hot(indices=action, depth=self.action_dim), param)
    return action, log_prob, param
Example 2: compute_log_probs
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Tensors [as alias]
def compute_log_probs(self, states, actions):
    """Compute log probabilities of the given actions

    :param states (tf.Tensor): Tensors of inputs to the NN
    :param actions (tf.Tensor): Tensors of actions that are NOT one-hot vectors.
        They are converted to one-hot vectors inside this function.
    """
    param = self._compute_dist(states)
    actions = tf.one_hot(
        indices=tf.squeeze(actions),
        depth=self.action_dim)
    param["prob"] = tf.cond(
        tf.math.greater(tf.rank(actions), tf.rank(param["prob"])),
        lambda: tf.expand_dims(param["prob"], axis=0),
        lambda: param["prob"])
    actions = tf.cond(
        tf.math.greater(tf.rank(param["prob"]), tf.rank(actions)),
        lambda: tf.expand_dims(actions, axis=0),
        lambda: actions)
    log_prob = self.dist.log_likelihood(actions, param)
    return log_prob
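For clarity, here is a hedged, standalone sketch of the quantity this method computes for a categorical policy: the log-probability of each integer action under the probabilities held in param["prob"]. The tensors and values below are invented for illustration and are not part of the original class.

import tensorflow as tf

probs = tf.constant([[0.2, 0.8], [0.5, 0.5]])  # stands in for param["prob"]
actions = tf.constant([1, 0])                  # integer actions, not one-hot
one_hot = tf.one_hot(actions, depth=2)
log_probs = tf.math.log(tf.reduce_sum(probs * one_hot, axis=1))
print(log_probs.numpy())  # [log(0.8), log(0.5)]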
Example 3: pad_tensor_dict
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Tensors [as alias]
def pad_tensor_dict(tensor_dict, max_len):
    """Pad dictionary of tensors with zeros.

    Args:
        tensor_dict (dict[numpy.ndarray]): Tensors to be padded.
        max_len (int): Maximum length.

    Returns:
        dict[numpy.ndarray]: Padded tensors.
    """
    keys = list(tensor_dict.keys())
    ret = dict()
    for k in keys:
        if isinstance(tensor_dict[k], dict):
            ret[k] = pad_tensor_dict(tensor_dict[k], max_len)
        else:
            ret[k] = pad_tensor(tensor_dict[k], max_len)
    return ret
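A minimal usage sketch, assuming both pad_tensor_dict above and pad_tensor from Example 12 below are defined; the dictionary contents are made up for illustration:

import numpy as np

d = {'obs': np.ones((3, 2)), 'nested': {'rew': np.ones(3)}}
padded = pad_tensor_dict(d, max_len=5)
print(padded['obs'].shape)            # (5, 2), last two rows zero-padded
print(padded['nested']['rew'].shape)  # (5,)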
Example 4: head
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Tensors [as alias]
def head(cls, inputs, targets, name='head', **kwargs):
    """ The last network layers, which produce predictions. Processes all outputs from the body.

    Parameters
    ----------
    inputs : list of tf.Tensors
        Input tensors.
    targets : tf.Tensor
    name : str
        Scope name.

    Returns
    -------
    list of tf.Tensors
    """
    res = []
    for i, x in enumerate(inputs):
        res.append(super().head(x, targets, name=name + '-' + str(i), **kwargs))
    return res
Example 5: _adapt_sym
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Tensors [as alias]
def _adapt_sym(self, surr_obj, params_var):
    """
    Creates the symbolic representation of the tf policy after one gradient step towards the surr_obj

    Args:
        surr_obj (tf_op): tensorflow op for the task-specific (inner) objective
        params_var (dict): dict of tf.Tensors for the current policy params

    Returns:
        (dict): dict of tf.Tensors for the adapted policy params
    """
    # TODO: Fix this if we want to learn the learning rate (it isn't supported right now).
    update_param_keys = list(params_var.keys())

    grads = tf.gradients(surr_obj, [params_var[key] for key in update_param_keys])
    gradients = dict(zip(update_param_keys, grads))

    # gradient descent
    adapted_policy_params = [params_var[key] - tf.multiply(self.step_sizes[key], gradients[key])
                             for key in update_param_keys]

    adapted_policy_params_dict = OrderedDict(zip(update_param_keys, adapted_policy_params))
    return adapted_policy_params_dict
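The method builds the one-step adapted parameters (theta' = theta - alpha * grad of the inner objective) symbolically. Below is a hedged, standalone sketch of the same per-parameter update, written with TF2's GradientTape instead of tf.gradients; the parameters, step sizes, and loss are invented for illustration and are not the original class's.

import tensorflow as tf
from collections import OrderedDict

params = OrderedDict(w=tf.Variable([[1.0, 2.0]]), b=tf.Variable([0.5]))
step_sizes = {k: 0.1 for k in params}  # fixed inner-loop learning rates

with tf.GradientTape() as tape:
    loss = tf.reduce_sum(params['w']) ** 2 + tf.reduce_sum(params['b'])
grads = dict(zip(params, tape.gradient(loss, list(params.values()))))

# one gradient-descent step per parameter, mirroring adapted_policy_params above
adapted = OrderedDict((k, params[k] - step_sizes[k] * grads[k]) for k in params)
print(adapted['w'].numpy())  # [[0.4, 1.4]]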
Example 6: step
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Tensors [as alias]
def step(self):
    self.train_loss.reset_states()
    self.train_accuracy.reset_states()
    self.test_loss.reset_states()
    self.test_accuracy.reset_states()

    for idx, (images, labels) in enumerate(self.train_ds):
        if idx > MAX_TRAIN_BATCH:  # This is optional and can be removed.
            break
        self.tf_train_step(images, labels)

    for test_images, test_labels in self.test_ds:
        self.tf_test_step(test_images, test_labels)

    # It is important to return tf.Tensors as numpy objects.
    return {
        "epoch": self.iteration,
        "loss": self.train_loss.result().numpy(),
        "accuracy": self.train_accuracy.result().numpy() * 100,
        "test_loss": self.test_loss.result().numpy(),
        "mean_accuracy": self.test_accuracy.result().numpy() * 100
    }
Example 7: __call__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Tensors [as alias]
def __call__(self, *parameters, solver_args={}):
    """Solve the problem (or a batch of problems) corresponding to `parameters`.

    Args:
        parameters: a sequence of tf.Tensors; the n-th Tensor specifies
            the value for the n-th CVXPY Parameter. These Tensors
            can be batched: if a Tensor has 3 dimensions, then its
            first dimension is interpreted as the batch size.
        solver_args: a dict of optional arguments to send to `diffcp`. Keys
            should be the names of keyword arguments.

    Returns:
        a list of optimal variable values, one for each CVXPY Variable
        supplied to the constructor.
    """
    if len(parameters) != len(self.params):
        raise ValueError('A tensor must be provided for each CVXPY '
                         'parameter; received %d tensors, expected %d' % (
                             len(parameters), len(self.params)))
    compute = tf.custom_gradient(
        lambda *parameters: self._compute(parameters, solver_args))
    return compute(*parameters)
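This __call__ appears to come from a differentiable convex-optimization layer in the style of the cvxpylayers package. As a hedged sketch (assuming cvxpylayers and cvxpy are installed and that the class is exposed as cvxpylayers.tensorflow.CvxpyLayer), such a layer might be used like this:

import cvxpy as cp
import tensorflow as tf
from cvxpylayers.tensorflow import CvxpyLayer

n, m = 2, 3
x = cp.Variable(n)
A = cp.Parameter((m, n))
b = cp.Parameter(m)
problem = cp.Problem(cp.Minimize(0.5 * cp.pnorm(A @ x - b, p=1)), [x >= 0])

layer = CvxpyLayer(problem, parameters=[A, b], variables=[x])
A_tf = tf.Variable(tf.random.normal((m, n)))
b_tf = tf.Variable(tf.random.normal((m,)))

with tf.GradientTape() as tape:
    # __call__ expects one tensor per CVXPY Parameter, in constructor order
    solution, = layer(A_tf, b_tf)
    loss = tf.reduce_sum(solution)
grads = tape.gradient(loss, [A_tf, b_tf])  # gradients through the solver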
Example 8: var_list
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Tensors [as alias]
def var_list(self, mode=VlMode.RAW):
    """
    Get the chunks that define this variable.

    :param mode: (optional, default VlMode.RAW)
                 VlMode.RAW: simply returns var_list, which may contain tf.Variables or MergedVariables;
                 VlMode.BASE: returns a list of the tf.Variables that are the "base" variables for this MergedVariable;
                 VlMode.TENSOR: returns a list of tf.Variables or tf.Tensors from the MergedVariables
    :return: A list that may contain tf.Tensors, tf.Variables and/or MergedVariables
    """
    if mode == VlMode.RAW:
        return self._var_list
    elif mode == VlMode.BASE:
        return self._get_base_variable_list()
    elif mode == VlMode.TENSOR:
        return self._var_list_as_tensors()  # return the unique tensor + augmented copies
    else:
        raise NotImplementedError('mode %d does not exist' % mode)
Example 9: test_python_constants_not_exposed
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Tensors [as alias]
def test_python_constants_not_exposed(self):
    """Tests that only TensorFlow values are exposed to users."""
    x_fn = lambda: tf.constant(1.0)
    tensorspec = tf.TensorSpec.from_tensor(x_fn())
    encoder_py = gather_encoder.GatherEncoder.from_encoder(
        core_encoder.EncoderComposer(
            test_utils.SimpleLinearEncodingStage(2.0, 3.0)).add_parent(
                test_utils.PlusOneEncodingStage(), P1_VALS).add_parent(
                    test_utils.SimpleLinearEncodingStage(2.0, 3.0),
                    SL_VALS).make(), tensorspec)
    a_var = tf.compat.v1.get_variable('a_var', initializer=2.0)
    b_var = tf.compat.v1.get_variable('b_var', initializer=3.0)
    encoder_tf = gather_encoder.GatherEncoder.from_encoder(
        core_encoder.EncoderComposer(
            test_utils.SimpleLinearEncodingStage(a_var, b_var)).add_parent(
                test_utils.PlusOneEncodingStage(), P1_VALS).add_parent(
                    test_utils.SimpleLinearEncodingStage(a_var, b_var),
                    SL_VALS).make(), tensorspec)

    (encode_params_py, decode_before_sum_params_py,
     decode_after_sum_params_py) = encoder_py.get_params()
    (encode_params_tf, decode_before_sum_params_tf,
     decode_after_sum_params_tf) = encoder_tf.get_params()

    # Params that are Python constants -- not tf.Tensors -- should be hidden
    # from the user, and made statically available at appropriate locations.
    self.assertLen(encode_params_py, 1)
    self.assertLen(encode_params_tf, 5)
    self.assertLen(decode_before_sum_params_py, 1)
    self.assertLen(decode_before_sum_params_tf, 3)
    self.assertEmpty(decode_after_sum_params_py)
    self.assertLen(decode_after_sum_params_tf, 2)
Example 10: flatten_batch_dict
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Tensors [as alias]
def flatten_batch_dict(d, name='flatten_batch_dict'):
    """Flatten a batch of observations represented as a dict.

    Args:
        d (dict[tf.Tensor]): A dict of Tensors to flatten.
        name (string): The name of the operation ('flatten_batch_dict' by default).

    Returns:
        dict[tf.Tensor]: A dict with flattened tensors.
    """
    with tf.name_scope(name):
        return {k: flatten_batch(v) for k, v in d.items()}
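flatten_batch itself is not shown in this snippet. Below is a hedged usage sketch, assuming flatten_batch merges the leading (batch, time) dimensions of each tensor; the helper defined here is an assumption for illustration, not the original implementation.

import tensorflow as tf

def flatten_batch(t, name='flatten_batch'):
    # Assumed helper: collapse the first two dimensions into one.
    return tf.reshape(t, [-1] + list(t.shape[2:]))

d = {'obs': tf.zeros((4, 5, 3)), 'act': tf.zeros((4, 5, 2))}
flat = flatten_batch_dict(d)
print(flat['obs'].shape, flat['act'].shape)  # (20, 3) (20, 2)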
Example 11: flatten_tensor_variables
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Tensors [as alias]
def flatten_tensor_variables(ts):
    """Flatten a list of tensors into a single, 1-dimensional tensor.

    Args:
        ts (Iterable): Iterable containing either tf.Tensors or arrays.

    Returns:
        tf.Tensor: Flattened Tensor.
    """
    return tf.concat(axis=0,
                     values=[tf.reshape(x, [-1]) for x in ts],
                     name='flatten_tensor_variables')
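A small usage sketch (the input tensors are made up; shown with TF2 eager execution):

import tensorflow as tf

a = tf.ones((2, 3))
b = tf.zeros((4,))
flat = flatten_tensor_variables([a, b])
print(flat.shape)  # (10,) -- the 6 elements of a followed by the 4 of b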
Example 12: pad_tensor
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Tensors [as alias]
def pad_tensor(x, max_len):
    """Pad a tensor with zeros along its first dimension.

    Args:
        x (numpy.ndarray): Tensor to be padded.
        max_len (int): Maximum length.

    Returns:
        numpy.ndarray: Padded tensor.
    """
    return np.concatenate([
        x,
        np.tile(np.zeros_like(x[0]),
                (max_len - len(x), ) + (1, ) * np.ndim(x[0]))
    ])
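A quick usage sketch with an invented array:

import numpy as np

x = np.arange(6).reshape(3, 2)      # 3 time steps, feature dimension 2
padded = pad_tensor(x, max_len=5)
print(padded.shape)                 # (5, 2); the last two rows are zeros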
Example 13: pad_tensor_n
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Tensors [as alias]
def pad_tensor_n(xs, max_len):
    """Pad an array of tensors.

    Args:
        xs (numpy.ndarray): Tensors to be padded.
        max_len (int): Maximum length.

    Returns:
        numpy.ndarray: Padded tensors.
    """
    ret = np.zeros((len(xs), max_len) + xs[0].shape[1:], dtype=xs[0].dtype)
    for idx, x in enumerate(xs):
        ret[idx][:len(x)] = x
    return ret
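A usage sketch with two variable-length arrays (invented data):

import numpy as np

xs = [np.ones((2, 3)), np.ones((4, 3))]   # two trajectories of different lengths
padded = pad_tensor_n(xs, max_len=5)
print(padded.shape)  # (2, 5, 3); unused positions stay zero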
Example 14: _set_hyper
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Tensors [as alias]
def _set_hyper(self, name, value):
    """Set hyperparameter `name` to `value`. `value` must be numeric."""
    if self._hypers_created:
        if not isinstance(self._hyper[name], tf.Variable):
            raise AttributeError("Can't set attribute: {}".format(name))
        if not isinstance(value, numbers.Number):
            raise ValueError('Dynamic reassignment only supports setting with a '
                             'number. tf.Tensors and tf.Variables can only be used '
                             'before the internal kfac optimizer is created.')
        backend.set_value(self._hyper[name], value)
    else:
        super(Kfac, self)._set_hyper(name, value)
Example 15: _filter_tensor
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Tensors [as alias]
def _filter_tensor(inputs, cond, *args):
    """ Get the indices and elements of inputs for which cond is True.

    Parameters
    ----------
    inputs : tf.Tensor
        input tensor
    cond : callable or float
        condition used to choose elements. If a float, elements greater than cond will be chosen.
    *args : tf.Tensors
        tensors with the same shape as inputs. Their corresponding elements are returned as well.

    Returns
    -------
    indices : tf.Tensor
        indices of the elements of inputs for which cond is True
    tf.Tensors :
        filtered inputs and the tensors from args.
    """
    with tf.variable_scope('filter_tensor'):
        if not callable(cond):
            callable_cond = lambda x: x > cond
        else:
            callable_cond = cond
        indices = tf.where(callable_cond(inputs))
        output = (indices, *[tf.gather_nd(x, indices) for x in [inputs, *args]])
    return output
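A hedged usage sketch. Note that the function uses tf.variable_scope, i.e. TF1-style graph code; the example below assumes it was defined with a TF1-compatible tf (for instance import tensorflow.compat.v1 as tf), and the input values are invented:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

logits = tf.constant([0.1, 0.9, 0.4, 0.7])
weights = tf.constant([1.0, 2.0, 3.0, 4.0])
indices, kept_logits, kept_weights = _filter_tensor(logits, 0.5, weights)

with tf.Session() as sess:
    print(sess.run([indices, kept_logits, kept_weights]))
    # indices -> [[1], [3]], kept_logits -> [0.9, 0.7], kept_weights -> [2.0, 4.0]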