This article collects typical usage examples of the Python method tensorflow.assert_rank. If you have been wondering how exactly to use tensorflow.assert_rank, or what real code that calls it looks like, the curated examples below may help. You can also browse further usage examples from the tensorflow module that this method belongs to.
The following shows 15 code examples of tensorflow.assert_rank, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
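Before the examples, here is a minimal sketch of the basic tf.assert_rank pattern (added for orientation, not taken from any of the projects below; it assumes TensorFlow 1.x graph mode, which all of the examples use): the returned assertion Op is attached via tf.control_dependencies so that it actually runs before the checked tensor is consumed.

import tensorflow as tf

# A placeholder with unknown static shape, so the rank must be checked at run time.
x = tf.placeholder(tf.float32, shape=None, name="x")

# Build the assertion Op; `message` is prepended to the error if the check fails.
assert_op = tf.assert_rank(x, 2, message="x must be a 2-D tensor")

# Make downstream ops depend on the assertion so it is executed.
with tf.control_dependencies([assert_op]):
    x = tf.identity(x)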
Example 1: assert_scalar
# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_rank [as alias]
def assert_scalar(tensor, name):
    """
    Check whether `tensor` is a scalar (0-D tensor).

    :param tensor: A Tensor to be checked.
    :param name: The name of `tensor`, used in the error message.
    :return: The checked tensor.
    """
    static_shape = tensor.get_shape()
    shape_err_msg = name + " should be a scalar (0-D tensor)."
    if static_shape and (static_shape.ndims >= 1):
        raise ValueError(shape_err_msg)
    else:
        _assert_shape_op = tf.assert_rank(tensor, 0, message=shape_err_msg)
        with tf.control_dependencies([_assert_shape_op]):
            tensor = tf.identity(tensor)
        return tensor
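A hypothetical usage sketch for the helper above (the placeholder name is made up, not part of the original source):

# `learning_rate` is a 0-D tensor, so the check passes and the (possibly
# identity-wrapped) tensor is returned.
learning_rate = tf.placeholder(tf.float32, shape=[], name="learning_rate")
learning_rate = assert_scalar(learning_rate, "learning_rate")

# A tensor with known rank >= 1 raises ValueError at graph-construction time:
# assert_scalar(tf.zeros([3]), "weights")  # ValueError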
Example 2: tokens_to_bytes
# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_rank [as alias]
def tokens_to_bytes(tokens):
    """Given a sequence of strings, map to a sequence of bytes.

    Args:
      tokens: A tf.string tensor.

    Returns:
      A tensor of shape tokens.shape + [bytes_per_word] containing byte
      versions of each word.
    """
    bytes_per_word = DEFAULT_CHAR_MAXLEN
    with tf.device("/cpu:0"):
        tf.assert_rank(tokens, 1)
        shape = tf.shape(tokens)
        tf.logging.info(tokens)
        tokens_flat = tf.reshape(tokens, [-1])
        as_bytes_flat = tf.map_fn(
            fn=lambda x: _string_to_bytes(x, max_length=bytes_per_word),
            elems=tokens_flat,
            dtype=tf.int32,
            back_prop=False)
        tf.logging.info(as_bytes_flat)
        as_bytes = tf.reshape(as_bytes_flat, [shape[0], bytes_per_word])
    return as_bytes
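A hypothetical usage sketch (it assumes DEFAULT_CHAR_MAXLEN and _string_to_bytes are defined in the enclosing module, as in the original project):

# A 1-D tf.string tensor of tokens; an input of any other known rank would
# fail the tf.assert_rank(tokens, 1) check.
tokens = tf.constant(["hello", "world"])
token_bytes = tokens_to_bytes(tokens)  # int32, shape [2, DEFAULT_CHAR_MAXLEN]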
Example 3: log_likelihood_sym
# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_rank [as alias]
def log_likelihood_sym(self, x_var, dist_info_vars):
    """
    Symbolic log likelihood log p(x) of the distribution.

    Args:
        x_var (tf.Tensor): variable at which to evaluate the log likelihood
        dist_info_vars (dict): dict of distribution parameters as tf.Tensors

    Returns:
        (tf.Tensor): symbolic log likelihood
    """
    means = dist_info_vars["mean"]
    log_stds = dist_info_vars["log_std"]
    # assert ranks
    tf.assert_rank(x_var, 2), tf.assert_rank(means, 2), tf.assert_rank(log_stds, 2)
    zs = (x_var - means) / tf.exp(log_stds)
    return - tf.reduce_sum(log_stds, reduction_indices=-1) - \
        0.5 * tf.reduce_sum(tf.square(zs), reduction_indices=-1) - \
        0.5 * self.dim * np.log(2 * np.pi)
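The expression above is the log-density of a diagonal Gaussian. As a reading aid (added here, not from the original source), a small NumPy reference that computes the same quantity eagerly, assuming x, mean, and log_std are arrays of shape [batch_size, dim]:

import numpy as np

def log_likelihood_np(x, mean, log_std):
    # log N(x; mean, exp(log_std)^2), summed over the last axis
    z = (x - mean) / np.exp(log_std)
    dim = x.shape[-1]
    return (-np.sum(log_std, axis=-1)
            - 0.5 * np.sum(z ** 2, axis=-1)
            - 0.5 * dim * np.log(2 * np.pi))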
Example 4: fc_encoder
# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_rank [as alias]
def fc_encoder(inputs, hidden_units, dropout, scope=None):
    net = inputs
    with tf.variable_scope(scope, 'encoder', [inputs]):
        tf.assert_rank(inputs, 2)
        for layer_id, num_hidden_units in enumerate(hidden_units):
            with tf.variable_scope(
                    'layer_{}'.format(layer_id),
                    values=(net,)) as layer_scope:
                net = tf.contrib.layers.fully_connected(
                    net,
                    num_outputs=num_hidden_units,
                    scope=layer_scope)
                if dropout is not None:
                    net = slim.dropout(net)
                add_hidden_layer_summary(net)
        net = tf.identity(net, name='output')
    return net
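A hypothetical usage sketch (it assumes slim, i.e. tf.contrib.slim, and the add_hidden_layer_summary helper are available in the enclosing module, as in the original project; the input size and layer widths are made up):

inputs = tf.placeholder(tf.float32, shape=[None, 784], name="inputs")
# Two fully connected layers of 256 and 64 units, with dropout after each.
encoded = fc_encoder(inputs, hidden_units=[256, 64], dropout=0.5)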
Example 5: tokens_to_bytes
# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_rank [as alias]
def tokens_to_bytes(tokens):
    """Given a sequence of strings, map to a sequence of bytes.

    Args:
      tokens: A tf.string tensor.

    Returns:
      A tensor of shape tokens.shape + [bytes_per_word] containing byte
      versions of each word.
    """
    bytes_per_word = DEFAULT_CHAR_MAXLEN
    with tf.device("/cpu:0"):
        tf.assert_rank(tokens, 1)
        shape = tf.shape(tokens)
        tf.logging.info(tokens)
        tokens_flat = tf.reshape(tokens, [-1])
        as_bytes_flat = tf.map_fn(
            fn=lambda x: _string_to_bytes(x, max_length=bytes_per_word),
            elems=tokens_flat,
            dtype=tf.int32,
            back_prop=False,
        )
        tf.logging.info(as_bytes_flat)
        as_bytes = tf.reshape(as_bytes_flat, [shape[0], bytes_per_word])
    return as_bytes
Example 6: assert_positive_int32_scalar
# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_rank [as alias]
def assert_positive_int32_scalar(value, name):
    """
    Check whether `value` is a positive integer (or a positive 0-D `tf.int32`
    tensor). If `value` is an instance of a Python built-in type, it is
    checked directly. Otherwise, it is converted to a `tf.int32` tensor and
    checked.

    :param value: The value to be checked.
    :param name: The name of `value`, used in the error message.
    :return: The checked value.
    """
    if isinstance(value, (int, float)):
        if isinstance(value, int) and value > 0:
            return value
        elif isinstance(value, float):
            raise TypeError(name + " must be integer")
        elif value <= 0:
            raise ValueError(name + " must be positive")
    else:
        try:
            tensor = tf.convert_to_tensor(value, tf.int32)
        except (TypeError, ValueError):
            raise TypeError(name + ' must be (convertible to) tf.int32')
        _assert_rank_op = tf.assert_rank(
            tensor, 0,
            message=name + " should be a scalar (0-D Tensor).")
        _assert_positive_op = tf.assert_greater(
            tensor, tf.constant(0, tf.int32),
            message=name + " must be positive")
        with tf.control_dependencies([_assert_rank_op,
                                      _assert_positive_op]):
            tensor = tf.identity(tensor)
        return tensor
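A hypothetical usage sketch for the helper above (the argument names are made up):

n_planes = assert_positive_int32_scalar(16, "n_planes")   # returns 16 unchanged

n_dynamic = tf.placeholder(tf.int32, shape=[])
# Returns an identity-wrapped tensor guarded by rank and positivity assertions.
n_dynamic = assert_positive_int32_scalar(n_dynamic, "n_dynamic")

# assert_positive_int32_scalar(0, "n_planes")    -> ValueError: must be positive
# assert_positive_int32_scalar(1.5, "n_planes")  -> TypeError: must be integer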
Example 7: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_rank [as alias]
def __init__(self,
             logits,
             n_experiments,
             dtype=tf.int32,
             group_ndims=0,
             check_numerics=False,
             **kwargs):
    self._logits = tf.convert_to_tensor(logits)
    param_dtype = assert_same_float_dtype(
        [(self._logits, 'Binomial.logits')])

    assert_dtype_is_int_or_float(dtype)

    sign_err_msg = "n_experiments must be positive"
    if isinstance(n_experiments, int):
        if n_experiments <= 0:
            raise ValueError(sign_err_msg)
        self._n_experiments = n_experiments
    else:
        try:
            n_experiments = tf.convert_to_tensor(n_experiments, tf.int32)
        except ValueError:
            raise TypeError('n_experiments must be int32')
        _assert_rank_op = tf.assert_rank(
            n_experiments, 0,
            message="n_experiments should be a scalar (0-D Tensor).")
        _assert_positive_op = tf.assert_greater(
            n_experiments, 0, message=sign_err_msg)
        with tf.control_dependencies([_assert_rank_op,
                                      _assert_positive_op]):
            self._n_experiments = tf.identity(n_experiments)

    self._check_numerics = check_numerics
    super(Binomial, self).__init__(
        dtype=dtype,
        param_dtype=param_dtype,
        is_continuous=False,
        is_reparameterized=False,
        group_ndims=group_ndims,
        **kwargs)
Example 8: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_rank [as alias]
def __init__(self,
             dtype,
             param_dtype,
             is_continuous,
             is_reparameterized,
             use_path_derivative=False,
             group_ndims=0,
             **kwargs):
    if 'group_event_ndims' in kwargs:
        raise ValueError(
            "The argument `group_event_ndims` has been deprecated. "
            "Please use `group_ndims` instead.")

    self._dtype = dtype
    self._param_dtype = param_dtype
    self._is_continuous = is_continuous
    self._is_reparameterized = is_reparameterized
    self._use_path_derivative = use_path_derivative
    if isinstance(group_ndims, int):
        if group_ndims < 0:
            raise ValueError("group_ndims must be non-negative.")
        self._group_ndims = group_ndims
    else:
        group_ndims = tf.convert_to_tensor(group_ndims, tf.int32)
        _assert_rank_op = tf.assert_rank(
            group_ndims, 0,
            message="group_ndims should be a scalar (0-D Tensor).")
        _assert_nonnegative_op = tf.assert_greater_equal(
            group_ndims, 0,
            message="group_ndims must be non-negative.")
        with tf.control_dependencies([_assert_rank_op,
                                      _assert_nonnegative_op]):
            self._group_ndims = tf.identity(group_ndims)
Example 9: sample
# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_rank [as alias]
def sample(self, n_samples=None):
    """
    sample(n_samples=None)

    Return samples from the distribution. When `n_samples` is None (the
    default), one sample of shape ``batch_shape + value_shape`` is
    generated. For a scalar `n_samples`, the returned Tensor has a new
    sample dimension with size `n_samples` inserted at ``axis=0``, i.e.,
    the shape of samples is ``[n_samples] + batch_shape + value_shape``.

    :param n_samples: A 0-D `int32` Tensor or None. How many independent
        samples to draw from the distribution.
    :return: A Tensor of samples.
    """
    if n_samples is None:
        samples = self._sample(n_samples=1)
        return tf.squeeze(samples, axis=0)
    elif isinstance(n_samples, int):
        return self._sample(n_samples)
    else:
        n_samples = tf.convert_to_tensor(n_samples, dtype=tf.int32)
        _assert_rank_op = tf.assert_rank(
            n_samples, 0,
            message="n_samples should be a scalar (0-D Tensor).")
        with tf.control_dependencies([_assert_rank_op]):
            samples = self._sample(n_samples)
        return samples
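A hypothetical usage sketch (it assumes `dist` is an instance of a concrete subclass that implements _sample; the sample counts are made up):

samples_1 = dist.sample()                # shape: batch_shape + value_shape
samples_k = dist.sample(n_samples=10)    # shape: [10] + batch_shape + value_shape

# A dynamic 0-D int32 tensor also works; it is rank-checked via tf.assert_rank.
k = tf.placeholder(tf.int32, shape=[])
samples_dyn = dist.sample(n_samples=k)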
Example 10: add_modality
# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_rank [as alias]
def add_modality(self, input_data, input_size, bypass_docking=False):
    """
    Add a modality to EmbraceNet.

    Args:
        input_data: Input data to feed into EmbraceNet. Must be a 2-D tensor
            of shape [batch_size, input_size].
        input_size: The second dimension of input_data.
        bypass_docking: Bypass the docking step, i.e., connect the input data
            directly to the embracement layer. If True, input_data must have
            a shape of [batch_size, embracement_size].
    """
    # check input data
    tf_assertions = []
    tf_assertions.append(tf.assert_rank(input_data, 2))
    tf_assertions.append(tf.assert_equal(tf.shape(input_data)[0], self.batch_size))
    with tf.control_dependencies(tf_assertions):
        input_data = tf.identity(input_data)

    with tf.variable_scope('embracenet'):
        # construct docking layer
        modality_index = len(self.graph.modalities)
        modality_graph = EmbraceNetObject()
        modality_feeds = EmbraceNetObject()

        with tf.variable_scope('docking/%d' % modality_index):
            docking_input = input_data
            if (bypass_docking):
                modality_graph.docking_output = docking_input
            else:
                docking_output = tf.layers.dense(docking_input, units=self.embracement_size, kernel_initializer=None, bias_initializer=None)
                docking_output = tf.nn.relu(docking_output)
                modality_graph.docking_output = docking_output

        # finalize
        self.graph.modalities.append(modality_graph)
        self.feeds.modalities.append(modality_feeds)
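A hypothetical usage sketch (it assumes `embracenet` is an already constructed EmbraceNet-style object with batch_size and embracement_size set; the feature tensors and sizes are made up):

batch_size = 32  # must match embracenet.batch_size
image_features = tf.placeholder(tf.float32, shape=[batch_size, 512])
audio_features = tf.placeholder(tf.float32, shape=[batch_size, 128])

# Each call rank-checks its input and appends a docking layer for the modality.
embracenet.add_modality(image_features, input_size=512)
embracenet.add_modality(audio_features, input_size=128)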
Example 11: separable_conv
# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_rank [as alias]
def separable_conv(x, filters, kernel_size, activation):
    """Apply a depthwise separable 1-D convolution."""
    tf.assert_rank(x, 3)
    net = tf.expand_dims(x, 2)
    net = tf.layers.separable_conv2d(
        net,
        filters=filters,
        kernel_size=(kernel_size, 1),
        padding='same',
        activation=activation)
    net = tf.squeeze(net, axis=2)
    return net
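A hypothetical usage sketch (the input sizes are made up):

# x: [batch, time, channels]; separable_conv checks that the rank is 3.
x = tf.placeholder(tf.float32, shape=[None, 100, 64])
y = separable_conv(x, filters=128, kernel_size=3, activation=tf.nn.relu)
# y: [batch, time, 128]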
Example 12: _assert_tensor_shape
# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_rank [as alias]
def _assert_tensor_shape(tensor, shape, display_name):
    """
    Check whether the tensor matches the given shape.

    :param tensor: TF Tensor
    :param shape: Expected shape as a list/tuple; None means any size for that dimension
    :param display_name: Name of the tensor to print if an assertion fails
    """
    assert tf.assert_rank(tensor, len(shape), message='{} has wrong rank'.format(display_name))

    tensor_shape = tensor.get_shape().as_list() if len(shape) else []

    wrong_dimension = [ten_dim for ten_dim, cor_dim in zip(tensor_shape, shape)
                       if cor_dim is not None and ten_dim != cor_dim]
    assert not wrong_dimension, \
        '{} has wrong shape. Found {}'.format(display_name, tensor_shape)
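A hypothetical usage sketch (the tensor and expected shape are made up):

images = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
# None in the expected shape means any size is accepted for that dimension.
_assert_tensor_shape(images, [None, 28, 28, 1], "images")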
Example 13: test_rank_zero_tensor_raises_if_rank_too_small_static_rank
# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_rank [as alias]
def test_rank_zero_tensor_raises_if_rank_too_small_static_rank(self):
    with self.test_session():
        tensor = tf.constant(1, name="my_tensor")
        desired_rank = 1
        with self.assertRaisesRegexp(
                ValueError, "fail.*my_tensor.*must have rank 1"):
            with tf.control_dependencies(
                    [tf.assert_rank(tensor, desired_rank, message="fail")]):
                tf.identity(tensor).eval()
Example 14: test_rank_zero_tensor_raises_if_rank_too_small_dynamic_rank
# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_rank [as alias]
def test_rank_zero_tensor_raises_if_rank_too_small_dynamic_rank(self):
    with self.test_session():
        tensor = tf.placeholder(tf.float32, name="my_tensor")
        desired_rank = 1
        with tf.control_dependencies(
                [tf.assert_rank(tensor, desired_rank, message="fail")]):
            with self.assertRaisesOpError("fail.*my_tensor.*rank"):
                tf.identity(tensor).eval(feed_dict={tensor: 0})
Example 15: test_rank_zero_tensor_doesnt_raise_if_rank_just_right_static_rank
# Required import: import tensorflow [as alias]
# Or: from tensorflow import assert_rank [as alias]
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
    with self.test_session():
        tensor = tf.constant(1, name="my_tensor")
        desired_rank = 0
        with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
            tf.identity(tensor).eval()