This article collects typical usage examples of the tensorflow.python.ops.array_ops.shape function in Python. If you have been wondering what the shape function does, how to use it, and what real-world usage looks like, the hand-picked code examples below may help.
The sections below present 15 code examples of the shape function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: _mask_probs
def _mask_probs(probs, eos_token, finished):
  """Masks log probabilities.

  The result is that finished beams allocate all probability mass to eos and
  unfinished beams remain unchanged.

  Args:
    probs: Log probabilities of shape `[batch_size, beam_width, vocab_size]`
    eos_token: An int32 id corresponding to the EOS token to allocate
      probability to.
    finished: A boolean tensor of shape `[batch_size, beam_width]` that
      specifies which elements in the beam are finished already.

  Returns:
    A tensor of shape `[batch_size, beam_width, vocab_size]`, where unfinished
    beams stay unchanged and finished beams are replaced with a tensor with all
    probability on the EOS token.
  """
  vocab_size = array_ops.shape(probs)[2]
  # All finished examples are replaced with a vector that has all
  # probability on EOS.
  finished_row = array_ops.one_hot(
      eos_token,
      vocab_size,
      dtype=probs.dtype,
      on_value=0.,
      off_value=probs.dtype.min)
  finished_probs = array_ops.tile(
      array_ops.reshape(finished_row, [1, 1, -1]),
      array_ops.concat([array_ops.shape(finished), [1]], 0))
  finished_mask = array_ops.tile(
      array_ops.expand_dims(finished, 2), [1, 1, vocab_size])
  return array_ops.where(finished_mask, finished_probs, probs)
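A minimal usage sketch, not part of the original source: it assumes a TF 1.x session and that _mask_probs (with its array_ops import) is in scope as defined above.

import tensorflow as tf  # assumes TF 1.x

probs = tf.log(tf.fill([2, 3, 5], 0.2))     # [batch=2, beam=3, vocab=5], uniform log-probs
finished = tf.constant([[True, False, False],
                        [False, False, True]])
masked = _mask_probs(probs, eos_token=0, finished=finished)
with tf.Session() as sess:
  row = sess.run(masked)[0, 0]  # a finished beam: 0. at index 0 (EOS), dtype.min elsewhere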
Example 2: testDtype
def testDtype(self):
  with self.test_session():
    d = array_ops.fill([2, 3], 12., name="fill")
    self.assertEqual(d.get_shape(), [2, 3])
    # Test default type for both constant size and dynamic size
    z = array_ops.zeros([2, 3])
    self.assertEqual(z.dtype, dtypes_lib.float32)
    self.assertEqual([2, 3], z.get_shape())
    self.assertAllEqual(z.eval(), np.zeros([2, 3]))
    z = array_ops.zeros(array_ops.shape(d))
    self.assertEqual(z.dtype, dtypes_lib.float32)
    self.assertEqual([2, 3], z.get_shape())
    self.assertAllEqual(z.eval(), np.zeros([2, 3]))
    # Test explicit type control
    for dtype in [
        dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
        dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
        dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
        dtypes_lib.bool, dtypes_lib.string
    ]:
      z = array_ops.zeros([2, 3], dtype=dtype)
      self.assertEqual(z.dtype, dtype)
      self.assertEqual([2, 3], z.get_shape())
      z_value = z.eval()
      self.assertFalse(np.any(z_value))
      self.assertEqual((2, 3), z_value.shape)
      z = array_ops.zeros(array_ops.shape(d), dtype=dtype)
      self.assertEqual(z.dtype, dtype)
      self.assertEqual([2, 3], z.get_shape())
      z_value = z.eval()
      self.assertFalse(np.any(z_value))
      self.assertEqual((2, 3), z_value.shape)
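The test exercises both a literal [2, 3] shape and the dynamic array_ops.shape(d) tensor. The distinction matters when a dimension is unknown at graph-construction time; a small sketch (assuming TF 1.x placeholders):

import tensorflow as tf
from tensorflow.python.ops import array_ops

x = tf.placeholder(tf.float32, shape=[None, 3])  # leading dimension unknown statically
z = array_ops.zeros(array_ops.shape(x))          # shape is resolved at run time
with tf.Session() as sess:
  print(sess.run(z, feed_dict={x: [[1., 2., 3.]] * 4}).shape)  # (4, 3)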
Example 3: crf_unary_score
def crf_unary_score(tag_indices, sequence_lengths, inputs):
  """Computes the unary scores of tag sequences.

  Args:
    tag_indices: A [batch_size, max_seq_len] matrix of tag indices.
    sequence_lengths: A [batch_size] vector of true sequence lengths.
    inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials.

  Returns:
    unary_scores: A [batch_size] vector of unary scores.
  """
  batch_size = array_ops.shape(inputs)[0]
  max_seq_len = array_ops.shape(inputs)[1]
  num_tags = array_ops.shape(inputs)[2]
  flattened_inputs = array_ops.reshape(inputs, [-1])
  offsets = array_ops.expand_dims(
      math_ops.range(batch_size) * max_seq_len * num_tags, 1)
  offsets += array_ops.expand_dims(math_ops.range(max_seq_len) * num_tags, 0)
  flattened_tag_indices = array_ops.reshape(offsets + tag_indices, [-1])
  unary_scores = array_ops.reshape(
      array_ops.gather(flattened_inputs, flattened_tag_indices),
      [batch_size, max_seq_len])
  masks = _lengths_to_masks(sequence_lengths, array_ops.shape(tag_indices)[1])
  unary_scores = math_ops.reduce_sum(unary_scores * masks, 1)
  return unary_scores
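A hedged usage sketch on a toy batch; it assumes crf_unary_score and its internal helper _lengths_to_masks are in scope, under TF 1.x:

import numpy as np
import tensorflow as tf

inputs = tf.constant(np.random.rand(2, 4, 3), dtype=tf.float32)  # [batch, max_seq_len, num_tags]
tag_indices = tf.constant([[0, 1, 2, 1],
                           [2, 0, 0, 0]])                        # gold tag per time step
sequence_lengths = tf.constant([4, 2])                           # second sequence is padded
scores = crf_unary_score(tag_indices, sequence_lengths, inputs)  # sum of gold potentials per batch
with tf.Session() as sess:
  print(sess.run(scores))  # shape (2,)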
Example 4: _check_labels_and_scores
def _check_labels_and_scores(boolean_labels, scores, check_shape):
  """Check the rank of labels/scores, return tensor versions."""
  with ops.op_scope([boolean_labels, scores], '_check_labels_and_scores'):
    boolean_labels = ops.convert_to_tensor(boolean_labels,
                                           name='boolean_labels')
    scores = ops.convert_to_tensor(scores, name='scores')

    if boolean_labels.dtype != dtypes.bool:
      raise ValueError(
          'Argument boolean_labels should have dtype bool. Found: %s'
          % boolean_labels.dtype)

    if check_shape:
      labels_rank_1 = logging_ops.Assert(
          math_ops.equal(1, array_ops.rank(boolean_labels)),
          ['Argument boolean_labels should have rank 1. Found: ',
           boolean_labels.name, array_ops.shape(boolean_labels)])
      scores_rank_1 = logging_ops.Assert(
          math_ops.equal(1, array_ops.rank(scores)),
          ['Argument scores should have rank 1. Found: ', scores.name,
           array_ops.shape(scores)])
      with ops.control_dependencies([labels_rank_1, scores_rank_1]):
        return boolean_labels, scores
    else:
      return boolean_labels, scores
Example 5: _BetaincGrad
def _BetaincGrad(op, grad):
  """Returns gradient of betainc(a, b, x) with respect to x."""
  # TODO(ebrevdo): Perhaps add the derivative w.r.t. a, b
  a, b, x = op.inputs

  # two cases: x is a scalar and a/b are same-shaped tensors, or vice
  # versa; so it's sufficient to check against shape(a).
  sa = array_ops.shape(a)
  sx = array_ops.shape(x)
  # pylint: disable=protected-access
  _, rx = gen_array_ops._broadcast_gradient_args(sa, sx)
  # pylint: enable=protected-access

  # Perform operations in log space before summing, because terms
  # can grow large.
  log_beta = (
      gen_math_ops.lgamma(a) + gen_math_ops.lgamma(b) -
      gen_math_ops.lgamma(a + b))
  partial_x = math_ops.exp((b - 1) * math_ops.log(1 - x) +
                           (a - 1) * math_ops.log(x) - log_beta)

  # TODO(b/36815900): Mark None return values as NotImplemented
  return (
      None,  # da
      None,  # db
      array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
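For context: partial_x is the textbook derivative of the regularized incomplete beta function, d/dx I_x(a, b) = x^(a-1) * (1-x)^(b-1) / B(a, b), evaluated in log space for numerical stability. In the TensorFlow codebase, gradient functions like this one are attached to their op with a registration decorator; a sketch of how that typically looks (the decorator is standard TF, its placement here is assumed):

from tensorflow.python.framework import ops

@ops.RegisterGradient("Betainc")  # wires this function to the Betainc op
def _BetaincGrad(op, grad):
  ...  # body as shown above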
Example 6: _BiasAddGradGrad
def _BiasAddGradGrad(op, received_grad):
  """Gradient for the BiasAddGrad op.

  Args:
    op: BiasAddGrad op for which we are calculating gradients.
    received_grad: The gradients passed to the BiasAddGrad op.

  Returns:
    A single gradient Tensor for the input to BiasAddGrad (which
    is the gradient of the bias term in BiasAdd)
  """
  try:
    data_format = op.get_attr("data_format")
  except ValueError:
    data_format = None

  shape = array_ops.shape(op.inputs[0])
  rank = array_ops.rank(op.inputs[0])
  bias_shape = array_ops.shape(received_grad)

  if data_format == b"NCHW":
    expanded_shape = array_ops.concat([
        array_ops.ones_like(shape[:-3]), bias_shape,
        array_ops.ones_like(shape[-2:])
    ], 0)
    tile_mults = array_ops.concat([shape[:-3], [1], shape[-2:]], 0)
  else:
    expanded_shape = array_ops.concat(
        [array_ops.ones_like(shape[:-1]), bias_shape], 0)
    tile_mults = array_ops.concat([shape[:-1], [1]], 0)

  expanded_grad = array_ops.reshape(received_grad, expanded_shape)
  return array_ops.tile(expanded_grad, tile_mults)
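To make the shape arithmetic concrete, a worked example with assumed values: for an NCHW input of shape [2, 8, 5, 5], the incoming bias gradient has shape [8], so expanded_shape becomes [1, 8, 1, 1] and tile_mults becomes [2, 1, 5, 5], broadcasting the gradient back over batch and spatial dimensions:

import tensorflow as tf  # assumes TF 1.x

received_grad = tf.ones([8])                        # gradient w.r.t. an 8-channel bias
expanded = tf.reshape(received_grad, [1, 8, 1, 1])  # expanded_shape for NCHW
full = tf.tile(expanded, [2, 1, 5, 5])              # back to the [2, 8, 5, 5] input shape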
Example 7: testNoOp
def testNoOp(self):
  img_shape = [1, 6, 4, 1]
  single_shape = [6, 4, 1]
  # This test is also conducted with int8, so 127 is the maximum
  # value that can be used.
  data = [127, 127, 64, 64,
          127, 127, 64, 64,
          64, 64, 127, 127,
          64, 64, 127, 127,
          50, 50, 100, 100,
          50, 50, 100, 100]
  target_height = 6
  target_width = 4

  for nptype in self.TYPES:
    img_np = np.array(data, dtype=nptype).reshape(img_shape)

    for opt in self.OPTIONS:
      with self.test_session() as sess:
        image = constant_op.constant(img_np, shape=img_shape)
        y = image_ops.resize_images(image, target_height, target_width, opt)
        yshape = array_ops.shape(y)
        resized, newshape = sess.run([y, yshape])
        self.assertAllEqual(img_shape, newshape)
        self.assertAllClose(resized, img_np, atol=1e-5)

    # Resizing a single image must also leave the shape unchanged.
    with self.test_session():
      img_single = img_np.reshape(single_shape)
      image = constant_op.constant(img_single, shape=single_shape)
      y = image_ops.resize_images(image, target_height, target_width,
                                  self.OPTIONS[0])
      yshape = array_ops.shape(y)
      newshape = yshape.eval()
      self.assertAllEqual(single_shape, newshape)
Example 8: call
def call(self, labels, predictions, weights=None):
  """Accumulate accuracy statistics.

  `labels` and `predictions` should have the same shape.
  Since argmax is applied here, `labels` and `predictions` may have
  different dtypes.

  Args:
    labels: One-hot Tensor.
    predictions: Tensor with the logits or probabilities for each example.
    weights: Optional weighting of each example. Defaults to 1.

  Returns:
    The arguments, for easy chaining.
  """
  check_ops.assert_equal(
      array_ops.shape(labels), array_ops.shape(predictions),
      message="Shapes of labels and predictions are unequal")
  labels = math_ops.argmax(labels, axis=-1)
  predictions = math_ops.argmax(predictions, axis=-1)
  matches = math_ops.equal(labels, predictions)
  matches = math_ops.cast(matches, self.dtype)
  super(CategoricalAccuracy, self).call(matches, weights=weights)
  if weights is None:
    return labels, predictions
  return labels, predictions, weights
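A hedged usage sketch; it assumes the surrounding Metric base class makes instances callable and exposes a result() accessor, as in the eager-mode metrics this method appears to belong to:

import tensorflow as tf

labels = tf.constant([[0., 1., 0.],
                      [1., 0., 0.]])     # one-hot ground truth
logits = tf.constant([[0.1, 2.0, 0.3],
                      [0.9, 0.2, 0.1]])  # per-class scores
accuracy = CategoricalAccuracy()
accuracy(labels, logits)                 # argmax matches on both rows
print(accuracy.result())                 # expected: 1.0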
Example 9: random_crop
def random_crop(value, size, seed=None, name=None):
  """Randomly crops a tensor to a given size.

  Slices a shape `size` portion out of `value` at a uniformly chosen offset.
  Requires `value.shape >= size`.

  If a dimension should not be cropped, pass the full size of that dimension.
  For example, RGB images can be cropped with
  `size = [crop_height, crop_width, 3]`.

  Args:
    value: Input tensor to crop.
    size: 1-D tensor whose length equals the rank of `value`.
    seed: Python integer. Used to create a random seed. See
      [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
      for behavior.
    name: A name for this operation (optional).

  Returns:
    A cropped tensor of the same rank as `value` and shape `size`.
  """
  # TODO(shlens): Implement edge case to guarantee output size dimensions.
  # If size > value.shape, zero pad the result so that it always has shape
  # exactly size.
  with ops.op_scope([value, size], name, "random_crop") as name:
    value = ops.convert_to_tensor(value, name="value")
    size = ops.convert_to_tensor(size, dtype=dtypes.int32, name="size")
    shape = array_ops.shape(value)
    check = logging_ops.Assert(math_ops.reduce_all(shape >= size),
                               ["Need value.shape >= size, got ", shape, size])
    shape = control_flow_ops.with_dependencies([check], shape)
    limit = shape - size + 1
    offset = random_uniform(array_ops.shape(shape), dtype=size.dtype,
                            maxval=size.dtype.max, seed=seed) % limit
    return array_ops.slice(value, offset, size, name=name)
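A minimal usage sketch (assuming TF 1.x and that random_crop is in scope with the imports its definition uses):

import tensorflow as tf

image = tf.random_uniform([32, 32, 3])                 # toy RGB image
patch = random_crop(image, size=[24, 24, 3], seed=42)  # keep all 3 channels
with tf.Session() as sess:
  print(sess.run(patch).shape)  # (24, 24, 3)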
Example 10: create_zeros_slot
def create_zeros_slot(primary, name, dtype=None, colocate_with_primary=True):
  """Create a slot initialized to 0 with same shape as the primary object.

  Args:
    primary: The primary `Variable` or `Tensor`.
    name: Name to use for the slot variable.
    dtype: Type of the slot variable. Defaults to the type of `primary`.
    colocate_with_primary: Boolean. If True the slot is located
      on the same device as `primary`.

  Returns:
    A `Variable` object.
  """
  if dtype is None:
    dtype = primary.dtype
  slot_shape = primary.get_shape()
  if slot_shape.is_fully_defined():
    initializer = init_ops.zeros_initializer(dtype)
    return create_slot_with_initializer(
        primary, initializer, slot_shape, dtype, name,
        colocate_with_primary=colocate_with_primary)
  else:
    if isinstance(primary, variables.Variable):
      slot_shape = array_ops.shape(primary.initialized_value())
    else:
      slot_shape = array_ops.shape(primary)
    val = array_ops.zeros(slot_shape, dtype=dtype)
    return create_slot(primary, val, name,
                       colocate_with_primary=colocate_with_primary)
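A usage sketch, e.g. creating an optimizer accumulator; it assumes the internal helpers create_slot_with_initializer and create_slot are in scope along with the function above (TF 1.x):

import tensorflow as tf

var = tf.Variable(tf.ones([3, 4]), name="weights")
momentum = create_zeros_slot(var, name="momentum")  # zeros of shape [3, 4], colocated with var
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(momentum))  # all zeros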
Example 11: _check_shapes_dynamic
def _check_shapes_dynamic(self, operator, v, diag):
  """Return (v, diag) with Assert dependencies, which check shape."""
  checks = []
  with ops.op_scope([operator, v, diag], 'check_shapes'):
    s_v = array_ops.shape(v)
    r_op = operator.rank()
    r_v = array_ops.rank(v)
    if diag is not None:
      s_d = array_ops.shape(diag)
      r_d = array_ops.rank(diag)

    # Check tensor rank.
    checks.append(check_ops.assert_rank(v, r_op))
    if diag is not None:
      checks.append(check_ops.assert_rank(diag, r_op - 1))

    # Check batch shape
    checks.append(check_ops.assert_equal(
        operator.batch_shape(), array_ops.slice(s_v, [0], [r_v - 2])))
    if diag is not None:
      checks.append(check_ops.assert_equal(
          operator.batch_shape(), array_ops.slice(s_d, [0], [r_d - 1])))

    # Check event shape
    checks.append(check_ops.assert_equal(
        operator.vector_space_dimension(), array_ops.gather(s_v, r_v - 2)))
    if diag is not None:
      checks.append(check_ops.assert_equal(
          array_ops.gather(s_v, r_v - 1), array_ops.gather(s_d, r_d - 1)))

    v = control_flow_ops.with_dependencies(checks, v)
    if diag is not None:
      diag = control_flow_ops.with_dependencies(checks, diag)
    return v, diag
Example 12: _sliced_wasserstein
def _sliced_wasserstein(a, b, random_sampling_count, random_projection_dim):
  """Compute the approximate sliced Wasserstein distance.

  Args:
    a: (matrix) Distribution "a" of samples (row, col).
    b: (matrix) Distribution "b" of samples (row, col).
    random_sampling_count: (int) Number of random projections to average.
    random_projection_dim: (int) Dimension of the random projection space.

  Returns:
    Float containing the approximate distance between "a" and "b".
  """
  s = array_ops.shape(a)
  means = []
  for _ in range(random_sampling_count):
    # Random projection matrix.
    proj = random_ops.random_normal(
        [array_ops.shape(a)[1], random_projection_dim])
    proj *= math_ops.rsqrt(
        math_ops.reduce_sum(math_ops.square(proj), 0, keepdims=True))
    # Project both distributions and sort them.
    proj_a = math_ops.matmul(a, proj)
    proj_b = math_ops.matmul(b, proj)
    proj_a = _sort_rows(proj_a, s[0])
    proj_b = _sort_rows(proj_b, s[0])
    # Pairwise Wasserstein distance.
    wdist = math_ops.reduce_mean(math_ops.abs(proj_a - proj_b))
    means.append(wdist)
  return math_ops.reduce_mean(means)
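A hedged usage sketch on two toy Gaussian sample sets; _sort_rows is an internal helper the snippet assumes is in scope (TF 1.x):

import tensorflow as tf

a = tf.random_normal([128, 64])        # 128 samples from distribution "a"
b = tf.random_normal([128, 64]) + 1.0  # distribution "b", shifted by 1
dist = _sliced_wasserstein(a, b, random_sampling_count=4, random_projection_dim=16)
with tf.Session() as sess:
  print(sess.run(dist))  # roughly reflects the mean shift between a and b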
Example 13: _do_maximum_mean
def _do_maximum_mean(samples, envelope, high, name=None):
  """Common code between maximum_mean and minimum_mean."""
  with ops.name_scope(name, "do_maximum_mean", [samples, envelope, high]):
    n = array_ops.rank(samples)
    # Move the batch dimension of `samples` to the rightmost position,
    # where the _batch_sort_vector function wants it.
    perm = array_ops.concat([math_ops.range(1, n), [0]], axis=0)
    samples = array_ops.transpose(samples, perm)

    samples = _batch_sort_vector(samples)
    batch_shape = array_ops.shape(samples)[:-1]
    n = array_ops.shape(samples)[-1]
    step = 1. / math_ops.cast(n, dtype=samples.dtype.base_dtype)

    def _loop_body(iter_, total, to_skip):
      total = array_ops.where(
          step <= to_skip,
          total,
          array_ops.where(
              to_skip > 0.,
              total + (step - to_skip) * samples[..., iter_],
              total + step * samples[..., iter_]))
      to_skip = array_ops.where(step <= to_skip, to_skip - step, 0.)
      return [iter_ + 1, total, to_skip]

    _, total, _ = control_flow_ops.while_loop(
        cond=lambda iter_, *args: iter_ < n,
        body=_loop_body,
        loop_vars=[
            0,
            array_ops.zeros(batch_shape, dtype=samples.dtype.base_dtype),
            envelope,  # to_skip
        ])

    return total + envelope * high
Example 14: _tf_tensor_len
def _tf_tensor_len(s):
  """Overload of len_ for Tensor arguments."""
  # Statically shaped tensors: length is known ahead of time.
  if s.shape.ndims and s.shape.dims[0].value is not None:
    return s.shape.dims[0].value

  # Static shape of unknown dimensions: use dynamic shape but statically
  # check that it's a scalar.
  shape = array_ops.shape(s)

  assert shape.shape, 'shape tensor of zero size? {}'.format(shape)

  if shape.shape[0] == 0:
    raise ValueError(
        'len requires a non-scalar tensor, got one of shape {}'.format(shape))

  if shape.shape.dims[0].value is not None:
    return array_ops.shape(s)[0]

  # Fully dynamic shape: use ops.
  rank = array_ops.rank(s)

  def raise_zero_rank_error():
    msg = gen_string_ops.string_join(
        ['len requires non-zero rank, got ',
         gen_string_ops.as_string(rank)])
    with ops.control_dependencies([control_flow_ops.Assert(False, [msg])]):
      return constant_op.constant(0, dtype=dtypes.int32)

  return control_flow_ops.cond(rank > 0, lambda: array_ops.shape(s)[0],
                               raise_zero_rank_error)
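A sketch of the three code paths (static, partially static, fully dynamic), assuming TF 1.x and _tf_tensor_len in scope:

import tensorflow as tf

static = tf.zeros([5, 3])
print(_tf_tensor_len(static))         # 5, taken from the static shape

dynamic = tf.placeholder(tf.float32, shape=[None, 3])
length = _tf_tensor_len(dynamic)      # a scalar int32 tensor: array_ops.shape(...)[0]

unknown = tf.placeholder(tf.float32)  # rank unknown: falls through to the cond branch
length = _tf_tensor_len(unknown)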
Example 15: _apply_transform
def _apply_transform(self, input_tensors, **kwargs):
  """Applies the transformation to the `transform_input`.

  Args:
    input_tensors: a list of Tensors representing the input to
      the Transform.
    **kwargs: Additional keyword arguments, unused here.

  Returns:
    A namedtuple of Tensors representing the transformed output.
  """
  d = input_tensors[0]
  if self.strip_value is np.nan:
    strip_hot = math_ops.is_nan(d)
  else:
    strip_hot = math_ops.equal(d,
                               array_ops.constant([self.strip_value],
                                                  dtype=d.dtype))
  keep_hot = math_ops.logical_not(strip_hot)
  length = array_ops.reshape(array_ops.shape(d), [])
  indices = array_ops.boolean_mask(math_ops.range(length), keep_hot)
  values = array_ops.boolean_mask(d, keep_hot)
  sparse_indices = array_ops.reshape(
      math_ops.cast(indices, dtypes.int64), [-1, 1])
  shape = math_ops.cast(array_ops.shape(d), dtypes.int64)
  # pylint: disable=not-callable
  return self.return_type(ops.SparseTensor(sparse_indices, values, shape))
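The core dense-to-sparse pattern in this method can be seen in isolation; a sketch with assumed values (TF 1.x, using the public API rather than the Transform framework):

import tensorflow as tf

d = tf.constant([3., 0., 7., 0., 1.])
keep = tf.not_equal(d, 0.)                              # strip_value assumed to be 0.
indices = tf.boolean_mask(tf.range(tf.size(d)), keep)
values = tf.boolean_mask(d, keep)
sparse = tf.SparseTensor(tf.reshape(tf.cast(indices, tf.int64), [-1, 1]),
                         values,
                         tf.cast(tf.shape(d), tf.int64))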