This article collects typical usage examples of Python's tensorflow.TensorArray class. If you are wondering what tensorflow.TensorArray is, how to use it, or want to see it in context, the curated code samples below may help. You can also explore other usage examples from the tensorflow module.
Fifteen code examples of tensorflow.TensorArray are shown below, ordered by popularity by default.
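Before diving into the examples, here is a minimal, self-contained sketch (ours, not taken from the examples below) of the basic write/read/stack cycle of tf.TensorArray in TF 2.x eager mode:

import tensorflow as tf

# A dynamically sized array; clear_after_read=False lets us both read
# individual elements and stack the whole array afterwards.
ta = tf.TensorArray(tf.float32, size=0, dynamic_size=True,
                    clear_after_read=False)

# write() returns a new handle, so the result must be reassigned.
for i in range(3):
    ta = ta.write(i, float(i) * 10.0)

print(ta.read(1))   # 10.0
print(ta.stack())   # [ 0. 10. 20.]

The reassignment after every write() is the pattern to watch for; it recurs in nearly all of the examples below.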

Example 1: testConvertNetworkStateTensorarray
# Required import: import tensorflow [as alias]
# Or: from tensorflow import TensorArray [as alias]
def testConvertNetworkStateTensorarray(self):
    with self.test_session() as session:
        ta = tf.TensorArray(
            dtype=tf.float32,
            size=0,
            dynamic_size=True,
            clear_after_read=False,
            infer_shape=False)
        # Create a 3-step x 2-stride x 2-feature-dim source array.
        ta = ta.write(0, [[0., 0.]] * 2)  # The zeroth step will be removed.
        ta = ta.write(1, [[1., 10.]] * 2)
        ta = ta.write(2, [[2., 20.]] * 2)
        ta = ta.write(3, [[3., 30.]] * 2)
        tensor = network_units.convert_network_state_tensorarray(ta)
        actual = session.run(tensor)
        self.assertEqual(actual.shape, (6, 2))
        # The arrangement of the values is expected to be stride * steps.
        expected = [[1., 10.], [2., 20.], [3., 30.],
                    [1., 10.], [2., 20.], [3., 30.]]
        self.assertAllEqual(actual, expected)

Example 2: _check_static_batch_beam_maybe
# Required import: import tensorflow [as alias]
# Or: from tensorflow import TensorArray [as alias]
def _check_static_batch_beam_maybe(shape, batch_size, beam_width):
    """Raises an exception if dimensions are known statically and can not be
    reshaped to [batch_size, beam_size, -1]."""
    reshaped_shape = tf.TensorShape([batch_size, beam_width, None])
    assert len(shape.dims) > 0
    if batch_size is None or shape[0] is None:
        return True  # not statically known => no check
    if shape[0] == batch_size * beam_width:
        return True  # flattened, matching
    has_second_dim = shape.ndims >= 2 and shape[1] is not None
    if has_second_dim and shape[0] == batch_size and shape[1] == beam_width:
        return True  # non-flattened, matching
    # Otherwise we could not find a match and warn:
    tf.get_logger().warn(
        "TensorArray reordering expects elements to be "
        "reshapable to %s which is incompatible with the "
        "current shape %s. Consider setting "
        "reorder_tensor_arrays to False to disable TensorArray "
        "reordering during the beam search." % (reshaped_shape, shape)
    )
    return False

Example 3: _maybe_merge_batch_beams
# Required import: import tensorflow [as alias]
# Or: from tensorflow import TensorArray [as alias]
def _maybe_merge_batch_beams(self, t, s):
    """Merges the tensor from a batch of beams into a batch by beams.

    More exactly, if `t` is a tensor of dimension
    `[batch_size, beam_width] + s`, we reshape it to
    `[batch_size * beam_width] + s`.

    Args:
        t: `Tensor` of dimension `[batch_size, beam_width] + s`.
        s: `Tensor`, Python int, or `TensorShape`.

    Returns:
        A reshaped version of t with shape `[batch_size * beam_width] + s`.

    Raises:
        ValueError: If the rank of `t` is not statically known.
    """
    if isinstance(t, tf.TensorArray):
        return t
    _check_ndims(t)
    if t.shape.ndims >= 2:
        return self._merge_batch_beams(t, s)
    else:
        return t
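
The actual reshape is done by _merge_batch_beams, which is defined elsewhere in the source. Under the usual interpretation (merging collapses the batch and beam dimensions into one), it amounts to the following sketch with illustrative shapes:

import tensorflow as tf

batch_size, beam_width, depth = 2, 3, 4
t = tf.zeros([batch_size, beam_width, depth])   # [batch, beam] + s
merged = tf.reshape(t, [batch_size * beam_width, depth])
print(merged.shape)  # (6, 4)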

Example 4: mix_target_sequence
# Required import: import tensorflow [as alias]
# Or: from tensorflow import TensorArray [as alias]
def mix_target_sequence(self, gold_token, predicted_token, training, top_k=5):
    """Mix gold tokens and first-pass predictions (scheduled sampling).

    Args:
        gold_token: true labels
        predicted_token: predictions by the first pass
    Returns:
        a mix of gold_token and predicted_token
    """
    mix_result = tf.TensorArray(
        tf.float32, size=1, dynamic_size=True, clear_after_read=False
    )
    for i in tf.range(tf.shape(gold_token)[-1]):
        if self.random_num([1]) > self.hparams.schedual_sampling_rate:
            # do scheduled sampling: feed the first-pass prediction
            selected_input = predicted_token[:, i, :]
            selected_idx = tf.nn.top_k(selected_input, top_k).indices
            embedding_input = self.y_net.layers[1](selected_idx, training=training)
            embedding_input = tf.reduce_mean(embedding_input, axis=1)
            mix_result = mix_result.write(i, embedding_input)
        else:
            selected_input = tf.reshape(gold_token[:, i], [-1, 1])
            embedding_input = self.y_net.layers[1](selected_input, training=training)
            mix_result = mix_result.write(i, embedding_input[:, 0, :])
    final_input = self.y_net.layers[2](tf.transpose(mix_result.stack(), [1, 0, 2]),
                                       training=training)
    final_input = self.y_net.layers[3](final_input, training=training)
    return final_input
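
The per-step coin flip above is the heart of scheduled sampling. A stripped-down sketch of that decision, with all names and shapes made up for illustration:

import tensorflow as tf

sampling_rate = 0.3                # threshold from hyperparameters
gold = tf.ones([8, 16])            # [batch, embed_dim] gold embedding
predicted = tf.zeros([8, 16])      # [batch, embed_dim] model embedding

# Draw once per step; above the threshold, feed the model's own output.
step_input = tf.cond(tf.random.uniform([]) > sampling_rate,
                     lambda: predicted,
                     lambda: gold)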

Example 5: tas_for_tensors
# Required import: import tensorflow [as alias]
# Or: from tensorflow import TensorArray [as alias]
def tas_for_tensors(tensors, length):
    """Unstacks a set of Tensors into TensorArrays.

    Args:
        tensors: A potentially nested tuple or list of Tensors with length
            in the first dimension greater than or equal to the 'length'
            input argument.
        length: The desired length of the TensorArrays.

    Returns:
        tensorarrays: A potentially nested tuple or list of TensorArrays with
            the same structure as 'tensors'. Contains the result of unstacking
            each Tensor in 'tensors'.
    """
    def map_fn(x):
        ta = tf.TensorArray(x.dtype, length, name=x.name.split(':')[0] + '_ta')
        return ta.unstack(x[:length, :])
    return map_nested(map_fn, tensors)
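
To see what the unstacking produces, here is a standalone sketch; it omits the map_nested helper (defined elsewhere in the source repository) and uses made-up shapes:

import tensorflow as tf

# A [time=4, batch=3, features=2] tensor to unstack along axis 0.
x = tf.reshape(tf.range(24, dtype=tf.float32), [4, 3, 2])

ta = tf.TensorArray(tf.float32, size=3)
ta = ta.unstack(x[:3, :])     # keep only the first `length` steps

print(ta.read(0).shape)       # (3, 2): one time step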

Example 6: extend_prefixes
# Required import: import tensorflow [as alias]
# Or: from tensorflow import TensorArray [as alias]
def extend_prefixes(prefixes_to_extend, possible_prefix_extensions):
    """Extends prefixes in `prefixes_to_extend` by `possible_prefix_extensions`.

    Args:
        prefixes_to_extend: A 1D tf.string containing prefixes to be extended.
        possible_prefix_extensions: A 1D tf.string containing all possible
            prefix extensions.

    Returns:
        A 1D tf.string containing all the extended prefixes.
    """
    num_new_prefixes = tf.shape(prefixes_to_extend)[0] * tf.shape(
        possible_prefix_extensions)[0]
    extended_prefixes = tf.TensorArray(dtype=tf.string, size=num_new_prefixes)
    position = tf.constant(0, dtype=tf.int32)
    for prefix in prefixes_to_extend:
        for possible_extension in possible_prefix_extensions:
            # [-1] is passed to tf.reshape to flatten the extended prefix.
            # This is important to ensure consistency of shapes.
            extended_prefix = tf.reshape(
                tf.strings.reduce_join([prefix, possible_extension]), [-1])
            extended_prefixes = extended_prefixes.write(position, extended_prefix)
            position += 1
    return extended_prefixes.concat()
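
A hypothetical usage sketch: the Python-level loops over tensors above rely on AutoGraph, so the call is wrapped in tf.function here, and the inputs are made up:

import tensorflow as tf

@tf.function
def demo(prefixes, extensions):
    # extend_prefixes as defined above; AutoGraph converts its loops.
    return extend_prefixes(prefixes, extensions)

print(demo(tf.constant(["ca", "do"]), tf.constant(["t", "g"])))
# Expected: [b'cat' b'cag' b'dot' b'dog']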

Example 7: call
# Required import: import tensorflow [as alias]
# Or: from tensorflow import TensorArray [as alias]
def call(self, inputs):
    x, seq_length = inputs
    x_list = tf.TensorArray(dtype=tf.float32, size=seq_length)
    x_list = x_list.unstack(tf.transpose(x, perm=[1, 0, 2]))
    state = self.cell.get_initial_state(batch_size=self.batch_size,
                                        dtype=tf.float32)
    # Feed the sequence in, one step at a time, with a zero flag bit appended.
    for t in range(seq_length):
        output, state = self.cell(
            tf.concat([x_list.read(t), tf.zeros([self.batch_size, 1])], axis=1),
            state)
    # Present the end-of-file marker, then read the copy back out.
    output, state = self.cell(self.eof, state)
    output_list = tf.TensorArray(dtype=tf.float32, size=seq_length)
    for t in range(seq_length):
        output, state = self.cell(self.zero, state)
        output_list = output_list.write(t, output[:, 0:self.vector_dim])
    y_pred = tf.sigmoid(tf.transpose(output_list.stack(), perm=[1, 0, 2]))
    return y_pred

Example 8: _maybe_split_batch_beams
# Required import: import tensorflow [as alias]
# Or: from tensorflow import TensorArray [as alias]
def _maybe_split_batch_beams(self, t, s):
    """Maybe splits the tensor from a batch by beams into a batch of beams.

    We do this so that we can use nest and not run into problems with shapes.

    Args:
        t: `Tensor`, either scalar or shaped `[batch_size * beam_width] + s`.
        s: `Tensor`, Python int, or `TensorShape`.

    Returns:
        If `t` is a matrix or higher order tensor, then the return value is
        `t` reshaped to `[batch_size, beam_width] + s`. Otherwise `t` is
        returned unchanged.

    Raises:
        ValueError: If the rank of `t` is not statically known.
    """
    if isinstance(t, tf.TensorArray):
        return t
    _check_maybe(t)
    if t.shape.ndims >= 1:
        return self._split_batch_beams(t, s)
    else:
        return t

Example 9: _maybe_merge_batch_beams
# Required import: import tensorflow [as alias]
# Or: from tensorflow import TensorArray [as alias]
def _maybe_merge_batch_beams(self, t, s):
    """Merges the tensor from a batch of beams into a batch by beams.

    More exactly, if `t` is a tensor of dimension
    `[batch_size, beam_width] + s`, we reshape it to
    `[batch_size * beam_width] + s`.

    Args:
        t: `Tensor` of dimension `[batch_size, beam_width] + s`.
        s: `Tensor`, Python int, or `TensorShape`.

    Returns:
        A reshaped version of t with shape `[batch_size * beam_width] + s`.

    Raises:
        ValueError: If the rank of `t` is not statically known.
    """
    if isinstance(t, tf.TensorArray):
        return t
    _check_maybe(t)
    if t.shape.ndims >= 2:
        return self._merge_batch_beams(t, s)
    else:
        return t

Example 10: _check_batch_beam
# Required import: import tensorflow [as alias]
# Or: from tensorflow import TensorArray [as alias]
def _check_batch_beam(t, batch_size, beam_width):
    """Returns an Assert operation checking that the elements of the stacked
    TensorArray can be reshaped to [batch_size, beam_size, -1]. At this point,
    the TensorArray elements have a known rank of at least 1.
    """
    error_message = ("TensorArray reordering expects elements to be "
                     "reshapable to [batch_size, beam_size, -1] which is "
                     "incompatible with the dynamic shape of %s elements. "
                     "Consider setting reorder_tensor_arrays to False to disable "
                     "TensorArray reordering during the beam search."
                     % (t.name))
    rank = t.shape.ndims
    shape = tf.shape(t)
    if rank == 2:
        condition = tf.equal(shape[1], batch_size * beam_width)
    else:
        condition = tf.logical_or(
            tf.equal(shape[1], batch_size * beam_width),
            tf.logical_and(
                tf.equal(shape[1], batch_size),
                tf.equal(shape[2], beam_width)))
    return tf.Assert(condition, [error_message])

Example 11: _check_static_batch_beam_maybe
# Required import: import tensorflow [as alias]
# Or: from tensorflow import TensorArray [as alias]
def _check_static_batch_beam_maybe(shape, batch_size, beam_width):
    """Raises an exception if dimensions are known statically and can not be
    reshaped to [batch_size, beam_size, -1].
    """
    reshaped_shape = tf.TensorShape([batch_size, beam_width, None])
    if (batch_size is not None and shape[0].value is not None
            and (shape[0] != batch_size * beam_width
                 or (shape.ndims >= 2 and shape[1].value is not None
                     and (shape[0] != batch_size or shape[1] != beam_width)))):
        tf.logging.warn("TensorArray reordering expects elements to be "
                        "reshapable to %s which is incompatible with the "
                        "current shape %s. Consider setting "
                        "reorder_tensor_arrays to False to disable TensorArray "
                        "reordering during the beam search."
                        % (reshaped_shape, shape))
        return False
    return True

Example 12: non_max_suppression
# Required import: import tensorflow [as alias]
# Or: from tensorflow import TensorArray [as alias]
def non_max_suppression(inputs, scores, batch_size, max_output_size,
                        score_threshold=0.7, iou_threshold=0.7,
                        nonempty=False, name='nms'):
    """Perform NMS on a batch of images."""
    with tf.variable_scope(name):
        ix = tf.constant(0)
        filtered_rois = tf.TensorArray(dtype=tf.int32, size=batch_size,
                                       infer_shape=False)
        loop_cond = lambda ix, filtered_rois: tf.less(ix, batch_size)

        def _loop_body(ix, filtered_rois):
            indices, score, roi = _filter_tensor(scores[ix], score_threshold, inputs[ix])  # pylint: disable=unbalanced-tuple-unpacking
            roi_corners = tf.concat([roi[:, :2], roi[:, :2] + roi[:, 2:]], axis=-1)
            roi_after_nms = tf.image.non_max_suppression(roi_corners, score,
                                                         max_output_size,
                                                         iou_threshold)
            if nonempty:
                is_not_empty = lambda: filtered_rois.write(
                    ix, tf.cast(tf.gather(indices, roi_after_nms), dtype=tf.int32))
                is_empty = lambda: filtered_rois.write(ix, tf.constant([[0]]))
                filtered_rois = tf.cond(tf.not_equal(tf.shape(indices)[0], 0),
                                        is_not_empty, is_empty)
            else:
                filtered_rois = filtered_rois.write(
                    ix, tf.cast(tf.gather(indices, roi_after_nms), dtype=tf.int32))
            return [ix + 1, filtered_rois]

        _, res = tf.while_loop(loop_cond, _loop_body, [ix, filtered_rois])
        res = _array_to_tuple(res, batch_size, [-1, 1])
    return res
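
The heavy lifting here is tf.image.non_max_suppression, which can be demonstrated in isolation (the boxes and scores below are made up; boxes are [y1, x1, y2, x2] corners):

import tensorflow as tf

boxes = tf.constant([[0., 0., 1., 1.],
                     [0., 0., 0.9, 0.9],   # heavy overlap with box 0
                     [2., 2., 3., 3.]])
scores = tf.constant([0.9, 0.8, 0.7])
keep = tf.image.non_max_suppression(boxes, scores,
                                    max_output_size=3, iou_threshold=0.7)
print(keep)  # [0 2] -- box 1 is suppressed by box 0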

Example 13: _compute_gradients
# Required import: import tensorflow [as alias]
# Or: from tensorflow import TensorArray [as alias]
def _compute_gradients(self, loss_fn, x, unused_optim_state):
    """Compute gradient estimates using SPSA."""
    # Assumes `x` is a list, containing a [1, H, W, C] image
    assert len(x) == 1 and x[0].get_shape().as_list()[0] == 1
    x = x[0]
    x_shape = x.get_shape().as_list()

    def body(i, grad_array):
        delta = self._delta
        delta_x = self._get_delta(x, delta)
        delta_x = tf.concat([delta_x, -delta_x], axis=0)
        loss_vals = tf.reshape(
            loss_fn(x + delta_x),
            [2 * self._num_samples] + [1] * (len(x_shape) - 1))
        avg_grad = reduce_mean(loss_vals * delta_x, axis=0) / delta
        avg_grad = tf.expand_dims(avg_grad, axis=0)
        new_grad_array = grad_array.write(i, avg_grad)
        return i + 1, new_grad_array

    def cond(i, _):
        return i < self._num_iters

    _, all_grads = tf.while_loop(
        cond,
        body,
        loop_vars=[
            0, tf.TensorArray(size=self._num_iters, dtype=tf_dtype)
        ],
        back_prop=False,
        parallel_iterations=1)
    avg_grad = reduce_sum(all_grads.stack(), axis=0)
    return [avg_grad]
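
For reference, a self-contained sketch of the SPSA estimate that the loop above accumulates; spsa_grad_estimate and its parameters are illustrative names of ours, not the cleverhans API, and loss_fn is assumed to accept a batch of inputs and return one loss per element:

import tensorflow as tf

def spsa_grad_estimate(loss_fn, x, delta=0.01, num_samples=64):
    """Average (L(x + d*p) - L(x - d*p)) / (2*d) * p over random
    +/-1 perturbations p, which approximates the gradient at x."""
    shape = tf.concat([[num_samples], tf.shape(x)], axis=0)
    p = tf.sign(tf.random.uniform(shape, minval=-1.0, maxval=1.0))
    l_pos = loss_fn(x[None] + delta * p)   # shape [num_samples]
    l_neg = loss_fn(x[None] - delta * p)
    weights = tf.reshape((l_pos - l_neg) / (2.0 * delta),
                         [num_samples] + [1] * len(x.shape))
    return tf.reduce_mean(weights * p, axis=0)

# d/dx of x^2 at x = 3.0 is 6; the +/-1 perturbations recover it exactly.
g = spsa_grad_estimate(lambda xs: tf.reduce_sum(xs ** 2, axis=1),
                       tf.constant([3.0]))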

Example 14: transform_targets_for_output
# Required import: import tensorflow [as alias]
# Or: from tensorflow import TensorArray [as alias]
def transform_targets_for_output(y_true, grid_y, grid_x, anchor_idxs, classes):
    # y_true: (N, boxes, (x1, y1, x2, y2, class, best_anchor))
    N = tf.shape(y_true)[0]
    # y_true_out: (N, grid, grid, anchors, [x, y, w, h, obj, class])
    y_true_out = tf.zeros((N, grid_y, grid_x, tf.shape(anchor_idxs)[0], 6))
    anchor_idxs = tf.cast(anchor_idxs, tf.int32)
    indexes = tf.TensorArray(tf.int32, 1, dynamic_size=True)
    updates = tf.TensorArray(tf.float32, 1, dynamic_size=True)
    idx = 0
    for i in tf.range(N):
        for j in tf.range(tf.shape(y_true)[1]):
            if tf.equal(y_true[i][j][2], 0):
                continue
            anchor_eq = tf.equal(anchor_idxs, tf.cast(y_true[i][j][5], tf.int32))
            if tf.reduce_any(anchor_eq):
                box = y_true[i][j][0:4]
                box_xy = (y_true[i][j][0:2] + y_true[i][j][2:4]) / 2.
                anchor_idx = tf.cast(tf.where(anchor_eq), tf.int32)
                grid_size = tf.cast(tf.stack([grid_x, grid_y], axis=-1), tf.float32)
                grid_xy = tf.cast(box_xy * grid_size, tf.int32)
                # grid[y][x][anchor] = (tx, ty, bw, bh, obj, class)
                indexes = indexes.write(
                    idx, [i, grid_xy[1], grid_xy[0], anchor_idx[0][0]])
                updates = updates.write(
                    idx, [box[0], box[1], box[2], box[3], 1, y_true[i][j][4]])
                idx += 1
    y_true_out = tf.tensor_scatter_nd_update(y_true_out, indexes.stack(),
                                             updates.stack())
    return y_true_out
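
The accumulate-then-scatter pattern above (collect indices and updates in TensorArrays, then apply them in one tensor_scatter_nd_update call) can be shown in isolation; the target and coordinates here are made up:

import tensorflow as tf

target = tf.zeros([4, 4])
indexes = tf.TensorArray(tf.int32, 1, dynamic_size=True)
updates = tf.TensorArray(tf.float32, 1, dynamic_size=True)
for idx, (row, col, val) in enumerate([(0, 1, 5.0), (2, 3, 7.0)]):
    indexes = indexes.write(idx, [row, col])
    updates = updates.write(idx, val)
result = tf.tensor_scatter_nd_update(target, indexes.stack(), updates.stack())
# result[0, 1] == 5.0 and result[2, 3] == 7.0; everything else stays 0.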

Example 15: _compute_states
# Required import: import tensorflow [as alias]
# Or: from tensorflow import TensorArray [as alias]
def _compute_states(self):
    """Compute hidden states.

    Returns:
        A tuple, (outputs, states).
    """
    _inputs = tf.transpose(self.inputs, [1, 0, 2])
    x_ta = tf.TensorArray(tf.float32, size=self.length).unstack(_inputs)
    h_ta = tf.TensorArray(tf.float32, size=self.length)

    def cond(t, h, h_ta):
        return tf.less(t, self.length)

    def body(t, h, h_ta):
        x = x_ta.read(t)
        num_units, input_size = self.num_hidden_units, self.input_size
        with tf.variable_scope('simple_rnn'):
            h_new = self.activation(self._linear(h, x, num_units,
                                                 scope='simple_rnn'))
        h_ta_new = h_ta.write(t, h_new)
        return t + 1, h_new, h_ta_new

    t = tf.constant(0)
    h = tf.squeeze(self.initial_states, [1])
    _, _, h_ta = tf.while_loop(cond, body, [t, h, h_ta])

    states = tf.transpose(h_ta.stack(), [1, 0, 2], name='states')
    outputs = tf.identity(states, name='outputs')
    return outputs, states
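
Finally, a minimal, self-contained version of the while-loop-plus-TensorArray pattern this last example uses (ours, with made-up values; it also runs eagerly in TF 2.x):

import tensorflow as tf

length = 5
ta = tf.TensorArray(tf.float32, size=length)

def cond(t, ta):
    return t < length

def body(t, ta):
    # Write one value per step; pass the new handle to the next iteration.
    return t + 1, ta.write(t, tf.cast(t, tf.float32) ** 2)

_, ta = tf.while_loop(cond, body, [0, ta])
print(ta.stack())  # [ 0.  1.  4.  9. 16.]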