This article collects typical usage examples of the tensorflow.slice method in Python. If you have been wondering how exactly to use tensorflow.slice, or what it looks like in real code, then congratulations: the curated examples here should help. You can also explore further usage examples from the tensorflow module that provides this method.
Below are 15 code examples of tensorflow.slice, sorted by popularity by default.
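Before the examples, a quick refresher on the op itself: tf.slice(input_, begin, size) extracts a contiguous block starting at begin with extent size, where -1 in size means "everything to the end of that dimension". A minimal TF1-style sketch (the examples below are all TF1 graph code):

import tensorflow as tf

t = tf.constant([[1, 2, 3],
                 [4, 5, 6],
                 [7, 8, 9]])
block = tf.slice(t, [0, 1], [2, 2])   # rows 0-1, columns 1-2
rest = tf.slice(t, [1, 0], [-1, -1])  # -1 keeps everything to the end

with tf.Session() as sess:
    print(sess.run(block))  # [[2 3] [5 6]]
    print(sess.run(rest))   # [[4 5 6] [7 8 9]]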
Example 1: test_fgm_gradient_max
# Required import: import tensorflow [as alias]
# Or: from tensorflow import slice [as alias]
def test_fgm_gradient_max(self):
    input_dim = 2
    num_classes = 3
    batch_size = 4
    rng = np.random.RandomState([2017, 8, 23])
    x = tf.placeholder(tf.float32, [batch_size, input_dim])
    weights = tf.placeholder(tf.float32, [input_dim, num_classes])
    logits = tf.matmul(x, weights)
    probs = tf.nn.softmax(logits)
    adv_x = fgm(x, probs)
    random_example = rng.randint(batch_size)
    random_feature = rng.randint(input_dim)
    output = tf.slice(adv_x, [random_example, random_feature], [1, 1])
    dx, = tf.gradients(output, x)
    # The following line catches GitHub issue #243
    self.assertIsNotNone(dx)
    dx = self.sess.run(dx, feed_dict=random_feed_dict(rng, [x, weights]))
    ground_truth = np.zeros((batch_size, input_dim))
    ground_truth[random_example, random_feature] = 1.
    self.assertClose(dx, ground_truth)
Example 2: _inv_preemphasis
# Required import: import tensorflow [as alias]
# Or: from tensorflow import slice [as alias]
def _inv_preemphasis(x):
    N = tf.shape(x)[0]
    i = tf.constant(0)
    W = tf.zeros(shape=tf.shape(x), dtype=tf.float32)

    def condition(i, y):
        return tf.less(i, N)

    def body(i, y):
        tmp = tf.slice(x, [0], [i + 1])
        tmp = tf.concat([tf.zeros([N - i - 1]), tmp], -1)
        y = hparams.preemphasis * y + tmp
        i = tf.add(i, 1)
        return [i, y]

    final = tf.while_loop(condition, body, [i, W])
    y = final[1]
    return y
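For intuition: the while_loop above unrolls the first-order recurrence y[n] = x[n] + k * y[n-1] (the inverse of pre-emphasis) in O(N^2) slice-and-pad steps. A minimal NumPy sketch of the same recurrence, assuming hparams.preemphasis is a scalar coefficient (0.97 here is a common choice, not taken from this source):

import numpy as np

def inv_preemphasis_np(x, coef=0.97):
    # y[n] = x[n] + coef * y[n-1], the inverse of y[n] = x[n] - coef * x[n-1]
    y = np.empty_like(x, dtype=np.float64)
    prev = 0.0
    for n, xn in enumerate(x):
        prev = xn + coef * prev
        y[n] = prev
    return y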
Example 3: compute_first_or_last
# Required import: import tensorflow [as alias]
# Or: from tensorflow import slice [as alias]
def compute_first_or_last(self, select, first=True):
    # Perform first or last operation on row select with probabilistic row selection
    answer = tf.zeros_like(select)
    running_sum = tf.zeros([self.batch_size, 1], self.data_type)
    for i in range(self.max_elements):
        if first:
            current = tf.slice(select, [0, i], [self.batch_size, 1])
        else:
            current = tf.slice(select, [0, self.max_elements - 1 - i],
                               [self.batch_size, 1])
        curr_prob = current * (1 - running_sum)
        curr_prob = curr_prob * tf.cast(curr_prob >= 0.0, self.data_type)
        running_sum += curr_prob
        temp_ans = []
        curr_prob = tf.expand_dims(tf.reshape(curr_prob, [self.batch_size]), 0)
        for i_ans in range(self.max_elements):
            if not first and i_ans == self.max_elements - 1 - i:
                temp_ans.append(curr_prob)
            elif first and i_ans == i:
                temp_ans.append(curr_prob)
            else:
                temp_ans.append(tf.zeros_like(curr_prob))
        temp_ans = tf.transpose(tf.concat(axis=0, values=temp_ans))
        answer += temp_ans
    return answer
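To see what the loop computes, here is a sketch of the first=True case for a single row in NumPy: each column's contribution is its selection probability times the probability that no earlier column was already picked, clipped at zero:

import numpy as np

def first_prob(select_row):
    # select_row: per-column selection probabilities for one row
    out = np.zeros_like(select_row)
    running = 0.0
    for i, p in enumerate(select_row):
        cp = max(p * (1.0 - running), 0.0)  # mass left over for "first hit at i"
        out[i] = cp
        running += cp
    return out

print(first_prob(np.array([0.5, 0.5, 0.5])))  # [0.5, 0.25, 0.125]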
Example 4: convert_network_state_tensorarray
# Required import: import tensorflow [as alias]
# Or: from tensorflow import slice [as alias]
def convert_network_state_tensorarray(tensorarray):
    """Converts a source TensorArray to a source Tensor.

    Performs a permutation between the steps * [stride, D] shape of a
    source TensorArray and the (flattened) [stride * steps, D] shape of
    a source Tensor.

    The TensorArrays used during recurrence have an additional zeroth step that
    needs to be removed.

    Args:
      tensorarray: TensorArray object to be converted.

    Returns:
      Tensor object after conversion.
    """
    tensor = tensorarray.stack()  # Results in a [steps, stride, D] tensor.
    tensor = tf.slice(tensor, [1, 0, 0], [-1, -1, -1])  # Lop off the 0th step.
    tensor = tf.transpose(tensor, [1, 0, 2])  # Switch steps and stride.
    return tf.reshape(tensor, [-1, tf.shape(tensor)[2]])
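The same permutation in NumPy, with hypothetical sizes, to make the shape bookkeeping concrete:

import numpy as np

steps, stride, D = 4, 2, 3                 # hypothetical sizes
t = np.arange(steps * stride * D).reshape(steps, stride, D)
t = t[1:]                                  # lop off the 0th step
t = t.transpose(1, 0, 2)                   # [stride, steps - 1, D]
flat = t.reshape(-1, D)                    # [stride * (steps - 1), D]
print(flat.shape)                          # (6, 3)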
Example 5: _AddOutputs
# Required import: import tensorflow [as alias]
# Or: from tensorflow import slice [as alias]
def _AddOutputs(self, prev_layer, out_dims, out_func, num_classes):
    """Adds the output layer and loss function.

    Args:
      prev_layer: Output of last layer of main network.
      out_dims: Number of output dimensions, 0, 1 or 2.
      out_func: Output non-linearity. 's' or 'c'=softmax, 'l'=logistic.
      num_classes: Number of outputs/size of last output dimension.
    """
    height_in = shapes.tensor_dim(prev_layer, dim=1)
    logits, outputs = self._AddOutputLayer(prev_layer, out_dims, out_func,
                                           num_classes)
    if self.mode == 'train':
        # Set up the loss for training.
        self.loss = self._AddLossFunction(logits, height_in, out_dims, out_func)
        tf.summary.scalar('loss', self.loss)
    elif out_dims == 0:
        # Be sure the labels match the output, even in eval mode.
        self.labels = tf.slice(self.labels, [0, 0], [-1, 1])
        self.labels = tf.reshape(self.labels, [-1])
    logging.info('Final output=%s', outputs)
    logging.info('Labels tensor=%s', self.labels)
    self.output = outputs
Example 6: _PadLabels2d
# Required import: import tensorflow [as alias]
# Or: from tensorflow import slice [as alias]
def _PadLabels2d(logits_size, labels):
    """Pads or slices the 2nd dimension of 2-d labels to match logits_size.

    Covers the case of 1-d softmax output, when labels is [batch, seq] and
    logits is [batch, seq, onehot].

    Args:
      logits_size: Tensor returned from tf.shape giving the target size.
      labels: 2-d, but not necessarily matching in size.

    Returns:
      labels: Resized by padding or clipping the last dimension to logits_size.
    """
    pad = logits_size - tf.shape(labels)[1]

    def _PadFn():
        return tf.pad(labels, [[0, 0], [0, pad]])

    def _SliceFn():
        return tf.slice(labels, [0, 0], [-1, logits_size])

    return tf.cond(tf.greater(pad, 0), _PadFn, _SliceFn)
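The pad-or-clip decision, restated eagerly in NumPy (a sketch, not the graph-mode original):

import numpy as np

def pad_labels_2d_np(labels, logits_size):
    pad = logits_size - labels.shape[1]
    if pad > 0:
        return np.pad(labels, ((0, 0), (0, pad)))
    return labels[:, :logits_size]

labels = np.ones((2, 3), dtype=np.int64)
print(pad_labels_2d_np(labels, 5).shape)  # (2, 5): padded
print(pad_labels_2d_np(labels, 2).shape)  # (2, 2): clipped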
Example 7: expanded_shape
# Required import: import tensorflow [as alias]
# Or: from tensorflow import slice [as alias]
def expanded_shape(orig_shape, start_dim, num_dims):
    """Inserts multiple ones into a shape vector.

    Inserts an all-1 vector of length num_dims at position start_dim into a
    shape. Can be combined with tf.reshape to generalize tf.expand_dims.

    Args:
      orig_shape: the shape into which the all-1 vector is added (int32 vector).
      start_dim: insertion position (int scalar).
      num_dims: length of the inserted all-1 vector (int scalar).

    Returns:
      An int32 vector of length tf.size(orig_shape) + num_dims.
    """
    with tf.name_scope('ExpandedShape'):
        start_dim = tf.expand_dims(start_dim, 0)  # scalar to rank-1
        before = tf.slice(orig_shape, [0], start_dim)
        add_shape = tf.ones(tf.reshape(num_dims, [1]), dtype=tf.int32)
        after = tf.slice(orig_shape, start_dim, [-1])
        new_shape = tf.concat([before, add_shape, after], 0)
        return new_shape
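Its effect is easiest to see on plain Python lists; a tiny equivalent sketch:

def expanded_shape_py(orig_shape, start_dim, num_dims):
    return (list(orig_shape[:start_dim]) + [1] * num_dims
            + list(orig_shape[start_dim:]))

assert expanded_shape_py([2, 3, 4], 1, 2) == [2, 1, 1, 3, 4]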
Example 8: combine
# Required import: import tensorflow [as alias]
# Or: from tensorflow import slice [as alias]
def combine(self, expert_out, multiply_by_gates=True):
    """Sum together the expert output, weighted by the gates.

    The slice corresponding to a particular batch element `b` is computed
    as the sum over all experts `i` of the expert output, weighted by the
    corresponding gate values. If `multiply_by_gates` is set to False, the
    gate values are ignored.

    Args:
      expert_out: a list of `num_experts` `Tensor`s, each with shape
        `[expert_batch_size_i, <extra_output_dims>]`.
      multiply_by_gates: a boolean.

    Returns:
      a `Tensor` with shape `[batch_size, <extra_output_dims>]`.
    """
    # See comments on convert_gradient_to_tensor.
    stitched = common_layers.convert_gradient_to_tensor(
        tf.concat(expert_out, 0))
    if multiply_by_gates:
        stitched *= tf.expand_dims(self._nonzero_gates, 1)
    combined = tf.unsorted_segment_sum(stitched, self._batch_index,
                                       tf.shape(self._gates)[0])
    return combined
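The scatter bookkeeping in NumPy, with made-up data: rows of the stitched tensor are routed back to their batch elements by _batch_index and summed (np.add.at plays the role of tf.unsorted_segment_sum here):

import numpy as np

stitched = np.array([[1., 1.], [2., 2.], [3., 3.]])  # concatenated expert outputs
batch_index = np.array([0, 1, 0])                    # rows 0 and 2 belong to batch item 0
gates = np.array([0.5, 1.0, 0.25])                   # nonzero gate value per row

combined = np.zeros((2, 2))
np.add.at(combined, batch_index, stitched * gates[:, None])
print(combined)  # [[1.25 1.25] [2.   2.  ]]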
Example 9: add_positional_embedding
# Required import: import tensorflow [as alias]
# Or: from tensorflow import slice [as alias]
def add_positional_embedding(x, max_length, name, positions=None):
    """Add positional embedding.

    Args:
      x: a Tensor with shape [batch, length, depth].
      max_length: an integer. Static maximum size of any dimension.
      name: a name for this layer.
      positions: an optional tensor with shape [batch, length].

    Returns:
      a Tensor the same shape as x.
    """
    _, length, depth = common_layers.shape_list(x)
    var = tf.cast(tf.get_variable(name, [max_length, depth]), x.dtype)
    if positions is None:
        sliced = tf.cond(
            tf.less(length, max_length),
            lambda: tf.slice(var, [0, 0], [length, -1]),
            lambda: tf.pad(var, [[0, length - max_length], [0, 0]]))
        return x + tf.expand_dims(sliced, 0)
    else:
        return x + tf.gather(var, tf.to_int32(positions))
Example 10: _relative_position_to_absolute_position_masked
# Required import: import tensorflow [as alias]
# Or: from tensorflow import slice [as alias]
def _relative_position_to_absolute_position_masked(x):
    """Helper to dot_product_self_attention_relative_v2.

    Rearrange an attention logits or weights Tensor.

    The dimensions of the input represent:
    [batch, heads, query_position, memory_position - query_position + length - 1]

    The dimensions of the output represent:
    [batch, heads, query_position, memory_position]

    Only works with masked_attention. Undefined behavior for regions of the
    input where memory_position > query_position.

    Args:
      x: a Tensor with shape [batch, heads, length, length]

    Returns:
      a Tensor with shape [batch, heads, length, length]
    """
    batch, heads, length, _ = common_layers.shape_list(x)
    x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0]])
    x = tf.reshape(x, [batch, heads, 1 + length, length])
    x = tf.slice(x, [0, 0, 1, 0], [-1, -1, -1, -1])
    return x
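The pad/reshape/slice trick is easier to follow with concrete NumPy shapes; a sketch with batch = heads = 1:

import numpy as np

batch, heads, length = 1, 1, 3
x = np.arange(batch * heads * length * length, dtype=float).reshape(
    batch, heads, length, length)

y = np.pad(x, [(0, 0), (0, 0), (0, 0), (1, 0)])  # prepend one zero column
y = y.reshape(batch, heads, length + 1, length)  # reinterpret: one extra row
y = y[:, :, 1:, :]                               # drop that row; diagonals realign
print(y.shape)                                   # (1, 1, 3, 3)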
Example 11: _absolute_position_to_relative_position_masked
# Required import: import tensorflow [as alias]
# Or: from tensorflow import slice [as alias]
def _absolute_position_to_relative_position_masked(x):
    """Helper to dot_product_self_attention_relative_v2.

    Rearrange an attention logits or weights Tensor.

    The dimensions of the input represent:
    [batch, heads, query_position, memory_position]

    The dimensions of the output represent:
    [batch, heads, query_position, memory_position - query_position + length - 1]

    Only works with masked_attention. Undefined behavior for regions of the
    input where memory_position > query_position.

    Args:
      x: a Tensor with shape [batch, heads, length, length]

    Returns:
      a Tensor with shape [batch, heads, length, length]
    """
    batch, heads, length, _ = common_layers.shape_list(x)
    x = tf.pad(x, [[0, 0], [0, 0], [1, 0], [0, 0]])
    x = tf.reshape(x, [batch, heads, length, length + 1])
    x = tf.slice(x, [0, 0, 0, 1], [batch, heads, length, length])
    return x
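This is the mirror image of the previous helper: pad one zero row, reinterpret, drop the first column. A self-contained NumPy sketch of the shape flow:

import numpy as np

batch, heads, length = 1, 1, 3
x = np.arange(batch * heads * length * length, dtype=float).reshape(
    batch, heads, length, length)

y = np.pad(x, [(0, 0), (0, 0), (1, 0), (0, 0)])  # prepend one zero row
y = y.reshape(batch, heads, length, length + 1)  # reinterpret: one extra column
y = y[:, :, :, 1:]                               # drop it; back to relative coords
print(y.shape)                                   # (1, 1, 3, 3)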
Example 12: get_loss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import slice [as alias]
def get_loss(predicted_transformation, batch_size, template_pointclouds_pl,
             source_pointclouds_pl):
    with tf.variable_scope('loss') as LossEvaluation:
        predicted_position = tf.slice(predicted_transformation, [0, 0],
                                      [batch_size, 3])
        predicted_quat = tf.slice(predicted_transformation, [0, 3],
                                  [batch_size, 4])

        # Normalize the predicted quaternion; the epsilon avoids division by zero.
        norm_predicted_quat = tf.reduce_sum(tf.square(predicted_quat), 1)
        norm_predicted_quat = tf.sqrt(norm_predicted_quat)
        norm_predicted_quat = tf.reshape(norm_predicted_quat, (batch_size, 1))
        const = tf.constant(1e-7, shape=(batch_size, 1), dtype=tf.float32)
        norm_predicted_quat = tf.add(norm_predicted_quat, const)
        predicted_norm_quat = tf.divide(predicted_quat, norm_predicted_quat)

        transformed_predicted_point_cloud = helper.transformation_quat_tensor(
            source_pointclouds_pl, predicted_norm_quat, predicted_position)
        # loss = tf_util_loss.earth_mover(template_pointclouds_pl,
        #                                 transformed_predicted_point_cloud)
        loss = tf_util_loss.chamfer(template_pointclouds_pl,
                                    transformed_predicted_point_cloud)
    return loss
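The quaternion normalization above, restated in NumPy (the 1e-7 epsilon guards against a zero-norm prediction):

import numpy as np

quat = np.random.randn(8, 4).astype(np.float32)  # hypothetical [batch_size, 4]
norm = np.sqrt((quat ** 2).sum(axis=1, keepdims=True)) + 1e-7
unit_quat = quat / norm
print(np.linalg.norm(unit_quat, axis=1))         # ~1.0 per row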
Example 13: get_loss_b
# Required import: import tensorflow [as alias]
# Or: from tensorflow import slice [as alias]
def get_loss_b(self, predicted_transformation, batch_size,
               template_pointclouds_pl, source_pointclouds_pl):
    with tf.variable_scope('loss') as LossEvaluation:
        predicted_position = tf.slice(predicted_transformation, [0, 0],
                                      [batch_size, 3])
        predicted_quat = tf.slice(predicted_transformation, [0, 3],
                                  [batch_size, 4])

        # Normalize the predicted quaternion; the epsilon avoids division by zero.
        norm_predicted_quat = tf.reduce_sum(tf.square(predicted_quat), 1)
        norm_predicted_quat = tf.sqrt(norm_predicted_quat)
        norm_predicted_quat = tf.reshape(norm_predicted_quat, (batch_size, 1))
        const = tf.constant(1e-7, shape=(batch_size, 1), dtype=tf.float32)
        norm_predicted_quat = tf.add(norm_predicted_quat, const)
        predicted_norm_quat = tf.divide(predicted_quat, norm_predicted_quat)

        transformed_predicted_point_cloud = helper.transformation_quat_tensor(
            source_pointclouds_pl, predicted_norm_quat, predicted_position)
        # Use 1024 points to find loss.
        # loss = tf_util_loss.earth_mover(template_pointclouds_pl,
        #                                 transformed_predicted_point_cloud)
        loss = tf_util_loss.chamfer(template_pointclouds_pl,
                                    transformed_predicted_point_cloud)
    return loss
Example 14: omniglot
# Required import: import tensorflow [as alias]
# Or: from tensorflow import slice [as alias]
def omniglot():
    sess = tf.InteractiveSession()

    """ def wrapper(v):
        return tf.Print(v, [v], message="Printing v")

    v = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)),
                    dtype=tf.float32, name='Matrix')
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    temp = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)),
                       dtype=tf.float32, name='temp')
    temp = wrapper(v)
    # with tf.control_dependencies([temp]):
    temp.eval()
    print('Hello')"""

    def update_tensor(V, dim2, val):  # Update tensor V at index (:, dim2[:]) with val[:]
        val = tf.cast(val, V.dtype)

        def body(_, elems):
            # Python 3 removed tuple parameters, so unpack explicitly.
            v, d2, chg = elems
            d2_int = tf.cast(d2, tf.int32)
            # Rebuild the row with position d2 replaced by chg, then slice back
            # to the row's original static length. (tf.concat_v2 is the old name
            # for tf.concat.)
            return tf.slice(tf.concat([v[:d2_int], [chg], v[d2_int + 1:]], axis=0),
                            [0], [v.get_shape().as_list()[0]])

        Z = tf.scan(body, elems=(V, dim2, val),
                    initializer=tf.constant(1, shape=V.get_shape().as_list()[1:],
                                            dtype=tf.float32),
                    name="Scan_Update")
        return Z
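The whole scan amounts to writing one value per row at a per-row column index. In NumPy the same update is a one-liner; a sketch with made-up shapes:

import numpy as np

V = np.zeros((3, 5), dtype=np.float32)
dim2 = np.array([1, 4, 2])                      # one column index per row
val = np.array([7., 8., 9.], dtype=np.float32)

V[np.arange(V.shape[0]), dim2] = val            # row-wise update, no scan needed
print(V)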
Example 15: _minibatch_subsample_fn
# Required import: import tensorflow [as alias]
# Or: from tensorflow import slice [as alias]
def _minibatch_subsample_fn(self, inputs):
    """Randomly samples anchors for one image.

    Args:
      inputs: a list of 2 inputs. First one is a tensor of shape [num_anchors,
        num_classes] indicating targets assigned to each anchor. Second one
        is a tensor of shape [num_anchors] indicating the class weight of each
        anchor.

    Returns:
      batch_sampled_indicator: bool tensor of shape [num_anchors] indicating
        whether the anchor should be selected for loss computation.
    """
    cls_targets, cls_weights = inputs
    if self._add_background_class:
        # Set background_class bits to 0 so that the positives_indicator
        # computation would not consider background class.
        background_class = tf.zeros_like(tf.slice(cls_targets, [0, 0], [-1, 1]))
        regular_class = tf.slice(cls_targets, [0, 1], [-1, -1])
        cls_targets = tf.concat([background_class, regular_class], 1)
    positives_indicator = tf.reduce_sum(cls_targets, axis=1)
    return self._random_example_sampler.subsample(
        tf.cast(cls_weights, tf.bool),
        batch_size=None,
        labels=tf.cast(positives_indicator, tf.bool))
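Zeroing the background column before computing the positives indicator, shown eagerly in NumPy with a toy one-hot target matrix:

import numpy as np

cls_targets = np.array([[1., 0., 0.],    # background-only anchor
                        [0., 1., 0.],    # positive anchor (class 1)
                        [0., 0., 1.]])   # positive anchor (class 2)

background = np.zeros_like(cls_targets[:, :1])
cls_targets = np.concatenate([background, cls_targets[:, 1:]], axis=1)
positives_indicator = cls_targets.sum(axis=1)
print(positives_indicator.astype(bool))  # [False  True  True]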