This article collects typical usage examples of the tensorflow.strided_slice function in Python. If you have been wondering what strided_slice does, how to call it, or what real-world uses look like, the hand-picked code examples below should help.
A total of 15 code examples of strided_slice are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
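Before the real-world snippets, here is a minimal, self-contained sketch of the basic call (TF 1.x API, as used throughout this article; the tensor values are made up purely for illustration):

import tensorflow as tf

# A 3x3 example tensor (values chosen arbitrarily).
t = tf.constant([[1, 2, 3],
                 [4, 5, 6],
                 [7, 8, 9]])

# Rows 0..1, and within them every second column starting at 0:
# begin=[0, 0], end=[2, 3], strides=[1, 2]  ->  [[1, 3], [4, 6]]
sliced = tf.strided_slice(t, [0, 0], [2, 3], [1, 2])

with tf.Session() as sess:
    print(sess.run(sliced))  # [[1 3]
                             #  [4 6]]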
Example 1: read_cifar10
def read_cifar10(filename_queue):

    class CIFAR10Record(object):
        pass

    result = CIFAR10Record()
    label_bytes = 1
    result.height = 32
    result.width = 32
    result.depth = 3
    image_bytes = result.height * result.width * result.depth
    record_bytes = label_bytes + image_bytes

    # Read a record, getting filenames from the filename_queue.
    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
    result.key, value = reader.read(filename_queue)

    # Convert from a string to a vector of uint8 that is record_bytes long.
    record_bytes = tf.decode_raw(value, tf.uint8)

    # The first bytes represent the label, which we convert from uint8->int32.
    result.label = tf.cast(tf.strided_slice(record_bytes, [0], [label_bytes]), tf.int32)

    # The remaining bytes after the label represent the image, which we reshape
    # from [depth * height * width] to [depth, height, width].
    depth_major = tf.reshape(
        tf.strided_slice(record_bytes, [label_bytes],
                         [label_bytes + image_bytes]),
        [result.depth, result.height, result.width])

    # Convert from [depth, height, width] to [height, width, depth].
    result.uint8image = tf.transpose(depth_major, [1, 2, 0])
    return result
Example 2: _read_input
def _read_input(filename_queue):
    """Reads a single record and converts it to a tensor.

    Each record consists of the 3x32x32 image with one byte for the label.

    Args:
      filename_queue: A queue of strings with the filenames to read from.

    Returns:
      image: a [32, 32, 3] float32 Tensor with the image data.
      label: an int32 Tensor with the label in the range 0..9.
    """
    label_bytes = 1
    height = 32
    depth = 3
    image_bytes = height * height * depth  # images are square, so width == height
    record_bytes = label_bytes + image_bytes

    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
    _, byte_data = reader.read(filename_queue)
    uint_data = tf.decode_raw(byte_data, tf.uint8)

    label = tf.cast(tf.strided_slice(uint_data, [0], [label_bytes]), tf.int32)
    label.set_shape([1])

    depth_major = tf.reshape(
        tf.strided_slice(uint_data, [label_bytes], [record_bytes]),
        [depth, height, height])
    image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)
    return image, label
Example 3: ptb_producer
def ptb_producer(raw_data, batch_size, num_steps, name=None):
    with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]):
        raw_data = tf.convert_to_tensor(raw_data,
                                        dtype=tf.int32, name="raw_data")
        data_len = tf.size(raw_data)
        batch_len = data_len // batch_size
        data = tf.reshape(raw_data[0: batch_len * batch_size],
                          [batch_size, batch_len])

        epoch_size = (batch_len - 1) // num_steps
        assertion = tf.assert_positive(
            epoch_size,
            message="batch size too large")
        with tf.control_dependencies([assertion]):
            epoch_size = tf.identity(epoch_size, name="epoch_size")

        i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
        # x is a [batch_size, num_steps] window of the data; y is the same
        # window shifted one token to the right (the prediction targets).
        x = tf.strided_slice(data, [0, i * num_steps],
                             [batch_size, (i + 1) * num_steps])
        x.set_shape([batch_size, num_steps])
        y = tf.strided_slice(data, [0, i * num_steps + 1],
                             [batch_size, (i + 1) * num_steps + 1])
        y.set_shape([batch_size, num_steps])
        return x, y
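To make the slicing concrete, here is a hypothetical way to drive the producer on a toy corpus (TF 1.x queue runners; the token ids are made up):

import tensorflow as tf

raw_data = [4, 3, 2, 1, 0, 5, 6, 1, 1, 1, 1, 0, 3, 4, 1]
x, y = ptb_producer(raw_data, batch_size=2, num_steps=3)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess, coord=coord)
    # First batch: x == [[4, 3, 2], [1, 1, 1]], y == [[3, 2, 1], [1, 1, 1]]
    print(sess.run([x, y]))
    coord.request_stop()
    coord.join(threads)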
Example 4: read_data
def read_data(file_q):
    # Code from https://github.com/tensorflow/models/blob/master/tutorials/image/cifar10/cifar10_input.py
    class CIFAR10Record(object):
        pass

    result = CIFAR10Record()

    # Dimensions of the images in the CIFAR-10 dataset.
    # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
    # input format.
    label_bytes = 1  # 2 for CIFAR-100
    result.height = 32
    result.width = 32
    result.depth = 3
    image_bytes = result.height * result.width * result.depth
    # Every record consists of a label followed by the image, with a
    # fixed number of bytes for each.
    record_bytes = label_bytes + image_bytes

    # Read a record, getting filenames from the filename_queue. No
    # header or footer in the CIFAR-10 format, so we leave header_bytes
    # and footer_bytes at their default of 0.
    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
    result.key, value = reader.read(file_q)

    # Convert from a string to a vector of uint8 that is record_bytes long.
    record_bytes = tf.decode_raw(value, tf.uint8)

    # The first bytes represent the label, which we convert from uint8->int32.
    result.label = tf.cast(
        tf.strided_slice(record_bytes, [0], [label_bytes]), tf.int32)

    # The remaining bytes after the label represent the image, which we reshape
    # from [depth * height * width] to [depth, height, width].
    depth_major = tf.reshape(
        tf.strided_slice(record_bytes, [label_bytes],
                         [label_bytes + image_bytes]),
        [result.depth, result.height, result.width])
    # Convert from [depth, height, width] to [height, width, depth].
    result.uint8image = tf.transpose(depth_major, [1, 2, 0])

    reshaped_image = tf.cast(result.uint8image, tf.float32)

    height = 24
    width = 24

    # Image processing for evaluation.
    # Crop the central [height, width] of the image.
    resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,
                                                           height, width)

    # Subtract off the mean and divide by the variance of the pixels.
    float_image = tf.image.per_image_standardization(resized_image)

    # Set the shapes of tensors.
    float_image.set_shape([height, width, 3])
    result.label.set_shape([1])

    return float_image, result.label
Example 5: read_cifar10
def read_cifar10(filename_queue):
    """Reads and parses examples from CIFAR10 data files.

    Recommendation: if you want N-way read parallelism, call this function
    N times. This will give you N independent Readers reading different
    files & positions within those files, which will give better mixing of
    examples.

    Args:
      filename_queue: A queue of strings with the filenames to read from.

    Returns:
      An object representing a single example, with the following fields:
        height: number of rows in the result (32)
        width: number of columns in the result (32)
        depth: number of color channels in the result (3)
        key: a scalar string Tensor describing the filename & record number
          for this example.
        label: an int32 Tensor with the label in the range 0..9.
        uint8image: a [height, width, depth] uint8 Tensor with the image data
    """

    class CIFAR10Record(object):
        pass

    result = CIFAR10Record()

    # Dimensions of the images in the CIFAR-10 dataset.
    # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
    # input format.
    label_bytes = 1  # 2 for CIFAR-100
    result.height = 32
    result.width = 32
    result.depth = 3
    image_bytes = result.height * result.width * result.depth
    # Every record consists of a label followed by the image, with a
    # fixed number of bytes for each.
    record_bytes = label_bytes + image_bytes

    # Read a record, getting filenames from the filename_queue. No
    # header or footer in the CIFAR-10 format, so we leave header_bytes
    # and footer_bytes at their default of 0.
    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
    result.key, value = reader.read(filename_queue)

    # Convert from a string to a vector of uint8 that is record_bytes long.
    record_bytes = tf.decode_raw(value, tf.uint8)

    # The first bytes represent the label, which we convert from uint8->int32.
    result.label = tf.cast(
        tf.strided_slice(record_bytes, [0], [label_bytes]), tf.int32)

    # The remaining bytes after the label represent the image, which we reshape
    # from [depth * height * width] to [depth, height, width].
    depth_major = tf.reshape(
        tf.strided_slice(record_bytes, [label_bytes],
                         [label_bytes + image_bytes]),
        [result.depth, result.height, result.width])
    # Convert from [depth, height, width] to [height, width, depth].
    result.uint8image = tf.transpose(depth_major, [1, 2, 0])

    return result
Example 6: __init__
def __init__(self, **kwargs):
    """
    Interprets the input features as alternating real and imaginary parts
    and combines them into a single complex-valued output.
    """
    super(AlternatingRealToComplexLayer, self).__init__(**kwargs)

    input_placeholder = self.input_data.get_placeholder_as_batch_major()

    # A stride of 2 along the feature axis picks every other value:
    # offset 0 -> the real parts, offset 1 -> the imaginary parts.
    real_value = tf.strided_slice(input_placeholder, [0, 0, 0], tf.shape(input_placeholder), [1, 1, 2])
    imag_value = tf.strided_slice(input_placeholder, [0, 0, 1], tf.shape(input_placeholder), [1, 1, 2])

    self.output.placeholder = tf.complex(real_value, imag_value)
    self.output.size_placeholder = {0: self.input_data.size_placeholder[self.input_data.time_dim_axis_excluding_batch]}
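To see what those two strided slices do on their own, here is a small stand-alone sketch (toy values, TF 1.x):

import tensorflow as tf

# Batch of 1, 1 time step, 6 features laid out as [re0, im0, re1, im1, re2, im2].
x = tf.constant([[[1., 10., 2., 20., 3., 30.]]])

real = tf.strided_slice(x, [0, 0, 0], tf.shape(x), [1, 1, 2])  # [[[1., 2., 3.]]]
imag = tf.strided_slice(x, [0, 0, 1], tf.shape(x), [1, 1, 2])  # [[[10., 20., 30.]]]

with tf.Session() as sess:
    print(sess.run(tf.complex(real, imag)))  # [[[1.+10.j 2.+20.j 3.+30.j]]]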
Example 7: _build_clp_multiplication
def _build_clp_multiplication(self, clp_kernel):
    from TFUtil import safe_log
    input_placeholder = self.input_data.get_placeholder_as_batch_major()
    tf.assert_equal(tf.shape(clp_kernel)[1], tf.shape(input_placeholder)[2] // 2)
    tf.assert_equal(tf.shape(clp_kernel)[2], self._nr_of_filters)

    # De-interleave the input into its real and imaginary parts
    # (stride 2 along the feature axis, offsets 0 and 1).
    input_real = tf.strided_slice(input_placeholder, [0, 0, 0], tf.shape(input_placeholder), [1, 1, 2])
    input_imag = tf.strided_slice(input_placeholder, [0, 0, 1], tf.shape(input_placeholder), [1, 1, 2])
    kernel_real = self._clp_kernel[0, :, :]
    kernel_imag = self._clp_kernel[1, :, :]

    # Complex multiplication, (a + bi)(c + di) = (ac - bd) + (ad + bc)i,
    # carried out over the feature axis with einsum.
    output_real = tf.einsum('btf,fp->btp', input_real, kernel_real) - tf.einsum('btf,fp->btp', input_imag, kernel_imag)
    output_imag = tf.einsum('btf,fp->btp', input_imag, kernel_real) + tf.einsum('btf,fp->btp', input_real, kernel_imag)

    # Magnitude followed by a log compression.
    output_uncompressed = tf.sqrt(tf.pow(output_real, 2) + tf.pow(output_imag, 2))
    output_compressed = safe_log(output_uncompressed)
    return output_compressed
Example 8: _test_stridedslice
def _test_stridedslice(ip_shape, begin, end, stride, dtype,
                       begin_mask=0, end_mask=0, new_axis_mask=0,
                       shrink_axis_mask=0, ellipsis_mask=0):
    """ One iteration of a Stridedslice """
    tf.reset_default_graph()
    in_data = tf.placeholder(dtype, ip_shape, name="in_data")
    tf.strided_slice(in_data, begin, end, stride, begin_mask=begin_mask,
                     end_mask=end_mask, new_axis_mask=new_axis_mask,
                     shrink_axis_mask=shrink_axis_mask,
                     ellipsis_mask=ellipsis_mask, name="strided_slice")
    np_data = np.random.uniform(size=ip_shape).astype(dtype)
    compare_tf_with_tvm(np_data, 'in_data:0', 'strided_slice:0')
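A call to this helper might look like the following (the shapes and mask values are illustrative, not taken from any particular test suite):

# Slice a (3, 4, 3) float32 tensor, using a negative stride on the middle axis.
_test_stridedslice((3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1], 'float32')

# Same input shape, but collapse the first axis via shrink_axis_mask.
_test_stridedslice((3, 4, 3), [1, 0, 0], [2, 4, 3], [1, 1, 1], 'float32',
                   shrink_axis_mask=1)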
Example 9: read_cifar10
def read_cifar10(filename_queue):
    """Reads and parses examples from CIFAR10 data files.

    Recommendation: if you want N-way read parallelism, call this function
    N times. This will give you N independent Readers reading different
    files & positions within those files, which will give better mixing of
    examples.

    Args:
      filename_queue: A queue of strings with the filenames to read from.

    Returns:
      An object representing a single example, with the following fields:
        height: number of rows in the result (32)
        width: number of columns in the result (32)
        depth: number of color channels in the result (3)
        key: a scalar string Tensor describing the filename & record number
          for this example.
        label: an int32 Tensor with the label in the range 0..9.
        uint8image: a [height, width, depth] uint8 Tensor with the image data
    """

    class CIFAR10Record(object):
        pass

    result = CIFAR10Record()
    label_bytes = 1
    result.height, result.width, result.depth = 32, 32, 3
    image_bytes = result.height * result.width * result.depth
    record_bytes = label_bytes + image_bytes

    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
    result.key, value = reader.read(filename_queue)

    # Convert from a string to a vector of uint8 that is record_bytes long.
    record_bytes = tf.decode_raw(value, tf.uint8)

    # The first bytes represent the label, which we convert from uint8->int32.
    result.label = tf.cast(tf.strided_slice(record_bytes, [0], [label_bytes]), tf.int32)

    # The remaining bytes after the label represent the image, which we reshape
    # from [depth * height * width] to [depth, height, width].
    depth_major = tf.reshape(tf.strided_slice(record_bytes, [label_bytes],
                                              [label_bytes + image_bytes]),
                             [result.depth, result.height, result.width])

    # Convert from [depth, height, width] to [height, width, depth].
    result.uint8image = tf.transpose(depth_major, [1, 2, 0])
    return result
Example 10: process_encoding_input
def process_encoding_input(target_data, vocab_to_int, batch_size):
    '''Remove the last word id from each batch and concat the <GO> to the beginning of each batch'''
    ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
    dec_input = tf.concat([tf.fill([batch_size, 1], vocab_to_int['<GO>']), ending], 1)
    return dec_input
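As a rough illustration of the effect (toy ids, TF 1.x; here we simply assume vocab_to_int['<GO>'] == 0):

import tensorflow as tf

target_data = tf.constant([[11, 12, 13],
                           [21, 22, 23]])  # two target sequences
dec_input = process_encoding_input(target_data, {'<GO>': 0}, batch_size=2)

with tf.Session() as sess:
    print(sess.run(dec_input))
    # [[ 0 11 12]
    #  [ 0 21 22]]  -- last id dropped, <GO> prepended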
Example 11: AddCrossEntropy
def AddCrossEntropy(batch_size, n):
    """Adds a cross entropy cost function."""
    cross_entropies = []

    def _Pass():
        return tf.constant(0, dtype=tf.float32, shape=[1])

    for beam_id in range(batch_size):
        beam_gold_slot = tf.reshape(
            tf.strided_slice(n['gold_slot'], [beam_id], [beam_id + 1]), [1])

        def _ComputeCrossEntropy():
            """Adds ops to compute cross entropy of the gold path in a beam."""
            # Requires a cast so that UnsortedSegmentSum, in the gradient,
            # is happy with the type of its input 'segment_ids', which
            # must be int32.
            idx = tf.cast(
                tf.reshape(
                    tf.where(tf.equal(n['beam_ids'], beam_id)), [-1]), tf.int32)
            beam_scores = tf.reshape(tf.gather(n['all_path_scores'], idx), [1, -1])
            num = tf.shape(idx)
            return tf.nn.softmax_cross_entropy_with_logits(
                labels=tf.expand_dims(
                    tf.sparse_to_dense(beam_gold_slot, num, [1.], 0.), 0),
                logits=beam_scores)

        # The conditional here is needed to deal with the last few batches of the
        # corpus which can contain -1 in beam_gold_slot for empty batch slots.
        # `cf` is presumably tensorflow.python.ops.control_flow_ops, imported
        # elsewhere in the original file.
        cross_entropies.append(cf.cond(
            beam_gold_slot[0] >= 0, _ComputeCrossEntropy, _Pass))
    return {'cross_entropy': tf.div(tf.add_n(cross_entropies), batch_size)}
Example 12: objective
def objective(x):
    """Rosenbrock function. (Carl Edward Rasmussen, 2001-07-21).

    f(x) = sum_{i=1:D-1} 100*(x(i+1) - x(i)^2)^2 + (1-x(i))^2

    Args:
      x: a Variable
    Returns:
      f: a tensor (objective value)
    """
    d = tf.size(x)
    # Note: tf.sub is the pre-TensorFlow-1.0 name of tf.subtract.
    s = tf.add(
        100 * tf.square(tf.sub(tf.strided_slice(x, [1], [d]), tf.square(tf.strided_slice(x, [0], [d - 1])))),
        tf.square(tf.sub(1.0, tf.strided_slice(x, [0], [d - 1]))),
    )
    return tf.reduce_sum(s)
Example 13: _my_metric_op
def _my_metric_op(predictions, labels):
    # For the case of binary classification, the 2nd column of "predictions"
    # denotes the model predictions.
    labels = tf.to_float(labels)
    # end_mask=1 tells strided_slice to ignore `end` for dimension 0, so all
    # rows are kept; columns are sliced to [1, 2), i.e. the positive-class score.
    predictions = tf.strided_slice(
        predictions, [0, 1], [-1, 2], end_mask=1)
    # `math_ops` is presumably tensorflow.python.ops.math_ops, imported
    # elsewhere in the original test code.
    labels = math_ops.cast(labels, predictions.dtype)
    return tf.reduce_sum(tf.multiply(predictions, labels))
Example 14: process_decoder_input
def process_decoder_input(data, vocab_to_int, batch_size):
    '''
    Prepend <GO> and drop the last character of each sequence.
    '''
    # Cut off the last character
    ending = tf.strided_slice(data, [0, 0], [batch_size, -1], [1, 1])
    decoder_input = tf.concat([tf.fill([batch_size, 1], vocab_to_int['<GO>']), ending], 1)
    return decoder_input
Example 15: gather
def gather(self, src, force_copy=False):
    """
    Fetches the data corresponding to ``src`` from the base array.

    Parameters
    ----------
    src : `.TensorSignal`
        Signal indicating the data to be read from base array
    force_copy : bool
        If True, always perform a gather, not a slice (this forces a
        copy). Note that setting ``force_copy=False`` does not guarantee
        that a copy won't be performed.

    Returns
    -------
    gathered : ``tf.Tensor``
        Tensor object corresponding to a dense subset of data from the
        base array
    """
    logger.debug("gather")
    logger.debug("src %s", src)
    logger.debug("indices %s", src.indices)
    logger.debug("src base %s", self.bases[src.key])

    var = self.bases[src.key]

    # we prefer to get the data via `strided_slice` or `identity` if
    # possible, as it is more efficient
    if force_copy or src.tf_slice is None:
        result = tf.gather(var, src.tf_indices)
        self.read_types["gather"] += 1
    elif (src.indices[0] == 0 and
          src.indices[-1] == var.get_shape()[0].value - 1 and
          len(src.indices) == var.get_shape()[0]):
        result = var
        self.read_types["identity"] += 1
    else:
        result = tf.strided_slice(var, *src.tf_slice)
        self.read_types["strided_slice"] += 1

    # reshape the data according to the shape set in `src`, if there is
    # one, otherwise keep the shape of the base array
    if result.get_shape() != src.full_shape:
        result = tf.reshape(result, src.tf_shape)

    # for some reason the shape inference doesn't work in some cases
    result.set_shape(src.full_shape)

    # whenever we read from an array we use this to mark it as "read"
    # (so that any future writes to the array will be scheduled after
    # the read)
    self.mark_gather(src)

    return result