This article collects typical usage examples of the tensorflow.int64 method in Python. If you are unsure what tensorflow.int64 does, how to use it, or would like to see it in context, the curated code samples below may help. You can also explore further usage examples from the tensorflow module to which this method belongs.
The following presents 15 code examples of tensorflow.int64, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
Example 1: read_from_tfrecord
# Required module: import tensorflow [as alias]
# Or: from tensorflow import int64 [as alias]
def read_from_tfrecord(filenames):
    tfrecord_file_queue = tf.train.string_input_producer(filenames, name='queue')
    reader = tf.TFRecordReader()
    _, tfrecord_serialized = reader.read(tfrecord_file_queue)
    # Parse the serialized record into its label, shape, and raw image features.
    tfrecord_features = tf.parse_single_example(tfrecord_serialized, features={
        'label': tf.FixedLenFeature([], tf.int64),
        'shape': tf.FixedLenFeature([], tf.string),
        'image': tf.FixedLenFeature([], tf.string),
    }, name='features')
    # Decode the raw bytes and restore the original image shape.
    image = tf.decode_raw(tfrecord_features['image'], tf.uint8)
    shape = tf.decode_raw(tfrecord_features['shape'], tf.int32)
    image = tf.reshape(image, shape)
    label = tfrecord_features['label']
    return label, shape, image
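As a usage note, the function above builds graph ops backed by a filename queue, so the tensors it returns must be evaluated inside a session with queue runners started. Below is a minimal sketch of that pattern under TF 1.x; the file name 'images.tfrecord' is a hypothetical placeholder.

import tensorflow as tf

# Hypothetical TFRecord file containing the 'label', 'shape', and 'image' features.
label, shape, image = read_from_tfrecord(['images.tfrecord'])

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    label_val, image_val = sess.run([label, image])
    coord.request_stop()
    coord.join(threads)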
Example 2: build_inputs
# Required module: import tensorflow [as alias]
# Or: from tensorflow import int64 [as alias]
def build_inputs(self):
    if self.mode == "encode":
        # Encode mode doesn't read from disk, so defer to parent.
        return super(SkipThoughtsModel, self).build_inputs()
    else:
        # Replace disk I/O with random Tensors.
        self.encode_ids = tf.random_uniform(
            [self.config.batch_size, 15],
            minval=0,
            maxval=self.config.vocab_size,
            dtype=tf.int64)
        self.decode_pre_ids = tf.random_uniform(
            [self.config.batch_size, 15],
            minval=0,
            maxval=self.config.vocab_size,
            dtype=tf.int64)
        self.decode_post_ids = tf.random_uniform(
            [self.config.batch_size, 15],
            minval=0,
            maxval=self.config.vocab_size,
            dtype=tf.int64)
        self.encode_mask = tf.ones_like(self.encode_ids)
        self.decode_pre_mask = tf.ones_like(self.decode_pre_ids)
        self.decode_post_mask = tf.ones_like(self.decode_post_ids)
Example 3: _read_single_sequence_example
# Required module: import tensorflow [as alias]
# Or: from tensorflow import int64 [as alias]
def _read_single_sequence_example(file_list, tokens_shape=None):
    """Reads and parses SequenceExamples from TFRecord-encoded file_list."""
    tf.logging.info('Constructing TFRecordReader from files: %s', file_list)
    file_queue = tf.train.string_input_producer(file_list)
    reader = tf.TFRecordReader()
    seq_key, serialized_record = reader.read(file_queue)
    ctx, sequence = tf.parse_single_sequence_example(
        serialized_record,
        sequence_features={
            data_utils.SequenceWrapper.F_TOKEN_ID:
                tf.FixedLenSequenceFeature(tokens_shape or [], dtype=tf.int64),
            data_utils.SequenceWrapper.F_LABEL:
                tf.FixedLenSequenceFeature([], dtype=tf.int64),
            data_utils.SequenceWrapper.F_WEIGHT:
                tf.FixedLenSequenceFeature([], dtype=tf.float32),
        })
    return seq_key, ctx, sequence
Example 4: loss
# Required module: import tensorflow [as alias]
# Or: from tensorflow import int64 [as alias]
def loss(logits, labels):
    """Add L2Loss to all the trainable variables.

    Add summary for "Loss" and "Loss/avg".

    Args:
        logits: Logits from inference().
        labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size].

    Returns:
        Loss tensor of type float.
    """
    # Calculate the average cross entropy loss across the batch.
    labels = tf.cast(labels, tf.int64)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)

    # The total loss is defined as the cross entropy loss plus all of the weight
    # decay terms (L2 loss).
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
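A minimal sketch of calling the loss above on dummy data, assuming TF 1.x; the batch size and class count below are illustrative only.

import tensorflow as tf

batch_size, num_classes = 8, 10
logits = tf.random_normal([batch_size, num_classes])
labels = tf.random_uniform([batch_size], maxval=num_classes, dtype=tf.int32)
total_loss = loss(logits, labels)  # labels are cast to tf.int64 inside loss()

with tf.Session() as sess:
    print(sess.run(total_loss))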
Example 5: build_cross_entropy_loss
# Required module: import tensorflow [as alias]
# Or: from tensorflow import int64 [as alias]
def build_cross_entropy_loss(logits, gold):
    """Constructs a cross entropy loss from logits and integer gold labels.

    Supports skipping rows where the gold label is the magic -1 value.

    Args:
        logits: float Tensor of scores.
        gold: int Tensor of gold class labels.

    Returns:
        cost, correct, total: the total cost, the total number of correctly
            predicted labels, and the total number of valid labels.
    """
    # Keep only the rows whose gold label is valid (i.e., not -1).
    valid = tf.reshape(tf.where(tf.greater(gold, -1)), [-1])
    gold = tf.gather(gold, valid)
    logits = tf.gather(logits, valid)
    correct = tf.reduce_sum(tf.to_int32(tf.nn.in_top_k(logits, gold, 1)))
    total = tf.size(gold)
    cost = tf.reduce_sum(
        tf.contrib.nn.deprecated_flipped_sparse_softmax_cross_entropy_with_logits(
            logits, tf.cast(gold, tf.int64))) / tf.cast(total, tf.float32)
    return cost, correct, total
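A minimal sketch of exercising this loss on a padded batch, assuming TF 1.x with tf.contrib available; the -1 entry below stands for a padding row that the function skips.

import tensorflow as tf

logits = tf.random_normal([4, 3])
gold = tf.constant([0, 2, -1, 1])  # the third row is padding and is skipped
cost, correct, total = build_cross_entropy_loss(logits, gold)

with tf.Session() as sess:
    print(sess.run([cost, correct, total]))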
Example 6: global_step
# Required module: import tensorflow [as alias]
# Or: from tensorflow import int64 [as alias]
def global_step(device=''):
    """Returns the global step variable.

    Args:
        device: Optional device to place the variable. It can be a string or a
            function that is called to get the device for the variable.

    Returns:
        the tensor representing the global step variable.
    """
    global_step_ref = tf.get_collection(tf.GraphKeys.GLOBAL_STEP)
    if global_step_ref:
        return global_step_ref[0]
    else:
        collections = [
            VARIABLES_TO_RESTORE,
            tf.GraphKeys.GLOBAL_VARIABLES,
            tf.GraphKeys.GLOBAL_STEP,
        ]
        # Get the device for the variable.
        with tf.device(variable_device(device, 'global_step')):
            return tf.get_variable('global_step', shape=[], dtype=tf.int64,
                                   initializer=tf.zeros_initializer(),
                                   trainable=False, collections=collections)
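Note that VARIABLES_TO_RESTORE and variable_device above are module-level helpers from the surrounding codebase (not shown here). For a quick experiment, TF 1.x ships a similar built-in helper that also creates an int64 scalar step; a minimal sketch:

import tensorflow as tf

step = tf.train.get_or_create_global_step()  # scalar tf.int64 variable
increment = tf.assign_add(step, 1)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(increment))  # 1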
Example 7: test_indices_to_dense_vector_int
# Required module: import tensorflow [as alias]
# Or: from tensorflow import int64 [as alias]
def test_indices_to_dense_vector_int(self):
    size = 500
    num_indices = 25
    rand_indices = np.random.permutation(np.arange(size))[0:num_indices]

    expected_output = np.zeros(size, dtype=np.int64)
    expected_output[rand_indices] = 1

    tf_rand_indices = tf.constant(rand_indices)
    indicator = ops.indices_to_dense_vector(
        tf_rand_indices, size, 1, dtype=tf.int64)
    with self.test_session() as sess:
        output = sess.run(indicator)
        self.assertAllEqual(output, expected_output)
        self.assertEqual(output.dtype, expected_output.dtype)
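For readers without the object_detection ops module at hand, the behaviour being tested (scattering ones into a dense int64 vector at the given indices) can be reproduced with plain TensorFlow ops. A minimal sketch, not the helper's actual implementation:

import tensorflow as tf

indices = tf.constant([[1], [3]])
dense = tf.scatter_nd(indices, tf.ones([2], dtype=tf.int64), shape=[5])

with tf.Session() as sess:
    print(sess.run(dense))  # [0 1 0 1 0]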
Example 8: build_inputs
# Required module: import tensorflow [as alias]
# Or: from tensorflow import int64 [as alias]
def build_inputs(self):
    if self.mode == "inference":
        # Inference mode doesn't read from disk, so defer to parent.
        return super(ShowAndTellModel, self).build_inputs()
    else:
        # Replace disk I/O with random Tensors.
        self.images = tf.random_uniform(
            shape=[self.config.batch_size, self.config.image_height,
                   self.config.image_width, 3],
            minval=-1,
            maxval=1)
        self.input_seqs = tf.random_uniform(
            [self.config.batch_size, 15],
            minval=0,
            maxval=self.config.vocab_size,
            dtype=tf.int64)
        self.target_seqs = tf.random_uniform(
            [self.config.batch_size, 15],
            minval=0,
            maxval=self.config.vocab_size,
            dtype=tf.int64)
        self.input_mask = tf.ones_like(self.input_seqs)
Example 9: ones_matrix_band_part
# Required module: import tensorflow [as alias]
# Or: from tensorflow import int64 [as alias]
def ones_matrix_band_part(rows, cols, num_lower, num_upper, out_shape=None):
    """Matrix band part of ones."""
    if all([isinstance(el, int) for el in [rows, cols, num_lower, num_upper]]):
        # Needed info is constant, so we construct in numpy
        if num_lower < 0:
            num_lower = rows - 1
        if num_upper < 0:
            num_upper = cols - 1
        lower_mask = np.tri(cols, rows, num_lower).T
        upper_mask = np.tri(rows, cols, num_upper)
        band = np.ones((rows, cols)) * lower_mask * upper_mask
        if out_shape:
            band = band.reshape(out_shape)
        band = tf.constant(band, tf.float32)
    else:
        band = tf.matrix_band_part(
            tf.ones([rows, cols]), tf.cast(num_lower, tf.int64),
            tf.cast(num_upper, tf.int64))
        if out_shape:
            band = tf.reshape(band, out_shape)
    return band
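A minimal sketch of one common use of this helper: with num_lower=-1 and num_upper=0 it produces a lower-triangular ("causal") mask. Assumes TF 1.x and the function defined above.

import numpy as np
import tensorflow as tf

mask = ones_matrix_band_part(4, 4, num_lower=-1, num_upper=0)

with tf.Session() as sess:
    print(sess.run(mask))
    # [[1. 0. 0. 0.]
    #  [1. 1. 0. 0.]
    #  [1. 1. 1. 0.]
    #  [1. 1. 1. 1.]]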
Example 10: add_task_id
# Required module: import tensorflow [as alias]
# Or: from tensorflow import int64 [as alias]
def add_task_id(self, task, example):
    """Convert example to code switching mode by adding a task id."""
    if hasattr(task, "class_labels"):
        # TODO(urvashik): handle the case where num_labels > 9
        example["targets"] = tf.cast(discretization.int_to_bit(
            example["targets"], 1, base=10) + 50, tf.int64)
        example["targets"] = tf.squeeze(example["targets"], axis=[-1])

    if task.has_inputs:
        inputs = example.pop("inputs")
        concat_list = [inputs, [task.task_id], example["targets"]]
    else:
        concat_list = [[task.task_id], example["targets"]]
    example["targets"] = tf.concat(concat_list, 0)
    return example
Example 11: extra_reading_spec
# Required module: import tensorflow [as alias]
# Or: from tensorflow import int64 [as alias]
def extra_reading_spec(self):
    """Additional data fields to store on disk and their decoders."""
    # TODO(piotrmilos): shouldn't done be included here?
    data_fields = {
        "frame_number": tf.FixedLenFeature([1], tf.int64),
        "action": tf.FixedLenFeature([1], tf.int64),
        "reward": tf.FixedLenFeature([1], tf.int64)
    }
    decoders = {
        "frame_number":
            tf.contrib.slim.tfexample_decoder.Tensor(tensor_key="frame_number"),
        "action":
            tf.contrib.slim.tfexample_decoder.Tensor(tensor_key="action"),
        "reward":
            tf.contrib.slim.tfexample_decoder.Tensor(tensor_key="reward"),
    }
    return data_fields, decoders
Example 12: decode_example
# Required module: import tensorflow [as alias]
# Or: from tensorflow import int64 [as alias]
def decode_example(self, serialized_example):
    """Return a dict of Tensors from a serialized tensorflow.Example."""
    data_fields, data_items_to_decoders = self.example_reading_spec()
    # Necessary to rejoin examples in the correct order with the Cloud ML Engine
    # batch prediction API.
    data_fields["batch_prediction_key"] = tf.FixedLenFeature([1], tf.int64, 0)
    if data_items_to_decoders is None:
        data_items_to_decoders = {
            field: tf.contrib.slim.tfexample_decoder.Tensor(field)
            for field in data_fields
        }

    decoder = tf.contrib.slim.tfexample_decoder.TFExampleDecoder(
        data_fields, data_items_to_decoders)
    decode_items = list(sorted(data_items_to_decoders))
    decoded = decoder.decode(serialized_example, items=decode_items)
    return dict(zip(decode_items, decoded))
Example 13: serving_input_fn
# Required module: import tensorflow [as alias]
# Or: from tensorflow import int64 [as alias]
def serving_input_fn(self, hparams):
    """Input fn for serving export, starting from serialized example."""
    mode = tf.estimator.ModeKeys.PREDICT
    serialized_example = tf.placeholder(
        dtype=tf.string, shape=[None], name="serialized_example")
    dataset = tf.data.Dataset.from_tensor_slices(serialized_example)
    dataset = dataset.map(self.decode_example)
    dataset = dataset.map(lambda ex: self.preprocess_example(ex, mode, hparams))
    dataset = dataset.map(self.maybe_reverse_and_copy)
    dataset = dataset.map(data_reader.cast_ints_to_int32)
    dataset = dataset.padded_batch(
        tf.shape(serialized_example, out_type=tf.int64)[0],
        dataset.output_shapes)
    dataset = dataset.map(standardize_shapes)
    features = tf.contrib.data.get_single_element(dataset)

    if self.has_inputs:
        # Targets are not fed at serving time when the problem has inputs.
        features.pop("targets", None)

    return tf.estimator.export.ServingInputReceiver(
        features=features, receiver_tensors=serialized_example)
Example 14: __init__
# Required module: import tensorflow [as alias]
# Or: from tensorflow import int64 [as alias]
def __init__(self, pc: _Network3D, config, centers, sess, freqs_resolution=1e9):
    """
    :param sess: Must be set at the latest before using get_pr or get_freqs
    """
    self.pc_class = pc.__class__
    self.config = config

    self.input_ctx_shape = self.pc_class.get_context_shape(config)
    self.input_ctx = tf.placeholder(tf.int64, self.input_ctx_shape)  # symbols!
    input_ctx_batched = tf.expand_dims(self.input_ctx, 0)  # add batch dimension, 1DHW
    input_ctx_batched = tf.expand_dims(input_ctx_batched, -1)  # add T dimension for 3d conv, now 1CHW1
    # Here, in contrast to pc.bitcost(...), q does not need to be padded, as it is part of some context.
    # Logits will be a 1111L vector, i.e., prediction of the next pixel
    q = tf.gather(centers, input_ctx_batched)
    logits = pc.logits(q, is_training=False)
    self.pr = tf.nn.softmax(logits)
    self.freqs = tf.squeeze(tf.cast(self.pr * freqs_resolution, tf.int64))

    self.sess = sess
    self._get_freqs = None
Example 15: parse_tfrecord_tf
# Required module: import tensorflow [as alias]
# Or: from tensorflow import int64 [as alias]
def parse_tfrecord_tf(record):
    # Each record stores the image shape as an int64 triple and the pixels as raw bytes.
    features = tf.parse_single_example(record, features={
        'shape': tf.FixedLenFeature([3], tf.int64),
        'data': tf.FixedLenFeature([], tf.string)})
    data = tf.decode_raw(features['data'], tf.uint8)
    return tf.reshape(data, features['shape'])
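A minimal sketch of wiring the parser above into a tf.data input pipeline, assuming TF 1.x; 'images.tfrecords' is a hypothetical file whose records contain the 'shape' and 'data' features parsed above.

import tensorflow as tf

dataset = tf.data.TFRecordDataset('images.tfrecords')  # hypothetical file
dataset = dataset.map(parse_tfrecord_tf)
image = dataset.make_one_shot_iterator().get_next()

with tf.Session() as sess:
    print(sess.run(image).shape)  # e.g. (height, width, channels)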