This article collects typical usage examples of the Python function tensorflow.to_int64. If you have been wondering what to_int64 does, how to use it, or where it shows up in real code, the hand-picked examples below should help. A total of 15 code examples of to_int64 are shown, ordered by popularity by default.
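Before the examples, a minimal sketch of the function itself: tf.to_int64(x) is the TensorFlow 1.x shorthand for tf.cast(x, tf.int64) (it was removed in TF 2.x in favor of the explicit cast). Casting from float truncates toward zero:

import tensorflow as tf  # TensorFlow 1.x assumed

x = tf.constant([1.7, -2.3], dtype=tf.float32)
y = tf.to_int64(x)  # equivalent to tf.cast(x, tf.int64)
with tf.Session() as sess:
    print(sess.run(y))  # [ 1 -2]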
Example 1: unpool_layer2x2
def unpool_layer2x2(self, x, raveled_argmax, out_shape):
    argmax = self.unravel_argmax(raveled_argmax, tf.to_int64(out_shape))
    output = tf.zeros([out_shape[1], out_shape[2], out_shape[3]])

    height = tf.shape(output)[0]
    width = tf.shape(output)[1]
    channels = tf.shape(output)[2]

    t1 = tf.to_int64(tf.range(channels))
    t1 = tf.tile(t1, [((width + 1) // 2) * ((height + 1) // 2)])
    t1 = tf.reshape(t1, [-1, channels])
    t1 = tf.transpose(t1, perm=[1, 0])
    t1 = tf.reshape(t1, [channels, (height + 1) // 2, (width + 1) // 2, 1])

    t2 = tf.squeeze(argmax)
    t2 = tf.pack((t2[0], t2[1]), axis=0)  # tf.pack was renamed tf.stack in TF 1.0
    t2 = tf.transpose(t2, perm=[3, 1, 2, 0])

    t = tf.concat(3, [t2, t1])  # legacy tf.concat(axis, values) argument order
    indices = tf.reshape(t, [((height + 1) // 2) * ((width + 1) // 2) * channels, 3])

    x1 = tf.squeeze(x)
    x1 = tf.reshape(x1, [-1, channels])
    x1 = tf.transpose(x1, perm=[1, 0])
    values = tf.reshape(x1, [-1])

    delta = tf.SparseTensor(indices, values, tf.to_int64(tf.shape(output)))
    return tf.expand_dims(tf.sparse_tensor_to_dense(tf.sparse_reorder(delta)), 0)
Example 2: unpool_layer2x2_batch
def unpool_layer2x2_batch(self, bottom, argmax):
    bottom_shape = tf.shape(bottom)
    top_shape = [bottom_shape[0], bottom_shape[1] * 2, bottom_shape[2] * 2, bottom_shape[3]]

    batch_size = top_shape[0]
    height = top_shape[1]
    width = top_shape[2]
    channels = top_shape[3]

    argmax_shape = tf.to_int64([batch_size, height, width, channels])
    argmax = self.unravel_argmax(argmax, argmax_shape)

    t1 = tf.to_int64(tf.range(channels))
    t1 = tf.tile(t1, [batch_size * (width // 2) * (height // 2)])
    t1 = tf.reshape(t1, [-1, channels])
    t1 = tf.transpose(t1, perm=[1, 0])
    t1 = tf.reshape(t1, [channels, batch_size, height // 2, width // 2, 1])
    t1 = tf.transpose(t1, perm=[1, 0, 2, 3, 4])

    t2 = tf.to_int64(tf.range(batch_size))
    t2 = tf.tile(t2, [channels * (width // 2) * (height // 2)])
    t2 = tf.reshape(t2, [-1, batch_size])
    t2 = tf.transpose(t2, perm=[1, 0])
    t2 = tf.reshape(t2, [batch_size, channels, height // 2, width // 2, 1])

    t3 = tf.transpose(argmax, perm=[1, 4, 2, 3, 0])

    t = tf.concat(4, [t2, t3, t1])
    indices = tf.reshape(t, [(height // 2) * (width // 2) * channels * batch_size, 4])

    x1 = tf.transpose(bottom, perm=[0, 3, 1, 2])
    values = tf.reshape(x1, [-1])
    return tf.scatter_nd(indices, values, tf.to_int64(top_shape))
Example 3: one_hot_matrix
def one_hot_matrix(tensor_in, num_classes, on_value=1.0, off_value=0.0):
    """Encodes indices from given tensor as one-hot tensor.

    TODO(ilblackdragon): Ideally implementation should be
    part of TensorFlow with Eigen-native operation.

    Args:
      tensor_in: Input tensor of shape [N1, N2].
      num_classes: Number of classes to expand index into.
      on_value: Tensor or float, value to fill-in given index.
      off_value: Tensor or float, value to fill-in everything else.

    Returns:
      Tensor of shape [N1, N2, num_classes] with 1.0 for each id in original
      tensor.
    """
    tensor_in = tf.convert_to_tensor(tensor_in)
    sparse_values = tf.to_int64(tf.reshape(tensor_in, [-1, 1]))
    size = tf.shape(sparse_values)[0]
    dims = tf.shape(tensor_in)
    indices = tf.to_int64(tf.reshape(tf.range(0, size), [-1, 1]))
    indices_values = tf.concat(1, [indices, sparse_values])
    outshape = tf.to_int64(expand_concat(0, [size, num_classes]))
    one_hot_vector = tf.sparse_to_dense(indices_values, outshape, on_value, off_value)
    ret = tf.reshape(one_hot_vector, tf.concat(0, [dims, [num_classes]]))
    ret.set_shape(tensor_in.get_shape().concatenate(num_classes))
    return ret
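For comparison, the built-in tf.one_hot produces the same [N1, N2, num_classes] encoding without the sparse round-trip; a minimal sketch assuming a small integer label tensor:

import tensorflow as tf

labels = tf.constant([[0, 2], [1, 1]])  # shape [N1, N2] = [2, 2]
onehot = tf.one_hot(labels, depth=3, on_value=1.0, off_value=0.0)  # shape [2, 2, 3]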
Example 4: _build_once
def _build_once(self, dataset, feature_transformer):
    with tf.device(self._local_device):
        tr_batch = dataset()
        te_batch = dataset()
        num_classes = tr_batch.label_onehot.shape.as_list()[1]
        all_batch = utils.structure_map_multi(lambda x: tf.concat(x, 0),
                                              [tr_batch, te_batch])
        features = feature_transformer(all_batch)
        trX, teX = utils.structure_map_split(lambda x: tf.split(x, 2, axis=0),
                                             features)

        trY = tf.to_int64(tr_batch.label)
        trY_onehot = tf.to_int32(tr_batch.label_onehot)
        teY = tf.to_int64(te_batch.label)
        teY_shape = teY.shape.as_list()

        def blackbox((trX, trY, teX, teY)):  # Python 2 tuple-parameter unpacking
            trY = tf.to_int32(tf.rint(trY))
            teY = tf.to_int32(tf.rint(teY))
            tf_fn = build_fit(
                self._local_device,
                self._get_model,
                num_classes=num_classes,
                probs=self.probs)
            if self.probs:
                trP, teP, teP_probs = tf_fn(trX, trY, teX)
            else:
                trP, teP = tf_fn(trX, trY, teX)

            teY.set_shape(teY_shape)
            if self.probs:
                onehot = tf.one_hot(teY, num_classes)
                crossent = -tf.reduce_sum(onehot * teP_probs, [1])
                return tf.reduce_mean(crossent)
            else:
                # use error rate as the loss if no surrogate is available.
                return 1 - tf.reduce_mean(
                    tf.to_float(tf.equal(teY, tf.to_int32(teP))))

        test_loss = blackbox((trX, tf.to_float(trY), teX, tf.to_float(teY)))

        stats = {}
        tf_fn = build_fit(
            self._local_device,
            self._get_model,
            num_classes=num_classes,
            probs=self.probs)
        if self.probs:
            trP, teP, teP_probs = tf_fn(trX, trY, teX)
        else:
            trP, teP = tf_fn(trX, trY, teX)
        stats["%s/accuracy_train" % self.name] = tf.reduce_mean(
            tf.to_float(tf.equal(tf.to_int32(trY), tf.to_int32(trP))))
        stats["%s/accuracy_test" % self.name] = tf.reduce_mean(
            tf.to_float(tf.equal(tf.to_int32(teY), tf.to_int32(teP))))
        stats["%s/test_loss" % self.name] = test_loss
        return test_loss, stats
Example 5: preprocess_example
def preprocess_example(self, example, mode, unused_hparams):
    # Just resize with area.
    if self._was_reversed:
        example["inputs"] = tf.to_int64(
            tf.image.resize_images(example["inputs"], [32, 32],
                                   tf.image.ResizeMethod.AREA))
    else:
        example = imagenet_preprocess_example(example, mode)
        example["inputs"] = tf.to_int64(
            tf.image.resize_images(example["inputs"], [32, 32]))
    return example
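The cast back to int64 here is not cosmetic: tf.image.resize_images returns a float32 tensor regardless of the input dtype, while pipelines that treat pixels as discrete symbols (as tensor2tensor's image problems do) expect integer values. A minimal sketch of the pattern, using a hypothetical image tensor:

import tensorflow as tf

image = tf.random_uniform([64, 64, 3], maxval=256, dtype=tf.int32)
resized = tf.image.resize_images(tf.to_float(image), [32, 32],
                                 tf.image.ResizeMethod.AREA)  # float32 output
pixels = tf.to_int64(resized)  # back to integer pixel values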
Example 6: build
def build(self):
    print('Building model')
    self.x_embeddings = tf.Variable(
        tf.random_normal([self.alphabet_src_size, self.embedd_dims],
                         stddev=0.1), name='x_embeddings')
    self.t_embeddings = tf.Variable(
        tf.random_normal([self.alphabet_tar_size, self.embedd_dims],
                         stddev=0.1), name='t_embeddings')

    X_embedded = tf.gather(self.x_embeddings, self.Xs, name='embed_X')
    t_embedded = tf.gather(self.t_embeddings, self.ts_go, name='embed_t')

    with tf.variable_scope('dense_out'):
        W_out = tf.get_variable('W_out', [self.word_encoder_units*2, self.alphabet_tar_size])
        b_out = tf.get_variable('b_out', [self.alphabet_tar_size])

    # forward encoding
    char_enc_state, char_enc_out = encoder(X_embedded, self.X_len, 'char_encoder', self.char_encoder_units)
    char2word = _grid_gather(char_enc_out, self.X_spaces)
    char2word.set_shape([None, None, self.char_encoder_units])
    word_enc_state, word_enc_out = encoder(char2word, self.X_spaces_len, 'word_encoder', self.word_encoder_units)

    # backward encoding words
    char2word = tf.reverse_sequence(char2word, tf.to_int64(self.X_spaces_len), 1)
    char2word.set_shape([None, None, self.char_encoder_units])
    word_enc_state_bck, word_enc_out_bck = encoder(char2word, self.X_spaces_len, 'word_encoder_backwards', self.word_encoder_units)
    word_enc_out_bck = tf.reverse_sequence(word_enc_out_bck, tf.to_int64(self.X_spaces_len), 1)

    word_enc_state = tf.concat(1, [word_enc_state, word_enc_state_bck])
    word_enc_out = tf.concat(2, [word_enc_out, word_enc_out_bck])

    # decoding
    dec_state, dec_out, valid_dec_out, valid_attention_tracker = (
        attention_decoder(word_enc_out, self.X_spaces_len, word_enc_state,
                          t_embedded, self.t_len, self.attn_units,
                          self.t_embeddings, W_out, b_out))

    out_tensor = tf.reshape(dec_out, [-1, self.word_encoder_units*2])
    out_tensor = tf.matmul(out_tensor, W_out) + b_out
    out_shape = tf.concat(0, [tf.expand_dims(tf.shape(self.X_len)[0], 0),
                              tf.expand_dims(tf.shape(t_embedded)[1], 0),
                              tf.expand_dims(tf.constant(self.alphabet_tar_size), 0)])
    self.valid_attention_tracker = valid_attention_tracker.pack()  # TensorArray.pack was renamed .stack in TF 1.0
    self.out_tensor = tf.reshape(out_tensor, out_shape)
    self.out_tensor.set_shape([None, None, self.alphabet_tar_size])

    valid_out_tensor = tf.reshape(valid_dec_out, [-1, self.word_encoder_units*2])
    valid_out_tensor = tf.matmul(valid_out_tensor, W_out) + b_out
    self.valid_out_tensor = tf.reshape(valid_out_tensor, out_shape)

    self.out = None

    # add TensorBoard summaries for all variables
    tf.contrib.layers.summarize_variables()
Example 7: preprocess_example
def preprocess_example(self, example, mode, _):
    # Just resize with area.
    if self._was_reversed:
        example["inputs"] = tf.to_int64(
            tf.image.resize_images(example["inputs"], self.rescale_size,
                                   tf.image.ResizeMethod.AREA))
    else:
        example = imagenet_preprocess_example(example, mode)
        example["inputs"] = tf.to_int64(
            tf.image.resize_images(example["inputs"], self.rescale_size))
    return example
Example 8: model_single
def model_single(input_dims, output_dims, scale_frac, scales, nkNN):
    """
    Forms the kNN model.

    Arguments:
        input_dims -- the dimension of the input data
        output_dims -- the number of classes
        scale_frac -- the fraction of events to use for finding widths
        scales -- list of distribution widths for each dimension
        nkNN -- the number of nearest neighbours to find

    Returns:
        A tensor with the number of neighbours in each class.
    """
    training = tf.placeholder(tf.float32, shape=(None, input_dims))
    one_hot = tf.placeholder(tf.float32, shape=(None, output_dims))
    test = tf.placeholder(tf.float32, shape=(1, input_dims))

    distances = metric_single(training, test, scale_frac, scales)

    remaining_training = tf.identity(training)
    remaining_one_hot = tf.identity(one_hot)
    remaining_distances = tf.identity(distances)
    for i in range(nkNN):
        # Gets the location of the training entry currently closest to the
        # test entry.
        min_slice = tf.to_int64(tf.concat(0, [tf.argmin(remaining_distances, 0), [-1]]))
        # Cuts the nearest neighbour out of the training set.
        start = tf.slice(remaining_training, tf.to_int64([0, 0]), min_slice)
        end = tf.slice(remaining_training, min_slice + [1, 1], [-1, -1])
        remaining_training = tf.concat(0, [start, end])
        # Cuts the nearest neighbour out of the distances set.
        start = tf.slice(remaining_distances, tf.to_int64([0, 0]), min_slice)
        end = tf.slice(remaining_distances, min_slice + [1, 1], [-1, -1])
        remaining_distances = tf.concat(0, [start, end])
        # Cuts the nearest neighbour's class out and records it.
        start = tf.slice(remaining_one_hot, tf.to_int64([0, 0]), min_slice)
        end = tf.slice(remaining_one_hot, min_slice + [1, 1], [-1, -1])
        class_slice = tf.slice(remaining_one_hot, min_slice + [0, 1], [1, -1])
        remaining_one_hot = tf.concat(0, [start, end])
        if i == 0:
            neighbour_one_hot = class_slice
        else:
            neighbour_one_hot = tf.concat(0, [neighbour_one_hot, class_slice])

    return training, one_hot, test, tf.reduce_sum(neighbour_one_hot, reduction_indices=0)
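A usage sketch of the builder above (a hypothetical two-feature, two-class setup; metric_single is assumed to be defined alongside model_single in the same source file):

training, one_hot, test, neighbour_counts = model_single(
    input_dims=2, output_dims=2, scale_frac=0.5, scales=[1.0, 1.0], nkNN=5)
# Feed a training set, its one-hot labels, and a single test point, then
# evaluate neighbour_counts to see how many of the 5 nearest neighbours
# fall into each class.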
Example 9: tensors_to_item
def tensors_to_item(self, keys_to_tensors):
    """Maps the given dictionary of tensors to a concatenated list of bboxes.

    Args:
      keys_to_tensors: a mapping of TF-Example keys to parsed tensors.

    Returns:
      [time, num_boxes, 4] tensor of bounding box coordinates, in order
      [y_min, x_min, y_max, x_max]. Whether the tensor is a SparseTensor
      or a dense Tensor is determined by the return_dense parameter. Empty
      positions in the sparse tensor are filled with -1.0 values.
    """
    sides = []
    for key in self._full_keys:
        value = keys_to_tensors[key]
        expanded_dims = tf.concat(
            [tf.to_int64(tf.shape(value)),
             tf.constant([1], dtype=tf.int64)], 0)
        side = tf.sparse_reshape(value, expanded_dims)
        sides.append(side)
    bounding_boxes = tf.sparse_concat(2, sides)
    if self._return_dense:
        bounding_boxes = tf.sparse_tensor_to_dense(
            bounding_boxes, default_value=self._default_value)
    return bounding_boxes
Example 10: preprocess_example
def preprocess_example(self, example, mode, unused_hparams):
    example["inputs"].set_shape([_CIFAR10_IMAGE_SIZE, _CIFAR10_IMAGE_SIZE, 3])
    example["inputs"] = tf.to_int64(example["inputs"])
    if mode == tf.estimator.ModeKeys.TRAIN:
        example["inputs"] = image_utils.random_shift(
            example["inputs"], wsr=0.1, hsr=0.1)
    return example
Example 11: mnist_training
def mnist_training(logits, labels, learning_rate):
    """Build the training graph.

    Args:
      logits: Logits tensor, float - [BATCH_SIZE, NUM_CLASSES].
      labels: Labels tensor, int32 - [BATCH_SIZE], with values in the
        range [0, NUM_CLASSES).
      learning_rate: The learning rate to use for gradient descent.

    Returns:
      train_op: The Op for training.
      loss: The Op for calculating loss.
    """
    # Create an operation that calculates loss.
    labels = tf.to_int64(labels)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits, labels, name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')

    # Create the gradient descent optimizer with the given learning rate.
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    # Create a variable to track the global step.
    global_step = tf.Variable(0, name='global_step', trainable=False)
    # Use the optimizer to apply the gradients that minimize the loss
    # (and also increment the global step counter) as a single training step.
    train_op = optimizer.minimize(loss, global_step=global_step)

    # Uncomment the following line to see what we have constructed.
    # tf.train.write_graph(tf.get_default_graph().as_graph_def(),
    #                      "/tmp", "train.pbtxt", as_text=True)

    return train_op, loss
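A minimal sketch of wiring this builder into a graph (placeholder shapes are illustrative; the positional logits/labels arguments above follow the old TF 0.x signature of sparse_softmax_cross_entropy_with_logits):

import tensorflow as tf

logits = tf.placeholder(tf.float32, [None, 10])  # [BATCH_SIZE, NUM_CLASSES]
labels = tf.placeholder(tf.int32, [None])        # [BATCH_SIZE]
train_op, loss = mnist_training(logits, labels, learning_rate=0.01)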
Example 12: _compute_sparse_average_correct
def _compute_sparse_average_correct(input_, labels, per_example_weights, topk=1):
    """Returns the numerator and denominator of classifier accuracy."""
    labels = tf.to_int64(labels)
    labels.get_shape().assert_is_compatible_with([input_.get_shape()[0], None])
    if topk == 1:
        predictions = tf.reshape(tf.argmax(input_, 1), [-1, 1])
        in_topk = tf.reduce_any(tf.equal(labels, predictions), reduction_indices=[1])
    else:
        # Use broadcasting to check if ANY of the predictions are in the top k.
        # TODO(eiderman): For a multi-label top k, what does accuracy mean?
        predictions = tf.reshape(tf.nn.top_k(input_, topk)[1], [-1, 1, topk])
        labels = tf.expand_dims(labels, [-1])
        in_topk = tf.reduce_any(
            tf.equal(tf.cast(labels, predictions.dtype), predictions),
            reduction_indices=[1, 2])
    correct_predictions = tf.to_float(in_topk)

    # If individual examples are weighted, then we want to normalize by that.
    if per_example_weights is not None:
        per_example_weights = _convert_and_assert_per_example_weights_compatible(
            input_, per_example_weights, dtype=None)
        float_weights = tf.to_float(per_example_weights)
        # TODO(eiderman): This should use an op that doesn't support broadcasting.
        correct_predictions *= float_weights
        num_examples = tf.reduce_sum(float_weights)
    else:
        # shape only holds ints, but we want to always return the same type
        # for num_examples to make everything compatible.
        num_examples = tf.to_float(tf.gather(tf.shape(input_), 0))
    return tf.reduce_sum(correct_predictions), num_examples
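Because the function returns a numerator and denominator rather than a ratio, the caller can aggregate across batches before dividing. A sketch with hypothetical tensors (note the labels must be rank 2, [batch, num_labels], to satisfy the shape assertion above):

import tensorflow as tf

logits = tf.constant([[0.1, 0.9], [0.8, 0.2]])  # [batch, num_classes]
labels = tf.constant([[1], [0]])                # [batch, 1]
correct, total = _compute_sparse_average_correct(
    logits, labels, per_example_weights=None, topk=1)
accuracy = correct / total  # scalar in [0, 1]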
Example 13: main
def main(args):
    # load the dataset
    dataset = mnist.get_split('test', FLAGS.data_dir)

    # load batch
    images, labels = load_batch(
        dataset,
        FLAGS.batch_size,
        is_training=False)

    # get the model prediction
    predictions = lenet(images)

    # convert prediction values for each class into single class prediction
    predictions = tf.to_int64(tf.argmax(predictions, 1))

    # streaming metrics to evaluate
    metrics_to_values, metrics_to_updates = metrics.aggregate_metric_map({
        'mse': metrics.streaming_mean_squared_error(predictions, labels),
        'accuracy': metrics.streaming_accuracy(predictions, labels),
    })

    # write the metrics as summaries
    for metric_name, metric_value in metrics_to_values.iteritems():  # Python 2; use .items() on Python 3
        tf.summary.scalar(metric_name, metric_value)

    # evaluate on the model saved at the checkpoint directory
    # evaluate every eval_interval_secs
    slim.evaluation.evaluation_loop(
        '',
        FLAGS.checkpoint_dir,
        FLAGS.log_dir,
        num_evals=FLAGS.num_evals,
        eval_op=metrics_to_updates.values(),
        eval_interval_secs=FLAGS.eval_interval_secs)
Example 14: generate_single_output
def generate_single_output(encoder_state, attention_states, sequence_length,
                           targets, num_classes, buckets,
                           use_mean_attention=False,
                           softmax_loss_function=None, per_example_loss=False,
                           name=None, use_attention=False):
    all_inputs = targets
    with tf.name_scope(name, "model_with_buckets", all_inputs):
        with tf.variable_scope(tf.get_variable_scope(), reuse=None):
            single_outputs = attention_single_output_decoder(encoder_state,
                                                             attention_states,
                                                             output_size=num_classes,
                                                             num_heads=1,
                                                             sequence_length=sequence_length,
                                                             use_attention=use_attention)
            _, _, _, bucket_outputs = single_outputs
            if softmax_loss_function is None:
                assert len(bucket_outputs) == len(targets) == 1
                # We need to make the target an int64 tensor and set its shape.
                bucket_target = tf.reshape(tf.to_int64(targets[0]), [-1])
                crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=bucket_outputs[0], labels=bucket_target)
            else:
                assert len(bucket_outputs) == len(targets) == 1
                crossent = softmax_loss_function(bucket_outputs[0], targets[0])
            batch_size = tf.shape(targets[0])[0]
            loss = tf.reduce_sum(crossent) / tf.cast(batch_size, tf.float32)
    return bucket_outputs, loss
Example 15: make_multiscale_dilated
def make_multiscale_dilated(image, resolutions, num_channels=3):
    """Returns list of scaled images, one for each resolution.

    Resizes by skipping every nth pixel.

    Args:
      image: Tensor of shape [height, height, num_channels] (a square image
        is assumed).
      resolutions: List of heights that the image's height is resized to. The
        function assumes VALID padding, so the original image's height must be
        divisible by each resolution's height to return exactly that size.
      num_channels: Number of channels in image.

    Returns:
      List of Tensors, one for each resolution, with shape
      [resolutions[i], resolutions[i], num_channels] if the resolutions evenly
      divide the original image's height; otherwise the resulting height and
      width are whatever the strided skips produce.
    """
    image_height = common_layers.shape_list(image)[0]
    scaled_images = []
    for height in resolutions:
        dilation_rate = image_height // height  # assuming height = width
        scaled_image = image[::dilation_rate, ::dilation_rate]
        scaled_image = tf.to_int64(scaled_image)
        scaled_image.set_shape([None, None, num_channels])
        scaled_images.append(scaled_image)
    return scaled_images
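A quick usage sketch (a hypothetical square 64x64 image downscaled to 32 and 16 by strided slicing; common_layers is assumed to come from tensor2tensor):

import tensorflow as tf

image = tf.zeros([64, 64, 3])  # square, and 64 is divisible by both resolutions
scales = make_multiscale_dilated(image, resolutions=[32, 16])
# scales[0]: [32, 32, 3] int64 tensor; scales[1]: [16, 16, 3] int64 tensor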