This article collects typical usage examples of the tensorflow.python.ops.math_ops.to_int64 function in Python. If you have been wondering what to_int64 does, how it is used, or what it looks like in real code, the hand-picked examples below should help.
The 15 code examples of to_int64 shown below are sorted by popularity by default.
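Before diving into the examples, here is a minimal usage sketch of my own (not taken from any example below), assuming a TF 1.x environment where tensorflow.python.ops.math_ops is importable: to_int64 casts a numeric tensor to tf.int64 and behaves like tf.cast(x, tf.int64) in the public API (newer releases deprecate it in favor of the explicit cast).

# Minimal sketch (assumed TF 1.x API): to_int64 is just a cast to tf.int64.
import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([1.7, 2.3, 3.9])        # float32 tensor
y = math_ops.to_int64(x)                # now dtype tf.int64 (values truncated)
z = tf.cast(x, tf.int64)                # equivalent public-API spelling

with tf.Session() as sess:
  print(sess.run(y))                    # [1 2 3]
  print(sess.run(z))                    # [1 2 3]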
Example 1: _get_eval_ops
def _get_eval_ops(self, features, targets, metrics):
  features, _, spec = data_ops.ParseDataTensorOrDict(features)
  labels = data_ops.ParseLabelTensorOrDict(targets)
  _assert_float32(features)
  _assert_float32(labels)
  graph_builder = self.graph_builder_class(
      self.params, device_assigner=self.device_assigner, training=False,
      **self.construction_args)
  probabilities = graph_builder.inference_graph(features, data_spec=spec)
  # One-hot the labels.
  if not self.params.regression:
    labels = math_ops.to_int64(array_ops.one_hot(math_ops.to_int64(
        array_ops.squeeze(labels)), self.params.num_classes, 1, 0))
  if metrics is None:
    metrics = {self.accuracy_metric:
               eval_metrics.get_metric(self.accuracy_metric)}
  result = {}
  for name, metric in six.iteritems(metrics):
    result[name] = metric(probabilities, labels)
  return result
Example 2: generate_sequence_output
def generate_sequence_output(num_encoder_symbols,
                             encoder_outputs, encoder_state, targets,
                             sequence_length, num_decoder_symbols, weights,
                             buckets, softmax_loss_function=None,
                             per_example_loss=False, name=None,
                             use_attention=False):
  if len(targets) < buckets[-1][1]:
    raise ValueError("Length of targets (%d) must be at least that of last "
                     "bucket (%d)." % (len(targets), buckets[-1][1]))
  all_inputs = encoder_outputs + targets + weights
  with ops.op_scope(all_inputs, name, "model_with_buckets"):
    with variable_scope.variable_scope("decoder_sequence_output", reuse=None):
      logits, attention_weights = attention_RNN(encoder_outputs,
                                                encoder_state,
                                                num_decoder_symbols,
                                                sequence_length,
                                                use_attention=use_attention)
      if per_example_loss:
        assert len(logits) == len(targets)
        # We need to make the target an int64 tensor and set its shape.
        bucket_target = [array_ops.reshape(math_ops.to_int64(x), [-1])
                         for x in targets]
        crossent = sequence_loss_by_example(
            logits, bucket_target, weights,
            softmax_loss_function=softmax_loss_function)
      else:
        assert len(logits) == len(targets)
        bucket_target = [array_ops.reshape(math_ops.to_int64(x), [-1])
                         for x in targets]
        crossent = sequence_loss(
            logits, bucket_target, weights,
            softmax_loss_function=softmax_loss_function)
  return logits, crossent
Example 3: to_weighted_sum
def to_weighted_sum(self,
                    input_tensor,
                    num_outputs=1,
                    weight_collections=None,
                    trainable=True):
  """Returns a Tensor as linear predictions and a list of created Variable."""
  dimension = self.source_column.dimension
  batch_size = array_ops.shape(input_tensor)[0]
  if dimension > 1:
    i1 = array_ops.reshape(array_ops.tile(array_ops.expand_dims(
        math_ops.range(0, batch_size), 1), [1, dimension]), [-1])
    i2 = array_ops.tile(math_ops.range(0, dimension), [batch_size])
    # Flatten the bucket indices and unique them across dimensions
    # E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
    # TODO(chapelle): move that logic to insert_transformed_feature to ensure
    #   unique buckets across dimensions after crossing.
    bucket_indices = array_ops.reshape(input_tensor, [-1]) + self.length * i2
  else:
    # Simpler indices when dimension=1
    i1 = math_ops.range(0, batch_size)
    i2 = array_ops.zeros([batch_size], dtype=dtypes.int32)
    bucket_indices = array_ops.reshape(input_tensor, [-1])
  indices = math_ops.to_int64(array_ops.transpose(array_ops.pack((i1, i2))))
  shape = math_ops.to_int64(array_ops.pack([batch_size, 1]))
  sparse_id_values = ops.SparseTensor(indices, bucket_indices, shape)
  vocab_size = self.length * self.source_column.dimension
  return _create_embedding_lookup(
      sparse_id_values, vocab_size, num_outputs,
      _add_variable_collection(weight_collections), 0., "sum",
      trainable, self.name + "_weights")
Example 4: _call_cell
def _call_cell(self,
               inputs,
               initial_cell_state=None,
               initial_output=None,
               dtype=None,
               sequence_length=None):
  """Run this LSTM on inputs, starting from the given state.

  Args:
    inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
    initial_cell_state: initial value for cell state, shape `[batch_size,
      self._num_units]`
    initial_output: initial value of cell output, shape `[batch_size,
      self._num_units]`
    dtype: The data type for the initial state and expected output.
    sequence_length: Specifies the length of each sequence in inputs. An
      `int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
      time_len)` or None.

  Returns:
    A pair containing:
    - Cell state (cs): A `3-D` tensor of shape `[time_len, batch_size,
      output_size]`
    - Output (h): A `3-D` tensor of shape `[time_len, batch_size,
      output_size]`
  """
  inputs_shape = inputs.get_shape().with_rank(3)
  time_len = inputs_shape[0].value
  if time_len is None:
    time_len = array_ops.shape(inputs)[0]
  if self._use_peephole:
    wci = self._w_i_diag
    wco = self._w_o_diag
    wcf = self._w_f_diag
  else:
    wci = wcf = wco = array_ops.zeros([self._num_units], dtype=dtype)
  if sequence_length is None:
    max_seq_len = math_ops.to_int64(time_len)
  else:
    max_seq_len = math_ops.to_int64(math_ops.reduce_max(sequence_length))
  _, cs, _, _, _, _, h = gen_lstm_ops.block_lstm(
      seq_len_max=max_seq_len,
      x=inputs,
      cs_prev=initial_cell_state,
      h_prev=initial_output,
      w=self._kernel,
      wci=wci,
      wcf=wcf,
      wco=wco,
      b=self._bias,
      forget_bias=self._forget_bias,
      cell_clip=self._cell_clip,
      use_peephole=self._use_peephole)
  return cs, h
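The `seq_len_max` computation above is a recurring pattern in these examples: `block_lstm` takes a scalar `int64` maximum length, so the per-example lengths are reduced and cast. A minimal sketch of just that step, with made-up lengths (my own illustration, not part of the example):

import tensorflow as tf
from tensorflow.python.ops import math_ops

# Hypothetical per-example lengths for a batch of three sequences.
sequence_length = tf.constant([3, 5, 2], dtype=tf.int32)
max_seq_len = math_ops.to_int64(tf.reduce_max(sequence_length))  # int64 scalar, value 5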
Example 5: sparse_feature_cross
def sparse_feature_cross(inputs, hashed_output=False, num_buckets=0,
                         name=None):
  """Crosses a list of Tensor or SparseTensor objects.

  See sparse_cross_op.cc for more details.

  Args:
    inputs: List of `SparseTensor` or `Tensor` to be crossed.
    hashed_output: If true, returns the hash of the cross instead of the
      string. This will allow us to avoid string manipulations.
    num_buckets: It is used if hashed_output is true.
      output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A `SparseTensor` with the crossed features.
    Return type is string if hashed_output=False, int64 otherwise.

  Raises:
    TypeError: If the inputs aren't either SparseTensor or Tensor.
  """
  if not isinstance(inputs, list):
    raise TypeError("Inputs must be a list")
  if not all(isinstance(i, ops.SparseTensor) or
             isinstance(i, ops.Tensor) for i in inputs):
    raise TypeError("All inputs must be SparseTensors")
  sparse_inputs = [i for i in inputs if isinstance(i, ops.SparseTensor)]
  dense_inputs = [i for i in inputs if not isinstance(i, ops.SparseTensor)]
  indices = [sp_input.indices for sp_input in sparse_inputs]
  values = [sp_input.values for sp_input in sparse_inputs]
  shapes = [sp_input.shape for sp_input in sparse_inputs]
  out_type = dtypes.int64 if hashed_output else dtypes.string
  internal_type = dtypes.string
  for i in range(len(values)):
    if values[i].dtype != dtypes.string:
      values[i] = math_ops.to_int64(values[i])
      internal_type = dtypes.int64
  for i in range(len(dense_inputs)):
    if dense_inputs[i].dtype != dtypes.string:
      dense_inputs[i] = math_ops.to_int64(dense_inputs[i])
      internal_type = dtypes.int64
  indices_out, values_out, shape_out = (
      _sparse_feature_cross_op.sparse_feature_cross(indices,
                                                    values,
                                                    shapes,
                                                    dense_inputs,
                                                    hashed_output,
                                                    num_buckets,
                                                    out_type=out_type,
                                                    internal_type=internal_type,
                                                    name=name))
  return ops.SparseTensor(indices_out, values_out, shape_out)
Example 6: testDistribution
def testDistribution(self, initial_known):
  classes = np.random.randint(5, size=(20000,))  # Uniformly sampled
  target_dist = [0.9, 0.05, 0.05, 0.0, 0.0]
  initial_dist = [0.2] * 5 if initial_known else None
  classes = math_ops.to_int64(classes)  # needed for Windows build.
  dataset = dataset_ops.Dataset.from_tensor_slices(classes).shuffle(
      200, seed=21).map(lambda c: (c, string_ops.as_string(c))).repeat()
  get_next = dataset.apply(
      resampling.rejection_resample(
          target_dist=target_dist,
          initial_dist=initial_dist,
          class_func=lambda c, _: c,
          seed=27)).make_one_shot_iterator().get_next()
  with self.cached_session() as sess:
    returned = []
    while len(returned) < 4000:
      returned.append(sess.run(get_next))
  returned_classes, returned_classes_and_data = zip(*returned)
  _, returned_data = zip(*returned_classes_and_data)
  self.assertAllEqual([compat.as_bytes(str(c))
                       for c in returned_classes], returned_data)
  total_returned = len(returned_classes)
  class_counts = np.array([
      len([True for v in returned_classes if v == c])
      for c in range(5)])
  returned_dist = class_counts / total_returned
  self.assertAllClose(target_dist, returned_dist, atol=1e-2)
Example 7: make_splits
def make_splits(self, stamp_token, next_stamp_token, class_id):
  """Create the best split using the accumulated stats and flush the state."""
  # Get the aggregated gradients and hessians per <partition_id, feature_id>
  # pair.
  num_minibatches, partition_ids, feature_ids, gradients, hessians = (
      self._stats_accumulator.flush(stamp_token, next_stamp_token))
  # For sum_reduction, we don't need to divide by number of minibatches.
  num_minibatches = control_flow_ops.cond(
      ops.convert_to_tensor(self._loss_uses_sum_reduction),
      lambda: math_ops.to_int64(1), lambda: num_minibatches)
  partition_ids, gains, split_infos = (
      split_handler_ops.build_categorical_equality_splits(
          num_minibatches=num_minibatches,
          partition_ids=partition_ids,
          feature_ids=feature_ids,
          gradients=gradients,
          hessians=hessians,
          class_id=class_id,
          feature_column_group_id=self._feature_column_group_id,
          l1_regularization=self._l1_regularization,
          l2_regularization=self._l2_regularization,
          tree_complexity_regularization=self._tree_complexity_regularization,
          min_node_weight=self._min_node_weight,
          bias_feature_id=_BIAS_FEATURE_ID,
          multiclass_strategy=self._multiclass_strategy,
          weak_learner_type=self._weak_learner_type))
  # There are no warm-up rounds needed in the equality column handler. So we
  # always return ready.
  are_splits_ready = constant_op.constant(True)
  return (are_splits_ready, partition_ids, gains, split_infos)
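The `lambda: math_ops.to_int64(1)` above exists because both branches of `control_flow_ops.cond` must return tensors of the same dtype, and the accumulator's `num_minibatches` is an `int64` count. A stripped-down sketch of that constraint, using hypothetical values:

import tensorflow as tf
from tensorflow.python.ops import math_ops

num_minibatches = tf.constant(8, dtype=tf.int64)    # hypothetical accumulator output
use_sum_reduction = tf.constant(True)
effective = tf.cond(use_sum_reduction,
                    lambda: math_ops.to_int64(1),    # cast so both branches are int64
                    lambda: num_minibatches)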
Example 8: _process_labels
def _process_labels(self, labels):
  if labels is None:
    raise ValueError(
        'You must provide a labels Tensor. Given: None. '
        'Suggested troubleshooting steps: Check that your data contain '
        'your label feature. Check that your input_fn properly parses and '
        'returns labels.')
  if isinstance(labels, sparse_tensor.SparseTensor):
    if labels.dtype == dtypes.string:
      label_ids_values = lookup_ops.index_table_from_tensor(
          vocabulary_list=tuple(self._label_vocabulary),
          name='class_id_lookup').lookup(labels.values)
      label_ids = sparse_tensor.SparseTensor(
          indices=labels.indices,
          values=label_ids_values,
          dense_shape=labels.dense_shape)
    else:
      label_ids = labels
    return math_ops.to_int64(
        sparse_ops.sparse_to_indicator(label_ids, self._n_classes))
  msg = ('labels shape must be [batch_size, {}]. '
         'Given: ').format(self._n_classes)
  labels_shape = array_ops.shape(labels)
  check_rank_op = control_flow_ops.Assert(
      math_ops.equal(array_ops.rank(labels), 2),
      data=[msg, labels_shape])
  check_label_dim = control_flow_ops.Assert(
      math_ops.equal(labels_shape[-1], self._n_classes),
      data=[msg, labels_shape])
  with ops.control_dependencies([check_rank_op, check_label_dim]):
    return array_ops.identity(labels)
Example 9: Forward
def Forward(*args):
  """Forward pass of the recurrent net."""
  theta, state0, inputs, max_input_length, extras = _Pack(args, forward_sig)
  slen_dim = _SeqLenDim(inputs)
  # Creates accumulators for state0 and extras.
  acc_state = _EmptyAcc(slen_dim, state0)
  acc_extras = _EmptyAcc(slen_dim, extras)
  t = slen_dim - max_input_length if self._aligned_end else 0
  dev_t = math_ops.to_int32(t) if use_tpu else math_ops.to_int64(t)
  run = functional_ops.For(
      start=t,
      limit=slen_dim if self._aligned_end else max_input_length,
      delta=1,
      inputs=[dev_t] + _Flatten(
          [theta, state0, inputs, acc_state, acc_extras]),
      body=ForwardLoopBody,
      rewrite_with_while=compiled)
  _, state1, _, acc_state, acc_extras = _Pack(
      run[1:],
      [self._theta, self._state, self._inputs, self._state, self._extras])
  return _Flatten([acc_state, state1, acc_extras])
Example 10: split
def split(self, value, lengths, name=None):
  """See TensorArray."""
  with ops.name_scope(name, "TensorArraySplit", [self._flow, value, lengths]):
    value = ops.convert_to_tensor(value, name="value")
    lengths_64 = math_ops.to_int64(lengths)
    if self._infer_shape and not context.executing_eagerly():
      clengths = tensor_util.constant_value(lengths_64)
      if value.shape.dims is not None:
        if clengths is not None and clengths.max() == clengths.min():
          self._merge_element_shape(
              tensor_shape.TensorShape([clengths[0]]).concatenate(
                  value.shape[1:]))
    flow_out = list_ops.tensor_list_split(
        tensor=value,
        lengths=lengths_64,
        element_shape=self._element_shape[0] if self._element_shape else None,
        name=name)
    ta = TensorArray(
        dtype=self._dtype,
        handle=self.handle,
        flow=flow_out,
        colocate_with_first_write_call=self._colocate_with_first_write_call)
    ta._infer_shape = self._infer_shape
    ta._element_shape = self._element_shape
    ta._colocate_with = self._colocate_with
    return ta
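The cast `lengths_64 = math_ops.to_int64(lengths)` also lets `tensor_util.constant_value` recover the split lengths when they are statically known, which is what drives the shape inference above. A small sketch of that check, with made-up lengths (my own illustration):

import tensorflow as tf
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import math_ops

lengths = tf.constant([2, 2, 2])                    # hypothetical equal split lengths
lengths_64 = math_ops.to_int64(lengths)
clengths = tensor_util.constant_value(lengths_64)   # numpy array([2, 2, 2]); None if not static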
Example 11: _call_cell
def _call_cell(self, inputs, initial_cell_state, initial_output, dtype,
               sequence_length):
  """Run this LSTM on inputs, starting from the given state.

  Args:
    inputs: `3-D` tensor with shape `[time_len x batch_size x input_size]`
    initial_cell_state: initial value for cell state, shape `[batch_size,
      self._num_units]`
    initial_output: initial value of cell output, shape `[batch_size,
      self._num_units]`
    dtype: The data type for the initial state and expected output.
    sequence_length: Specifies the length of each sequence in inputs. An int32
      or int64 vector (tensor) size [batch_size], values in [0, time_len) or
      None.

  Returns:
    A pair containing:
    - Cell state (cs): A `3-D` tensor of shape `[time_len x batch_size x
      output_size]`
    - Output (h): A `3-D` tensor of shape `[time_len x batch_size x
      output_size]`
  """
  inputs_shape = inputs.get_shape().with_rank(3)
  time_len = inputs_shape[0].value
  if time_len is None:
    time_len = array_ops.shape(inputs)[0]
  input_size = inputs_shape[2].value
  w = vs.get_variable(
      "W_0", [input_size + self._num_units, self._num_units * 4], dtype=dtype)
  b = vs.get_variable(
      "B", [w.get_shape().with_rank(2)[1]],
      initializer=init_ops.constant_initializer(0.0),
      dtype=dtype)
  if self._use_peephole:
    wci = vs.get_variable("W_I_diag", [self._num_units], dtype=dtype)
    wco = vs.get_variable("W_O_diag", [self._num_units], dtype=dtype)
    wcf = vs.get_variable("W_F_diag", [self._num_units], dtype=dtype)
  else:
    wci = wco = wcf = array_ops.zeros([self._num_units], dtype=dtype)
  if sequence_length is None:
    max_seq_len = time_len
  else:
    max_seq_len = math_ops.to_int64(math_ops.reduce_max(sequence_length))
  _, cs, _, _, _, _, h = _lstm_ops_so.block_lstm(
      seq_len_max=max_seq_len,
      x=inputs,
      cs_prev=initial_cell_state,
      h_prev=initial_output,
      w=w,
      wci=wci,
      wco=wco,
      wcf=wcf,
      b=b,
      forget_bias=self._forget_bias,
      cell_clip=self._cell_clip,
      use_peephole=self._use_peephole)
  return cs, h
Example 12: ndlstm_base_dynamic
def ndlstm_base_dynamic(inputs, noutput, scope=None, reverse=False):
  """Run an LSTM, either forward or backward.

  This is a 1D LSTM implementation using dynamic_rnn and
  the TensorFlow LSTM op.

  Args:
    inputs: input sequence (length, batch_size, ninput)
    noutput: depth of output
    scope: optional scope name
    reverse: run LSTM in reverse

  Returns:
    Output sequence (length, batch_size, noutput)
  """
  with variable_scope.variable_scope(scope, "SeqLstm", [inputs]):
    # TODO(tmb) make batch size, sequence_length dynamic
    # example: sequence_length = tf.shape(inputs)[0]
    _, batch_size, _ = _shape(inputs)
    lstm_cell = core_rnn_cell_impl.BasicLSTMCell(noutput, state_is_tuple=False)
    state = array_ops.zeros([batch_size, lstm_cell.state_size])
    sequence_length = int(inputs.get_shape()[0])
    sequence_lengths = math_ops.to_int64(
        array_ops.fill([batch_size], sequence_length))
    if reverse:
      inputs = array_ops.reverse_v2(inputs, [0])
    outputs, _ = rnn.dynamic_rnn(
        lstm_cell, inputs, sequence_lengths, state, time_major=True)
    if reverse:
      outputs = array_ops.reverse_v2(outputs, [0])
    return outputs
Example 13: _select_last_activations
def _select_last_activations(activations, sequence_lengths):
  """Selects the nth set of activations for each n in `sequence_length`.

  Returns a `Tensor` of shape `[batch_size, k]`. If `sequence_length` is not
  `None`, then `output[i, :] = activations[i, sequence_length[i] - 1, :]`. If
  `sequence_length` is `None`, then `output[i, :] = activations[i, -1, :]`.

  Args:
    activations: A `Tensor` with shape `[batch_size, padded_length, k]`.
    sequence_lengths: A `Tensor` with shape `[batch_size]` or `None`.

  Returns:
    A `Tensor` of shape `[batch_size, k]`.
  """
  with ops.name_scope(
      'select_last_activations', values=[activations, sequence_lengths]):
    activations_shape = array_ops.shape(activations)
    batch_size = activations_shape[0]
    padded_length = activations_shape[1]
    output_units = activations_shape[2]
    if sequence_lengths is None:
      sequence_lengths = padded_length
    start_indices = math_ops.to_int64(
        math_ops.range(batch_size) * padded_length)
    last_indices = start_indices + sequence_lengths - 1
    reshaped_activations = array_ops.reshape(
        activations, [batch_size * padded_length, output_units])
    last_activations = array_ops.gather(reshaped_activations, last_indices)
    last_activations.set_shape([activations.shape[0], activations.shape[2]])
    return last_activations
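The gather above works on a flattened `[batch_size * padded_length, k]` view, so the flat index of example i's last valid step is `i * padded_length + sequence_length[i] - 1`. A tiny numeric sketch of that indexing with made-up shapes (not part of the original example):

import tensorflow as tf
from tensorflow.python.ops import math_ops

# 2 examples, padded to 3 timesteps, 4 units each.
activations = tf.reshape(tf.range(2 * 3 * 4, dtype=tf.float32), [2, 3, 4])
sequence_lengths = tf.constant([2, 3], dtype=tf.int64)
start_indices = math_ops.to_int64(tf.range(2) * 3)    # [0, 3]
last_indices = start_indices + sequence_lengths - 1    # [1, 5]
flat = tf.reshape(activations, [2 * 3, 4])
last = tf.gather(flat, last_indices)                   # row for t=1 of example 0, t=2 of example 1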
Example 14: generate_single_output
def generate_single_output(encoder_state, attention_states, sequence_length,
                           targets, num_classes, buckets,
                           use_mean_attention=False,
                           softmax_loss_function=None, per_example_loss=False,
                           name=None, use_attention=False):
  all_inputs = targets
  with ops.op_scope(all_inputs, name, "model_with_buckets"):
    with variable_scope.variable_scope(variable_scope.get_variable_scope(),
                                       reuse=None):
      (bucket_attention_states, bucket_attn_weights, bucket_attns,
       bucket_outputs) = attention_single_output_decoder(
           encoder_state, attention_states, output_size=num_classes,
           num_heads=1,
           sequence_length=sequence_length,
           initial_state_attention=True,
           use_attention=use_attention)
      if softmax_loss_function is None:
        assert len(bucket_outputs) == len(targets) == 1
        # We need to make the target an int64 tensor and set its shape.
        bucket_target = array_ops.reshape(math_ops.to_int64(targets[0]), [-1])
        crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
            logits=bucket_outputs[0], labels=bucket_target)
      else:
        assert len(bucket_outputs) == len(targets) == 1
        crossent = softmax_loss_function(bucket_outputs[0], targets[0])
      batch_size = array_ops.shape(targets[0])[0]
      loss = tf.reduce_sum(crossent) / math_ops.cast(batch_size, dtypes.float32)
  return bucket_outputs, loss
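The target-casting line is worth isolating: `sparse_softmax_cross_entropy_with_logits` expects integer class ids, so the targets are flattened and cast to `int64` before the loss is computed. A self-contained sketch with hypothetical shapes and values:

import tensorflow as tf
from tensorflow.python.ops import math_ops

logits = tf.random_normal([4, 10])                      # hypothetical [batch, num_classes] logits
targets = tf.constant([[1], [3], [2], [7]])              # int32 class ids, shape [4, 1]
labels = tf.reshape(math_ops.to_int64(targets), [-1])    # int64 vector of length 4
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)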
Example 15: index_to_string_table_from_tensor
def index_to_string_table_from_tensor(vocabulary_list,
                                      default_value="UNK",
                                      name=None):
  """Returns a lookup table that maps a `Tensor` of indices into strings.

  This operation constructs a lookup table to map int64 indices into string
  values. The mapping is initialized from a string `mapping` 1-D `Tensor` where
  each element is a value and the corresponding index within the tensor is the
  key.

  Any input which does not have a corresponding index in 'mapping'
  (an out-of-vocabulary entry) is assigned the `default_value`.

  The underlying table must be initialized by calling
  `tf.tables_initializer().run()` or `table.init.run()` once.

  Elements in `mapping` cannot have duplicates, otherwise when executing the
  table initializer op, it will throw a `FailedPreconditionError`.

  Sample Usages:

  ```python
  vocabulary_list = tf.constant(["emerson", "lake", "palmer"])
  indices = tf.constant([1, 5], tf.int64)
  table = tf.contrib.lookup.index_to_string_table_from_tensor(
      vocabulary_list, default_value="UNKNOWN")
  values = table.lookup(indices)
  ...
  tf.tables_initializer().run()
  values.eval() ==> ["lake", "UNKNOWN"]
  ```

  Args:
    vocabulary_list: A 1-D string `Tensor` that specifies the strings to map
      from indices.
    default_value: The value to use for out-of-vocabulary indices.
    name: A name for this op (optional).

  Returns:
    The lookup table mapping an `int64` index `Tensor` to string values.

  Raises:
    ValueError: when `vocabulary_list` is not set.
  """
  if vocabulary_list is None:
    raise ValueError("vocabulary_list must be specified.")
  with ops.name_scope(name, "index_to_string") as scope:
    vocabulary_list = ops.convert_to_tensor(vocabulary_list, dtypes.string)
    num_elements = array_ops.size(vocabulary_list)
    keys = math_ops.to_int64(math_ops.range(num_elements))
    shared_name = ""
    init = KeyValueTensorInitializer(
        keys, vocabulary_list, dtypes.int64, dtypes.string, name="table_init")
    # TODO(yleon): Use a more efficient structure.
    return HashTable(init, default_value, shared_name=shared_name, name=scope)
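One detail worth calling out: the initializer above declares `int64` keys, while `math_ops.range` produces `int32`, hence the `to_int64` cast. A minimal sketch of just that step, with a hypothetical three-word vocabulary (my own illustration, separate from the example):

import tensorflow as tf
from tensorflow.python.ops import math_ops

vocabulary_list = tf.constant(["emerson", "lake", "palmer"])
num_elements = tf.size(vocabulary_list)              # int32 scalar, value 3
keys = math_ops.to_int64(tf.range(num_elements))     # int64 keys 0, 1, 2 matching the declared key dtype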