This article collects typical usage examples of the Python method tensorflow.contrib.lookup.index_table_from_tensor. If you are wondering how lookup.index_table_from_tensor works in practice or what it is used for, the curated code samples below may help. You can also explore further usage examples of the module it belongs to, tensorflow.contrib.lookup.
The sections below present 9 code examples of the lookup.index_table_from_tensor method, sorted by popularity by default.
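For orientation, here is a minimal, self-contained sketch of the method in TF 1.x (the vocabulary and inputs are made up): index_table_from_tensor builds a string-to-id lookup table from an in-memory tensor, and the table must be initialized via tf.tables_initializer() before any lookup runs.

import tensorflow as tf
from tensorflow.contrib import lookup

vocab = tf.constant(['emerson', 'lake', 'palmer'])  # toy vocabulary
table = lookup.index_table_from_tensor(mapping=vocab, num_oov_buckets=1)
ids = table.lookup(tf.constant(['lake', 'palmer', 'unknown']))

with tf.Session() as sess:
    sess.run(tf.tables_initializer())  # lookup tables need explicit initialization
    print(sess.run(ids))  # [1, 2, 3] -- 'unknown' falls into the single OOV bucket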
Example 1: __init__
# Required import: from tensorflow.contrib import lookup [as alias]
# Or: from tensorflow.contrib.lookup import index_table_from_tensor [as alias]
def __init__(self, speaker_list, filenames, num_epoch=1):
    with tf.device('/cpu'):
        with tf.name_scope('ByteInputPipeline'):
            self.speaker_list = tf.constant(speaker_list)
            self.table = index_table_from_tensor(mapping=self.speaker_list)
            print('{} files found'.format(len(filenames)))
            dataset = (
                tf.data.TFRecordDataset(filenames)
                .map(self._parse_function)
                .batch(1)
                .repeat(num_epoch)
            )
            self.iterator = dataset.make_initializable_iterator()
            self.x, self.y, self.f, self.w, self.t = self.iterator.get_next()
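A sketch of driving this pipeline (the class name ByteInputPipeline is inferred from the name_scope above; the speaker list and filenames are made up). Both the lookup table and the iterator need initialization before fetching batches:

pipeline = ByteInputPipeline(speaker_list=['spk0', 'spk1'],
                             filenames=['train.tfrecord'])
with tf.Session() as sess:
    sess.run([tf.tables_initializer(), pipeline.iterator.initializer])
    x, y, f, w, t = sess.run(
        [pipeline.x, pipeline.y, pipeline.f, pipeline.w, pipeline.t])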
Example 2: _string_to_int
# Required import: from tensorflow.contrib import lookup [as alias]
# Or: from tensorflow.contrib.lookup import index_table_from_tensor [as alias]
def _string_to_int(x, vocab):
  """Given a vocabulary and a string tensor `x`, maps `x` into an int tensor.

  Args:
    x: A `Column` representing a string value.
    vocab: list of strings.

  Returns:
    A `Column` where each string value is mapped to an integer representing
    its index in the vocab. Out of vocab values are mapped to len(vocab).
  """

  def _map_to_int(x):
    """Maps string tensor into indexes using vocab.

    Args:
      x: a Tensor/SparseTensor of string.

    Returns:
      a Tensor/SparseTensor of indexes (int) of the same shape as x.
    """
    table = lookup.index_table_from_tensor(
        vocab,
        default_value=len(vocab))
    return table.lookup(x)

  return _map_to_int(x)
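A minimal usage sketch with a toy vocabulary; note the deliberate choice of default_value=len(vocab), which gives all out-of-vocabulary strings a single dedicated index one past the end of the vocab:

vocab = ['a', 'b', 'c']
ids = _string_to_int(tf.constant(['b', 'z']), vocab)
with tf.Session() as sess:
    sess.run(tf.tables_initializer())
    print(sess.run(ids))  # [1, 3] -- 'z' is OOV and maps to len(vocab) == 3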
Example 3: build_tensorize_text_fn
# Required import: from tensorflow.contrib import lookup [as alias]
# Or: from tensorflow.contrib.lookup import index_table_from_tensor [as alias]
def build_tensorize_text_fn(embeddings):
  """Builds a function to turn text into word/char ids."""
  tbl = contrib_lookup.index_table_from_tensor(
      mapping=embeddings.get_vocab(), num_oov_buckets=1)

  def fn(string_tensor):
    """Builds the output tensor dictionary."""
    out = {}
    if FLAGS.lowercase:
      string_tensor = ops.lowercase_op(string_tensor)
    out["wids"] = tf.to_int32(tbl.lookup(string_tensor))
    out["cids"] = char_utils.batch_word_to_char_ids(string_tensor, 50)
    out["len"] = tf.shape(string_tensor)[-1]
    return out

  return fn
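A sketch of how the returned closure might be used (the embeddings object with a get_vocab() method is assumed from the code above, and the input tokens are made up):

tensorize = build_tensorize_text_fn(embeddings)
features = tensorize(tf.constant(['the', 'quick', 'fox']))
# features['wids'] -> int32 word ids, features['cids'] -> char ids (width 50),
# features['len'] -> number of tokens along the last axis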
Example 4: _do_transform
# Required import: from tensorflow.contrib import lookup [as alias]
# Or: from tensorflow.contrib.lookup import index_table_from_tensor [as alias]
def _do_transform(self, input_tensor):
  table = lookup.index_table_from_tensor(
      mapping=tuple(self.lookup_config.keys),
      default_value=self.lookup_config.default_value,
      dtype=self.dtype,
      name="lookup")
  return table.lookup(input_tensor)
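The standalone equivalent of this transform, with illustrative values standing in for self.lookup_config (a sketch, not the actual config class): default_value is the id returned for keys missing from mapping.

table = lookup.index_table_from_tensor(
    mapping=('red', 'green', 'blue'),  # stands in for lookup_config.keys
    default_value=-1,                  # stands in for lookup_config.default_value
    dtype=tf.string,
    name="lookup")
ids = table.lookup(tf.constant(['green', 'magenta']))  # [1, -1] after table init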
Example 5: _string_to_int
# Required import: from tensorflow.contrib import lookup [as alias]
# Or: from tensorflow.contrib.lookup import index_table_from_tensor [as alias]
def _string_to_int(x, vocab):
  """Given a vocabulary and a string tensor `x`, maps `x` into an int tensor.

  Args:
    x: A `Column` representing a string value.
    vocab: list of strings.

  Returns:
    A `Column` where each string value is mapped to an integer representing
    its index in the vocab. Out of vocab values are mapped to len(vocab).
  """

  def _map_to_int(x):
    """Maps string tensor into indexes using vocab.

    Args:
      x: a Tensor/SparseTensor of string.

    Returns:
      a Tensor/SparseTensor of indexes (int) of the same shape as x.
    """
    table = lookup.index_table_from_tensor(
        vocab,
        default_value=len(vocab))
    return table.lookup(x)

  return _map_to_int(x)


# TODO(brandondura): update this to not depend on tf layer's feature column
# 'sum' combiner in the future.
Example 6: string_to_int_mapper
# Required import: from tensorflow.contrib import lookup [as alias]
# Or: from tensorflow.contrib.lookup import index_table_from_tensor [as alias]
def string_to_int_mapper(keys_to_map, mapping, num_oov_buckets=1, suffix="_id"):
  """Creates a mapping function to convert strings to ints in a tf.data.Dataset.

  For `dataset` outputs of type `str`, uses the list of strings in the given
  input `mapping` to look up the strings using tf.contrib.lookup and convert
  them to same-shape tensors of type tf.int32.

  Example:
    vocab = ['the', 'fox', 'jumped']
    dataset = dataset.map(string_to_int_mapper(['words'], mapping=vocab))
    dataset['words_id']  # <-- 'the' is mapped to 0, 'fox' to 1, etc...

  Args:
    keys_to_map: List of strings that are keys for tf.string Tensors to lookup.
    mapping: List of strings (or string tensors) to do the lookup. If the
      mapping is already a lookup table, then we directly use it.
    num_oov_buckets: Number of OOV buckets to use (default = 1).
    suffix: String to append to the given keys to indicate the mapped Tensors.

  Returns:
    _mapper: A mapping function that can be used with the tf.data.Dataset API.
  """
  if isinstance(mapping, LookupInterface):
    table = mapping
  else:
    table = contrib_lookup.index_table_from_tensor(
        mapping=mapping, num_oov_buckets=num_oov_buckets)

  def _mapper(dataset):
    for k in keys_to_map:
      dataset[k + suffix] = tf.to_int32(table.lookup(dataset[k]))
    return dataset

  return _mapper
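Expanding the docstring's example into a runnable sketch (the feature names and data are made up); with num_oov_buckets=1, any word outside the vocabulary maps to len(vocab):

vocab = ['the', 'fox', 'jumped']
dataset = tf.data.Dataset.from_tensor_slices(
    {'words': [['the', 'fox'], ['fox', 'ran']]})
dataset = dataset.map(string_to_int_mapper(['words'], mapping=vocab))
# Each element now also carries a 'words_id' feature; 'ran' is OOV and maps to 3.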
Example 7: get_lookup_table
# Required import: from tensorflow.contrib import lookup [as alias]
# Or: from tensorflow.contrib.lookup import index_table_from_tensor [as alias]
def get_lookup_table(self):
  """Create the lookup table based on the vocabulary."""
  return contrib_lookup.index_table_from_tensor(
      mapping=self._idx2str, num_oov_buckets=self._num_oov_buckets)
Example 8: make_iterator_from_text_dataset
# Required import: from tensorflow.contrib import lookup [as alias]
# Or: from tensorflow.contrib.lookup import index_table_from_tensor [as alias]
def make_iterator_from_text_dataset(text_dataset, batch_size, unit_dict,
                                    shuffle=False, bucket_width=-1, num_cores=4):
    from tensorflow.contrib.lookup import index_table_from_tensor

    table = index_table_from_tensor(mapping=list(unit_dict.values()))

    dataset = tf.data.TextLineDataset(text_dataset)
    # Split each line into characters, pair with its length, then map to ids.
    dataset = dataset.map(lambda line: tf.string_split([line], delimiter='').values)
    dataset = dataset.map(lambda chars: (chars, tf.size(chars)))
    dataset = dataset.map(lambda chars, size: (table.lookup(chars), size))

    if shuffle:
        dataset = dataset.shuffle(buffer_size=1000000, reshuffle_each_iteration=True)

    def batching_fun(x):
        labels_shape = (tf.TensorShape([None]), tf.TensorShape([]))
        return x.padded_batch(
            batch_size=batch_size,
            padded_shapes=labels_shape)

    if bucket_width == -1:
        dataset = batching_fun(dataset)
    else:
        def key_func(labels, labels_len):
            bucket_id = labels_len // bucket_width
            return tf.cast(bucket_id, dtype=tf.int64)

        def reduce_func(unused_key, windowed_dataset):
            return batching_fun(windowed_dataset)

        dataset = dataset.apply(tf.data.experimental.group_by_window(
            key_func=key_func, reduce_func=reduce_func, window_size=batch_size))

    dataset = dataset.prefetch(128)
    iterator = dataset.make_initializable_iterator()
    labels, labels_len = iterator.get_next()

    return BatchedData(
        iterator_initializer=iterator.initializer,
        inputs_filenames=None,
        labels_filenames=None,
        inputs=None,
        payload=None,
        inputs_length=None,
        labels=labels,
        labels_length=labels_len)
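A hypothetical call (the unit_dict contents and the filename are made up; BatchedData comes from the surrounding project). As always with contrib lookup tables, run tf.tables_initializer() before the first lookup:

unit_dict = {0: 'a', 1: 'b', 2: 'c'}
batch = make_iterator_from_text_dataset(
    'transcripts.txt', batch_size=8, unit_dict=unit_dict,
    shuffle=True, bucket_width=10)
with tf.Session() as sess:
    sess.run([tf.tables_initializer(), batch.iterator_initializer])
    labels, labels_len = sess.run([batch.labels, batch.labels_length])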
Example 9: __init__
# Required import: from tensorflow.contrib import lookup [as alias]
# Or: from tensorflow.contrib.lookup import index_table_from_tensor [as alias]
def __init__(self,
             split_name,
             preprocess_fn,
             num_epochs,
             shuffle,
             random_seed=None,
             filter_filename=None,
             drop_remainder=True):
  """Initialize the dataset object.

  Args:
    split_name: A string split name, to load from the dataset.
    preprocess_fn: Preprocess a single example. The example is already
      parsed into a dictionary.
    num_epochs: An int, defaults to `None`. Number of epochs to cycle
      through the dataset before stopping. If set to `None` this will read
      samples indefinitely.
    shuffle: A boolean, defaults to `False`. Whether output data are
      shuffled.
    random_seed: Optional int. Random seed for shuffle operation.
    filter_filename: Optional filename to use for filtering.
    drop_remainder: If true, then the last incomplete batch is dropped.
  """
  # This is an instance-variable instead of a class-variable because it
  # depends on FLAGS, which is not parsed yet at class-parse-time.
  files = os.path.join(os.path.expanduser(FLAGS.dataset_dir),
                       'image_imagenet-%s@%i')
  filenames = {
      'train': generate_sharded_filenames(files % ('train', 1024))[:-40],
      'val': generate_sharded_filenames(files % ('train', 1024))[-40:],
      'trainval': generate_sharded_filenames(files % ('train', 1024)),
      'test': generate_sharded_filenames(files % ('dev', 128))
  }
  super(DatasetImagenet, self).__init__(
      filenames=filenames[split_name],
      reader=tf.data.TFRecordDataset,
      num_epochs=num_epochs,
      shuffle=shuffle,
      random_seed=random_seed,
      filter_fn=self.get_filter() if filter_filename is not None else None,
      drop_remainder=drop_remainder)
  self.split_name = split_name
  self.preprocess_fn = preprocess_fn
  self.filename_list = None
  if filter_filename is not None:
    with tf.gfile.Open(filter_filename, 'r') as f:
      filename_list = json.load(f)
    filename_list = tf.constant(filename_list['values'])
    filename_list = index_table_from_tensor(
        mapping=filename_list, num_oov_buckets=0, default_value=-1)
    self.filename_list = filename_list
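With num_oov_buckets=0 and default_value=-1, the table above acts as a membership test: filenames in the whitelist get a non-negative id, everything else gets -1. A sketch of a filter built on top of it (the get_filter method is not shown in this example, and the 'filename' feature key is an assumption):

def get_filter(self):
    def filter_fn(example):
        # Keep the example only if its filename appears in the whitelist table.
        return tf.greater_equal(self.filename_list.lookup(example['filename']), 0)
    return filter_fn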