This page collects typical usage examples of the Python method tensorflow.python.ops.lookup_ops.index_to_string_table_from_file. If you are unsure what index_to_string_table_from_file does or how to call it, the curated code samples below should help. You can also explore the containing module, tensorflow.python.ops.lookup_ops, for related functionality.
The following shows 14 code examples of lookup_ops.index_to_string_table_from_file, sorted by popularity by default.
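Before the project-specific examples, here is a minimal, self-contained sketch of the core pattern they all share. It assumes TensorFlow 1.x graph mode and a hypothetical vocab.txt with one token per line: index_to_string_table_from_file builds a lookup table that maps int64 indices back to those tokens, falls back to default_value for out-of-range indices, and must be initialized with tf.tables_initializer() before use.

import tensorflow as tf
from tensorflow.python.ops import lookup_ops

# Hypothetical vocabulary file: one token per line; the line number is the index.
with open("vocab.txt", "w") as f:
    f.write("<unk>\n<s>\n</s>\nhello\nworld\n")

table = lookup_ops.index_to_string_table_from_file(
    "vocab.txt", default_value="<unk>")
ids = tf.constant([3, 4, 99], dtype=tf.int64)  # 99 is out of the vocabulary range.
words = table.lookup(ids)

with tf.Session() as sess:
    sess.run(tf.tables_initializer())  # lookup tables must be initialized first.
    print(sess.run(words))             # [b'hello' b'world' b'<unk>']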
Example 1: create_infer_model
# Required import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_to_string_table_from_file [as alias]
def create_infer_model(model_creator, hparams, scope=None, single_cell_fn=None):
  """Create inference model."""
  graph = tf.Graph()
  tgt_vocab_file = hparams.tgt_vocab_file
  with graph.as_default():
    tgt_vocab_table = vocab_utils.create_tgt_vocab_table(tgt_vocab_file)
    reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_file(
        tgt_vocab_file, default_value=vocab_utils.UNK)
    src_placeholder = tf.placeholder(shape=[None], dtype=tf.string)
    src_dataset = tf.contrib.data.Dataset.from_tensor_slices(src_placeholder)
    iterator = iterator_utils.get_infer_iterator(
        src_dataset, source_reverse=hparams.source_reverse,
        src_max_len=hparams.src_max_len_infer)
    model = model_creator(
        hparams, iterator=iterator, mode=tf.contrib.learn.ModeKeys.INFER,
        target_vocab_table=tgt_vocab_table,
        reverse_target_vocab_table=reverse_tgt_vocab_table,
        scope=scope, single_cell_fn=single_cell_fn)
  return InferModel(graph=graph, model=model, src_placeholder=src_placeholder,
                    iterator=iterator)
Example 2: _convert_ids_to_strings
# Required import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_to_string_table_from_file [as alias]
def _convert_ids_to_strings(tgt_vocab_file, ids):
  """Convert prediction ids to words."""
  with tf.Session() as sess:
    reverse_target_vocab_table = lookup_ops.index_to_string_table_from_file(
        tgt_vocab_file, default_value=vocab_utils.UNK)
    sess.run(tf.tables_initializer())
    translations = sess.run(
        reverse_target_vocab_table.lookup(
            tf.to_int64(tf.convert_to_tensor(np.asarray(ids)))))
  return translations
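A hedged usage note: the file path and id values below are illustrative, but this is how the helper would typically be called with batched prediction ids; it returns a numpy array of byte strings with the same shape as the input.

# Illustrative call; "vocab.tgt" and the id values are made up.
translations = _convert_ids_to_strings("vocab.tgt", [[4, 17, 2], [9, 5, 2]])
# translations has shape (2, 3) and contains byte strings such as b"hello".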
Example 3: build_graph_dist_strategy
# Required import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_to_string_table_from_file [as alias]
def build_graph_dist_strategy(self, features, labels, mode, params):
  """Model function."""
  del labels, params
  misc_utils.print_out("Running dist_strategy mode_fn")

  hparams = self.hparams

  # Create a GNMT model for training.
  # assert (hparams.encoder_type == "gnmt" or
  #         hparams.attention_architecture in ["gnmt", "gnmt_v2"])
  with mixed_precision_scope():
    model = gnmt_model.GNMTModel(hparams, mode=mode, features=features)
    if mode == tf.contrib.learn.ModeKeys.INFER:
      sample_ids = model.sample_id
      reverse_target_vocab_table = lookup_ops.index_to_string_table_from_file(
          hparams.tgt_vocab_file, default_value=vocab_utils.UNK)
      sample_words = reverse_target_vocab_table.lookup(
          tf.to_int64(sample_ids))
      # make sure outputs is of shape [batch_size, time] or [beam_width,
      # batch_size, time] when using beam search.
      if hparams.time_major:
        sample_words = tf.transpose(sample_words)
      elif sample_words.shape.ndims == 3:
        # beam search output in [batch_size, time, beam_width] shape.
        sample_words = tf.transpose(sample_words, [2, 0, 1])
      predictions = {"predictions": sample_words}
      # return loss, vars, grads, predictions, train_op, scaffold
      return None, None, None, predictions, None, None
    elif mode == tf.contrib.learn.ModeKeys.TRAIN:
      loss = model.train_loss
      train_op = model.update
      return loss, model.params, model.grads, None, train_op, None
    else:
      raise ValueError("Unknown mode in model_fn: %s" % mode)
Example 4: create_infer_model
# Required import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_to_string_table_from_file [as alias]
def create_infer_model(model_creator, hparams, scope=None, extra_args=None):
  """Create inference model."""
  graph = tf.Graph()
  src_vocab_file = hparams.src_vocab_file
  tgt_vocab_file = hparams.tgt_vocab_file

  with graph.as_default(), tf.container(scope or "infer"):
    src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
        src_vocab_file, tgt_vocab_file, hparams.share_vocab)
    reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_file(
        tgt_vocab_file, default_value=vocab_utils.UNK)
    src_placeholder = tf.placeholder(shape=[None], dtype=tf.string)
    batch_size_placeholder = tf.placeholder(shape=[], dtype=tf.int64)

    src_dataset = tf.contrib.data.Dataset.from_tensor_slices(
        src_placeholder)
    iterator = iterator_utils.get_infer_iterator(
        src_dataset,
        src_vocab_table,
        batch_size=batch_size_placeholder,
        eos=hparams.eos,
        source_reverse=hparams.source_reverse,
        src_max_len=hparams.src_max_len_infer)
    model = model_creator(
        hparams,
        iterator=iterator,
        mode=tf.contrib.learn.ModeKeys.INFER,
        source_vocab_table=src_vocab_table,
        target_vocab_table=tgt_vocab_table,
        reverse_target_vocab_table=reverse_tgt_vocab_table,
        scope=scope,
        extra_args=extra_args)
  return InferModel(
      graph=graph,
      model=model,
      src_placeholder=src_placeholder,
      batch_size_placeholder=batch_size_placeholder,
      iterator=iterator)
Example 5: create_infer_model
# Required import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_to_string_table_from_file [as alias]
def create_infer_model(model_creator, hparams, scope=None, extra_args=None):
  """Create inference model."""
  graph = tf.Graph()
  src_vocab_file = hparams.src_vocab_file
  tgt_vocab_file = hparams.tgt_vocab_file

  with graph.as_default(), tf.container(scope or "infer"):
    src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
        src_vocab_file, tgt_vocab_file, hparams.share_vocab)
    reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_file(
        tgt_vocab_file, default_value=vocab_utils.UNK)
    src_placeholder = tf.placeholder(shape=[None], dtype=tf.string)
    batch_size_placeholder = tf.placeholder(shape=[], dtype=tf.int64)

    src_dataset = tf.data.Dataset.from_tensor_slices(
        src_placeholder)
    iterator = iterator_utils.get_infer_iterator(
        src_dataset,
        src_vocab_table,
        batch_size=batch_size_placeholder,
        eos=hparams.eos,
        src_max_len=hparams.src_max_len_infer)
    model = model_creator(
        hparams,
        iterator=iterator,
        mode=tf.contrib.learn.ModeKeys.INFER,
        source_vocab_table=src_vocab_table,
        target_vocab_table=tgt_vocab_table,
        reverse_target_vocab_table=reverse_tgt_vocab_table,
        scope=scope,
        extra_args=extra_args)
  return InferModel(
      graph=graph,
      model=model,
      src_placeholder=src_placeholder,
      batch_size_placeholder=batch_size_placeholder,
      iterator=iterator)
Example 6: __init__
# Required import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_to_string_table_from_file [as alias]
def __init__(self, corpus_dir, hparams=None, training=True, buffer_size=8192):
    """
    Args:
        corpus_dir: Name of the folder storing corpus files for training.
        hparams: The object containing the loaded hyper parameters. If None, it will be
            initialized here.
        training: Whether to use this object for training.
        buffer_size: The buffer size used for mapping process during data processing.
    """
    if hparams is None:
        self.hparams = HParams(corpus_dir).hparams
    else:
        self.hparams = hparams

    self.src_max_len = self.hparams.src_max_len
    self.tgt_max_len = self.hparams.tgt_max_len

    self.training = training
    self.text_set = None
    self.id_set = None

    vocab_file = os.path.join(corpus_dir, VOCAB_FILE)
    self.vocab_size, _ = check_vocab(vocab_file)
    self.vocab_table = lookup_ops.index_table_from_file(vocab_file,
                                                        default_value=self.hparams.unk_id)
    # print("vocab_size = {}".format(self.vocab_size))

    if training:
        self.case_table = prepare_case_table()
        self.reverse_vocab_table = None
        self._load_corpus(corpus_dir)
        self._convert_to_tokens(buffer_size)
    else:
        self.case_table = None
        self.reverse_vocab_table = \
            lookup_ops.index_to_string_table_from_file(vocab_file,
                                                       default_value=self.hparams.unk_token)
Example 7: create_infer_model
# Required import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_to_string_table_from_file [as alias]
def create_infer_model(model_creator, hparams, scope=None, extra_args=None):
  """Create inference model."""
  graph = tf.Graph()
  src_vocab_file = hparams.src_vocab_file
  tgt_vocab_file = hparams.tgt_vocab_file

  with graph.as_default(), tf.container(scope or "infer"):
    src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
        src_vocab_file, tgt_vocab_file, hparams.share_vocab)
    reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_file(
        tgt_vocab_file, default_value=vocab_utils.UNK)
    src_placeholder = tf.placeholder(shape=[None], dtype=tf.string)
    batch_size_placeholder = tf.placeholder(shape=[], dtype=tf.int64)

    src_dataset = tf.data.Dataset.from_tensor_slices(
        src_placeholder)
    iterator = iterator_utils.get_infer_iterator(
        src_dataset,
        src_vocab_table,
        batch_size=batch_size_placeholder,
        eos=hparams.eos,
        src_max_len=hparams.src_max_len_infer,
        use_char_encode=hparams.use_char_encode)
    model = model_creator(
        hparams,
        iterator=iterator,
        mode=tf.contrib.learn.ModeKeys.INFER,
        source_vocab_table=src_vocab_table,
        target_vocab_table=tgt_vocab_table,
        reverse_target_vocab_table=reverse_tgt_vocab_table,
        scope=scope,
        extra_args=extra_args)
  return InferModel(
      graph=graph,
      model=model,
      src_placeholder=src_placeholder,
      batch_size_placeholder=batch_size_placeholder,
      iterator=iterator)
Example 8: create_rev_vocab_table
# Required import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_to_string_table_from_file [as alias]
def create_rev_vocab_table(vocab_file):
  return lookup_ops.index_to_string_table_from_file(vocab_file, default_value=UNK)
Example 9: test_module_export_vocab_on_custom_fs
# Required import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_to_string_table_from_file [as alias]
def test_module_export_vocab_on_custom_fs(self):
    root_dir = "file://%s" % self.get_temp_dir()
    export_dir = "%s_%s" % (root_dir, "export")
    tf_v1.gfile.MakeDirs(export_dir)
    # Create a module with a vocab file located on a custom filesystem.
    vocab_dir = os.path.join(root_dir, "vocab_location")
    tf_v1.gfile.MakeDirs(vocab_dir)
    vocab_filename = os.path.join(vocab_dir, "tokens.txt")
    tf_utils.atomic_write_string_to_file(vocab_filename, "one", False)

    def create_assets_module_fn():

        def assets_module_fn():
            indices = tf_v1.placeholder(dtype=tf.int64, name="indices")
            table = index_to_string_table_from_file(
                vocabulary_file=vocab_filename, default_value="UNKNOWN")
            outputs = table.lookup(indices)
            hub.add_signature(inputs=indices, outputs=outputs)

        return assets_module_fn

    with tf.Graph().as_default():
        assets_module_fn = create_assets_module_fn()
        spec = hub.create_module_spec(assets_module_fn)
        embedding_module = hub.Module(spec)
        with tf_v1.Session() as sess:
            sess.run(tf_v1.tables_initializer())
            embedding_module.export(export_dir, sess)

    module_files = tf_v1.gfile.ListDirectory(export_dir)
    self.assertListEqual(
        ["assets", "saved_model.pb", "tfhub_module.pb", "variables"],
        sorted(module_files))
    module_files = tf_v1.gfile.ListDirectory(os.path.join(export_dir, "assets"))
    self.assertListEqual(["tokens.txt"], module_files)
Example 10: do_table_lookup
# Required import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_to_string_table_from_file [as alias]
def do_table_lookup(indices, vocabulary_file):
    table = index_to_string_table_from_file(
        vocabulary_file=vocabulary_file,
        default_value="UNKNOWN")
    return table.lookup(indices)
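A possible caller, sketched under the same assumptions as the surrounding test code (the tf_v1 alias for tf.compat.v1; the vocabulary path and indices are illustrative): the lookup op is built once in the graph, tables are initialized, and int64 indices are fed at run time.

# Illustrative usage; the vocabulary path and index values are made up.
indices = tf_v1.placeholder(dtype=tf.int64, shape=[None], name="indices")
words = do_table_lookup(indices, "/tmp/tokens.txt")
with tf_v1.Session() as sess:
    sess.run(tf_v1.tables_initializer())
    print(sess.run(words, feed_dict={indices: [0, 1, 42]}))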
Example 11: create_eval_model
# Required import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_to_string_table_from_file [as alias]
def create_eval_model(model_creator, hparams, scope=None, extra_args=None):
  """Create train graph, model, src/tgt file holders, and iterator."""
  src_vocab_file = hparams.src_vocab_file
  tgt_vocab_file = hparams.tgt_vocab_file
  graph = tf.Graph()

  with graph.as_default(), tf.container(scope or "eval"):
    src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
        src_vocab_file, tgt_vocab_file, hparams.share_vocab)
    reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_file(
        tgt_vocab_file, default_value=vocab_utils.UNK)
    src_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)
    tgt_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)
    src_dataset = tf.data.TextLineDataset(src_file_placeholder)
    tgt_dataset = tf.data.TextLineDataset(tgt_file_placeholder)
    iterator = iterator_utils.get_iterator(
        src_dataset,
        tgt_dataset,
        src_vocab_table,
        tgt_vocab_table,
        hparams.batch_size,
        sos=hparams.sos,
        eos=hparams.eos,
        random_seed=hparams.random_seed,
        num_buckets=hparams.num_buckets,
        src_max_len=hparams.src_max_len_infer,
        tgt_max_len=hparams.tgt_max_len_infer,
        use_char_encode=hparams.use_char_encode)
    model = model_creator(
        hparams,
        iterator=iterator,
        mode=tf.contrib.learn.ModeKeys.EVAL,
        source_vocab_table=src_vocab_table,
        target_vocab_table=tgt_vocab_table,
        reverse_target_vocab_table=reverse_tgt_vocab_table,
        scope=scope,
        extra_args=extra_args)
  return EvalModel(
      graph=graph,
      model=model,
      src_file_placeholder=src_file_placeholder,
      tgt_file_placeholder=tgt_file_placeholder,
      iterator=iterator)
Example 12: create_infer_model
# Required import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_to_string_table_from_file [as alias]
def create_infer_model(model_creator, hparams, scope=None):
  graph = tf.Graph()
  src_vocab_file = hparams.src_vocab_file
  tgt_vocab_file = hparams.tgt_vocab_file

  with graph.as_default(), tf.container(scope or "infer"):
    src_vocab_table, tgt_vocab_table = create_vocab_tables(
        src_vocab_file, tgt_vocab_file, hparams.share_vocab,
        hparams.max_vocab_size)
    reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_file(
        tgt_vocab_file, default_value=UNK)
    src_placeholder = tf.placeholder(shape=[None], dtype=tf.string)
    tgt_placeholder = tf.placeholder(shape=[None], dtype=tf.string)
    batch_size_placeholder = tf.placeholder(shape=[], dtype=tf.int64)

    src_dataset = tf.data.Dataset.from_tensor_slices(src_placeholder)
    tgt_dataset = tf.data.Dataset.from_tensor_slices(tgt_placeholder)
    iterator = get_infer_iterator_exp(
        src_dataset,
        tgt_dataset,
        src_vocab_table,
        tgt_vocab_table,
        hparams.infer_batch_size,
        sos=hparams.sos,
        eos=hparams.eos,
        src_max_len=hparams.src_max_len_infer,
        tgt_max_len=hparams.tgt_max_len_infer)
    model = model_creator(
        hparams,
        iterator=iterator,
        mode=tf.contrib.learn.ModeKeys.INFER,
        reverse_target_vocab_table=reverse_tgt_vocab_table,
        scope=scope)
  return InferModel(
      graph=graph,
      model=model,
      src_placeholder=src_placeholder,
      tgt_placeholder=tgt_placeholder,
      batch_size_placeholder=batch_size_placeholder,
      iterator=iterator)
Example 13: create_eval_model
# Required import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_to_string_table_from_file [as alias]
def create_eval_model(model_creator, hparams, scope=None, extra_args=None):
    """Create train graph, model, src/tgt file holders, and iterator."""
    src_vocab_file = hparams.src_vocab_file
    tgt_vocab_file = hparams.tgt_vocab_file
    graph = tf.Graph()

    with graph.as_default(), tf.container(scope or "eval"):
        src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
            src_vocab_file, tgt_vocab_file, hparams.share_vocab
        )
        reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_file(
            tgt_vocab_file, default_value=vocab_utils.UNK
        )
        src_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)
        tgt_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)
        src_dataset = tf.data.TextLineDataset(src_file_placeholder)
        tgt_dataset = tf.data.TextLineDataset(tgt_file_placeholder)
        iterator = iterator_utils.get_iterator(
            src_dataset,
            tgt_dataset,
            src_vocab_table,
            tgt_vocab_table,
            hparams.batch_size,
            sos=hparams.sos,
            eos=hparams.eos,
            random_seed=hparams.random_seed,
            num_buckets=hparams.num_buckets,
            src_max_len=hparams.src_max_len_infer,
            tgt_max_len=hparams.tgt_max_len_infer,
            use_char_encode=hparams.use_char_encode,
        )
        model = model_creator(
            hparams,
            iterator=iterator,
            mode=tf.contrib.learn.ModeKeys.EVAL,
            source_vocab_table=src_vocab_table,
            target_vocab_table=tgt_vocab_table,
            reverse_target_vocab_table=reverse_tgt_vocab_table,
            scope=scope,
            extra_args=extra_args,
        )
    return EvalModel(
        graph=graph,
        model=model,
        src_file_placeholder=src_file_placeholder,
        tgt_file_placeholder=tgt_file_placeholder,
        iterator=iterator,
    )
Example 14: create_infer_model
# Required import: from tensorflow.python.ops import lookup_ops [as alias]
# Or: from tensorflow.python.ops.lookup_ops import index_to_string_table_from_file [as alias]
def create_infer_model(model_creator, hparams, scope=None, extra_args=None):
    """Create inference model."""
    graph = tf.Graph()
    src_vocab_file = hparams.src_vocab_file
    tgt_vocab_file = hparams.tgt_vocab_file

    with graph.as_default(), tf.container(scope or "infer"):
        src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
            src_vocab_file, tgt_vocab_file, hparams.share_vocab
        )
        reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_file(
            tgt_vocab_file, default_value=vocab_utils.UNK
        )
        src_placeholder = tf.placeholder(shape=[None], dtype=tf.string)
        batch_size_placeholder = tf.placeholder(shape=[], dtype=tf.int64)

        src_dataset = tf.data.Dataset.from_tensor_slices(src_placeholder)
        iterator = iterator_utils.get_infer_iterator(
            src_dataset,
            src_vocab_table,
            batch_size=batch_size_placeholder,
            eos=hparams.eos,
            src_max_len=hparams.src_max_len_infer,
            use_char_encode=hparams.use_char_encode,
        )
        model = model_creator(
            hparams,
            iterator=iterator,
            mode=tf.contrib.learn.ModeKeys.INFER,
            source_vocab_table=src_vocab_table,
            target_vocab_table=tgt_vocab_table,
            reverse_target_vocab_table=reverse_tgt_vocab_table,
            scope=scope,
            extra_args=extra_args,
        )
    return InferModel(
        graph=graph,
        model=model,
        src_placeholder=src_placeholder,
        batch_size_placeholder=batch_size_placeholder,
        iterator=iterator,
    )