This article collects typical usage examples of the Python method im2txt.inference_wrapper.InferenceWrapper. If you are wondering what inference_wrapper.InferenceWrapper is for, or how to use it, the hand-picked code examples below may help. You can also explore other usages of the containing module, im2txt.inference_wrapper.
The following presents 5 code examples of inference_wrapper.InferenceWrapper, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
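All of the examples below assume roughly the same inference setup. Here is a minimal sketch of the imports and flags they rely on; the module paths follow the TensorFlow im2txt layout, and the flag definitions (help text and defaults) are illustrative assumptions rather than code from the original repositories:

import math
import os

import numpy as np
import tensorflow as tf

from im2txt import configuration
from im2txt import inference_wrapper
from im2txt.inference_utils import caption_generator
from im2txt.inference_utils import vocabulary

FLAGS = tf.flags.FLAGS

# Flag names match the ones used in the examples below.
tf.flags.DEFINE_string("checkpoint_path", "", "Path to a trained model checkpoint.")
tf.flags.DEFINE_string("vocab_file", "", "Path to the vocabulary (word counts) file.")
tf.flags.DEFINE_string("input_files", "", "Comma-separated list of image file patterns.")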
Example 1: main
# Required module: from im2txt import inference_wrapper [as alias]
# Or alternatively: from im2txt.inference_wrapper import InferenceWrapper [as alias]
def main(_):
  # Build the inference graph.
  g = tf.Graph()
  with g.as_default():
    model = inference_wrapper.InferenceWrapper()
    restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
                                               FLAGS.checkpoint_path)
  g.finalize()

  # Create the vocabulary.
  vocab = vocabulary.Vocabulary(FLAGS.vocab_file)

  filenames = []
  for file_pattern in FLAGS.input_files.split(","):
    filenames.extend(tf.gfile.Glob(file_pattern))
  tf.logging.info("Running caption generation on %d files matching %s",
                  len(filenames), FLAGS.input_files)

  with tf.Session(graph=g) as sess:
    # Load the model from checkpoint.
    restore_fn(sess)

    # Prepare the caption generator. Here we are implicitly using the default
    # beam search parameters. See caption_generator.py for a description of the
    # available beam search parameters.
    generator = caption_generator.CaptionGenerator(model, vocab)

    for filename in filenames:
      # Read raw image bytes; binary mode is required under Python 3.
      with tf.gfile.GFile(filename, "rb") as f:
        image = f.read()
      captions = generator.beam_search(sess, image)
      print("Captions for image %s:" % os.path.basename(filename))
      for i, caption in enumerate(captions):
        # Ignore begin and end words.
        sentence = [vocab.id_to_word(w) for w in caption.sentence[1:-1]]
        sentence = " ".join(sentence)
        print("  %d) %s (p=%f)" % (i, sentence, math.exp(caption.logprob)))
Example 2: __init__
# Required module: from im2txt import inference_wrapper [as alias]
# Or alternatively: from im2txt.inference_wrapper import InferenceWrapper [as alias]
def __init__(self, model_path, vocab_path):
  self.model_path = model_path
  self.vocab_path = vocab_path
  # Build the inference graph and remember how to restore the checkpoint.
  self.g = tf.Graph()
  with self.g.as_default():
    self.model = inference_wrapper.InferenceWrapper()
    self.restore_fn = self.model.build_graph_from_config(
        configuration.ModelConfig(), model_path)
  self.g.finalize()
  # Vocabulary, beam-search generator, and a long-lived session with the
  # checkpoint restored into it.
  self.vocab = vocabulary.Vocabulary(vocab_path)
  self.generator = caption_generator.CaptionGenerator(self.model, self.vocab)
  self.sess = tf.Session(graph=self.g)
  self.restore_fn(self.sess)
  return
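One possible way to use the class whose constructor is shown above; the class name CaptionModel is invented here for illustration, and only the attributes assigned in __init__ are assumed:

caption_model = CaptionModel("/path/to/model.ckpt", "/path/to/word_counts.txt")
with tf.gfile.GFile("example.jpg", "rb") as f:
  image_bytes = f.read()
for caption in caption_model.generator.beam_search(caption_model.sess, image_bytes):
  words = [caption_model.vocab.id_to_word(w) for w in caption.sentence[1:-1]]
  print(" ".join(words), math.exp(caption.logprob))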
Example 3: main
# Required module: from im2txt import inference_wrapper [as alias]
# Or alternatively: from im2txt.inference_wrapper import InferenceWrapper [as alias]
def main(_):
  # Build the inference graph.
  g = tf.Graph()
  with g.as_default():
    model = inference_wrapper.InferenceWrapper()
    restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
                                               FLAGS.checkpoint_path)
  g.finalize()

  # Create the vocabulary.
  vocab = vocabulary.Vocabulary(FLAGS.vocab_file)

  filenames = []
  for file_pattern in FLAGS.input_files.split(","):
    filenames.extend(tf.gfile.Glob(file_pattern))
  tf.logging.info("Running caption generation on %d files matching %s",
                  len(filenames), FLAGS.input_files)

  with tf.Session(graph=g) as sess:
    # Load the model from checkpoint.
    restore_fn(sess)

    # Prepare the caption generator. Here we are implicitly using the default
    # beam search parameters. See caption_generator.py for a description of the
    # available beam search parameters.
    generator = caption_generator.CaptionGenerator(model, vocab)

    for filename in filenames:
      with tf.gfile.GFile(filename, "rb") as f:
        image = f.read()
      captions = generator.beam_search(sess, image)
      print("Captions for image %s:" % os.path.basename(filename))
      for i, caption in enumerate(captions):
        # Ignore begin and end words.
        sentence = [vocab.id_to_word(w) for w in caption.sentence[1:-1]]
        sentence = " ".join(sentence)
        print("  %d) %s (p=%f)" % (i, sentence, math.exp(caption.logprob)))
Example 4: main
# Required module: from im2txt import inference_wrapper [as alias]
# Or alternatively: from im2txt.inference_wrapper import InferenceWrapper [as alias]
def main(_):
  # Build the inference graph.
  g = tf.Graph()
  with g.as_default():
    model = inference_wrapper.InferenceWrapper()
    restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
                                               FLAGS.checkpoint_path)
  g.finalize()

  # Create the vocabulary.
  vocab = vocabulary.Vocabulary(FLAGS.vocab_file)

  filenames = []
  for file_pattern in FLAGS.input_files.split(","):
    filenames.extend(tf.gfile.Glob(file_pattern))
  tf.logging.info("Running caption generation on %d files matching %s",
                  len(filenames), FLAGS.input_files)

  with tf.Session(graph=g) as sess:
    # Load the model from checkpoint.
    restore_fn(sess)

    # Prepare the caption generator. Here we are implicitly using the default
    # beam search parameters. See caption_generator.py for a description of the
    # available beam search parameters.
    generator = caption_generator.CaptionGenerator(model, vocab)

    for filename in filenames:
      # Read raw image bytes; binary mode is required under Python 3.
      with tf.gfile.GFile(filename, "rb") as f:
        image = f.read()
      captions = generator.beam_search(sess, image)
      print("Captions for image %s:" % os.path.basename(filename))
      for i, caption in enumerate(captions):
        # Ignore begin and end words.
        sentence = [vocab.id_to_word(w) for w in caption.sentence[1:-1]]
        sentence = " ".join(sentence)
        print("  %d) %s (p=%f)" % (i, sentence, math.exp(caption.logprob)))
Example 5: main
# Required module: from im2txt import inference_wrapper [as alias]
# Or alternatively: from im2txt.inference_wrapper import InferenceWrapper [as alias]
def main(_):
  # Build the inference graph.
  g = tf.Graph()
  with g.as_default():
    model = inference_wrapper.InferenceWrapper()
    restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
                                               FLAGS.checkpoint_path)
    # Preprocessing compute graph: decode/resize raw image bytes into the
    # tensor format expected by the model.
    image_placeholder = tf.placeholder(dtype=tf.string, shape=[])
    preprocessor = model.model.process_image(image_placeholder)
  g.finalize()

  # Create the vocabulary.
  vocab = vocabulary.Vocabulary(FLAGS.vocab_file)

  filenames = []
  for file_pattern in FLAGS.input_files.split(","):
    filenames.extend(tf.gfile.Glob(file_pattern))
  tf.logging.info("Running caption generation on %d files matching %s",
                  len(filenames), FLAGS.input_files)

  with tf.Session(graph=g) as sess:
    # Load the model from checkpoint.
    restore_fn(sess)

    # Prepare the caption generator. Here we are implicitly using the default
    # beam search parameters. See caption_generator.py for a description of the
    # available beam search parameters.
    generator = caption_generator.CaptionGenerator(model, vocab)

    for filename in filenames:
      _, file_extension = os.path.splitext(filename)
      if file_extension == ".npy":
        # Load an already-preprocessed image stored as a numpy array.
        image = np.squeeze(np.load(filename))
      else:
        # Read raw image bytes and run the preprocessing graph on them.
        with tf.gfile.GFile(filename, "rb") as f:
          image = f.read()
        image = sess.run(preprocessor, {image_placeholder: image})
      print("raw image shape is", image.shape)
      captions = generator.beam_search(sess, image)
      print("Captions for image %s:" % os.path.basename(filename))
      for i, caption in enumerate(captions):
        # Ignore begin and end words.
        sentence = [vocab.id_to_word(w) for w in caption.sentence[1:-1]]
        sentence = " ".join(sentence)
        print("  %d) %s (p=%f)" % (i, sentence, math.exp(caption.logprob)))
        print(caption.sentence)
        # print(generator.new_caption_prob(sess, caption.sentence, image))
        print(model.new_caption_prob(sess, caption.sentence, image))

      # Score a hand-written caption against the current image.
      new_sentence = "kite"
      new_sentence = new_sentence.split()
      print("My new sentence:", new_sentence)
      new_caption = ([vocab.start_id] +
                     [vocab.word_to_id(w) for w in new_sentence] +
                     [vocab.end_id])
      print("My new id:", new_caption)
      print(model.new_caption_prob(sess, new_caption, image))
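If the goal is to score several candidate captions against the same image, the pattern at the end of example 5 can be reused directly. A small sketch, assuming the same custom model.new_caption_prob helper, the vocab object, and an already-preprocessed image as in the loop above; the candidate phrases are made up for illustration:

for phrase in ["a kite flying in the sky", "a dog on a surfboard"]:
  words = phrase.split()
  # Wrap the word ids with the start and end tokens, as example 5 does.
  caption_ids = [vocab.start_id] + [vocab.word_to_id(w) for w in words] + [vocab.end_id]
  print(phrase, model.new_caption_prob(sess, caption_ids, image))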