

Python caption_generator.CaptionGenerator method code examples

This article collects typical usage examples of the Python method im2txt.inference_utils.caption_generator.CaptionGenerator. If you are wondering what caption_generator.CaptionGenerator does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples from the containing module, im2txt.inference_utils.caption_generator.


The sections below present 6 code examples of the caption_generator.CaptionGenerator method, sorted by popularity by default.
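
For orientation, here is a minimal usage sketch based on the constructor signature in im2txt's caption_generator.py; the default parameter values shown also match the ones exercised by the test in Example 1. The names sess, model, vocab and encoded_image are placeholders for objects built as in the examples that follow, so the snippet is not runnable on its own.

import math

from im2txt.inference_utils import caption_generator

generator = caption_generator.CaptionGenerator(
    model,                            # an inference_wrapper.InferenceWrapper
    vocab,                            # a vocabulary.Vocabulary
    beam_size=3,                      # default beam width
    max_caption_length=20,            # default maximum caption length
    length_normalization_factor=0.0)  # default: no length normalization

# beam_search() returns Caption objects sorted by descending score; each one
# carries .sentence (a list of word ids, including the start/end ids) and
# .logprob (the log probability of that sentence).
captions = generator.beam_search(sess, encoded_image)
for caption in captions:
  print(caption.sentence, math.exp(caption.logprob))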

Example 1: _assertExpectedCaptions

# Required import: from im2txt.inference_utils import caption_generator [as alias]
# Or: from im2txt.inference_utils.caption_generator import CaptionGenerator [as alias]
def _assertExpectedCaptions(self,
                              expected_captions,
                              beam_size=3,
                              max_caption_length=20,
                              length_normalization_factor=0):
    """Tests that beam search generates the expected captions.

    Args:
      expected_captions: A sequence of pairs (sentence, probability), where
        sentence is a list of integer ids and probability is a float in [0, 1].
      beam_size: Parameter passed to beam_search().
      max_caption_length: Parameter passed to beam_search().
      length_normalization_factor: Parameter passed to beam_search().
    """
    expected_sentences = [c[0] for c in expected_captions]
    expected_probabilities = [c[1] for c in expected_captions]

    # Generate captions.
    generator = caption_generator.CaptionGenerator(
        model=FakeModel(),
        vocab=FakeVocab(),
        beam_size=beam_size,
        max_caption_length=max_caption_length,
        length_normalization_factor=length_normalization_factor)
    actual_captions = generator.beam_search(sess=None, encoded_image=None)

    actual_sentences = [c.sentence for c in actual_captions]
    actual_probabilities = [math.exp(c.logprob) for c in actual_captions]

    self.assertEqual(expected_sentences, actual_sentences)
    self.assertAllClose(expected_probabilities, actual_probabilities) 
Author: ringringyi, Project: DOTA_models, Lines: 33, Source: caption_generator_test.py
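
The FakeModel and FakeVocab fixtures used above are defined elsewhere in caption_generator_test.py and are not shown on this page. The sketch below is an illustrative stand-in (an assumption, not the actual test fixtures) for the duck-typed interface that CaptionGenerator relies on: the vocabulary only needs start_id and end_id, and the model only needs feed_image() and inference_step().

import numpy as np

class FakeVocab(object):
  """Illustrative vocabulary stub: beam search reads only start_id/end_id."""
  def __init__(self):
    self.start_id = 0
    self.end_id = 1

class FakeModel(object):
  """Illustrative model stub: the next-word distribution depends only on the
  previous word id (the row index into the table below)."""
  def __init__(self):
    # Illustrative softmax table over a 4-word vocabulary; each row sums to 1.
    self._probabilities = np.array([[0.1, 0.2, 0.3, 0.4],
                                    [0.4, 0.3, 0.2, 0.1],
                                    [0.1, 0.4, 0.3, 0.2],
                                    [0.3, 0.2, 0.4, 0.1]])

  def feed_image(self, sess, encoded_image):
    # Return one initial state per image; its contents are irrelevant here.
    return np.zeros([1, 1])

  def inference_step(self, sess, input_feed, state_feed):
    # input_feed holds the last word id of each partial caption; return
    # (softmax, new_states, metadata) with one row per partial caption.
    return self._probabilities[input_feed], state_feed, None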

Example 2: main

# Required import: from im2txt.inference_utils import caption_generator [as alias]
# Or: from im2txt.inference_utils.caption_generator import CaptionGenerator [as alias]
def main(_):
  # Build the inference graph.
  g = tf.Graph()
  with g.as_default():
    model = inference_wrapper.InferenceWrapper()
    restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
                                               FLAGS.checkpoint_path)
  g.finalize()

  # Create the vocabulary.
  vocab = vocabulary.Vocabulary(FLAGS.vocab_file)

  filenames = []
  for file_pattern in FLAGS.input_files.split(","):
    filenames.extend(tf.gfile.Glob(file_pattern))
  tf.logging.info("Running caption generation on %d files matching %s",
                  len(filenames), FLAGS.input_files)

  with tf.Session(graph=g) as sess:
    # Load the model from checkpoint.
    restore_fn(sess)

    # Prepare the caption generator. Here we are implicitly using the default
    # beam search parameters. See caption_generator.py for a description of the
    # available beam search parameters.
    generator = caption_generator.CaptionGenerator(model, vocab)

    for filename in filenames:
      with tf.gfile.GFile(filename, "r") as f:
        image = f.read()
      captions = generator.beam_search(sess, image)
      print("Captions for image %s:" % os.path.basename(filename))
      for i, caption in enumerate(captions):
        # Ignore begin and end words.
        sentence = [vocab.id_to_word(w) for w in caption.sentence[1:-1]]
        sentence = " ".join(sentence)
        print("  %d) %s (p=%f)" % (i, sentence, math.exp(caption.logprob))) 
Author: ringringyi, Project: DOTA_models, Lines: 39, Source: run_inference.py
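
The script reads FLAGS.checkpoint_path, FLAGS.vocab_file and FLAGS.input_files without showing where they are declared. In im2txt's run_inference.py they are plain tf.flags string flags, roughly as sketched below (the help strings are paraphrased):

import tensorflow as tf

FLAGS = tf.flags.FLAGS

tf.flags.DEFINE_string("checkpoint_path", "",
                       "Model checkpoint file, or directory containing one.")
tf.flags.DEFINE_string("vocab_file", "",
                       "Text file containing the vocabulary.")
tf.flags.DEFINE_string("input_files", "",
                       "Comma-separated list of file patterns of image files.")

if __name__ == "__main__":
  tf.app.run()  # parses the flags and dispatches to main()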

Example 3: __init__

# Required import: from im2txt.inference_utils import caption_generator [as alias]
# Or: from im2txt.inference_utils.caption_generator import CaptionGenerator [as alias]
def __init__(self, model_path, vocab_path):
        self.model_path = model_path
        self.vocab_path = vocab_path
        self.g = tf.Graph()
        with self.g.as_default():
            self.model = inference_wrapper.InferenceWrapper()
            self.restore_fn = self.model.build_graph_from_config(
                    configuration.ModelConfig(), model_path)
        self.g.finalize()
        self.vocab = vocabulary.Vocabulary(vocab_path)
        self.generator = caption_generator.CaptionGenerator(self.model,
                                                            self.vocab)
        self.sess = tf.Session(graph=self.g)
        self.restore_fn(self.sess)
        return 
Author: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 17, Source: application.py
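
The snippet above only shows the constructor. A hypothetical companion method (not part of the original application.py, shown purely as a sketch and assuming the module already imports tensorflow as tf and math) would use the stored session, generator and vocabulary like this:

def caption_image(self, image_path):
  """Hypothetical helper: returns (sentence, probability) pairs for one image."""
  with tf.gfile.GFile(image_path, "rb") as f:
    image = f.read()
  captions = self.generator.beam_search(self.sess, image)
  results = []
  for caption in captions:
    # Drop the start and end word ids before decoding.
    words = [self.vocab.id_to_word(w) for w in caption.sentence[1:-1]]
    results.append((" ".join(words), math.exp(caption.logprob)))
  return results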

Example 4: main

# Required import: from im2txt.inference_utils import caption_generator [as alias]
# Or: from im2txt.inference_utils.caption_generator import CaptionGenerator [as alias]
def main(_):
  # Build the inference graph.
  g = tf.Graph()
  with g.as_default():
    model = inference_wrapper.InferenceWrapper()
    restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
                                               FLAGS.checkpoint_path)
  g.finalize()

  # Create the vocabulary.
  vocab = vocabulary.Vocabulary(FLAGS.vocab_file)

  filenames = []
  for file_pattern in FLAGS.input_files.split(","):
    filenames.extend(tf.gfile.Glob(file_pattern))
  tf.logging.info("Running caption generation on %d files matching %s",
                  len(filenames), FLAGS.input_files)

  with tf.Session(graph=g) as sess:
    # Load the model from checkpoint.
    restore_fn(sess)

    # Prepare the caption generator. Here we are implicitly using the default
    # beam search parameters. See caption_generator.py for a description of the
    # available beam search parameters.
    generator = caption_generator.CaptionGenerator(model, vocab)

    for filename in filenames:
      with tf.gfile.GFile(filename, "rb") as f:
        image = f.read()
      captions = generator.beam_search(sess, image)
      print("Captions for image %s:" % os.path.basename(filename))
      for i, caption in enumerate(captions):
        # Ignore begin and end words.
        sentence = [vocab.id_to_word(w) for w in caption.sentence[1:-1]]
        sentence = " ".join(sentence)
        print("  %d) %s (p=%f)" % (i, sentence, math.exp(caption.logprob))) 
Author: itsamitgoel, Project: Gun-Detector, Lines: 39, Source: run_inference.py

Example 5: main

# Required import: from im2txt.inference_utils import caption_generator [as alias]
# Or: from im2txt.inference_utils.caption_generator import CaptionGenerator [as alias]
def main(_):
  # Build the inference graph.
  g = tf.Graph()
  with g.as_default():
    model = inference_wrapper.InferenceWrapper()
    restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
                                               FLAGS.checkpoint_path)
  g.finalize()

  # Create the vocabulary.
  vocab = vocabulary.Vocabulary(FLAGS.vocab_file)

  filenames = []
  for file_pattern in FLAGS.input_files.split(","):
    filenames.extend(tf.gfile.Glob(file_pattern))
  tf.logging.info("Running caption generation on %d files matching %s",
                  len(filenames), FLAGS.input_files)

  with tf.Session(graph=g) as sess:
    # Load the model from checkpoint.
    restore_fn(sess)

    # Prepare the caption generator. Here we are implicitly using the default
    # beam search parameters. See caption_generator.py for a description of the
    # available beam search parameters.
    generator = caption_generator.CaptionGenerator(model, vocab)

    for filename in filenames:
      with tf.gfile.GFile(filename, "r") as f:
        image = f.read()
      captions = generator.beam_search(sess, image)
      print("Captions for image %s:" % os.path.basename(filename))
      for i, caption in enumerate(captions):
        # Ignore begin and end words.
        sentence = [vocab.id_to_word(w) for w in caption.sentence[1:-1]]
        sentence = " ".join(sentence)
        print("  %d) %s (xyzj=%f)" % (i, sentence, math.exp(caption.logprob))) 
Author: sshleifer, Project: object_detection_kitti, Lines: 39, Source: run_inference.py

Example 6: main

# Required import: from im2txt.inference_utils import caption_generator [as alias]
# Or: from im2txt.inference_utils.caption_generator import CaptionGenerator [as alias]
def main(_):
  # Build the inference graph.
  g = tf.Graph()
  with g.as_default():
    model = inference_wrapper.InferenceWrapper()
    restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
                                               FLAGS.checkpoint_path)
    # Build the image preprocessing graph (decodes and resizes a raw image string).
    image_placeholder = tf.placeholder(dtype=tf.string, shape=[])
    preprocessor = model.model.process_image(image_placeholder)

  g.finalize()

  # Create the vocabulary.
  vocab = vocabulary.Vocabulary(FLAGS.vocab_file)

  filenames = []
  for file_pattern in FLAGS.input_files.split(","):
    filenames.extend(tf.gfile.Glob(file_pattern))
  tf.logging.info("Running caption generation on %d files matching %s",
                  len(filenames), FLAGS.input_files)


  with tf.Session(graph=g) as sess:
    # Load the model from checkpoint.
    restore_fn(sess)


    # Prepare the caption generator. Here we are implicitly using the default
    # beam search parameters. See caption_generator.py for a description of the
    # available beam search parameters.
    generator = caption_generator.CaptionGenerator(model, vocab)

    for filename in filenames:
      _, file_extension = os.path.splitext(filename)
      if file_extension == ".npy":
        # load numpy array
        image = np.squeeze(np.load(filename))
      else:
        with tf.gfile.GFile(filename, "rb") as f:
          image = f.read()
          image = sess.run(preprocessor, {image_placeholder: image})
          print("preprocessed image shape:", image.shape)
    
      captions = generator.beam_search(sess, image)
      print("Captions for image %s:" % os.path.basename(filename))
      for i, caption in enumerate(captions):
        # Ignore begin and end words.
        sentence = [vocab.id_to_word(w) for w in caption.sentence[1:-1]]
        sentence = " ".join(sentence)
        print("  %d) %s (p=%f)" % (i, sentence, math.exp(caption.logprob)))
        print(caption.sentence)
        # print(generator.new_caption_prob(sess, caption.sentence, image))
        print(model.new_caption_prob(sess, caption.sentence, image))
      
      new_sentence = "kite"
      new_sentence = new_sentence.split()
      print("My new sentence:", new_sentence)
      new_caption = [vocab.start_id]+[vocab.word_to_id(w) for w in new_sentence] + [vocab.end_id]
      print("My new id:", new_caption)
      print(model.new_caption_prob(sess, new_caption, image)) 
Author: IBM, Project: Image-Captioning-Attack, Lines: 63, Source: run_inference.py
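
Example 6 scores arbitrary captions with model.new_caption_prob, a method specific to the Image-Captioning-Attack project whose implementation is not shown here. Purely as an illustration (an assumption about how such scoring typically works, not that project's code), a caption's log probability can be accumulated by stepping the standard im2txt inference interface word by word over an id sequence built the same way as new_caption above:

import numpy as np

def caption_logprob(sess, model, caption_ids, image):
  """Sums log P(w_t | w_<t, image); caption_ids starts with the start id and ends with the end id."""
  state = model.feed_image(sess, image)
  logprob = 0.0
  for prev_word, next_word in zip(caption_ids[:-1], caption_ids[1:]):
    softmax, state, _ = model.inference_step(sess,
                                             np.array([prev_word]),
                                             state)
    logprob += np.log(softmax[0][next_word])
  return logprob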


Note: The im2txt.inference_utils.caption_generator.CaptionGenerator examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and redistribution or use should follow the license of the corresponding project. Please do not republish without permission.