

Python inference_wrapper.InferenceWrapper method code examples

This article collects typical usage examples of the Python method im2txt.inference_wrapper.InferenceWrapper. If you are unsure what inference_wrapper.InferenceWrapper does or how to call it, the curated code examples below may help. You can also explore other usages of the im2txt.inference_wrapper module in which this method lives.


The sections below present 5 code examples of the inference_wrapper.InferenceWrapper method, ordered by popularity by default.
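All five examples follow the same im2txt inference setup and only annotate the inference_wrapper import in their header comments. As a rough sketch (module paths follow the reference im2txt layout; the FLAGS definitions vary per project and are shown here only for completeness), the remaining imports they rely on are:

import math
import os

import tensorflow as tf

from im2txt import configuration
from im2txt import inference_wrapper
from im2txt.inference_utils import caption_generator
from im2txt.inference_utils import vocabulary

# Flags used by the main()-style examples; names match the snippets below.
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("checkpoint_path", "", "Model checkpoint file or directory of checkpoints.")
tf.flags.DEFINE_string("vocab_file", "", "Text file containing the vocabulary.")
tf.flags.DEFINE_string("input_files", "", "Comma-separated list of image file patterns.")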

Example 1: main

# Required module: from im2txt import inference_wrapper [as alias]
# Or: from im2txt.inference_wrapper import InferenceWrapper [as alias]
def main(_):
  # Build the inference graph.
  g = tf.Graph()
  with g.as_default():
    model = inference_wrapper.InferenceWrapper()
    restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
                                               FLAGS.checkpoint_path)
  g.finalize()

  # Create the vocabulary.
  vocab = vocabulary.Vocabulary(FLAGS.vocab_file)

  filenames = []
  for file_pattern in FLAGS.input_files.split(","):
    filenames.extend(tf.gfile.Glob(file_pattern))
  tf.logging.info("Running caption generation on %d files matching %s",
                  len(filenames), FLAGS.input_files)

  with tf.Session(graph=g) as sess:
    # Load the model from checkpoint.
    restore_fn(sess)

    # Prepare the caption generator. Here we are implicitly using the default
    # beam search parameters. See caption_generator.py for a description of the
    # available beam search parameters.
    generator = caption_generator.CaptionGenerator(model, vocab)

    for filename in filenames:
      with tf.gfile.GFile(filename, "r") as f:
        image = f.read()
      captions = generator.beam_search(sess, image)
      print("Captions for image %s:" % os.path.basename(filename))
      for i, caption in enumerate(captions):
        # Ignore begin and end words.
        sentence = [vocab.id_to_word(w) for w in caption.sentence[1:-1]]
        sentence = " ".join(sentence)
        print("  %d) %s (p=%f)" % (i, sentence, math.exp(caption.logprob))) 
Developer: ringringyi, Project: DOTA_models, Lines of code: 39, Source: run_inference.py
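
The comment in Example 1 notes that the default beam search parameters are used implicitly. As a sketch, assuming the CaptionGenerator signature from the reference im2txt caption_generator.py (forks may differ), the same parameters can be passed explicitly:

# Sketch: explicit beam search parameters (values shown are the reference defaults).
generator = caption_generator.CaptionGenerator(
    model,
    vocab,
    beam_size=3,                      # number of partial captions kept per step
    max_caption_length=20,            # upper bound on generated caption length
    length_normalization_factor=0.0)  # > 0 favors longer captions when ranking beams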

Example 2: __init__

# Required module: from im2txt import inference_wrapper [as alias]
# Or: from im2txt.inference_wrapper import InferenceWrapper [as alias]
def __init__(self, model_path, vocab_path):
        self.model_path = model_path
        self.vocab_path = vocab_path
        self.g = tf.Graph()
        with self.g.as_default():
            self.model = inference_wrapper.InferenceWrapper()
            self.restore_fn = self.model.build_graph_from_config(
                    configuration.ModelConfig(), model_path)
        self.g.finalize()
        self.vocab = vocabulary.Vocabulary(vocab_path)
        self.generator = caption_generator.CaptionGenerator(self.model,
                                                            self.vocab)
        self.sess = tf.Session(graph=self.g)
        self.restore_fn(self.sess)
        return 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines of code: 17, Source: application.py
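
Example 2 shows only the constructor of a wrapper class used behind a web API. A minimal sketch of how such an object might expose captioning (the caption method below is hypothetical and not part of the original project) could look like this:

# Hypothetical companion method for the class in Example 2 (not in the
# original project): caption raw image bytes using the long-lived session.
def caption(self, image_bytes, top_k=3):
    captions = self.generator.beam_search(self.sess, image_bytes)
    results = []
    for caption in captions[:top_k]:
        # Strip the begin/end tokens and map word ids back to words.
        words = [self.vocab.id_to_word(w) for w in caption.sentence[1:-1]]
        results.append((" ".join(words), math.exp(caption.logprob)))
    return results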

Example 3: main

# Required module: from im2txt import inference_wrapper [as alias]
# Or: from im2txt.inference_wrapper import InferenceWrapper [as alias]
def main(_):
  # Build the inference graph.
  g = tf.Graph()
  with g.as_default():
    model = inference_wrapper.InferenceWrapper()
    restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
                                               FLAGS.checkpoint_path)
  g.finalize()

  # Create the vocabulary.
  vocab = vocabulary.Vocabulary(FLAGS.vocab_file)

  filenames = []
  for file_pattern in FLAGS.input_files.split(","):
    filenames.extend(tf.gfile.Glob(file_pattern))
  tf.logging.info("Running caption generation on %d files matching %s",
                  len(filenames), FLAGS.input_files)

  with tf.Session(graph=g) as sess:
    # Load the model from checkpoint.
    restore_fn(sess)

    # Prepare the caption generator. Here we are implicitly using the default
    # beam search parameters. See caption_generator.py for a description of the
    # available beam search parameters.
    generator = caption_generator.CaptionGenerator(model, vocab)

    for filename in filenames:
      with tf.gfile.GFile(filename, "rb") as f:
        image = f.read()
      captions = generator.beam_search(sess, image)
      print("Captions for image %s:" % os.path.basename(filename))
      for i, caption in enumerate(captions):
        # Ignore begin and end words.
        sentence = [vocab.id_to_word(w) for w in caption.sentence[1:-1]]
        sentence = " ".join(sentence)
        print("  %d) %s (p=%f)" % (i, sentence, math.exp(caption.logprob))) 
Developer: itsamitgoel, Project: Gun-Detector, Lines of code: 39, Source: run_inference.py

Example 4: main

# Required module: from im2txt import inference_wrapper [as alias]
# Or: from im2txt.inference_wrapper import InferenceWrapper [as alias]
def main(_):
  # Build the inference graph.
  g = tf.Graph()
  with g.as_default():
    model = inference_wrapper.InferenceWrapper()
    restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
                                               FLAGS.checkpoint_path)
  g.finalize()

  # Create the vocabulary.
  vocab = vocabulary.Vocabulary(FLAGS.vocab_file)

  filenames = []
  for file_pattern in FLAGS.input_files.split(","):
    filenames.extend(tf.gfile.Glob(file_pattern))
  tf.logging.info("Running caption generation on %d files matching %s",
                  len(filenames), FLAGS.input_files)

  with tf.Session(graph=g) as sess:
    # Load the model from checkpoint.
    restore_fn(sess)

    # Prepare the caption generator. Here we are implicitly using the default
    # beam search parameters. See caption_generator.py for a description of the
    # available beam search parameters.
    generator = caption_generator.CaptionGenerator(model, vocab)

    for filename in filenames:
      with tf.gfile.GFile(filename, "r") as f:
        image = f.read()
      captions = generator.beam_search(sess, image)
      print("Captions for image %s:" % os.path.basename(filename))
      for i, caption in enumerate(captions):
        # Ignore begin and end words.
        sentence = [vocab.id_to_word(w) for w in caption.sentence[1:-1]]
        sentence = " ".join(sentence)
        print("  %d) %s (xyzj=%f)" % (i, sentence, math.exp(caption.logprob))) 
Developer: sshleifer, Project: object_detection_kitti, Lines of code: 39, Source: run_inference.py

Example 5: main

# Required module: from im2txt import inference_wrapper [as alias]
# Or: from im2txt.inference_wrapper import InferenceWrapper [as alias]
def main(_):
  # Build the inference graph.
  g = tf.Graph()
  with g.as_default():
    model = inference_wrapper.InferenceWrapper()
    restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
                                               FLAGS.checkpoint_path)
    # preprocessing compute graph
    image_placeholder = tf.placeholder(dtype=tf.string, shape=[])
    preprocessor = model.model.process_image(image_placeholder)

  g.finalize()

  # Create the vocabulary.
  vocab = vocabulary.Vocabulary(FLAGS.vocab_file)

  filenames = []
  for file_pattern in FLAGS.input_files.split(","):
    filenames.extend(tf.gfile.Glob(file_pattern))
  tf.logging.info("Running caption generation on %d files matching %s",
                  len(filenames), FLAGS.input_files)


  with tf.Session(graph=g) as sess:
    # Load the model from checkpoint.
    restore_fn(sess)


    # Prepare the caption generator. Here we are implicitly using the default
    # beam search parameters. See caption_generator.py for a description of the
    # available beam search parameters.
    generator = caption_generator.CaptionGenerator(model, vocab)

    for filename in filenames:
      _, file_extension = os.path.splitext(filename)
      if file_extension == ".npy":
        # load numpy array
        image = np.squeeze(np.load(filename))
      else:
        with tf.gfile.GFile(filename, "rb") as f:
          image = f.read()
          image = sess.run(preprocessor, {image_placeholder: image})
          print('raw image shape is', image.shape)
    
      captions = generator.beam_search(sess, image)
      print("Captions for image %s:" % os.path.basename(filename))
      for i, caption in enumerate(captions):
        # Ignore begin and end words.
        sentence = [vocab.id_to_word(w) for w in caption.sentence[1:-1]]
        sentence = " ".join(sentence)
        print("  %d) %s (p=%f)" % (i, sentence, math.exp(caption.logprob)))
        print(caption.sentence)
        # print(generator.new_caption_prob(sess, caption.sentence, image))
        print(model.new_caption_prob(sess, caption.sentence, image))
      
      new_sentence = "kite"
      new_sentence = new_sentence.split()
      print("My new sentence:", new_sentence)
      new_caption = [vocab.start_id]+[vocab.word_to_id(w) for w in new_sentence] + [vocab.end_id]
      print("My new id:", new_caption)
      print(model.new_caption_prob(sess, new_caption, image)) 
Developer: IBM, Project: Image-Captioning-Attack, Lines of code: 63, Source: run_inference.py
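
Example 5 accepts either raw encoded images or preprocessed arrays stored as .npy files. A minimal sketch of producing such a .npy file, assuming the same preprocessor and image_placeholder ops built in the example (file names are placeholders):

# Sketch: save a preprocessed image as a .npy array that Example 5 can load.
import numpy as np

with tf.gfile.GFile("image.jpg", "rb") as f:  # placeholder file name
    raw_bytes = f.read()
processed = sess.run(preprocessor, {image_placeholder: raw_bytes})
np.save("image.npy", processed)  # later reloaded via np.squeeze(np.load("image.npy"))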


Note: The im2txt.inference_wrapper.InferenceWrapper method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please follow each project's license when redistributing or using the code, and do not reproduce this article without permission.