当前位置: 首页>>代码示例>>Python>>正文


Python vocabulary.Vocabulary方法代码示例

本文整理汇总了Python中im2txt.inference_utils.vocabulary.Vocabulary方法的典型用法代码示例。如果您正苦于以下问题:Python vocabulary.Vocabulary方法的具体用法?Python vocabulary.Vocabulary怎么用?Python vocabulary.Vocabulary使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在im2txt.inference_utils.vocabulary的用法示例。


在下文中一共展示了vocabulary.Vocabulary方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: __init__

# 需要导入模块: from im2txt.inference_utils import vocabulary [as 别名]
# 或者: from im2txt.inference_utils.vocabulary import Vocabulary [as 别名]
def __init__(self, job_dir=FLAGS.job_dir):
    """Build the sentence-inference graph and restore weights from *job_dir*.

    Creates a placeholder for one variable-length sequence of word ids,
    runs it through ``_tower_fn`` to obtain the initial decoder state,
    then opens a session and restores the checkpoint.
    """
    # Single variable-length sequence of word ids, batched to size 1 below.
    keys_ph = tf.placeholder(tf.int32, [None])
    seq_len = tf.shape(keys_ph)[0]
    batched_keys = tf.expand_dims(keys_ph, axis=0)
    batched_len = tf.expand_dims(seq_len, axis=0)
    init_state = _tower_fn(batched_keys, batched_len)

    self.vocab = vocabulary.Vocabulary(FLAGS.vocab_file)
    # NOTE: the Saver is created after _tower_fn so it captures the model
    # variables built there.
    self.saver = tf.train.Saver()

    self.key_inp = keys_ph
    self.init_state = init_state
    sess_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
    self.sess = tf.Session(config=sess_config)

    self.restore_fn(job_dir)
    self.tf = tf
开发者ID:fengyang0317,项目名称:unsupervised_captioning,代码行数:20,代码来源:sentence_infer.py

示例2: main

# 需要导入模块: from im2txt.inference_utils import vocabulary [as 别名]
# 或者: from im2txt.inference_utils.vocabulary import Vocabulary [as 别名]
def main(_):
  """Run beam-search caption generation over the files in FLAGS.input_files.

  Builds the im2txt inference graph, restores weights from
  FLAGS.checkpoint_path, then prints the top beam-search captions (with
  their probabilities) for every file matched by the comma-separated glob
  patterns in FLAGS.input_files.
  """
  # Build the inference graph.
  g = tf.Graph()
  with g.as_default():
    model = inference_wrapper.InferenceWrapper()
    restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
                                               FLAGS.checkpoint_path)
  g.finalize()

  # Create the vocabulary.
  vocab = vocabulary.Vocabulary(FLAGS.vocab_file)

  filenames = []
  for file_pattern in FLAGS.input_files.split(","):
    filenames.extend(tf.gfile.Glob(file_pattern))
  tf.logging.info("Running caption generation on %d files matching %s",
                  len(filenames), FLAGS.input_files)

  with tf.Session(graph=g) as sess:
    # Load the model from checkpoint.
    restore_fn(sess)

    # Prepare the caption generator. Here we are implicitly using the default
    # beam search parameters. See caption_generator.py for a description of the
    # available beam search parameters.
    generator = caption_generator.CaptionGenerator(model, vocab)

    for filename in filenames:
      # Images are binary data: open in "rb" so Python 3 does not attempt
      # text decoding (mode "r" fails or corrupts JPEG/PNG bytes).
      with tf.gfile.GFile(filename, "rb") as f:
        image = f.read()
      captions = generator.beam_search(sess, image)
      print("Captions for image %s:" % os.path.basename(filename))
      for i, caption in enumerate(captions):
        # Ignore begin and end words.
        sentence = [vocab.id_to_word(w) for w in caption.sentence[1:-1]]
        sentence = " ".join(sentence)
        print("  %d) %s (p=%f)" % (i, sentence, math.exp(caption.logprob)))
开发者ID:ringringyi,项目名称:DOTA_models,代码行数:39,代码来源:run_inference.py

示例3: __init__

# 需要导入模块: from im2txt.inference_utils import vocabulary [as 别名]
# 或者: from im2txt.inference_utils.vocabulary import Vocabulary [as 别名]
def __init__(self, model_path, vocab_path):
    """Load an im2txt model plus vocabulary and open a ready-to-use session.

    Args:
      model_path: checkpoint path handed to build_graph_from_config.
      vocab_path: path to the vocabulary file.
    """
    self.model_path = model_path
    self.vocab_path = vocab_path

    # Build the inference graph inside its own Graph, then freeze it.
    self.g = tf.Graph()
    with self.g.as_default():
        self.model = inference_wrapper.InferenceWrapper()
        self.restore_fn = self.model.build_graph_from_config(
            configuration.ModelConfig(), model_path)
    self.g.finalize()

    self.vocab = vocabulary.Vocabulary(vocab_path)
    self.generator = caption_generator.CaptionGenerator(self.model,
                                                        self.vocab)

    # Open the session and restore the checkpoint weights immediately so the
    # instance is usable as soon as construction finishes.
    self.sess = tf.Session(graph=self.g)
    self.restore_fn(self.sess)
开发者ID:abhisuri97,项目名称:auto-alt-text-lambda-api,代码行数:17,代码来源:application.py

示例4: main

# 需要导入模块: from im2txt.inference_utils import vocabulary [as 别名]
# 或者: from im2txt.inference_utils.vocabulary import Vocabulary [as 别名]
def main(_):
  """Generate and print captions for every image matched by FLAGS.input_files."""
  # Assemble the inference graph once, then freeze it.
  graph = tf.Graph()
  with graph.as_default():
    model = inference_wrapper.InferenceWrapper()
    restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
                                               FLAGS.checkpoint_path)
  graph.finalize()

  # Vocabulary used to map word ids back to words.
  vocab = vocabulary.Vocabulary(FLAGS.vocab_file)

  # Expand each comma-separated glob pattern into concrete file names.
  filenames = []
  for pattern in FLAGS.input_files.split(","):
    filenames.extend(tf.gfile.Glob(pattern))
  tf.logging.info("Running caption generation on %d files matching %s",
                  len(filenames), FLAGS.input_files)

  with tf.Session(graph=graph) as sess:
    # Restore the trained weights into the session.
    restore_fn(sess)

    # Implicitly uses the default beam-search parameters; see
    # caption_generator.py for the available knobs.
    generator = caption_generator.CaptionGenerator(model, vocab)

    for filename in filenames:
      with tf.gfile.GFile(filename, "rb") as f:
        image = f.read()
      captions = generator.beam_search(sess, image)
      print("Captions for image %s:" % os.path.basename(filename))
      for idx, caption in enumerate(captions):
        # Drop the begin/end-of-sentence ids before joining into text.
        words = [vocab.id_to_word(w) for w in caption.sentence[1:-1]]
        print("  %d) %s (p=%f)" % (idx, " ".join(words),
                                   math.exp(caption.logprob)))
开发者ID:itsamitgoel,项目名称:Gun-Detector,代码行数:39,代码来源:run_inference.py

示例5: main

# 需要导入模块: from im2txt.inference_utils import vocabulary [as 别名]
# 或者: from im2txt.inference_utils.vocabulary import Vocabulary [as 别名]
def main(_):
  """Run beam-search caption generation over the files in FLAGS.input_files.

  Builds the im2txt inference graph, restores weights from
  FLAGS.checkpoint_path, then prints the top beam-search captions (with
  their probabilities) for every file matched by the comma-separated glob
  patterns in FLAGS.input_files.
  """
  # Build the inference graph.
  g = tf.Graph()
  with g.as_default():
    model = inference_wrapper.InferenceWrapper()
    restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
                                               FLAGS.checkpoint_path)
  g.finalize()

  # Create the vocabulary.
  vocab = vocabulary.Vocabulary(FLAGS.vocab_file)

  filenames = []
  for file_pattern in FLAGS.input_files.split(","):
    filenames.extend(tf.gfile.Glob(file_pattern))
  tf.logging.info("Running caption generation on %d files matching %s",
                  len(filenames), FLAGS.input_files)

  with tf.Session(graph=g) as sess:
    # Load the model from checkpoint.
    restore_fn(sess)

    # Prepare the caption generator. Here we are implicitly using the default
    # beam search parameters. See caption_generator.py for a description of the
    # available beam search parameters.
    generator = caption_generator.CaptionGenerator(model, vocab)

    for filename in filenames:
      # Images are binary data: open in "rb" so Python 3 does not attempt
      # text decoding (mode "r" fails or corrupts JPEG/PNG bytes).
      with tf.gfile.GFile(filename, "rb") as f:
        image = f.read()
      captions = generator.beam_search(sess, image)
      print("Captions for image %s:" % os.path.basename(filename))
      for i, caption in enumerate(captions):
        # Ignore begin and end words.
        sentence = [vocab.id_to_word(w) for w in caption.sentence[1:-1]]
        sentence = " ".join(sentence)
        # "(p=%f)" labels the caption probability, matching the sibling
        # examples; the previous "(xyzj=%f)" label was garbled.
        print("  %d) %s (p=%f)" % (i, sentence, math.exp(caption.logprob)))
开发者ID:sshleifer,项目名称:object_detection_kitti,代码行数:39,代码来源:run_inference.py

示例6: main

# 需要导入模块: from im2txt.inference_utils import vocabulary [as 别名]
# 或者: from im2txt.inference_utils.vocabulary import Vocabulary [as 别名]
def main(_):
  """Caption images and probe caption probabilities under the model.

  For each input file: ".npy" files are loaded directly as arrays (presumably
  already-preprocessed image tensors — TODO confirm against the producer of
  those files), while any other file is read as raw bytes and run through the
  model's own preprocessing graph. Beam-search captions are printed with
  their probabilities, each caption is re-scored via model.new_caption_prob,
  and finally a hand-built one-word sentence ("kite") is scored the same way.
  """
  # Build the inference graph.
  g = tf.Graph()
  with g.as_default():
    model = inference_wrapper.InferenceWrapper()
    restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
                                               FLAGS.checkpoint_path)
    # preprocessing compute graph
    # (created inside the graph scope, before g.finalize(); feeds a single
    # encoded-image string and yields the model's preprocessed image tensor)
    image_placeholder = tf.placeholder(dtype=tf.string, shape=[])
    preprocessor = model.model.process_image(image_placeholder)

  g.finalize()

  # Create the vocabulary.
  vocab = vocabulary.Vocabulary(FLAGS.vocab_file)

  # Expand each comma-separated glob pattern into concrete file names.
  filenames = []
  for file_pattern in FLAGS.input_files.split(","):
    filenames.extend(tf.gfile.Glob(file_pattern))
  tf.logging.info("Running caption generation on %d files matching %s",
                  len(filenames), FLAGS.input_files)


  with tf.Session(graph=g) as sess:
    # Load the model from checkpoint.
    restore_fn(sess)


    # Prepare the caption generator. Here we are implicitly using the default
    # beam search parameters. See caption_generator.py for a description of the
    # available beam search parameters.
    generator = caption_generator.CaptionGenerator(model, vocab)

    for filename in filenames:
      _, file_extension = os.path.splitext(filename)
      if file_extension == ".npy":
        # load numpy array
        image = np.squeeze(np.load(filename))
      else:
        # Raw image file: read the encoded bytes and run the model's own
        # preprocessing to get the input tensor.
        with tf.gfile.GFile(filename, "rb") as f:
          image = f.read()
          image = sess.run(preprocessor, {image_placeholder: image})
          print('raw image shape is', image.shape)
    
      captions = generator.beam_search(sess, image)
      print("Captions for image %s:" % os.path.basename(filename))
      for i, caption in enumerate(captions):
        # Ignore begin and end words.
        sentence = [vocab.id_to_word(w) for w in caption.sentence[1:-1]]
        sentence = " ".join(sentence)
        print("  %d) %s (p=%f)" % (i, sentence, math.exp(caption.logprob)))
        # Also show the raw id sequence and its probability as re-scored by
        # the model (not by the beam-search generator).
        print(caption.sentence)
        # print(generator.new_caption_prob(sess, caption.sentence, image))
        print(model.new_caption_prob(sess, caption.sentence, image))
      
      # Score a hand-built sentence: wrap its word ids in start/end markers
      # and ask the model for its probability given this image.
      new_sentence = "kite"
      new_sentence = new_sentence.split()
      print("My new sentence:", new_sentence)
      new_caption = [vocab.start_id]+[vocab.word_to_id(w) for w in new_sentence] + [vocab.end_id]
      print("My new id:", new_caption)
      print(model.new_caption_prob(sess, new_caption, image))
开发者ID:IBM,项目名称:Image-Captioning-Attack,代码行数:63,代码来源:run_inference.py


注:本文中的im2txt.inference_utils.vocabulary.Vocabulary方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。