

Python predictor.from_saved_model Method Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.predictor.from_saved_model. If you are wondering how exactly to use predictor.from_saved_model, or what it is good for, the hand-picked examples below should help. You can also explore further usage examples from its containing module, tensorflow.contrib.predictor.


The following presents 15 code examples of the predictor.from_saved_model method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
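Before diving into the examples, here is a minimal, self-contained sketch of the workflow they all share: train an Estimator, export it as a SavedModel, then wrap the export with predictor.from_saved_model to get a fast, in-process predict function. Everything below (the toy model, the feature name 'x', the export path) is an illustrative placeholder rather than code from any of the examples.

import numpy as np
import tensorflow as tf
from tensorflow.contrib import predictor

def model_fn(features, labels, mode):
    # A trivial linear model, just enough to produce an exportable signature.
    logits = tf.layers.dense(features['x'], 1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions={'logit': logits})
    loss = tf.losses.mean_squared_error(labels, logits)
    train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
        loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

def serving_input_receiver_fn():
    # The placeholders declared here define the exported signature's inputs.
    features = {'x': tf.placeholder(tf.float32, [None, 1], name='x')}
    return tf.estimator.export.ServingInputReceiver(features, features)

estimator = tf.estimator.Estimator(model_fn)
estimator.train(tf.estimator.inputs.numpy_input_fn(
    {'x': np.random.rand(8, 1).astype(np.float32)},
    np.random.rand(8, 1).astype(np.float32), shuffle=True), steps=1)

# export_saved_model writes a fresh timestamp-named subdirectory and returns its path.
export_dir = estimator.export_saved_model('/tmp/export', serving_input_receiver_fn)
predict_fn = predictor.from_saved_model(export_dir)
print(predict_fn({'x': [[1.0]]})['logit'])  # dict in, dict of numpy arrays out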

Example 1: to_predictor

# Required import: from tensorflow.contrib import predictor [as alias]
# Or: from tensorflow.contrib.predictor import from_saved_model [as alias]
def to_predictor(estimator, directory=DEFAULT_EXPORT_DIRECTORY):
    """ Exports given estimator as predictor into the given directory
    and returns associated tf.predictor instance.

    :param estimator: Estimator to export.
    :param directory: (Optional) path to write exported model into.
    """

    input_provider = InputProviderFactory.get(estimator.params)
    def receiver():
        features = input_provider.get_input_dict_placeholders()
        return tf.estimator.export.ServingInputReceiver(features, features)

    estimator.export_saved_model(directory, receiver)
    versions = [
        model for model in Path(directory).iterdir()
        if model.is_dir() and 'temp' not in str(model)]
    latest = str(sorted(versions)[-1])
    return predictor.from_saved_model(latest) 
Developer: deezer, Project: spleeter, Lines: 21, Source: estimator.py
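Examples 1, 6, 9, and 10 all repeat the same idiom for locating the newest export: export_saved_model first writes into a "temp-" directory and, once complete, renames it to a Unix-timestamp name, so filtering out anything containing 'temp' and taking the lexically largest remaining subdirectory yields the most recent finished export. A standalone sketch of that idiom (the helper name is ours, not from any of these projects):

from pathlib import Path

from tensorflow.contrib import predictor

def load_latest_export(export_dir_base):
    """Loads the newest SavedModel below an Estimator export directory."""
    versions = [p for p in Path(export_dir_base).iterdir()
                if p.is_dir() and 'temp' not in p.name]
    # Ten-digit Unix timestamps sort lexically in chronological order.
    return predictor.from_saved_model(str(sorted(versions)[-1]))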

Example 2: _annotate_long_answer

# Required import: from tensorflow.contrib import predictor [as alias]
# Or: from tensorflow.contrib.predictor import from_saved_model [as alias]
def _annotate_long_answer(predict_fn, question, contexts):
  """Applies the model to the (question, contexts) and returns long answer.

  Args:
    predict_fn: Predictor from tf.contrib.predictor.from_saved_model.
    question: string.
    contexts: List of strings.

  Returns:
    long_answer_idx: integer.
    long_answer_score: float.
  """
  # The inputs are not tokenized here because there are multiple contexts.
  inputs = {"question": question, "context": contexts}

  outputs = predict_fn(inputs)
  long_answer_idx = outputs["idx"]
  long_answer_score = outputs["score"]

  return long_answer_idx, float(long_answer_score) 
Developer: google-research, Project: language, Lines: 22, Source: nq_export_scorer.py

Example 3: _annotate_short_answer

# Required import: from tensorflow.contrib import predictor [as alias]
# Or: from tensorflow.contrib.predictor import from_saved_model [as alias]
def _annotate_short_answer(predict_fn, question_tokens, context_tokens):
  """Applies the model to the (question, contexts) and returns long answer.

  Args:
    predict_fn: Predictor from tf.contrib.predictor.from_saved_model.
    question_tokens: List of strings.
    context_tokens: List of strings.

  Returns:
    start_idx: integer.
    end_idx: integer.
    short_answer_score: float.
  """
  # The inputs are tokenized unlike in the long answer case, since the goal
  # is to pick out a particular span in a single context.
  inputs = {"question": question_tokens, "context": context_tokens}
  outputs = predict_fn(inputs)
  start_idx = outputs["start_idx"]
  end_idx = outputs["end_idx"]
  short_answer_score = outputs["score"]

  return start_idx, end_idx, float(short_answer_score) 
Developer: google-research, Project: language, Lines: 23, Source: nq_export_scorer.py

Example 4: __init__

# Required import: from tensorflow.contrib import predictor [as alias]
# Or: from tensorflow.contrib.predictor import from_saved_model [as alias]
def __init__(self, path=DEFAULT_MODEL_PATH):
        logger.info('Loading model from: {}...'.format(path))

        # Parameters for inference (need to be the same values the model was trained with)
        self.max_seq_length = 512
        self.doc_stride = 128
        self.max_query_length = 64
        self.max_answer_length = 30

        # Initialize the tokenizer
        self.tokenizer = FullTokenizer(
            vocab_file='assets/vocab.txt', do_lower_case=True)

        self.predict_fn = predictor.from_saved_model(path)

        logger.info('Loaded model') 
Developer: IBM, Project: MAX-Question-Answering, Lines: 18, Source: model.py

Example 5: load_model

# Required import: from tensorflow.contrib import predictor [as alias]
# Or: from tensorflow.contrib.predictor import from_saved_model [as alias]
def load_model(self):
		self.graph = tf.Graph()
		with self.graph.as_default():
			self.predict_fn = predictor.from_saved_model(self.config['model']) 
Developer: yyht, Project: BERT, Lines: 6, Source: example.py

Example 6: __init__

# Required import: from tensorflow.contrib import predictor [as alias]
# Or: from tensorflow.contrib.predictor import from_saved_model [as alias]
def __init__(self, conf, **kwargs):
        self.conf = conf
        for attr in conf:
            setattr(self, attr, conf[attr])
        self.zdy = {}
        #init embedding
        self.init_embedding()
        #load train data
        csv = pd.read_csv(self.ori_path, header=0, sep="\t", error_bad_lines=False)
        if 'text' in csv.keys() and 'target' in csv.keys():
            #format: text \t target
            #for this format, each class should contain more than 2 samples
            self.text_list = list(csv['text'])
            self.label_list = list(csv['target'])
        elif 'text_a' in csv.keys() and 'text_b' in csv.keys() and 'target' in csv.keys():
            #format: text_a \t text_b \t target
            #for this format, the target value can only be 0 or 1
            self.text_a_list = list(csv['text_a'])
            self.text_b_list = list(csv['text_b'])
            self.text_list = self.text_a_list + self.text_b_list
            self.label_list = list(csv['target'])

        subdirs = [os.path.join(self.export_dir_path, x) for x in os.listdir(self.export_dir_path)
                   if 'temp' not in x]

        latest = str(sorted(subdirs)[-1])
        self.predict_fn = predictor.from_saved_model(latest) 
Developer: zhufz, Project: nlp_research, Lines: 29, Source: test.py

Example 7: load

# Required import: from tensorflow.contrib import predictor [as alias]
# Or: from tensorflow.contrib.predictor import from_saved_model [as alias]
def load(cls,
             meta,
             model_dir=None,  # type: Text
             model_metadata=None,  # type: Metadata
             cached_component=None,  # type: Optional[Component]
             **kwargs  # type: **Any
             ):
        # type: (...) -> EmbeddingBertIntentEstimatorClassifier

        config_proto = cls.get_config_proto(meta)

        print("bert model loaded")

        if model_dir and meta.get("file"):
            file_name = meta.get("file")
            # use tensorflow.contrib.predictor to load the exported model file;
            # keeping one loaded graph and session may give a ~10x speedup at prediction time
            predict = Pred.from_saved_model(export_dir=os.path.join(model_dir, file_name), config=config_proto)

            with io.open(os.path.join(
                    model_dir,
                    file_name + "_inv_intent_dict.pkl"), 'rb') as f:
                inv_intent_dict = pickle.load(f)
            with io.open(os.path.join(
                    model_dir,
                    file_name + "_encoded_all_intents.pkl"), 'rb') as f:
                encoded_all_intents = pickle.load(f)

            return EmbeddingBertIntentEstimatorClassifier(
                    component_config=meta,
                    inv_intent_dict=inv_intent_dict,
                    encoded_all_intents=encoded_all_intents,
                    predictor=predict
            )

        else:
            logger.warning("Failed to load nlu model. Maybe path {} "
                           "doesn't exist"
                           "".format(os.path.abspath(model_dir)))
            return EmbeddingBertIntentEstimatorClassifier(component_config=meta) 
Developer: GaoQ1, Project: rasa_nlu_gq, Lines: 41, Source: embedding_bert_intent_estimator_classifier.py

Example 8: test_create_serving_input_receiver_numpy

# Required import: from tensorflow.contrib import predictor [as alias]
# Or: from tensorflow.contrib.predictor import from_saved_model [as alias]
def test_create_serving_input_receiver_numpy(self):
    (model_dir, mock_t2r_model,
     prediction_ref) = self._train_and_eval_reference_model('numpy')
    exporter = default_export_generator.DefaultExportGenerator()
    exporter.set_specification_from_model(mock_t2r_model)

    # Export trained serving estimator.
    estimator_exporter = tf.estimator.Estimator(
        model_fn=mock_t2r_model.model_fn,
        config=tf.estimator.RunConfig(model_dir=model_dir))

    serving_input_receiver_fn = (
        exporter.create_serving_input_receiver_numpy_fn())
    exported_savedmodel_path = estimator_exporter.export_saved_model(
        export_dir_base=model_dir,
        serving_input_receiver_fn=serving_input_receiver_fn,
        checkpoint_path=tf.train.latest_checkpoint(model_dir))

    # Load trained and exported serving estimator, run prediction and assert
    # it is the same as before exporting.
    feed_predictor_fn = contrib_predictor.from_saved_model(
        exported_savedmodel_path)
    mock_input_generator = mocks.MockInputGenerator(batch_size=BATCH_SIZE)
    features, labels = mock_input_generator.create_numpy_data()
    for pos, value in enumerate(prediction_ref):
      actual = feed_predictor_fn({'x': features[pos, :].reshape(
          1, -1)})['logit'].flatten()
      predicted = value['logit'].flatten()
      np.testing.assert_almost_equal(
          actual=actual, desired=predicted, decimal=4)
      if labels[pos] > 0:
        self.assertGreater(predicted[0], 0)
      else:
        self.assertLess(predicted[0], 0) 
Developer: google-research, Project: tensor2robot, Lines: 36, Source: default_export_generator_test.py

Example 9: __init__

# Required import: from tensorflow.contrib import predictor [as alias]
# Or: from tensorflow.contrib.predictor import from_saved_model [as alias]
def __init__(self, pb_path):
        subdirs = [x for x in Path(pb_path).iterdir()
                   if x.is_dir() and 'temp' not in str(x)]
        latest = str(sorted(subdirs)[-1])

        self.predict_fn = predictor.from_saved_model(latest)
        self.vocab_idx, self.idx_vocab = vocab_idx, idx_vocab 
Developer: KnightZhang625, Project: BERT_TF, Lines: 9, Source: run_predict.py

Example 10: __init__

# Required import: from tensorflow.contrib import predictor [as alias]
# Or: from tensorflow.contrib.predictor import from_saved_model [as alias]
def __init__(self, pb_path):
        subdirs = [x for x in Path(pb_path).iterdir()
                    if x.is_dir() and 'temp' not in str(x)]
        latest_model = str(sorted(subdirs)[-1])

        self.predict_fn = predictor.from_saved_model(latest_model) 
Developer: KnightZhang625, Project: BERT_TF, Lines: 8, Source: lm_predict.py

Example 11: instance_predict_fn

# Required import: from tensorflow.contrib import predictor [as alias]
# Or: from tensorflow.contrib.predictor import from_saved_model [as alias]
def instance_predict_fn(self):
        return predictor.from_saved_model(self.model_path) 
Developer: howl-anderson, Project: seq2annotation, Lines: 4, Source: tensorflow_inference.py

Example 12: load_predict_fn

# Required import: from tensorflow.contrib import predictor [as alias]
# Or: from tensorflow.contrib.predictor import from_saved_model [as alias]
def load_predict_fn(export_dir):
    global predict_fn
    predict_fn = predictor.from_saved_model(export_dir)

    return predict_fn 
Developer: howl-anderson, Project: seq2annotation, Lines: 7, Source: lookup_http.py

Example 13: __init__

# Required import: from tensorflow.contrib import predictor [as alias]
# Or: from tensorflow.contrib.predictor import from_saved_model [as alias]
def __init__(self, model_path):
        # load model
        self.model_dir = model_path
        self.predict_fn = predictor.from_saved_model(model_path) 
Developer: howl-anderson, Project: seq2annotation, Lines: 6, Source: tensorflow_inference.py

Example 14: _initialize_upon_import

# Required import: from tensorflow.contrib import predictor [as alias]
# Or: from tensorflow.contrib.predictor import from_saved_model [as alias]
def _initialize_upon_import():
    """Initialize / Restore Model Object."""
    saved_model_path = './pipeline_tfserving/0'
    return predictor.from_saved_model(saved_model_path)


# This is called unconditionally at *module import time*... 
Developer: PipelineAI, Project: models, Lines: 9, Source: pipeline_invoke_python.py

Example 15: _initialize_upon_import

# Required import: from tensorflow.contrib import predictor [as alias]
# Or: from tensorflow.contrib.predictor import from_saved_model [as alias]
def _initialize_upon_import():

    try:

        saved_model_path = './pipeline_tfserving/0'
        return predictor.from_saved_model(saved_model_path)

    except Exception:
        _logger.error('pipeline_invoke_python._initialize_upon_import.Exception:', exc_info=True)

    return None 
Developer: PipelineAI, Project: models, Lines: 13, Source: pipeline_invoke_python_normal_transformer.py
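Compared with Example 14, this variant wraps the import-time load in try/except, so a missing or unreadable SavedModel is logged and leaves predict_fn as None rather than raising during import and taking the whole serving process down; callers are then responsible for handling the None case.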


Note: The tensorflow.contrib.predictor.from_saved_model examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code belongs to the original authors, and distribution and use are governed by each project's License. Do not reproduce without permission.