

Python estimator.Estimator Code Examples

This article collects typical usage examples of tensorflow.python.estimator.estimator.Estimator in Python. If you are unsure what estimator.Estimator does, how to use it, or want to see it in real code, the curated examples below should help. You can also explore other usage examples from the tensorflow.python.estimator.estimator module it belongs to.


The following presents 15 code examples of estimator.Estimator, ordered by popularity by default.
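
For orientation, here is a minimal sketch of the Estimator workflow that the examples below build on: construct an Estimator with a model_fn, then call train and evaluate with an input_fn. The toy model, data, and model_dir are hypothetical placeholders, assume a TensorFlow 1.x environment, and are not taken from any of the projects cited below.

import numpy as np
import tensorflow as tf
from tensorflow.python.estimator.estimator import Estimator


def model_fn(features, labels, mode, params):
    # Toy linear regression; the model and loss are purely illustrative.
    logits = tf.layers.dense(features['x'], 1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions={'logits': logits})
    loss = tf.losses.mean_squared_error(labels, logits)
    if mode == tf.estimator.ModeKeys.TRAIN:
        train_op = tf.train.GradientDescentOptimizer(0.01).minimize(
            loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss)


def input_fn():
    # Hypothetical data: 32 random feature rows, with the row sums as targets.
    x = np.random.rand(32, 4).astype(np.float32)
    y = np.sum(x, axis=1, keepdims=True)
    return {'x': tf.constant(x)}, tf.constant(y)


estimator = Estimator(model_fn=model_fn, model_dir='/tmp/estimator_demo')
estimator.train(input_fn=input_fn, steps=10)
print(estimator.evaluate(input_fn=input_fn, steps=1))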

Example 1: _evaluate_estimator

# Required import: from tensorflow.python.estimator import estimator [as alias]
# Or: from tensorflow.python.estimator.estimator import Estimator [as alias]
def _evaluate_estimator(self):
    if isinstance(self._estimator, core_estimator.Estimator):
      if any((x is not None for x in
              [self.x, self.y, self.batch_size, self.metrics])):
        raise ValueError(
            "tf.estimator.Estimator does not support following "
            "arguments: x, y, batch_size, metrics. Should set as `None` "
            "in ValidationMonitor")
      return self._estimator.evaluate(
          input_fn=self.input_fn, steps=self.eval_steps, hooks=self.hooks,
          name=self.name)
    else:
      return self._estimator.evaluate(
          x=self.x, y=self.y, input_fn=self.input_fn,
          batch_size=self.batch_size, steps=self.eval_steps,
          metrics=self.metrics, hooks=self.hooks, name=self.name) 
Developer: ryfeus | Project: lambda-packs | Lines: 18 | Source: monitors.py
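
For context, a hedged sketch of constructing the monitor that calls the method above; it assumes the TF 1.x tf.contrib.learn.monitors.ValidationMonitor signature, and the eval data is a hypothetical placeholder. With a core tf.estimator.Estimator, only the input_fn branch above is valid, so x, y, batch_size, and metrics must stay None.

import numpy as np
import tensorflow as tf


def eval_input_fn():
    # Hypothetical toy evaluation data.
    x = np.random.rand(16, 4).astype(np.float32)
    y = np.sum(x, axis=1, keepdims=True)
    return {'x': tf.constant(x)}, tf.constant(y)


# Only input_fn-based evaluation is allowed with a core Estimator; passing the
# numpy-style x/y/batch_size/metrics arguments would trigger the ValueError above.
validation_monitor = tf.contrib.learn.monitors.ValidationMonitor(
    input_fn=eval_input_fn, eval_steps=1, every_n_steps=500)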

Example 2: end

# Required import: from tensorflow.python.estimator import estimator [as alias]
# Or: from tensorflow.python.estimator.estimator import Estimator [as alias]
def end(self, session=None):
    super(ExportMonitor, self).end(session=session)
    latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir)
    if latest_path is None:
      logging.info("Skipping export at the end since model has not been saved "
                   "yet.")
      return
    if isinstance(self._estimator, core_estimator.Estimator):
      raise ValueError(
          "ExportMonitor does not support `tf.estimator.Estimator. `. "
          "Please pass an ExportStrategy to Experiment instead.")
    try:
      self._last_export_dir = self._estimator.export(
          self.export_dir,
          exports_to_keep=self.exports_to_keep,
          signature_fn=self.signature_fn,
          input_fn=self._input_fn,
          default_batch_size=self._default_batch_size,
          input_feature_key=self._input_feature_key,
          use_deprecated_input_fn=self._use_deprecated_input_fn)
    except RuntimeError:
      logging.info("Skipping exporting for the same step.") 
Developer: ryfeus | Project: lambda-packs | Lines: 24 | Source: monitors.py

Example 3: _maybe_export

# Required import: from tensorflow.python.estimator import estimator [as alias]
# Or: from tensorflow.python.estimator.estimator import Estimator [as alias]
def _maybe_export(self, eval_result, checkpoint_path=None):
    """Export the Estimator using export_fn, if defined."""
    export_dir_base = os.path.join(
        compat.as_bytes(self._estimator.model_dir),
        compat.as_bytes("export"))

    export_results = []
    for strategy in self._export_strategies:
      export_results.append(
          strategy.export(
              self._estimator,
              os.path.join(
                  compat.as_bytes(export_dir_base),
                  compat.as_bytes(strategy.name)),
              checkpoint_path=checkpoint_path,
              eval_result=eval_result))

    return export_results 
Developer: ryfeus | Project: lambda-packs | Lines: 20 | Source: experiment.py
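
The loop above only requires each strategy to expose a name attribute and an export(estimator, export_path, checkpoint_path, eval_result) call. A minimal, hypothetical stand-in that satisfies this interface by delegating to the core export_savedmodel might look like the sketch below (in practice the Experiment is given tf.contrib.learn ExportStrategy objects; this class is an illustration only).

class SimpleExportStrategy(object):
    """Hypothetical minimal strategy matching the interface used by _maybe_export."""

    def __init__(self, name, serving_input_receiver_fn):
        self.name = name  # becomes the subdirectory under <model_dir>/export/
        self._serving_input_receiver_fn = serving_input_receiver_fn

    def export(self, estimator, export_path, checkpoint_path=None, eval_result=None):
        # eval_result is ignored here; a real strategy could use it to export
        # only when the evaluation metrics improve.
        return estimator.export_savedmodel(
            export_path, self._serving_input_receiver_fn,
            checkpoint_path=checkpoint_path)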

Example 4: test

# Required import: from tensorflow.python.estimator import estimator [as alias]
# Or: from tensorflow.python.estimator.estimator import Estimator [as alias]
def test(self):
    """Tests training, evaluating and exporting the estimator for a single step.

    Returns:
      The result of the `evaluate` call to the `Estimator`.
    """
    self._call_train(input_fn=self._train_input_fn,
                     steps=1,
                     hooks=self._train_monitors)

    eval_result = self._call_evaluate(input_fn=self._eval_input_fn,
                                      steps=1,
                                      metrics=self._eval_metrics,
                                      name="one_pass")
    _ = self._maybe_export(eval_result)

    return eval_result 
Developer: ryfeus | Project: lambda-packs | Lines: 19 | Source: experiment.py
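
This smoke test is normally reached through an Experiment; below is a hedged sketch of that wiring, assuming the TF 1.x tf.contrib.learn.Experiment constructor. my_estimator, train_input_fn, and eval_input_fn are hypothetical placeholders (the orientation sketch after the introduction shows one way to define them).

import tensorflow as tf

experiment = tf.contrib.learn.Experiment(
    estimator=my_estimator,          # hypothetical estimator
    train_input_fn=train_input_fn,   # hypothetical training input_fn
    eval_input_fn=eval_input_fn)     # hypothetical evaluation input_fn

# Trains for one step, evaluates for one step, and runs any export strategies.
eval_result = experiment.test()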

Example 5: _call_train

# Required import: from tensorflow.python.estimator import estimator [as alias]
# Or: from tensorflow.python.estimator.estimator import Estimator [as alias]
def _call_train(self, _sentinel=None,  # pylint: disable=invalid-name,
                  input_fn=None, steps=None, hooks=None, max_steps=None):
    if _sentinel is not None:
      raise ValueError("_call_train should be called with keyword args only")

    # Estimator in core cannot work with monitors. We need to convert them
    # to hooks. For Estimator in contrib, it is converted internally. So, it is
    # safe to convert for both cases.
    hooks = monitors.replace_monitors_with_hooks(hooks, self._estimator)
    if self._core_estimator_used:
      return self._estimator.train(input_fn=input_fn,
                                   steps=steps,
                                   max_steps=max_steps,
                                   hooks=hooks)
    else:
      return self._estimator.fit(input_fn=input_fn,
                                 steps=steps,
                                 max_steps=max_steps,
                                 monitors=hooks) 
Developer: ryfeus | Project: lambda-packs | Lines: 21 | Source: experiment.py

Example 6: _call_evaluate

# Required import: from tensorflow.python.estimator import estimator [as alias]
# Or: from tensorflow.python.estimator.estimator import Estimator [as alias]
def _call_evaluate(self, _sentinel=None,  # pylint: disable=invalid-name,
                     input_fn=None, steps=None, metrics=None, name=None,
                     checkpoint_path=None, hooks=None):
    if _sentinel is not None:
      raise ValueError("_call_evaluate should be called with keyword args only")

    if self._core_estimator_used:
      if metrics is not None:
        raise ValueError(
            "`eval_metrics` must be `None` with `tf.estimator.Estimator`")
      return self._estimator.evaluate(input_fn=input_fn,
                                      steps=steps,
                                      name=name,
                                      checkpoint_path=checkpoint_path,
                                      hooks=hooks)
    else:
      return self._estimator.evaluate(input_fn=input_fn,
                                      steps=steps,
                                      metrics=metrics,
                                      name=name,
                                      checkpoint_path=checkpoint_path,
                                      hooks=hooks) 
Developer: ryfeus | Project: lambda-packs | Lines: 24 | Source: experiment.py

Example 7: __init__

# Required import: from tensorflow.python.estimator import estimator [as alias]
# Or: from tensorflow.python.estimator.estimator import Estimator [as alias]
def __init__(self, id, args, worker_address, sink_address):
        super().__init__()
        self.model_dir = args.model_dir
        self.config_fp = os.path.join(self.model_dir, 'bert_config.json')
        self.checkpoint_fp = os.path.join(self.model_dir, 'bert_model.ckpt')
        self.vocab_fp = os.path.join(args.model_dir, 'vocab.txt')
        self.tokenizer = tokenization.FullTokenizer(vocab_file=self.vocab_fp)
        self.max_seq_len = args.max_seq_len
        self.worker_id = id
        self.daemon = True
        self.model_fn = model_fn_builder(
            bert_config=modeling.BertConfig.from_json_file(self.config_fp),
            init_checkpoint=self.checkpoint_fp,
            pooling_strategy=args.pooling_strategy,
            pooling_layer=args.pooling_layer
        )
        os.environ['CUDA_VISIBLE_DEVICES'] = str(self.worker_id)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = args.gpu_memory_fraction
        self.estimator = Estimator(self.model_fn, config=RunConfig(session_config=config))
        self.exit_flag = multiprocessing.Event()
        self.logger = set_logger('WORKER-%d' % self.worker_id)
        self.worker_address = worker_address
        self.sink_address = sink_address 
Developer: a414351664 | Project: Bert-TextClassification | Lines: 27 | Source: server.py

Example 8: _increase_eval_step_op

# Required import: from tensorflow.python.estimator import estimator [as alias]
# Or: from tensorflow.python.estimator.estimator import Estimator [as alias]
def _increase_eval_step_op(iterations_per_loop):
  """Returns an op to increase the eval step for TPU evaluation.

  Args:
    iterations_per_loop: Tensor. The number of eval steps running in TPU system
      before returning to CPU host for each `Session.run`.

  Returns:
    An operation
  """
  eval_step = evaluation._get_or_create_eval_step()  # pylint: disable=protected-access
  # Estimator.evaluate increments the eval step by 1 by default, so we add only the difference.
  return state_ops.assign_add(
      eval_step,
      math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
      use_locking=True) 
Developer: ymcui | Project: Chinese-XLNet | Lines: 18 | Source: tpu_estimator.py
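
A small, self-contained illustration of the arithmetic: Estimator.evaluate already increments the eval step by 1 per loop, so after a TPU loop of iterations_per_loop steps the counter only needs the remaining iterations_per_loop - 1. The variable below is a demo stand-in, not the real eval-step variable created by evaluation._get_or_create_eval_step().

import tensorflow as tf

iterations_per_loop = 8
eval_step = tf.get_variable(
    'eval_step_demo', shape=[], dtype=tf.int64,
    initializer=tf.zeros_initializer(), trainable=False)

# Same pattern as the assign_add above: add the difference, not the full count.
catch_up = tf.assign_add(
    eval_step, tf.cast(iterations_per_loop - 1, tf.int64), use_locking=True)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(catch_up))  # 7 == iterations_per_loop - 1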

Example 9: __init__

# Required import: from tensorflow.python.estimator import estimator [as alias]
# Or: from tensorflow.python.estimator.estimator import Estimator [as alias]
def __init__(self, input_fn, batch_axis, ctx):
    """Constructor.

    Args:
      input_fn: input fn for train or eval.
      batch_axis: A python tuple of int values describing how each tensor
        produced by the Estimator `input_fn` should be split across the TPU
        compute shards.
      ctx: A `_InternalTPUContext` instance with mode.

    Raises:
      ValueError: If both `sharded_features` and `num_cores` are `None`.
    """
    self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder(
        ctx.input_partition_dims)

    self._sharded_per_core = ctx.is_input_sharded_per_core()
    self._input_fn = input_fn
    self._infeed_queue = None
    self._ctx = ctx
    self._batch_axis = batch_axis 
Developer: ymcui | Project: Chinese-XLNet | Lines: 23 | Source: tpu_estimator.py

Example 10: _convert_train_steps_to_hooks

# Required import: from tensorflow.python.estimator import estimator [as alias]
# Or: from tensorflow.python.estimator.estimator import Estimator [as alias]
def _convert_train_steps_to_hooks(self, steps, max_steps):
    with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx:
      if ctx.is_running_on_cpu():
        return super(TPUEstimator, self)._convert_train_steps_to_hooks(
            steps, max_steps)

    # On TPU.
    if steps is None and max_steps is None:
      raise ValueError(
          'For TPU training, one of `steps` or `max_steps` must be set. '
          'Cannot be both `None`.')

    # Estimator.train has explicit positiveness check.
    if steps is not None:
      util_lib.check_positive_integer(steps, 'Train steps')
    if max_steps is not None:
      util_lib.check_positive_integer(max_steps, 'Train max_steps')

    return [
        _TPUStopAtStepHook(self._iterations_per_training_loop, steps, max_steps)
    ] 
Developer: ymcui | Project: Chinese-XLNet | Lines: 23 | Source: tpu_estimator.py

Example 11: _increase_eval_step_op

# Required import: from tensorflow.python.estimator import estimator [as alias]
# Or: from tensorflow.python.estimator.estimator import Estimator [as alias]
def _increase_eval_step_op(iterations_per_loop):
  """Returns an op to increase the eval step for TPU evaluation.

  Args:
    iterations_per_loop: Tensor. The number of eval steps running in TPU
        system before returning to CPU host for each `Session.run`.

  Returns:
    An operation
  """
  eval_step = evaluation._get_or_create_eval_step()  # pylint: disable=protected-access
  # Estimator.evaluate increments the eval step by 1 by default, so we add only the difference.
  return state_ops.assign_add(
      eval_step,
      math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
      use_locking=True) 
Developer: kimiyoung | Project: transformer-xl | Lines: 18 | Source: tpu_estimator.py

Example 12: every_n_step_end

# Required import: from tensorflow.python.estimator import estimator [as alias]
# Or: from tensorflow.python.estimator.estimator import Estimator [as alias]
def every_n_step_end(self, step, outputs):
    super(ExportMonitor, self).every_n_step_end(step, outputs)
    try:
      if isinstance(self._estimator, core_estimator.Estimator):
        raise ValueError(
            "ExportMonitor does not support `tf.estimator.Estimator. `. "
            "Please pass an ExportStrategy to Experiment instead.")
      self._last_export_dir = self._estimator.export(
          self.export_dir,
          exports_to_keep=self.exports_to_keep,
          signature_fn=self.signature_fn,
          input_fn=self._input_fn,
          default_batch_size=self._default_batch_size,
          input_feature_key=self._input_feature_key,
          use_deprecated_input_fn=self._use_deprecated_input_fn)
    except RuntimeError:
      # Currently we are not synchronized with saving checkpoints, which leads to
      # runtime errors when we are calling export on the same global step.
      # Exports depend on saved checkpoints for constructing the graph and
      # getting the global step from the graph instance saved in the checkpoint.
      # If the checkpoint is stale with respect to current step, the global step
      # is taken to be the last saved checkpoint's global step and exporter
      # doesn't export the same checkpoint again with the following error.
      logging.info("Skipping exporting because the existing checkpoint has "
                   "already been exported. "
                   "Consider exporting less frequently.") 
Developer: ryfeus | Project: lambda-packs | Lines: 28 | Source: monitors.py

Example 13: replace_monitors_with_hooks

# Required import: from tensorflow.python.estimator import estimator [as alias]
# Or: from tensorflow.python.estimator.estimator import Estimator [as alias]
def replace_monitors_with_hooks(monitors_or_hooks, estimator):
  """Wraps monitors with a hook.

  `Monitor` is deprecated in favor of `SessionRunHook`. If you're using a
  monitor, you can wrap it with a hook using this function. It is recommended
  to implement a hook version of your monitor.

  Args:
    monitors_or_hooks: A `list` that may contain both monitors and hooks.
    estimator: An `Estimator` that the monitors will be used with.

  Returns:
    Returns a list of hooks. If there is any monitor in the given list, it is
    replaced by a hook.
  """
  monitors_or_hooks = monitors_or_hooks or []
  hooks = [
      m for m in monitors_or_hooks
      if isinstance(m, session_run_hook.SessionRunHook)
  ]

  deprecated_monitors = [
      m for m in monitors_or_hooks
      if not isinstance(m, session_run_hook.SessionRunHook)
  ]

  if not estimator.config.is_chief:
    # Prune list of monitor to the ones runnable on all workers.
    deprecated_monitors = [
        m for m in deprecated_monitors if m.run_on_all_workers
    ]

  # Setup monitors.
  for monitor in deprecated_monitors:
    monitor.set_estimator(estimator)

  if deprecated_monitors:
    hooks.append(RunHookAdapterForMonitors(deprecated_monitors))

  return hooks 
Developer: ryfeus | Project: lambda-packs | Lines: 42 | Source: monitors.py
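
A hedged usage sketch of this helper with a mixed list; my_estimator and eval_input_fn are hypothetical placeholders, and the hook/monitor choices assume the TF 1.x core and contrib APIs.

import tensorflow as tf
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib

stop_hook = tf.train.StopAtStepHook(last_step=1000)          # a SessionRunHook
validation_monitor = tf.contrib.learn.monitors.ValidationMonitor(
    input_fn=eval_input_fn, eval_steps=1, every_n_steps=500)  # a deprecated Monitor

hooks = monitors_lib.replace_monitors_with_hooks(
    [stop_hook, validation_monitor], my_estimator)
# stop_hook passes through unchanged; the monitor is wrapped in a
# RunHookAdapterForMonitors, so `hooks` can be passed to Estimator.train().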

Example 14: get_estimator

# Required import: from tensorflow.python.estimator import estimator [as alias]
# Or: from tensorflow.python.estimator.estimator import Estimator [as alias]
def get_estimator(self, tf):
        from tensorflow.python.estimator.estimator import Estimator
        from tensorflow.python.estimator.run_config import RunConfig
        from tensorflow.python.estimator.model_fn import EstimatorSpec

        def model_fn(features, labels, mode, params):
            with tf.gfile.GFile(self.graph_path, 'rb') as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())

            input_names = ['input_ids', 'input_mask', 'input_type_ids']

            output = tf.import_graph_def(graph_def,
                                         input_map={k + ':0': features[k] for k in input_names},
                                         return_elements=['final_encodes:0'])

            return EstimatorSpec(mode=mode, predictions={
                'client_id': features['client_id'],
                'encodes': output[0]
            })

        config = tf.ConfigProto(device_count={'GPU': 0 if self.device_id < 0 else 1})
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = self.gpu_memory_fraction
        config.log_device_placement = False
        # session-wise XLA doesn't seem to work on tf 1.10
        # if args.xla:
        #     config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1

        return Estimator(model_fn=model_fn, config=RunConfig(session_config=config)) 
Developer: hanxiao | Project: bert-as-service | Lines: 32 | Source: __init__.py
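
A hedged sketch of driving the estimator returned above; the request data, sequence length, and input_fn are hypothetical and do not reproduce bert-as-service's actual ZeroMQ serving loop.

import numpy as np
import tensorflow as tf


def single_request_input_fn():
    # Hypothetical pre-tokenized request with a made-up sequence length of 25.
    features = {
        'client_id': tf.constant(['client-0']),
        'input_ids': tf.zeros([1, 25], dtype=tf.int32),
        'input_mask': tf.zeros([1, 25], dtype=tf.int32),
        'input_type_ids': tf.zeros([1, 25], dtype=tf.int32),
    }
    return tf.data.Dataset.from_tensors(features)


# `worker` is a hypothetical instance of the class that defines get_estimator above.
estimator = worker.get_estimator(tf)
for prediction in estimator.predict(input_fn=single_request_input_fn):
    print(prediction['client_id'], prediction['encodes'].shape)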

Example 15: export_estimator_savedmodel

# Required import: from tensorflow.python.estimator import estimator [as alias]
# Or: from tensorflow.python.estimator.estimator import Estimator [as alias]
def export_estimator_savedmodel(estimator,
                                export_dir_base,
                                serving_input_receiver_fn,
                                assets_extra=None,
                                as_text=False,
                                checkpoint_path=None,
                                strip_default_attrs=False):
  """Export `Estimator` trained model for TPU inference.

  Args:
    estimator: `Estimator` with which model has been trained.
    export_dir_base: A string containing a directory in which to create
      timestamped subdirectories containing exported SavedModels.
    serving_input_receiver_fn: A function that takes no argument and returns a
      `ServingInputReceiver` or `TensorServingInputReceiver`.
    assets_extra: A dict specifying how to populate the assets.extra directory
      within the exported SavedModel, or `None` if no extra assets are needed.
    as_text: whether to write the SavedModel proto in text format.
    checkpoint_path: The checkpoint path to export.  If `None` (the default),
      the most recent checkpoint found within the model directory is chosen.
    strip_default_attrs: Boolean. If `True`, default-valued attributes will be
      removed from the NodeDefs.

  Returns:
    The string path to the exported directory.
  """
  # `TPUEstimator` requires `tpu_config.RunConfig`, so we cannot use
  # `estimator.config`.
  config = tpu_config.RunConfig(model_dir=estimator.model_dir)
  est = TPUEstimator(
      estimator._model_fn,  # pylint: disable=protected-access
      config=config,
      params=estimator.params,
      use_tpu=True,
      train_batch_size=2048,  # Does not matter.
      eval_batch_size=2048,  # Does not matter.
  )
  return est.export_savedmodel(export_dir_base, serving_input_receiver_fn,
                               assets_extra, as_text, checkpoint_path,
                               strip_default_attrs) 
Developer: ymcui | Project: Chinese-XLNet | Lines: 42 | Source: tpu_estimator.py
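
A hedged sketch of calling this helper, assuming it is importable from the project's tpu_estimator module; trained_estimator, the feature spec, and the export directory are hypothetical placeholders, and tf.estimator.export.build_raw_serving_input_receiver_fn is used here as one convenient way to build the receiver fn.

import tensorflow as tf

# Hypothetical raw serving signature: one float feature batch of width 4.
serving_input_receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(
    {'x': tf.placeholder(tf.float32, shape=[None, 4], name='x')})

# `trained_estimator` is a hypothetical, already trained tf.estimator.Estimator
# whose model_fn is TPU-compatible.
export_dir = export_estimator_savedmodel(
    estimator=trained_estimator,
    export_dir_base='/tmp/tpu_export',
    serving_input_receiver_fn=serving_input_receiver_fn)
print(export_dir)  # bytes path of the timestamped SavedModel directory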


Note: The tensorflow.python.estimator.estimator.Estimator examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution or use should follow the license of the corresponding project. Please do not reproduce without permission.