

Python v1.Estimator Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.Estimator method in Python. If you are wondering how v1.Estimator is used in practice, or are looking for concrete v1.Estimator code samples, the curated examples below should help. You can also explore further usage examples for the containing module, tensorflow.compat.v1.


The following shows 15 code examples of the v1.Estimator method, sorted by popularity by default.

Example 1: get_run_config

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Estimator [as alias]
def get_run_config(self):
    """Get the RunConfig for Estimator model.

    Returns:
      tf.estimator.RunConfig() for this model.
    """
    return gin_configurable_run_config_cls(
        session_config=self.get_session_config()) 
Developer ID: google-research, Project: tensor2robot, Lines of code: 10, Source: abstract_model.py
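For orientation, here is a minimal sketch of how the RunConfig returned by get_run_config might be wired into a tf.estimator.Estimator. The model_fn, model_dir and the `model` variable below are placeholders, not names from tensor2robot.

import tensorflow.compat.v1 as tf

def my_model_fn(features, labels, mode):
  # Placeholder model_fn; a real one builds the graph and returns a tf.estimator.EstimatorSpec.
  raise NotImplementedError

estimator = tf.estimator.Estimator(
    model_fn=my_model_fn,
    model_dir='/tmp/t2r_model',        # placeholder checkpoint directory
    config=model.get_run_config())     # `model` is assumed to be an instance of the class above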

Example 2: get_tpu_run_config

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Estimator [as alias]
def get_tpu_run_config(self):
    """Get the TPU RunConfig for Estimator model.

    Returns:
      contrib_tpu.RunConfig() for this model.
    """
    return gin_configurable_tpu_run_config_cls(
        master=FLAGS.master, tpu_config=gin_configurable_tpu_config_cls()) 
Developer ID: google-research, Project: tensor2robot, Lines of code: 10, Source: abstract_model.py
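A hedged sketch of how the TPU RunConfig could be handed to a TPUEstimator; the model_fn, batch size and use_tpu flag are illustrative assumptions rather than tensor2robot code.

import tensorflow.compat.v1 as tf

tpu_estimator = tf.contrib.tpu.TPUEstimator(
    model_fn=my_model_fn,                  # placeholder model_fn
    config=model.get_tpu_run_config(),     # contrib_tpu.RunConfig from the method above
    train_batch_size=64,                   # placeholder batch size
    use_tpu=True)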

Example 3: get_session_config

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Estimator [as alias]
def get_session_config(self):
    """Get the session config for Estimator model.

    Defaults to None which tells tf.Estimator to use its default session config.
    Not used in TPU jobs at the moment.

    Returns:
      None, or the desired session config.
    """
    return None 
Developer ID: google-research, Project: tensor2robot, Lines of code: 12, Source: abstract_model.py
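As one illustration of overriding this hook, a subclass could return a tf.ConfigProto; the GPU memory-growth setting below is an example choice, not a tensor2robot default.

import tensorflow.compat.v1 as tf

def get_session_config(self):
    """Hypothetical override: let the GPU allocator grow memory on demand."""
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    return config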

Example 4: main

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Estimator [as alias]
def main(unused_argv):
  logging.set_verbosity(logging.INFO)
  if FLAGS.dpsgd and FLAGS.batch_size % FLAGS.microbatches != 0:
    raise ValueError('Number of microbatches should divide evenly batch_size')

  # Instantiate the tf.Estimator.
  mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn,
                                            model_dir=FLAGS.model_dir)

  # Training loop.
  steps_per_epoch = 60000 // FLAGS.batch_size
  for epoch in range(1, FLAGS.epochs + 1):
    start_time = time.time()
    # Train the model for one epoch.
    mnist_classifier.train(
        input_fn=common.make_input_fn('train', FLAGS.batch_size),
        steps=steps_per_epoch)
    end_time = time.time()
    logging.info('Epoch %d time in seconds: %.2f', epoch, end_time - start_time)

    # Evaluate the model and print results
    eval_results = mnist_classifier.evaluate(
        input_fn=common.make_input_fn('test', FLAGS.batch_size, 1))
    test_accuracy = eval_results['accuracy']
    print('Test accuracy after %d epochs is: %.3f' % (epoch, test_accuracy))

    # Compute the privacy budget expended.
    if FLAGS.dpsgd:
      if FLAGS.noise_multiplier > 0.0:
        eps, _ = compute_dp_sgd_privacy_lib.compute_dp_sgd_privacy(
            60000, FLAGS.batch_size, FLAGS.noise_multiplier, epoch, 1e-5)
        print('For delta=1e-5, the current epsilon is: %.2f' % eps)
      else:
        print('Trained with DP-SGD but with zero noise.')
    else:
      print('Trained with vanilla non-private SGD optimizer') 
Developer ID: tensorflow, Project: privacy, Lines of code: 38, Source: mnist_dpsgd_tutorial.py
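The snippet above relies on absl flags (dpsgd, batch_size, microbatches, epochs, noise_multiplier, model_dir) and on helpers (cnn_model_fn, common.make_input_fn) defined elsewhere in mnist_dpsgd_tutorial.py. A rough sketch of how such flags are typically declared; the defaults below are placeholders, not the tutorial's actual values.

from absl import flags

FLAGS = flags.FLAGS

flags.DEFINE_boolean('dpsgd', True, 'Train with DP-SGD if True, with plain SGD otherwise.')
flags.DEFINE_integer('batch_size', 256, 'Batch size.')
flags.DEFINE_integer('microbatches', 256, 'Number of microbatches; must evenly divide batch_size.')
flags.DEFINE_integer('epochs', 60, 'Number of training epochs.')
flags.DEFINE_float('noise_multiplier', 1.1, 'Ratio of noise standard deviation to the clipping norm.')
flags.DEFINE_string('model_dir', None, 'Directory for model checkpoints.')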

Example 5: main

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Estimator [as alias]
def main(unused_argv):
  tf.logging.set_verbosity(tf.logging.INFO)

  # Load training and test data.
  train_data, train_labels, test_data, test_labels = load_mnist()

  # Instantiate the tf.Estimator.
  mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn)

  # Create tf.Estimator input functions for the training and test data.
  train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={'x': train_data},
      y=train_labels,
      batch_size=FLAGS.batch_size,
      num_epochs=FLAGS.epochs,
      shuffle=True)
  eval_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={'x': test_data},
      y=test_labels,
      num_epochs=1,
      shuffle=False)

  # Training loop.
  steps_per_epoch = 60000 // FLAGS.batch_size
  for epoch in range(1, FLAGS.epochs + 1):
    # Train the model for one epoch.
    mnist_classifier.train(input_fn=train_input_fn, steps=steps_per_epoch)

    # Evaluate the model and print results
    eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
    test_accuracy = eval_results['accuracy']
    print('Test accuracy after %d epochs is: %.3f' % (epoch, test_accuracy)) 
Developer ID: tensorflow, Project: privacy, Lines of code: 34, Source: mnist_scratch.py
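tf.estimator.inputs.numpy_input_fn is part of the TF1 estimator API; newer code usually builds the same input pipeline with tf.data. A minimal sketch of an equivalent input_fn, assuming in-memory NumPy arrays as in the example above (this helper is an assumption, not part of mnist_scratch.py).

import tensorflow.compat.v1 as tf

def make_train_input_fn(data, labels, batch_size, num_epochs):
  """Sketch of a tf.data-based replacement for numpy_input_fn."""
  def input_fn():
    dataset = tf.data.Dataset.from_tensor_slices(({'x': data}, labels))
    dataset = dataset.shuffle(buffer_size=60000).repeat(num_epochs).batch(batch_size)
    return dataset
  return input_fn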

Example 6: input_function

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Estimator [as alias]
def input_function(is_train):
  """Pulls in pattern dataset.

  Args:
    is_train: A boolean indicating whether we are constructing the dataset for
    the train set or the eval set.

  Returns:
    dataset: A tf.data.Dataset object containing the features and labels.

  Raises:
    NameError: If FLAGS.task_name is not "pattern" or "symbolic".
  """
  if FLAGS.task_name == "pattern":
    dataset = synthetic_dataset.get_pattern_dataset(
        FLAGS.num_examples,
        FLAGS.num_sets_per_sequence,
        FLAGS.pattern_size,
        selective=FLAGS.selective_task,
        num_patterns_store=FLAGS.num_patterns_store)
  elif FLAGS.task_name == "symbolic":
    dataset = synthetic_dataset.get_symbolic_dataset(
        is_train,
        FLAGS.num_examples,
        FLAGS.num_sets_per_sequence)
  else:
    raise NameError("Task %s not found" % FLAGS.task_name)

  dataset = (
      dataset.repeat(FLAGS.num_epochs).shuffle(buffer_size=1000)
      .batch(FLAGS.batch_size, drop_remainder=True))

  # Estimator expects a tuple.
  dataset = dataset.map(lambda d: (d, d["targets"]))

  return dataset 
Developer ID: google-research, Project: language, Lines of code: 38, Source: run_models.py

Example 7: experiment_function

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Estimator [as alias]
def experiment_function(run_config, hparams):
  """An experiment function satisfying the tf.estimator API.

  Args:
    run_config: A learn_running.EstimatorConfig object.
    hparams: Unused set of hyperparams.

  Returns:
    experiment: A tf.contrib.learn.Experiment object.
  """
  del hparams

  train_input_fn = partial(input_function, is_train=True)
  eval_input_fn = partial(input_function, is_train=False)

  estimator = tf.estimator.Estimator(
      model_fn=model_function,
      config=run_config,
      model_dir=run_config.model_dir)

  experiment = tf.contrib.learn.Experiment(
      estimator=estimator,
      train_input_fn=train_input_fn,
      eval_input_fn=eval_input_fn,
      eval_steps=FLAGS.num_eval_steps,
  )

  return experiment 
Developer ID: google-research, Project: language, Lines of code: 30, Source: run_models.py
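Experiment functions with this (run_config, hparams) signature are typically passed to learn_runner in TF 1.x. A hedged sketch of such an invocation; the schedule, model_dir and the use of tf.contrib.learn.RunConfig are assumptions, and run_models.py may wire things up differently.

import tensorflow.compat.v1 as tf
from tensorflow.contrib.learn.python.learn import learn_runner

learn_runner.run(
    experiment_fn=experiment_function,
    run_config=tf.contrib.learn.RunConfig(model_dir='/tmp/experiment'),  # placeholder directory
    schedule='train_and_evaluate',
    hparams=None)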

Example 8: main

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Estimator [as alias]
def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)

  if FLAGS.experiment_logdir:
    write_flags_to_file(FLAGS, FLAGS.experiment_logdir + "/hparams.txt")

  # TODO(djweiss): Finish third-party Estimator code here. 
Developer ID: google-research, Project: language, Lines of code: 9, Source: run_models.py

Example 9: __init__

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Estimator [as alias]
def __init__(self,
               preprocessor_cls=None,
               create_optimizer_fn=optimizers.default_create_optimizer_fn,
               device_type=DEVICE_TYPE_CPU,
               summarize_gradients=True,
               use_sync_replicas_optimizer=False,
               use_avg_model_params=False,
               init_from_checkpoint_fn=None):
    """Base constructor to be used by subclass.

    Args:
      preprocessor_cls: (Optional) A class derived from
        preprocessors.AbstractPreprocessor.
      create_optimizer_fn: A callable function which returns an instance of a
        subclass of tf.train.Optimizer. We couldn't take an optimizer instance
        here because some optimizer's constructor may need to access the
        variables in the graph, which will be created by Estimator when calling
        model_fn. More precisely we will only create an instance during training
        (mode == ModeKeys.TRAIN) within the _optimizer property call which will
        wrap the optimizer instance for GPU towers or TPUs if necessary. The
        _optimizer property is only used within create_train_op function.
      device_type: The device type this model will be deployed on (
        DEVICE_TYPE_CPU, DEVICE_TYPE_GPU, DEVICE_TYPE_TPU).
      summarize_gradients: If True summaries for the gradients produced by the
        train_op will be created. Note, we will automatically disable these
        summaries in case of DEVICE_TYPE_TPU.
      use_sync_replicas_optimizer: If True, synchronize gradient updates from
        the different replicas. (GPU-only, since TPUs are already synchronous).
      use_avg_model_params: During training use a MovingAverageOptimizer and
        swapping saver to compute a running average of the model variables for
        inference.
      init_from_checkpoint_fn: A function that calls
        tf.train.init_from_checkpoint.
    """
    self._preprocessor_cls = preprocessor_cls
    self._create_optimizer_fn = create_optimizer_fn
    self._device_type = device_type
    self._summarize_gradients = summarize_gradients
    self._use_sync_replicas_optimizer = use_sync_replicas_optimizer
    self._sync_replicas_optimizer = None
    self._use_avg_model_params = use_avg_model_params
    self._init_from_checkpoint_fn = init_from_checkpoint_fn
    self._optimizer = None  # type: Optional[tf.train.Optimizer]
    self._scaffold_fn = tf.train.Scaffold 
Developer ID: google-research, Project: tensor2robot, Lines of code: 46, Source: abstract_model.py

Example 10: inference_network_fn

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Estimator [as alias]
def inference_network_fn(
      self,
      features,
      labels,
      mode,
      config=None,
      params=None):
    """The inference network implementation.

    This creates the main network based on features.
    Optionally (mode=ModeKeys.TRAIN or ModeKeys.EVAL) the model can do
    additional processing on labels, however, it has to be ensured that this is
    optional and the graph is fully operational without labels. At inference
    time we will have no access to labels. Tensors which are required for loss
    computation or debugging must be put into the inference_outputs dict.
    Having a dedicated inference_network_fn allows to compose new networks by
    using other TFModels.

    Please, use the following pattern to add not supported tpu model components
    such as tf.summary.*
    if self.use_summaries(params):
      # Do operations which are not supported on tpus.

    If your model does not support TPUs at all, please call the following
    function.
    self.raise_no_tpu_support()

    Args:
      features: This is the first item returned from the input_fn and parsed by
        tensorspec_utils.validate_and_pack. A spec_structure which fulfills the
        requirements of the self.get_feature_specification.
      labels: This is the second item returned from the input_fn and parsed by
        tensorspec_utils.validate_and_pack. A spec_structure which fulfills the
        requirements of the self.get_feature_specification.
      mode: (ModeKeys) Specifies if this is training, evaluation or prediction.
      config: (Optional tf.estimator.RunConfig or contrib_tpu.RunConfig) Will
        receive what is passed to Estimator in config parameter, or the default
        config (tf.estimator.RunConfig). Allows updating things in your model_fn
        based on  configuration such as num_ps_replicas, or model_dir.
      params: An optional dict of hyper parameters that will be passed into
        input_fn and model_fn. Keys are names of parameters, values are basic
        python types. There are reserved keys for TPUEstimator, including
        'batch_size'.

    Returns:
      inference_outputs: A dict with output tensors.
    """ 
Developer ID: google-research, Project: tensor2robot, Lines of code: 49, Source: abstract_model.py
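A hedged sketch of what a concrete override might look like, returning the inference_outputs dict described above. The feature key 'state', the layer sizes and the 'logits' output name are invented for illustration and do not come from tensor2robot.

import tensorflow.compat.v1 as tf

def inference_network_fn(self, features, labels, mode, config=None, params=None):
    # 'state' is a hypothetical feature from get_feature_specification.
    net = tf.layers.dense(features.state, units=32, activation=tf.nn.relu)
    logits = tf.layers.dense(net, units=10)
    if self.use_summaries(params):
      tf.summary.histogram('logits', logits)  # summaries only where supported, per the docstring
    return {'logits': logits}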

Example 11: model_train_fn

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Estimator [as alias]
def model_train_fn(self,
                     features,
                     labels,
                     inference_outputs,
                     mode,
                     config=None,
                     params=None):
    """The training model implementation.

    This model_fn should add the loss computation based on the inference_outputs
    and labels. For better debugging we also provide access to the input
    features. Note, no new variables should be generated in this model_fn since
    the model_inference_fn and the maybe_init_from_checkpoint function would
    not have access to these variables. We output the final loss (scalar) and
    a dict of optional train_outputs which might be useful for the
    model_eval_fn.

    Please, use the following pattern to add not supported tpu model components
    such as tf.summary.*
    if self.use_summaries(params):
      # Do operations which are not supported on tpus.

    If your model does not support TPUs at all, please call the following
    function.
    self.raise_no_tpu_support()

    Args:
      features: This is the first item returned from the input_fn and parsed by
        tensorspec_utils.validate_and_pack. A spec_structure which fulfills the
        requirements of the self.get_feature_specification.
      labels: This is the second item returned from the input_fn and parsed by
        tensorspec_utils.validate_and_pack. A spec_structure which fulfills the
        requirements of the self.get_feature_specification.
      inference_outputs: A dict containing the output tensors of
        model_inference_fn.
      mode: (ModeKeys) Specifies if this is training, evaluation or prediction.
      config: (Optional tf.estimator.RunConfig or contrib_tpu.RunConfig) Will
        receive what is passed to Estimator in config parameter, or the default
        config (tf.estimator.RunConfig). Allows updating things in your model_fn
        based on  configuration such as num_ps_replicas, or model_dir.
      params: An optional dict of hyper parameters that will be passed into
        input_fn and model_fn. Keys are names of parameters, values are basic
        python types. There are reserved keys for TPUEstimator, including
        'batch_size'.

    Returns:
      loss: The loss we will optimize.
      train_outputs: (Optional) A dict with additional tensors the training
        model generates. We output these tensors such that model_eval_fn could
        introspect these tensors.
    """ 
Developer ID: google-research, Project: tensor2robot, Lines of code: 53, Source: abstract_model.py
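A hedged sketch of a loss computation following this contract; the 'logits' key and the labels.target field are assumptions used for illustration only.

import tensorflow.compat.v1 as tf

def model_train_fn(self, features, labels, inference_outputs, mode,
                   config=None, params=None):
    # Assumes inference_network_fn put 'logits' into inference_outputs and that the
    # label spec provides an integer 'target' tensor; neither name is mandated here.
    loss = tf.losses.sparse_softmax_cross_entropy(
        labels=labels.target, logits=inference_outputs['logits'])
    return loss, {'loss': loss}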

Example 12: model_eval_fn

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Estimator [as alias]
def model_eval_fn(self,
                    features,
                    labels,
                    inference_outputs,
                    train_loss,
                    train_outputs,
                    mode,
                    config=None,
                    params=None):
    """The eval model implementation, by default we report the loss for eval.

    This function should add the eval_metrics computation based on the
    inference_outputs, labels and the train_loss. For better debugging we also
    provide access to the input features and the train_outputs. Note, no new
    variables should be generated in this model_fn since the model_inference_fn
    and the maybe_init_from_checkpoint function would not have access to these
    variables.

    Please, use the following pattern to add not supported tpu model components
    such as tf.summary.*
    if self.use_summaries(params):
      # Do operations which are not supported on tpus.

    If your model does not support TPUs at all, please call the following
    function.
    self.raise_no_tpu_support()

    Args:
      features: This is the first item returned from the input_fn and parsed by
        tensorspec_utils.validate_and_pack. A spec_structure which fulfills the
        requirements of the self.get_feature_specification.
      labels: This is the second item returned from the input_fn and parsed by
        tensorspec_utils.validate_and_pack. A spec_structure which fulfills the
        requirements of the self.get_feature_specification.
      inference_outputs: A dict containing the output tensors of
        model_inference_fn.
      train_loss: The final loss from model_train_fn.
      train_outputs: A dict containing the output tensors (dict) of
        model_train_fn.
      mode: (ModeKeys) Specifies if this is training, evaluation or prediction.
      config: (Optional tf.estimator.RunConfig or contrib_tpu.RunConfig) Will
        receive what is passed to Estimator in config parameter, or the default
        config (tf.estimator.RunConfig). Allows updating things in your model_fn
        based on  configuration such as num_ps_replicas, or model_dir.
      params: An optional dict of hyper parameters that will be passed into
        input_fn and model_fn. Keys are names of parameters, values are basic
        python types. There are reserved keys for TPUEstimator, including
        'batch_size'.

    Returns:
      eval_metrics: A tuple of (metric_fn, metric_fn_inputs) where metric_fn
        is a dict with {metric_description: tf.metrics.*}.
    """
    del features, labels, inference_outputs, train_loss, train_outputs
    del mode, config, params
    # By default we don't have any eval_metrics. The loss computation used
    # to optimize the model_fn will be reported for the model_eval_fn as well.
    # Hence, by default the EVAL mode can be used to determine the loss
    # performance on the eval dataset or even a larger train dataset.
    return None 
Developer ID: google-research, Project: tensor2robot, Lines of code: 62, Source: abstract_model.py
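For comparison, a hedged sketch of an override that does report metrics, using the (metric_fn, metric_fn_inputs) form the docstring describes; the 'target' and 'logits' names are illustrative assumptions.

import tensorflow.compat.v1 as tf

def model_eval_fn(self, features, labels, inference_outputs, train_loss,
                  train_outputs, mode, config=None, params=None):
    del features, train_loss, train_outputs, mode, config, params

    def metric_fn(target, logits):
      predictions = tf.argmax(logits, axis=1)
      return {'accuracy': tf.metrics.accuracy(labels=target, predictions=predictions)}

    return (metric_fn, [labels.target, inference_outputs['logits']])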

Example 13: add_summaries

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Estimator [as alias]
def add_summaries(self,
                    features,
                    labels,
                    inference_outputs,
                    train_loss,
                    train_outputs,
                    mode,
                    config=None,
                    params=None):
    """Add summaries to the graph.

    Having a central place to add all summaries to the graph is helpful in order
    to compose models. For example, if an inference_network_fn is used within
    a while loop no summaries can be added. This function will allow to add
    summaries after the while loop has been processed.

    Args:
      features: This is the first item returned from the input_fn and parsed by
        tensorspec_utils.validate_and_pack. A spec_structure which fulfills the
        requirements of the self.get_feature_specification.
      labels: This is the second item returned from the input_fn and parsed by
        tensorspec_utils.validate_and_pack. A spec_structure which fulfills the
        requirements of the self.get_feature_specification.
      inference_outputs: A dict containing the output tensors of
        model_inference_fn.
      train_loss: The final loss from model_train_fn.
      train_outputs: A dict containing the output tensors (dict) of
        model_train_fn.
      mode: (ModeKeys) Specifies if this is training, evaluation or prediction.
      config: (Optional tf.estimator.RunConfig or contrib_tpu.RunConfig) Will
        receive what is passed to Estimator in config parameter, or the default
        config (tf.estimator.RunConfig). Allows updating things in your model_fn
        based on  configuration such as num_ps_replicas, or model_dir.
      params: An optional dict of hyper parameters that will be passed into
        input_fn and model_fn. Keys are names of parameters, values are basic
        python types. There are reserved keys for TPUEstimator, including
        'batch_size'.
    """
    del features, labels, inference_outputs, train_loss, train_outputs, mode
    del config
    if not self.use_summaries(params):
      return 
Developer ID: google-research, Project: tensor2robot, Lines of code: 44, Source: abstract_model.py
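A hedged sketch of an override that adds an actual summary; the scalar name is arbitrary.

import tensorflow.compat.v1 as tf

def add_summaries(self, features, labels, inference_outputs, train_loss,
                  train_outputs, mode, config=None, params=None):
    del features, labels, inference_outputs, train_outputs, mode, config
    if not self.use_summaries(params):
      return
    tf.summary.scalar('train_loss', train_loss)  # illustrative summary on the training loss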

Example 14: lr_model_fn

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Estimator [as alias]
def lr_model_fn(features, labels, mode, nclasses, dim):
  """Model function for logistic regression."""
  input_layer = tf.reshape(features['x'], tuple([-1]) + dim)

  logits = tf.layers.dense(
      inputs=input_layer,
      units=nclasses,
      kernel_regularizer=tf.contrib.layers.l2_regularizer(
          scale=FLAGS.regularizer),
      bias_regularizer=tf.contrib.layers.l2_regularizer(
          scale=FLAGS.regularizer))

  # Calculate loss as a vector (to support microbatches in DP-SGD).
  vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits) + tf.losses.get_regularization_loss()
  # Define mean of loss across minibatch (for reporting through tf.Estimator).
  scalar_loss = tf.reduce_mean(vector_loss)

  # Configure the training op (for TRAIN mode).
  if mode == tf.estimator.ModeKeys.TRAIN:
    if FLAGS.dpsgd:
      # The loss function is L-Lipschitz with L = sqrt(2*(||x||^2 + 1)) where
      # ||x|| is the norm of the data.
      # We don't use microbatches (thus speeding up computation), since no
      # clipping is necessary due to data normalization.
      optimizer = dp_optimizer.DPGradientDescentGaussianOptimizer(
          l2_norm_clip=math.sqrt(2 * (FLAGS.data_l2_norm**2 + 1)),
          noise_multiplier=FLAGS.noise_multiplier,
          num_microbatches=1,
          learning_rate=FLAGS.learning_rate)
      opt_loss = vector_loss
    else:
      optimizer = GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
      opt_loss = scalar_loss
    global_step = tf.train.get_global_step()
    train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
    # In the following, we pass the mean of the loss (scalar_loss) rather than
    # the vector_loss because tf.estimator requires a scalar loss. This is only
    # used for evaluation and debugging by tf.estimator. The actual loss being
    # minimized is opt_loss defined above and passed to optimizer.minimize().
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=scalar_loss, train_op=train_op)

  # Add evaluation metrics (for EVAL mode).
  elif mode == tf.estimator.ModeKeys.EVAL:
    eval_metric_ops = {
        'accuracy':
            tf.metrics.accuracy(
                labels=labels, predictions=tf.argmax(input=logits, axis=1))
    }
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=scalar_loss, eval_metric_ops=eval_metric_ops) 
Developer ID: tensorflow, Project: privacy, Lines of code: 54, Source: mnist_lr_tutorial.py
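The snippet relies on imports defined elsewhere in mnist_lr_tutorial.py; roughly, they look like the following (treat the exact module paths as an approximation of the original file's header rather than a verbatim copy).

import math

import tensorflow.compat.v1 as tf
from tensorflow_privacy.privacy.optimizers import dp_optimizer

# The tutorial aliases the plain optimizer for the non-private branch.
GradientDescentOptimizer = tf.train.GradientDescentOptimizer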

Example 15: main

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Estimator [as alias]
def main(unused_argv):
  tf.logging.set_verbosity(tf.logging.INFO)
  if FLAGS.data_l2_norm <= 0:
    raise ValueError('data_l2_norm must be positive.')
  if FLAGS.dpsgd and FLAGS.learning_rate > 8 / FLAGS.data_l2_norm**2:
    raise ValueError('The amplification-by-iteration analysis requires '
                     'learning_rate <= 2 / beta, where beta is the smoothness '
                     'of the loss function and is upper bounded by ||x||^2 / 4 '
                     'with ||x|| being the largest L2 norm of the samples.')

  # Load training and test data.
  # Smoothness = ||x||^2 / 4 where ||x|| is the largest L2 norm of the samples.
  # To get bounded smoothness, we normalize the data such that each sample has a
  # bounded L2 norm.
  train_data, train_labels, test_data, test_labels = load_mnist(
      data_l2_norm=FLAGS.data_l2_norm)

  # Instantiate tf.Estimator.
  # pylint: disable=g-long-lambda
  model_fn = lambda features, labels, mode: lr_model_fn(
      features, labels, mode, nclasses=10, dim=train_data.shape[1:])
  mnist_classifier = tf.estimator.Estimator(
      model_fn=model_fn, model_dir=FLAGS.model_dir)

  # Create tf.Estimator input functions for the training and test data.
  # To analyze the per-user privacy loss, we keep the same orders of samples in
  # each epoch by setting shuffle=False.
  train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={'x': train_data},
      y=train_labels,
      batch_size=FLAGS.batch_size,
      num_epochs=FLAGS.epochs,
      shuffle=False)
  eval_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={'x': test_data}, y=test_labels, num_epochs=1, shuffle=False)

  # Train the model.
  num_samples = train_data.shape[0]
  steps_per_epoch = num_samples // FLAGS.batch_size

  mnist_classifier.train(
      input_fn=train_input_fn, steps=steps_per_epoch * FLAGS.epochs)

  # Evaluate the model and print results.
  eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
  print('Test accuracy after {} epochs is: {:.2f}'.format(
      FLAGS.epochs, eval_results['accuracy']))

  if FLAGS.dpsgd:
    print_privacy_guarantees(
        epochs=FLAGS.epochs,
        batch_size=FLAGS.batch_size,
        samples=num_samples,
        noise_multiplier=FLAGS.noise_multiplier,
    ) 
Developer ID: tensorflow, Project: privacy, Lines of code: 57, Source: mnist_lr_tutorial.py


Note: The tensorflow.compat.v1.Estimator examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not repost without permission.