This page collects typical usage examples of the Python tensorflow.estimator API. If you are wondering what tensorflow.estimator does, how to call it, or what real-world uses look like, the curated examples below may help. You can also explore further usage examples of the enclosing tensorflow module.
The following presents 15 code examples of tensorflow.estimator, ordered by popularity by default.
Example 1: _gen_monitored_train_and_evaluate
# Required import: import tensorflow [as alias]
# Or: from tensorflow import estimator [as alias]
def _gen_monitored_train_and_evaluate(client: skein.ApplicationClient):
    task = cluster.get_task()

    def train_and_evaluate(
            estimator: tf.estimator.Estimator,
            train_spec: tf.estimator.TrainSpec,
            eval_spec: tf.estimator.EvalSpec):
        event.broadcast_train_eval_start_timer(client, task)
        tf.estimator.train_and_evaluate(
            estimator,
            train_spec,
            eval_spec
        )
        event.broadcast_train_eval_stop_timer(client, task)

    return train_and_evaluate
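For context, a minimal sketch of how the returned closure might be invoked. The model_fn, input functions, step counts, and model_dir below are illustrative assumptions, not part of the original example; only client (a skein.ApplicationClient) comes from the snippet above.

# Hypothetical usage; my_model_fn, train_input_fn and eval_input_fn are assumed to exist.
estimator = tf.estimator.Estimator(model_fn=my_model_fn, model_dir="hdfs:///tmp/model")
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=10000)
eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn, steps=100)

monitored_train_and_evaluate = _gen_monitored_train_and_evaluate(client)
monitored_train_and_evaluate(estimator, train_spec, eval_spec)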
Example 2: _shutdown_container
# Required import: import tensorflow [as alias]
# Or: from tensorflow import estimator [as alias]
def _shutdown_container(
        client: skein.ApplicationClient,
        cluster_tasks: List[str],
        run_config: tf.estimator.RunConfig,
        thread: Optional[MonitoredThread]
) -> None:
    # Wait for all tasks connected to this one. The set of tasks to
    # wait for contains all tasks in the cluster, or the ones
    # matching ``device_filters`` if set. The implementation assumes
    # that ``device_filters`` are symmetric.
    exception = thread.exception if thread is not None and isinstance(thread, MonitoredThread) \
        else None
    task = cluster.get_task()
    event.stop_event(client, task, exception)
    wait_for_connected_tasks(
        client,
        cluster_tasks,
        getattr(run_config.session_config, "device_filters", []))
    event.broadcast_container_stop_time(client, task)
    if exception is not None:
        raise exception from None
Example 3: initialize_graph
# Required import: import tensorflow [as alias]
# Or: from tensorflow import estimator [as alias]
def initialize_graph(self):
    if not self.inference:
        with self.sess.graph.as_default():
            features, labels = get_inference_input()
            estimator_spec = model_fn(features, labels,
                                      tf.estimator.ModeKeys.PREDICT, self.hparams)
            self.inference_input = features
            self.inference_output = estimator_spec.predictions
            if self.save_file is not None:
                self.initialize_weights(self.save_file)
            else:
                self.sess.run(tf.global_variables_initializer())
    else:
        input_name = "pos_tensor"
        input_tensors = self.graph.get_tensor_by_name("import/" + input_name + ":0")
        self.inference_input = input_tensors
        output_names = ["policy_output", "value_output"]
        output_tensors = []
        for name in output_names:
            output_tensors.append(self.graph.get_tensor_by_name("import/" + name + ":0"))
        self.inference_output = output_tensors
Example 4: export_model
# Required import: import tensorflow [as alias]
# Or: from tensorflow import estimator [as alias]
def export_model(working_dir, model_path):
    """Take the latest checkpoint and export it to model_path for selfplay.

    Assumes that all relevant model files are prefixed by the same name.
    (For example, foo.index, foo.meta and foo.data-00000-of-00001.)

    Args:
        working_dir: The directory where tf.estimator keeps its checkpoints.
        model_path: The path (can be a gs:// path) to export the model to.
    """
    estimator = tf.estimator.Estimator(model_fn, model_dir=working_dir,
                                       params='ignored')
    latest_checkpoint = estimator.latest_checkpoint()
    all_checkpoint_files = tf.gfile.Glob(latest_checkpoint + '*')
    for filename in all_checkpoint_files:
        suffix = filename.partition(latest_checkpoint)[2]
        destination_path = model_path + suffix
        print("Copying {} to {}".format(filename, destination_path))
        tf.gfile.Copy(filename, destination_path)
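A brief usage sketch follows; both paths are hypothetical and only stand in for a real checkpoint directory and export destination.

# Hypothetical paths; model_path can be local or any gs:// URI that tf.gfile can write to.
export_model(working_dir='/tmp/estimator_working_dir',
             model_path='gs://my-bucket/models/000001-first_generation')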
Example 5: bootstrap
# Required import: import tensorflow [as alias]
# Or: from tensorflow import estimator [as alias]
def bootstrap(
        working_dir: 'tf.estimator working directory. If not set, defaults to a random tmp dir'=None,
        model_save_path: 'Where to export the first bootstrapped generation'=None):
    qmeas.start_time('bootstrap')
    if working_dir is None:
        with tempfile.TemporaryDirectory() as working_dir:
            _ensure_dir_exists(working_dir)
            _ensure_dir_exists(os.path.dirname(model_save_path))
            dual_net.bootstrap(working_dir)
            dual_net.export_model(working_dir, model_save_path)
    else:
        _ensure_dir_exists(working_dir)
        _ensure_dir_exists(os.path.dirname(model_save_path))
        dual_net.bootstrap(working_dir)
        dual_net.export_model(working_dir, model_save_path)
    qmeas.stop_time('bootstrap')
Example 6: validate
# Required import: import tensorflow [as alias]
# Or: from tensorflow import estimator [as alias]
def validate(
        working_dir: 'tf.estimator working directory',
        *tf_record_dirs: 'Directories where holdout data are',
        checkpoint_name: 'Which checkpoint to evaluate (None=latest)'=None,
        validate_name: 'Name for validation set (i.e., selfplay or human)'=None):
    qmeas.start_time('validate')
    tf_records = []
    with timer("Building lists of holdout files"):
        for record_dir in tf_record_dirs:
            tf_records.extend(gfile.Glob(os.path.join(record_dir, '*.zz')))
    first_record = os.path.basename(tf_records[0])
    last_record = os.path.basename(tf_records[-1])
    with timer("Validating from {} to {}".format(first_record, last_record)):
        dual_net.validate(
            working_dir, tf_records, checkpoint_name=checkpoint_name,
            name=validate_name)
    qmeas.stop_time('validate')
Example 7: normalize_weights
# Required import: import tensorflow [as alias]
# Or: from tensorflow import estimator [as alias]
def normalize_weights(self, labels, weights):
    """Normalizes weights needed for tf.estimator (not tf.keras).

    This is needed for `tf.estimator` given that the reduction may be
    `SUM_OVER_NONZERO_WEIGHTS`. This function is not needed after we migrate
    from the deprecated reduction to `SUM` or `SUM_OVER_BATCH_SIZE`.

    Args:
        labels: A `Tensor` of shape [batch_size, list_size] representing graded
            relevance.
        weights: A scalar, a `Tensor` with shape [batch_size, 1] for list-wise
            weights, or a `Tensor` with shape [batch_size, list_size] for
            item-wise weights.

    Returns:
        The normalized weights.
    """
    del labels
    return 1.0 if weights is None else weights
Example 8: compute
# Required import: import tensorflow [as alias]
# Or: from tensorflow import estimator [as alias]
def compute(self, labels, logits, weights, reduction):
    """Computes the reduced loss for tf.estimator (not tf.keras).

    Note that this function is not compatible with keras.

    Args:
        labels: A `Tensor` of the same shape as `logits` representing graded
            relevance.
        logits: A `Tensor` with shape [batch_size, list_size]. Each value is
            the ranking score of the corresponding item.
        weights: A scalar, a `Tensor` with shape [batch_size, 1] for list-wise
            weights, or a `Tensor` with shape [batch_size, list_size] for
            item-wise weights.
        reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to
            reduce training loss over batch.

    Returns:
        Reduced loss for training and eval.
    """
    losses, loss_weights = self.compute_unreduced_loss(labels, logits)
    weights = tf.multiply(self.normalize_weights(labels, weights), loss_weights)
    return tf.compat.v1.losses.compute_weighted_loss(
        losses, weights, reduction=reduction)
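To show where a method like this is typically called, here is a hedged sketch of a model_fn fragment; ranking_loss, labels, logits, and the optimizer are assumptions rather than part of the original snippet.

# Hypothetical call site inside an estimator model_fn; `ranking_loss` is assumed to be
# an instance of a loss class providing compute_unreduced_loss/normalize_weights.
loss = ranking_loss.compute(
    labels=labels,    # [batch_size, list_size] graded relevance
    logits=logits,    # [batch_size, list_size] ranking scores
    weights=None,     # normalize_weights turns None into 1.0
    reduction=tf.compat.v1.losses.Reduction.SUM_OVER_BATCH_SIZE)
train_op = tf.compat.v1.train.AdagradOptimizer(0.1).minimize(
    loss, global_step=tf.compat.v1.train.get_global_step())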
Example 9: eval_metric
# Required import: import tensorflow [as alias]
# Or: from tensorflow import estimator [as alias]
def eval_metric(self, labels, logits, weights):
    """Computes the eval metric for the loss in tf.estimator (not tf.keras).

    Note that this function is not compatible with keras.

    Args:
        labels: A `Tensor` of the same shape as `logits` representing graded
            relevance.
        logits: A `Tensor` with shape [batch_size, list_size]. Each value is
            the ranking score of the corresponding item.
        weights: A scalar, a `Tensor` with shape [batch_size, 1] for list-wise
            weights, or a `Tensor` with shape [batch_size, list_size] for
            item-wise weights.

    Returns:
        A metric op.
    """
    losses, loss_weights = self.compute_unreduced_loss(labels, logits)
    weights = tf.multiply(self.normalize_weights(labels, weights), loss_weights)
    return tf.compat.v1.metrics.mean(losses, weights)
Example 10: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import estimator [as alias]
def __init__(self, transform_fn=None):
    """Constructor for the common components of all ranking models.

    Args:
        transform_fn: (function) A user-provided function that transforms raw
            features into dense Tensors with the following signature:
            * Args:
                `features`: A dict of Tensors or SparseTensors that contains
                    the raw features from an input_fn.
                `mode`: Optional. See estimator `ModeKeys`.
                `params`: Optional. See tf.estimator model_fn. Hyperparameters
                    for the model.
            * Returns:
                `context_features`: A dict of `Tensor`s with shape
                    [batch_size, ...]
                `example_features`: A dict of `Tensor`s with shape
                    [batch_size, list_size, ...]
    """
    if transform_fn is None:
        self._transform_fn = feature.make_identity_transform_fn({})
    else:
        self._transform_fn = transform_fn
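A minimal sketch of a transform_fn matching the signature described in the docstring above; the feature keys, the densification step, and the enclosing class name are invented for illustration.

# Hypothetical transform_fn; 'query_tokens' and 'doc_tokens' are made-up feature keys.
def my_transform_fn(features, mode=None, params=None):
    # Densify sparse inputs and split them into context vs. per-example features.
    context_features = {'query_tokens': tf.sparse.to_dense(features['query_tokens'])}
    example_features = {'doc_tokens': tf.sparse.to_dense(features['doc_tokens'])}
    return context_features, example_features

ranking_model = SomeRankingModel(transform_fn=my_transform_fn)  # class name assumed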
Example 11: dnn_classifier
# Required import: import tensorflow [as alias]
# Or: from tensorflow import estimator [as alias]
def dnn_classifier(self):
    """Builds the DNN model (classifier) with the parameters parsed from the
    user input.

    Returns:
        tf.estimator object, Canned estimator of DNN Classifier.
    """
    return tf.estimator.DNNClassifier(
        config=self.config,
        feature_columns=self.deep_columns,
        hidden_units=self.hidden_units,
        n_classes=self.n_classes,
        weight_column=self.weight_column,
        label_vocabulary=self.label_vocabulary,
        optimizer=self.dnn_optimizer,
        activation_fn=self.activation_fn,
        dropout=self.dropout,
        input_layer_partitioner=self.input_layer_partitioner,
        warm_start_from=self.warm_start_from,
        loss_reduction=self.loss_reduction
    )
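For comparison, a self-contained hedged sketch of constructing and training a canned DNNClassifier directly, outside the wrapper class; the feature column, hidden units, labels, and input_fn are illustrative assumptions.

# Minimal standalone sketch; the feature 'x' and the synthetic dataset are made up.
feature_columns = [tf.feature_column.numeric_column('x', shape=(4,))]

def train_input_fn():
    features = {'x': tf.random.uniform([32, 4])}
    labels = tf.zeros([32], dtype=tf.int32)
    return tf.data.Dataset.from_tensors((features, labels)).repeat()

classifier = tf.estimator.DNNClassifier(
    feature_columns=feature_columns,
    hidden_units=[64, 32],
    n_classes=2,
    model_dir='/tmp/dnn_classifier_demo')
classifier.train(input_fn=train_input_fn, steps=100)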
Example 12: dnn_regressor
# Required import: import tensorflow [as alias]
# Or: from tensorflow import estimator [as alias]
def dnn_regressor(self):
    """Builds the DNN model (regressor) with the parameters parsed from the
    user input.

    Returns:
        tf.estimator object, Canned estimator of DNN Regressor.
    """
    return tf.estimator.DNNRegressor(
        config=self.config,
        feature_columns=self.deep_columns,
        hidden_units=self.hidden_units,
        label_dimension=self.label_dimension,
        weight_column=self.weight_column,
        optimizer=self.dnn_optimizer,
        activation_fn=self.activation_fn,
        dropout=self.dropout,
        input_layer_partitioner=self.input_layer_partitioner,
        warm_start_from=self.warm_start_from,
        loss_reduction=self.loss_reduction
    )
Example 13: combined_classifier
# Required import: import tensorflow [as alias]
# Or: from tensorflow import estimator [as alias]
def combined_classifier(self):
    """Builds a combined DNN and linear classifier with the parameters parsed
    from the user input.

    Returns:
        tf.estimator object, Canned estimator of Combined Classifier.
    """
    return tf.estimator.DNNLinearCombinedClassifier(
        config=self.config,
        linear_feature_columns=self.feature_columns,
        linear_optimizer=self.linear_optimizer,
        dnn_feature_columns=self.deep_columns,
        dnn_hidden_units=self.hidden_units,
        dnn_activation_fn=self.activation_fn,
        dnn_dropout=self.dropout,
        n_classes=self.n_classes,
        weight_column=self.weight_column,
        label_vocabulary=self.label_vocabulary,
        input_layer_partitioner=self.input_layer_partitioner,
        warm_start_from=self.warm_start_from,
        loss_reduction=self.loss_reduction,
        batch_norm=self.batch_norm,
        linear_sparse_combiner=self.linear_sparse_combiner
    )
Example 14: polynomial_regressor
# Required import: import tensorflow [as alias]
# Or: from tensorflow import estimator [as alias]
def polynomial_regressor(self):
    """Builds the polynomial regression model with the parameters parsed from
    the user input.

    Returns:
        A custom estimator of polynomial regression.
    """
    return tf.estimator.Estimator(
        model_fn=self.poly_regression_model_fn,
        model_dir=self.model_dir,
        config=self.config,
        params={
            'batch_size': self.batch_size,
            'polynomial_degree': self.polynomial_degree,
            'feature_names': self.feature_names,
            'optimizer': self.optimizer
        },
        warm_start_from=self.warm_start_from
    )
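The example above delegates all modeling to a user-defined poly_regression_model_fn, which is not shown on this page. As a rough orientation, a minimal model_fn skeleton for a custom tf.estimator.Estimator could look like the hedged sketch below; the polynomial expansion, loss, and optimizer handling are simplified assumptions, not the original implementation.

# Hedged skeleton of a custom model_fn; params['optimizer'] is assumed to be a
# tf.compat.v1.train optimizer instance, and the polynomial expansion is naive.
def poly_regression_model_fn(features, labels, mode, params):
    columns = [tf.feature_column.numeric_column(name)
               for name in params['feature_names']]
    x = tf.compat.v1.feature_column.input_layer(features, columns)
    # Expand features up to the configured polynomial degree, then fit a linear layer.
    powers = [x ** d for d in range(1, params['polynomial_degree'] + 1)]
    predictions = tf.compat.v1.layers.dense(tf.concat(powers, axis=1), units=1)

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)

    loss = tf.compat.v1.losses.mean_squared_error(
        labels, tf.squeeze(predictions, axis=1))
    if mode == tf.estimator.ModeKeys.TRAIN:
        train_op = params['optimizer'].minimize(
            loss, global_step=tf.compat.v1.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
    return tf.estimator.EstimatorSpec(mode, loss=loss)  # EVAL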
Example 15: polynomial_classifier
# Required import: import tensorflow [as alias]
# Or: from tensorflow import estimator [as alias]
def polynomial_classifier(self):
    """Builds the logistic classification model with the parameters parsed
    from the user input.

    Returns:
        A custom estimator of polynomial classification.
    """
    return tf.estimator.Estimator(
        model_fn=self.poly_classification_model_fn,
        model_dir=self.model_dir,
        config=self.config,
        params={
            'degree': self.polynomial_degree,
            'feature_names': self.feature_names,
            'batch_size': self.batch_size,
            'optimizer': self.optimizer
        }
    )