This article collects typical usage examples of the Python function tensorflow.contrib.framework.get_global_step. If you are unsure exactly what get_global_step does, how to call it, or what it looks like in real code, the curated examples below should help.
The sections below show 15 code examples of get_global_step, sorted by popularity by default.
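Before the project examples, here is a minimal standalone sketch of the typical pattern: the global step variable is created once in the graph, get_global_step() retrieves it, and the training op increments it on every update. This sketch is illustrative only and is not taken from any of the examples below; it assumes TensorFlow 1.x, where the contrib modules are still available (in later releases get_global_step was deprecated in favor of tf.train.get_global_step).

# A minimal, self-contained sketch (assumption: TensorFlow 1.x with tf.contrib
# available); illustrative only, not taken from the examples below.
import numpy as np
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers

x = tf.placeholder(tf.float32, [None, 4])
y = tf.placeholder(tf.float32, [None, 1])

tf.train.create_global_step()                      # create the counter once
global_step = contrib_framework.get_global_step()  # retrieve it anywhere else

predictions = layers.linear(x, 1)
loss = tf.losses.mean_squared_error(y, predictions)

# optimize_loss increments the global step each time train_op runs.
train_op = layers.optimize_loss(
    loss, global_step, learning_rate=0.01, optimizer='SGD')

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  _, step = sess.run([train_op, global_step],
                     feed_dict={x: np.zeros((2, 4), np.float32),
                                y: np.zeros((2, 1), np.float32)})
  print('global step after one update:', step)     # prints 1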
Example 1: _get_train_ops
def _get_train_ops(self, features, targets):
  """Method that builds model graph and returns trainer ops.

  Expected to be overridden by sub-classes that require custom support.
  This implementation uses `model_fn` passed as parameter to constructor to
  build model.

  Args:
    features: `Tensor` or `dict` of `Tensor` objects.
    targets: `Tensor` or `dict` of `Tensor` objects.

  Returns:
    Tuple of train `Operation` and loss `Tensor`.
  """
  _, loss = self._model_fn(features, targets, ModeKeys.TRAIN)
  # TODO(ipolosukhin): Move this to TensorFlowEstimator when
  # moving out training.
  if isinstance(self.learning_rate, types.FunctionType):
    learning_rate = self.learning_rate(contrib_framework.get_global_step())
  else:
    learning_rate = self.learning_rate
  if isinstance(self.optimizer, types.FunctionType):
    optimizer = self.optimizer(learning_rate)
  else:
    optimizer = self.optimizer
  train_op = layers.optimize_loss(
      loss,
      contrib_framework.get_global_step(),
      learning_rate=learning_rate,
      optimizer=optimizer,
      clip_gradients=self.clip_gradients)
  # Add update ops.
  train_op = control_flow_ops.group(
      train_op, *ops.get_collection('update_ops'))
  return train_op, loss
Example 2: before_run
def before_run(self, run_context):
  loss = (self.loss_op if self.loss_op is not None else
          run_context.session.graph.get_operation_by_name(
              LOSS_NAME).outputs[0])
  return session_run_hook.SessionRunArgs(
      {'global_step': contrib_framework.get_global_step(),
       'current_loss': loss})
Example 3: model_fn
def model_fn(features, labels, mode):
  """Builds generic graph for training or eval."""
  # TODO logits = A tensor representing the pre-softmax likelihood of
  # each digit.
  tensors = {}

  # Add to the Graph the Ops for loss calculation.
  if mode == ModeKeys.INFER:
    # TODO tensors['digit'] = Tensor representing the predicted digit for 'features'
    # Since 'labels' is None we can't calculate a loss
    loss_op = None
  else:
    # TODO loss_op = Operation to calculate loss
    tensors['loss'] = loss_op
    tf.scalar_summary('loss', loss_op)

  # Add to the Graph the Ops for accuracy calculation.
  if mode == ModeKeys.EVAL:
    # TODO accuracy_op = Calculate the accuracy of the inferred digits given 'labels'
    tensors['accuracy'] = accuracy_op
    tf.scalar_summary('training/hptuning/metric', accuracy_op)

  # Add to the Graph the Ops that calculate and apply gradients.
  if mode == ModeKeys.TRAIN:
    global_step = framework.get_global_step()
    # TODO train_op = the gradient descent optimizer with the given learning rate
    # that minimizes the loss
  else:
    train_op = None

  return tensors, loss_op, train_op
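Example 3 is an exercise skeleton: the TODO comments describe what each op should compute but leave the implementation to the reader. Below is one hedged way the blanks might be filled in, assuming an MNIST-style setup where `features` is a batch of flattened images, `labels` holds integer digit classes, and a `learning_rate` value is available; these assumptions, and the use of tf.summary.scalar in place of the older tf.scalar_summary, are not part of the original sample.

# Hypothetical completion of the skeleton above -- a sketch only.
# Assumptions: `features` is [batch, 784] float32, `labels` is [batch] int64.
import tensorflow as tf
from tensorflow.contrib import framework
from tensorflow.contrib import layers
from tensorflow.contrib.learn import ModeKeys


def model_fn(features, labels, mode, learning_rate=0.01):
  """Builds a generic graph for training or eval (illustrative sketch)."""
  logits = layers.linear(features, 10)  # pre-softmax score for each digit
  tensors = {}

  if mode == ModeKeys.INFER:
    tensors['digit'] = tf.argmax(logits, 1)  # predicted digit for 'features'
    loss_op = None  # 'labels' is None, so no loss can be computed
  else:
    loss_op = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels))
    tensors['loss'] = loss_op
    tf.summary.scalar('loss', loss_op)

  if mode == ModeKeys.EVAL:
    accuracy_op = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(logits, 1), labels), tf.float32))
    tensors['accuracy'] = accuracy_op
    tf.summary.scalar('training/hptuning/metric', accuracy_op)

  train_op = None
  if mode == ModeKeys.TRAIN:
    # Gradient descent with the given learning rate; passing the global step
    # makes the optimizer increment it on every update.
    train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss_op, global_step=framework.get_global_step())

  return tensors, loss_op, train_op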
Example 4: softmax_model
def softmax_model(X, Y_, mode):
  Ylogits = layers.linear(X, 10)
  predict = tf.nn.softmax(Ylogits)
  classes = tf.cast(tf.argmax(predict, 1), tf.uint8)
  loss = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(Ylogits, tf.one_hot(Y_, 10))) * 100
  train_op = layers.optimize_loss(loss, framework.get_global_step(), 0.003, "Adam")
  return {"predictions": predict, "classes": classes}, loss, train_op
Example 5: _get_train_ops
def _get_train_ops(self, features, targets):
  """Method that builds model graph and returns trainer ops.

  Args:
    features: `Tensor` or `dict` of `Tensor` objects.
    targets: `Tensor` or `dict` of `Tensor` objects.

  Returns:
    Tuple of train `Operation` and loss `Tensor`.
  """
  features, spec = data_ops.ParseDataTensorOrDict(features)
  labels = data_ops.ParseLabelTensorOrDict(targets)

  graph_builder = self.graph_builder_class(
      self.params, device_assigner=self.device_assigner,
      **self.construction_args)

  epoch = None
  if self.data_feeder:
    epoch = self.data_feeder.make_epoch_variable()

  train = control_flow_ops.group(
      graph_builder.training_graph(
          features, labels, data_spec=spec, epoch=epoch,
          **self.training_args),
      state_ops.assign_add(contrib_framework.get_global_step(), 1))

  self.training_loss = graph_builder.training_loss()
  return train, self.training_loss
Example 6: auto_encoder
def auto_encoder(x_1, x_2, x_mask_1, x_mask_2, y, dropout, opt):
  x_1_emb, W_emb = embedding(x_1, opt)  # batch L emb
  x_2_emb = tf.nn.embedding_lookup(W_emb, x_2)

  x_1_emb = tf.nn.dropout(x_1_emb, dropout)  # batch L emb
  x_2_emb = tf.nn.dropout(x_2_emb, dropout)  # batch L emb

  biasInit = tf.constant_initializer(0.001, dtype=tf.float32)
  x_1_emb = layers.fully_connected(
      tf.squeeze(x_1_emb), num_outputs=opt.embed_size,
      biases_initializer=biasInit, activation_fn=tf.nn.relu,
      scope='trans', reuse=None)  # batch L emb
  x_2_emb = layers.fully_connected(
      tf.squeeze(x_2_emb), num_outputs=opt.embed_size,
      biases_initializer=biasInit, activation_fn=tf.nn.relu,
      scope='trans', reuse=True)

  x_1_emb = tf.expand_dims(x_1_emb, 3)  # batch L emb 1
  x_2_emb = tf.expand_dims(x_2_emb, 3)

  if opt.encoder == 'aver':
    H_enc_1 = aver_emb_encoder(x_1_emb, x_mask_1)
    H_enc_2 = aver_emb_encoder(x_2_emb, x_mask_2)
  elif opt.encoder == 'max':
    H_enc_1 = max_emb_encoder(x_1_emb, x_mask_1, opt)
    H_enc_2 = max_emb_encoder(x_2_emb, x_mask_2, opt)
  elif opt.encoder == 'concat':
    H_enc_1 = concat_emb_encoder(x_1_emb, x_mask_1, opt)
    H_enc_2 = concat_emb_encoder(x_2_emb, x_mask_2, opt)

  # discriminative loss term
  if opt.combine_enc == 'mult':
    H_enc = tf.multiply(H_enc_1, H_enc_2)  # batch * n_gan
  if opt.combine_enc == 'concat':
    H_enc = tf.concat([H_enc_1, H_enc_2], 1)
  if opt.combine_enc == 'sub':
    H_enc = tf.subtract(H_enc_1, H_enc_2)
  if opt.combine_enc == 'mix':
    H_1 = tf.multiply(H_enc_1, H_enc_2)
    H_2 = tf.concat([H_enc_1, H_enc_2], 1)
    H_3 = tf.subtract(H_enc_1, H_enc_2)
    H_enc = tf.concat([H_1, H_2, H_3], 1)

  # calculate the accuracy
  logits = discriminator_2layer(
      H_enc, opt, dropout, prefix='classify_', num_outputs=opt.category,
      is_reuse=None)
  prob = tf.nn.softmax(logits)
  correct_prediction = tf.equal(tf.argmax(prob, 1), tf.argmax(y, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

  loss = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
  train_op = layers.optimize_loss(
      loss,
      framework.get_global_step(),
      optimizer='Adam',
      # variables=d_vars,
      learning_rate=opt.lr)

  return accuracy, loss, train_op, W_emb
Example 7: _loss_to_train_op
def _loss_to_train_op(self, loss):
  """Map `loss` to a training op."""
  with ops.name_scope('loss_to_train_op'):
    trainable_variables = ops.get_default_graph().get_collection(
        ops.GraphKeys.TRAINABLE_VARIABLES)
    global_step = contrib_framework.get_global_step()
    gradients = self._optimizer.compute_gradients(
        loss=loss, var_list=trainable_variables)
    processed_gradients = self._process_gradients(gradients)
    return self._optimizer.apply_gradients(
        processed_gradients, global_step=global_step)
Example 8: _model_fn
def _model_fn(features, targets, mode):
  ops.get_default_graph().add_to_collection('IS_TRAINING', mode == 'train')
  if self.class_weight is not None:
    constant_op.constant(self.class_weight, name='class_weight')
  predictions, loss = model_fn(features, targets)
  if isinstance(self.learning_rate, types.FunctionType):
    learning_rate = self.learning_rate(contrib_framework.get_global_step())
  else:
    learning_rate = self.learning_rate
  if isinstance(self.optimizer, types.FunctionType):
    optimizer = self.optimizer(learning_rate)
  else:
    optimizer = self.optimizer
  train_op = layers.optimize_loss(
      loss,
      contrib_framework.get_global_step(),
      learning_rate=learning_rate,
      optimizer=optimizer,
      clip_gradients=self.clip_gradients)
  return predictions, loss, train_op
Example 9: _build_model
def _build_model(self, data, target):
  ids = tensorflow.split(1, self.n_ids, data)
  node_vectors = [
      learn.ops.categorical_variable(
          ids[i], self.vocabulary_sizes[i], self.layer_size, str(i))
      for i in range(self.n_ids)
  ]
  activation_in = tensorflow.squeeze(tensorflow.concat(2, node_vectors), [1])
  activation_out = layers.stack(
      activation_in, layers.fully_connected, self.hidden_units_formation)
  prediction, loss = learn.models.linear_regression(activation_out, target)
  train_op = layers.optimize_loss(
      loss, framework.get_global_step(), self.learning_rate, "SGD")
  return prediction, loss, train_op
Example 10: _model_fn
def _model_fn(features, labels, mode):
  """Function that returns predictions, training loss, and training op."""
  weights = None
  if weights_name and weights_name in features:
    weights = features.pop(weights_name)

  graph_builder = graph_builder_class(params, device_assigner=device_assigner)
  inference = {}
  if (mode == model_fn_lib.ModeKeys.EVAL or
      mode == model_fn_lib.ModeKeys.INFER):
    inference[eval_metrics.INFERENCE_PROB_NAME] = (
        graph_builder.inference_graph(features))

    if not params.regression:
      inference[eval_metrics.INFERENCE_PRED_NAME] = math_ops.argmax(
          inference[eval_metrics.INFERENCE_PROB_NAME], 1)

  # labels might be None if we're doing prediction (which brings up the
  # question of why we force everything to adhere to a single model_fn).
  loss_deps = []
  training_graph = None
  if labels is not None and mode == model_fn_lib.ModeKeys.TRAIN:
    training_graph = control_flow_ops.group(
        graph_builder.training_graph(
            features, labels, input_weights=weights,
            num_trainers=num_trainers,
            trainer_id=trainer_id),
        state_ops.assign_add(contrib_framework.get_global_step(), 1))
    loss_deps.append(training_graph)

  training_loss = None
  if (mode == model_fn_lib.ModeKeys.EVAL or
      mode == model_fn_lib.ModeKeys.TRAIN):
    with ops.control_dependencies(loss_deps):
      training_loss = graph_builder.training_loss(
          features, labels, name=LOSS_NAME)
    if report_feature_importances and mode == model_fn_lib.ModeKeys.EVAL:
      training_loss = logging_ops.Print(
          training_loss, [graph_builder.feature_importances()],
          summarize=1000)

  # Put weights back in
  if weights is not None:
    features[weights_name] = weights

  training_hooks = []
  if early_stopping_rounds:
    training_hooks.append(TensorForestLossHook(early_stopping_rounds))

  return model_fn_lib.ModelFnOps(
      mode=mode,
      predictions=inference,
      loss=training_loss,
      train_op=training_graph,
      training_hooks=training_hooks)
Example 11: conv_model
def conv_model(X, Y_, mode):
  XX = tf.reshape(X, [-1, 28, 28, 1])
  biasInit = tf.constant_initializer(0.1, dtype=tf.float32)
  Y1 = layers.conv2d(XX, num_outputs=6, kernel_size=[6, 6], biases_initializer=biasInit)
  Y2 = layers.conv2d(Y1, num_outputs=12, kernel_size=[5, 5], stride=2, biases_initializer=biasInit)
  Y3 = layers.conv2d(Y2, num_outputs=24, kernel_size=[4, 4], stride=2, biases_initializer=biasInit)
  Y4 = layers.flatten(Y3)
  Y5 = layers.relu(Y4, 200, biases_initializer=biasInit)
  Ylogits = layers.linear(Y5, 10)
  predict = tf.nn.softmax(Ylogits)
  classes = tf.cast(tf.argmax(predict, 1), tf.uint8)
  loss = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(Ylogits, tf.one_hot(Y_, 10))) * 100
  train_op = layers.optimize_loss(loss, framework.get_global_step(), 0.001, "Adam")
  return {"predictions": predict, "classes": classes}, loss, train_op
Example 12: _model_fn
def _model_fn(features, labels, mode):
  """Function that returns predictions, training loss, and training op."""
  weights = None
  keys = None
  if weights_name and weights_name in features:
    weights = features.pop(weights_name)
  if keys_name and keys_name in features:
    keys = features.pop(keys_name)

  graph_builder = graph_builder_class(params, device_assigner=device_assigner)
  inference = {}
  if (mode == model_fn_lib.ModeKeys.EVAL or
      mode == model_fn_lib.ModeKeys.INFER):
    inference[eval_metrics.INFERENCE_PROB_NAME] = (
        graph_builder.inference_graph(features))

    if not params.regression:
      inference[eval_metrics.INFERENCE_PRED_NAME] = math_ops.argmax(
          inference[eval_metrics.INFERENCE_PROB_NAME], 1)

    if keys:
      inference[KEYS_NAME] = keys

  # labels might be None if we're doing prediction (which brings up the
  # question of why we force everything to adhere to a single model_fn).
  loss_deps = []
  training_graph = None
  if labels is not None and mode == model_fn_lib.ModeKeys.TRAIN:
    training_graph = control_flow_ops.group(
        graph_builder.training_graph(
            features, labels, input_weights=weights,
            num_trainers=num_trainers,
            trainer_id=trainer_id),
        state_ops.assign_add(contrib_framework.get_global_step(), 1))
    loss_deps.append(training_graph)

  training_loss = None
  if (mode == model_fn_lib.ModeKeys.EVAL or
      mode == model_fn_lib.ModeKeys.TRAIN):
    with ops.control_dependencies(loss_deps):
      training_loss = graph_builder.training_loss(
          features, labels, name=LOSS_NAME)

  # Put weights back in
  if weights is not None:
    features[weights_name] = weights

  return (inference, training_loss, training_graph)
Example 13: _model_fn
def _model_fn(features, labels):
  """Function that returns predictions, training loss, and training op."""
  weights = None
  keys = None
  if weights_name and weights_name in features:
    weights = features.pop(weights_name)
  if keys_name and keys_name in features:
    keys = features.pop(keys_name)
  processed_features, spec = data_ops.ParseDataTensorOrDict(features)
  _assert_float32(processed_features)
  if labels is not None:
    labels = data_ops.ParseLabelTensorOrDict(labels)
    _assert_float32(labels)

  graph_builder = graph_builder_class(params, device_assigner=device_assigner)
  inference = {eval_metrics.INFERENCE_PROB_NAME:
               graph_builder.inference_graph(processed_features,
                                             data_spec=spec)}
  if not params.regression:
    inference[eval_metrics.INFERENCE_PRED_NAME] = math_ops.argmax(
        inference[eval_metrics.INFERENCE_PROB_NAME], 1)
  if keys:
    inference[KEYS_NAME] = keys

  # labels might be None if we're doing prediction (which brings up the
  # question of why we force everything to adhere to a single model_fn).
  training_loss = None
  training_graph = None
  if labels is not None:
    training_loss = graph_builder.training_loss(processed_features, labels,
                                                data_spec=spec,
                                                name=LOSS_NAME)
    training_graph = control_flow_ops.group(
        graph_builder.training_graph(
            processed_features, labels, data_spec=spec,
            input_weights=weights),
        state_ops.assign_add(contrib_framework.get_global_step(), 1))

  # Put weights back in
  if weights is not None:
    features[weights_name] = weights

  return (inference, training_loss, training_graph)
Example 14: _get_train_ops
def _get_train_ops(self, features, targets):
  """Method that builds model graph and returns trainer ops.

  Expected to be overridden by sub-classes that require custom support.
  This implementation uses `model_fn` passed as parameter to constructor to
  build model.

  Args:
    features: `Tensor` or `dict` of `Tensor` objects.
    targets: `Tensor` or `dict` of `Tensor` objects.

  Returns:
    Tuple of train `Operation` and loss `Tensor`.
  """
  _, loss = self._model_fn(features, targets, ModeKeys.TRAIN)
  train_op = layers.optimize_loss(
      loss,
      contrib_framework.get_global_step(),
      learning_rate=self.learning_rate,
      optimizer=self.optimizer,
      clip_gradients=self.clip_gradients)
  return train_op, loss
Example 15: _get_train_ops
def _get_train_ops(self, features, targets):
  """Method that builds model graph and returns trainer ops.

  Args:
    features: `Tensor` or `dict` of `Tensor` objects.
    targets: `Tensor` or `dict` of `Tensor` objects.

  Returns:
    Tuple of train `Operation` and loss `Tensor`.
  """
  features, _, weights, spec = data_ops.ParseDataTensorOrDict(features)
  labels = data_ops.ParseLabelTensorOrDict(targets)
  features, labels = self._feature_engineering_fn(features, labels)
  _assert_float32(features)
  _assert_float32(labels)

  if weights is not None:
    if 'input_weights' in self.training_args:
      logging.warning('Replacing input_weights in training_args.')
    self.training_args['input_weights'] = weights

  graph_builder = self.graph_builder_class(
      self.params, device_assigner=self.device_assigner,
      **self.construction_args)

  epoch = None
  if self.data_feeder:
    epoch = self.data_feeder.make_epoch_variable()

  train = control_flow_ops.group(
      graph_builder.training_graph(
          features, labels, data_spec=spec, epoch=epoch,
          **self.training_args),
      state_ops.assign_add(contrib_framework.get_global_step(), 1))

  self.training_loss = graph_builder.training_loss(features, targets)
  return train, self.training_loss