This article collects typical usage examples of the Python method tensorflow.compat.v1.global_variables_initializer. If you are wondering what v1.global_variables_initializer does or how to call it, the curated code examples below may help. You can also explore further usage examples from the tensorflow.compat.v1 module.
The following shows 15 code examples of the v1.global_variables_initializer method, sorted by popularity by default.
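Before the examples, here is a minimal, self-contained sketch of the basic pattern (written for TensorFlow 2.x using the v1 compatibility API; it is an illustration, not taken from the examples below): create the variables first, then run global_variables_initializer() in a session before any op that reads them.

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # Run in graph mode so Session-based code behaves as in TF1.

v = tf.Variable(3, name="counter")
increment = tf.assign(v, v + 1)

with tf.Session() as sess:
  # Variables must be initialized before they are read or updated.
  sess.run(tf.global_variables_initializer())
  sess.run(increment)
  print(sess.run(v))  # -> 4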
Example 1: testAppendGradientsWithLossScaleWithAutoScaleDisabled
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import global_variables_initializer [as alias]
def testAppendGradientsWithLossScaleWithAutoScaleDisabled(self):
  v = tf.Variable(0)
  training_ops = []
  get_apply_gradients_ops_func = lambda: [tf.assign(v, v + 1)]
  loss_scale_params = variable_mgr_util.AutoLossScaleParams(
      enable_auto_loss_scale=False,  # No auto loss scale.
      loss_scale=tf.Variable(4),
      loss_scale_normal_steps=tf.Variable(10),
      inc_loss_scale_every_n=10,
      is_chief=True)
  variable_mgr_util.append_gradients_with_loss_scale(
      training_ops,
      get_apply_gradients_ops_func,
      loss_scale_params,
      grad_has_inf_nan=True)

  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(training_ops)
    self.assertEqual(sess.run(v), 1)
    self.assertEqual(sess.run(loss_scale_params.loss_scale), 4)
    self.assertEqual(sess.run(loss_scale_params.loss_scale_normal_steps), 10)
Example 2: testAppendGradientsWithLossScaleForNonChiefWorker
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import global_variables_initializer [as alias]
def testAppendGradientsWithLossScaleForNonChiefWorker(self):
  v = tf.Variable(0)
  training_ops = []
  get_apply_gradients_ops_func = lambda: [tf.assign(v, v + 1)]
  loss_scale_params = variable_mgr_util.AutoLossScaleParams(
      enable_auto_loss_scale=True,
      loss_scale=tf.Variable(4),
      loss_scale_normal_steps=tf.Variable(10),
      inc_loss_scale_every_n=10,
      is_chief=False)  # Non-chief
  variable_mgr_util.append_gradients_with_loss_scale(
      training_ops,
      get_apply_gradients_ops_func,
      loss_scale_params,
      grad_has_inf_nan=False)

  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(training_ops)
    self.assertEqual(sess.run(v), 1)
    self.assertEqual(sess.run(loss_scale_params.loss_scale), 4)
    self.assertEqual(sess.run(loss_scale_params.loss_scale_normal_steps), 10)
Example 3: testAppendGradientsWithLossScaleWithtNan
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import global_variables_initializer [as alias]
def testAppendGradientsWithLossScaleWithtNan(self):
  v = tf.Variable(0)
  training_ops = []
  get_apply_gradients_ops_func = lambda: [tf.assign(v, v + 1)]
  loss_scale_params = variable_mgr_util.AutoLossScaleParams(
      enable_auto_loss_scale=True,
      loss_scale=tf.Variable(4, dtype=tf.float32),
      loss_scale_normal_steps=tf.Variable(10),
      inc_loss_scale_every_n=10,
      is_chief=True)
  variable_mgr_util.append_gradients_with_loss_scale(
      training_ops,
      get_apply_gradients_ops_func,
      loss_scale_params,
      grad_has_inf_nan=tf.constant(True))

  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(training_ops)
    self.assertEqual(sess.run(v), 0)  # Skip updating for v.
    # Halve loss_scale and reset loss_scale_normal_steps.
    self.assertEqual(sess.run(loss_scale_params.loss_scale), 2)
    self.assertEqual(sess.run(loss_scale_params.loss_scale_normal_steps), 0)
Example 4: evaluate
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import global_variables_initializer [as alias]
def evaluate(self, env_fn, hparams, sampling_temp):
  with tf.Graph().as_default():
    with tf.name_scope("rl_eval"):
      eval_env = env_fn(in_graph=True)
      (collect_memory, _, collect_init) = _define_collect(
          eval_env,
          hparams,
          "ppo_eval",
          eval_phase=True,
          frame_stack_size=self.frame_stack_size,
          force_beginning_resets=False,
          sampling_temp=sampling_temp,
          distributional_size=self._distributional_size,
      )
      model_saver = tf.train.Saver(
          tf.global_variables(hparams.policy_network + "/.*")
          # tf.global_variables("clean_scope.*")  # Needed for sharing params.
      )

      with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        collect_init(sess)
        trainer_lib.restore_checkpoint(self.agent_model_dir, model_saver,
                                       sess)
        sess.run(collect_memory)
Example 5: __init__
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import global_variables_initializer [as alias]
def __init__(self, hparams, action_space, observation_space, policy_dir):
  assert hparams.base_algo == "ppo"
  ppo_hparams = trainer_lib.create_hparams(hparams.base_algo_params)

  frame_stack_shape = (1, hparams.frame_stack_size) + observation_space.shape
  self._frame_stack = np.zeros(frame_stack_shape, dtype=np.uint8)

  with tf.Graph().as_default():
    # Use the local frame_stack_shape computed above as the placeholder shape.
    self.obs_t = tf.placeholder(shape=frame_stack_shape, dtype=np.uint8)
    self.logits_t, self.value_function_t = get_policy(
        self.obs_t, ppo_hparams, action_space
    )
    model_saver = tf.train.Saver(
        tf.global_variables(scope=ppo_hparams.policy_network + "/.*")  # pylint: disable=unexpected-keyword-arg
    )
    self.sess = tf.Session()
    self.sess.run(tf.global_variables_initializer())
    trainer_lib.restore_checkpoint(policy_dir, model_saver, self.sess)
Example 6: __init__
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import global_variables_initializer [as alias]
def __init__(
    self, batch_size, observation_space, action_space, policy_hparams,
    policy_dir, sampling_temp
):
  super(PolicyAgent, self).__init__(
      batch_size, observation_space, action_space
  )
  self._sampling_temp = sampling_temp
  with tf.Graph().as_default():
    self._observations_t = tf.placeholder(
        shape=((batch_size,) + self.observation_space.shape),
        dtype=self.observation_space.dtype
    )
    (logits, self._values_t) = rl.get_policy(
        self._observations_t, policy_hparams, self.action_space
    )
    actions = common_layers.sample_with_temperature(logits, sampling_temp)
    self._probs_t = tf.nn.softmax(logits / sampling_temp)
    self._actions_t = tf.cast(actions, tf.int32)
    model_saver = tf.train.Saver(
        tf.global_variables(policy_hparams.policy_network + "/.*")  # pylint: disable=unexpected-keyword-arg
    )
    self._sess = tf.Session()
    self._sess.run(tf.global_variables_initializer())
    trainer_lib.restore_checkpoint(policy_dir, model_saver, self._sess)
Example 7: testAccuracyTopKMetric
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import global_variables_initializer [as alias]
def testAccuracyTopKMetric(self):
  predictions = np.random.randint(1, 5, size=(12, 12, 12, 1))
  targets = np.random.randint(1, 5, size=(12, 12, 12, 1))
  expected = np.mean((predictions == targets).astype(float))
  with self.test_session() as session:
    predicted = tf.one_hot(predictions, depth=5, dtype=tf.float32)
    scores1, _ = metrics.padded_accuracy_topk(
        predicted, tf.constant(targets, dtype=tf.int32), k=1)
    scores2, _ = metrics.padded_accuracy_topk(
        predicted, tf.constant(targets, dtype=tf.int32), k=7)
    a1 = tf.reduce_mean(scores1)
    a2 = tf.reduce_mean(scores2)
    session.run(tf.global_variables_initializer())
    actual1, actual2 = session.run([a1, a2])
  self.assertAlmostEqual(actual1, expected)
  self.assertAlmostEqual(actual2, 1.0)
Example 8: testSequenceEditDistanceMetric
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import global_variables_initializer [as alias]
def testSequenceEditDistanceMetric(self):
  predictions = np.array([[3, 4, 5, 1, 0, 0],
                          [2, 1, 3, 4, 0, 0],
                          [2, 1, 3, 4, 0, 0]])
  # Targets are just a bit different:
  #  - first sequence has a different prediction
  #  - second sequence has a different prediction and one extra step
  #  - third sequence is identical
  targets = np.array([[5, 4, 5, 1, 0, 0],
                      [2, 5, 3, 4, 1, 0],
                      [2, 1, 3, 4, 0, 0]])
  # Reshape to match expected input format by metric fns.
  predictions = np.reshape(predictions, [3, 6, 1, 1])
  targets = np.reshape(targets, [3, 6, 1, 1])
  with self.test_session() as session:
    scores, weight = metrics.sequence_edit_distance(
        tf.one_hot(predictions, depth=6, dtype=tf.float32),
        tf.constant(targets, dtype=tf.int32))
    session.run(tf.global_variables_initializer())
    actual_scores, actual_weight = session.run([scores, weight])
  self.assertAlmostEqual(actual_scores, 3.0 / 13)
  self.assertEqual(actual_weight, 13)
Example 9: testNegativeLogPerplexityMaskedAssert
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import global_variables_initializer [as alias]
def testNegativeLogPerplexityMaskedAssert(self):
  predictions = np.random.randint(4, size=(12, 12, 12, 1))
  targets = np.random.randint(4, size=(12, 12, 12, 1))
  features = {}

  with self.assertRaisesRegexp(
      ValueError,
      'masked_neg_log_perplexity requires targets_mask feature'):
    with self.test_session() as session:
      scores, _ = metrics.padded_neg_log_perplexity_with_masking(
          tf.one_hot(predictions, depth=4, dtype=tf.float32),
          tf.constant(targets, dtype=tf.int32),
          features)
      a = tf.reduce_mean(scores)
      session.run(tf.global_variables_initializer())
      _ = session.run(a)
Example 10: testSigmoidAccuracyOneHot
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import global_variables_initializer [as alias]
def testSigmoidAccuracyOneHot(self):
  logits = np.array([
      [-1., 1.],
      [1., -1.],
      [-1., 1.],
      [1., -1.]
  ])
  labels = np.array([
      [0, 1],
      [1, 0],
      [1, 0],
      [0, 1]
  ])
  logits = np.expand_dims(np.expand_dims(logits, 1), 1)
  labels = np.expand_dims(np.expand_dims(labels, 1), 1)
  with self.test_session() as session:
    score, _ = metrics.sigmoid_accuracy_one_hot(logits, labels)
    session.run(tf.global_variables_initializer())
    session.run(tf.local_variables_initializer())
    s = session.run(score)
  self.assertEqual(s, 0.5)
Example 11: testSigmoidPrecisionOneHot
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import global_variables_initializer [as alias]
def testSigmoidPrecisionOneHot(self):
  logits = np.array([
      [-1., 1.],
      [1., -1.],
      [1., -1.],
      [1., -1.]
  ])
  labels = np.array([
      [0, 1],
      [0, 1],
      [0, 1],
      [0, 1]
  ])
  logits = np.expand_dims(np.expand_dims(logits, 1), 1)
  labels = np.expand_dims(np.expand_dims(labels, 1), 1)
  with self.test_session() as session:
    score, _ = metrics.sigmoid_precision_one_hot(logits, labels)
    session.run(tf.global_variables_initializer())
    session.run(tf.local_variables_initializer())
    s = session.run(score)
  self.assertEqual(s, 0.25)
Example 12: testSigmoidRecallOneHot
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import global_variables_initializer [as alias]
def testSigmoidRecallOneHot(self):
  logits = np.array([
      [-1., 1.],
      [1., -1.],
      [1., -1.],
      [1., -1.]
  ])
  labels = np.array([
      [0, 1],
      [0, 1],
      [0, 1],
      [0, 1]
  ])
  logits = np.expand_dims(np.expand_dims(logits, 1), 1)
  labels = np.expand_dims(np.expand_dims(labels, 1), 1)
  with self.test_session() as session:
    score, _ = metrics.sigmoid_recall_one_hot(logits, labels)
    session.run(tf.global_variables_initializer())
    session.run(tf.local_variables_initializer())
    s = session.run(score)
  self.assertEqual(s, 0.25)
Example 13: testSigmoidCrossEntropyOneHot
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import global_variables_initializer [as alias]
def testSigmoidCrossEntropyOneHot(self):
  logits = np.array([
      [-1., 1.],
      [1., -1.],
      [1., -1.],
      [1., -1.]
  ])
  labels = np.array([
      [0, 1],
      [1, 0],
      [0, 0],
      [0, 1]
  ])
  logits = np.expand_dims(np.expand_dims(logits, 1), 1)
  labels = np.expand_dims(np.expand_dims(labels, 1), 1)
  with self.test_session() as session:
    score, _ = metrics.sigmoid_cross_entropy_one_hot(logits, labels)
    session.run(tf.global_variables_initializer())
    session.run(tf.local_variables_initializer())
    s = session.run(score)
  self.assertAlmostEqual(s, 0.688, places=3)
Example 14: testRocAuc
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import global_variables_initializer [as alias]
def testRocAuc(self):
  logits = np.array([
      [-1., 1.],
      [1., -1.],
      [1., -1.],
      [1., -1.]
  ])
  labels = np.array([
      [1],
      [0],
      [1],
      [0]
  ])
  logits = np.expand_dims(np.expand_dims(logits, 1), 1)
  labels = np.expand_dims(np.expand_dims(labels, 1), 1)
  with self.test_session() as session:
    score, _ = metrics.roc_auc(logits, labels)
    session.run(tf.global_variables_initializer())
    session.run(tf.local_variables_initializer())
    s = session.run(score)
  self.assertAlmostEqual(s, 0.750, places=3)
Example 15: testMultilabelMatch3
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import global_variables_initializer [as alias]
def testMultilabelMatch3(self):
  predictions = np.random.randint(1, 5, size=(100, 1, 1, 1))
  targets = np.random.randint(1, 5, size=(100, 10, 1, 1))
  weights = np.random.randint(0, 2, size=(100, 1, 1, 1))
  targets *= weights
  predictions_repeat = np.repeat(predictions, 10, axis=1)
  expected = (predictions_repeat == targets).astype(float)
  expected = np.sum(expected, axis=(1, 2, 3))
  expected = np.minimum(expected / 3.0, 1.)
  expected = np.sum(expected * weights[:, 0, 0, 0]) / weights.shape[0]
  with self.test_session() as session:
    scores, weights_ = metrics.multilabel_accuracy_match3(
        tf.one_hot(predictions, depth=5, dtype=tf.float32),
        tf.constant(targets, dtype=tf.int32))
    a, a_op = tf.metrics.mean(scores, weights_)
    session.run(tf.local_variables_initializer())
    session.run(tf.global_variables_initializer())
    _ = session.run(a_op)
    actual = session.run(a)
  self.assertAlmostEqual(actual, expected, places=6)
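A closing note on the metrics examples: streaming metrics such as tf.metrics.mean keep their running totals in local variables, which is why several of the tests above run tf.local_variables_initializer() in addition to tf.global_variables_initializer(). The following is a minimal sketch of that pattern (illustrative only, not taken from the examples above):

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

values = tf.constant(np.array([1.0, 2.0, 3.0]), dtype=tf.float32)
mean, update_op = tf.metrics.mean(values)  # Creates local total/count accumulator variables.

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())  # Initializes global (model) variables.
  sess.run(tf.local_variables_initializer())   # Initializes the metric's local accumulators.
  sess.run(update_op)    # Accumulate the batch into the running totals.
  print(sess.run(mean))  # -> 2.0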