This page collects typical usage examples of the Python function tensorflow.python.ops.variables.initialize_all_variables. If you have been wondering what initialize_all_variables does and how to call it in practice, the curated examples below should help.
The following presents 15 code examples of initialize_all_variables, sorted by popularity by default.
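Before diving into the examples, here is a minimal, self-contained sketch of how this initializer is typically driven in TF 1.x graph mode. Note that initialize_all_variables was deprecated in favor of tf.global_variables_initializer; the two behave identically for the purposes of these examples.

import tensorflow as tf

v = tf.Variable([1.0, 2.0], name="v")
init = tf.initialize_all_variables()  # deprecated alias of tf.global_variables_initializer()
with tf.Session() as sess:
  sess.run(init)      # variables hold no value until the init op has run
  print(sess.run(v))  # [1. 2.]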
Example 1: _testTypesForFtrl
def _testTypesForFtrl(self, x, y, z, lr, grad, use_gpu=None, l1=0.0,
                      l2=0.0, lr_power=-0.5):
  self.setUp()
  with self.test_session(use_gpu=use_gpu):
    var = variables.Variable(x)
    accum = variables.Variable(y)
    linear = variables.Variable(z)
    variables.initialize_all_variables().run()
    self.assertAllCloseAccordingToType(x, var.eval())
    apply_ftrl = training_ops.apply_ftrl(var, accum, linear, grad, lr, l1, l2,
                                         lr_power)
    out = apply_ftrl.eval()
    self.assertShapeEqual(out, apply_ftrl)
    accum_update = y + grad * grad
    linear_update = z + grad - (
        accum_update ** (-lr_power) - y ** (-lr_power)) / lr * x
    quadratic = 1.0 / (accum_update ** lr_power * lr) + 2 * l2
    expected_out = np.array([
        (np.sign(linear_update[i]) * l1 - linear_update[i]) / quadratic[i]
        if np.abs(linear_update[i]) > l1 else 0.0
        for i in range(linear_update.size)])
    self.assertAllCloseAccordingToType(accum_update, accum.eval())
    if x.dtype == np.float16:
      # The calculations here really are not very precise in float16.
      self.assertAllClose(linear_update, linear.eval(), rtol=2e-2, atol=2e-2)
      self.assertAllClose(expected_out, out, rtol=2e-2, atol=2e-2)
    else:
      self.assertAllClose(linear_update, linear.eval())
      self.assertAllClose(expected_out, out)
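For reference, the FTRL-proximal update being checked above can be restated compactly in numpy. This is my own sketch of the same closed form the test computes inline, not code from the test file:

import numpy as np

def ftrl_reference(var, accum, linear, grad, lr, l1=0.0, l2=0.0, lr_power=-0.5):
  accum_new = accum + grad * grad
  # sigma is the per-coordinate change in the learning-rate scaling.
  sigma = (accum_new ** -lr_power - accum ** -lr_power) / lr
  linear_new = linear + grad - sigma * var
  quadratic = 1.0 / (accum_new ** lr_power * lr) + 2.0 * l2
  # Coordinates whose |linear| stays within the l1 ball are snapped to zero.
  var_new = np.where(np.abs(linear_new) > l1,
                     (np.sign(linear_new) * l1 - linear_new) / quadratic,
                     0.0)
  return var_new, accum_new, linear_new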
Example 2: test_multiple_random_3d_updates_results_in_right_dist
def test_multiple_random_3d_updates_results_in_right_dist(self):
  # Update with uniform 3-D rvs. Resultant histogram should be uniform.
  # Use only 3 bins because with many bins it would be unlikely that all
  # would be close to 1/n. If someone ever wants to test that, it would be
  # better to check that the cdf was linear.
  nbins = [3]
  value_range = [1.0, 4.14159]
  with self.test_session() as sess:
    hist = variables.Variable(array_ops.zeros(nbins, dtype=dtypes.int32))
    new_values = array_ops.placeholder(dtypes.float32, shape=[4, 4, 4])
    hist_update = histogram_ops.histogram_fixed_width(hist, new_values,
                                                      value_range)
    variables.initialize_all_variables().run()
    for _ in range(100):
      # Map the rv: U[0, 1] --> U[value_range[0], value_range[1]].
      new_values_arr = (
          value_range[0] +
          (value_range[1] - value_range[0]) * self.rng.rand(4, 4, 4))
      # The new updated_hist_array is returned by the updating op.
      # hist should contain the updated values.
      updated_hist_array = sess.run(hist_update,
                                    feed_dict={new_values: new_values_arr})
      pmf = updated_hist_array / float(updated_hist_array.sum())
      np.testing.assert_allclose(1 / 3, pmf, atol=0.02)
Example 3: test_two_updates_on_constant_input
def test_two_updates_on_constant_input(self):
  # Bins will be:
  #   (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
  nbins = [5]
  value_range = [0.0, 5.0]
  new_values_1 = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
  new_values_2 = [1.5, 4.5, 4.5, 4.5, 0.0, 0.0]
  expected_bin_counts_1 = [2, 1, 1, 0, 2]
  expected_bin_counts_2 = [4, 2, 1, 0, 5]
  with self.test_session() as sess:
    hist = variables.Variable(array_ops.zeros(nbins, dtype=dtypes.int32))
    new_values = array_ops.placeholder(dtypes.float32, shape=[6])
    hist_update = histogram_ops.histogram_fixed_width(hist, new_values,
                                                      value_range)
    variables.initialize_all_variables().run()
    updated_hist_array = sess.run(hist_update,
                                  feed_dict={new_values: new_values_1})
    # The new updated_hist_array is returned by the updating op.
    # hist should contain the updated values.
    self.assertAllClose(expected_bin_counts_1, updated_hist_array)
    self.assertAllClose(expected_bin_counts_1, hist.eval())
    updated_hist_array = sess.run(hist_update,
                                  feed_dict={new_values: new_values_2})
    self.assertAllClose(expected_bin_counts_2, updated_hist_array)
    self.assertAllClose(expected_bin_counts_2, hist.eval())
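The binning rule in the two histogram examples is easy to reproduce in plain numpy: values outside value_range get clipped into the first or last bin. A quick sketch (my own, not the TF implementation):

import numpy as np

def fixed_width_hist(values, value_range, nbins):
  lo, hi = value_range
  # Map each value to a bin index, then clip out-of-range indices to the edges.
  idx = np.floor((np.asarray(values) - lo) / (hi - lo) * nbins).astype(np.int64)
  idx = np.clip(idx, 0, nbins - 1)
  return np.bincount(idx, minlength=nbins)

print(fixed_width_hist([-1.0, 0.0, 1.5, 2.0, 5.0, 15], (0.0, 5.0), 5))
# -> [2 1 1 0 2], matching expected_bin_counts_1 above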
Example 4: _testTypesForAdam
def _testTypesForAdam(self, var, m, v, grad, use_gpu):
  self.setUp()
  with self.test_session(use_gpu=use_gpu):
    var_t = variables.Variable(var)
    m_t = variables.Variable(m)
    v_t = variables.Variable(v)
    t = 1
    beta1 = np.array(0.9, dtype=var.dtype)
    beta2 = np.array(0.999, dtype=var.dtype)
    beta1_power = beta1**t
    beta2_power = beta2**t
    lr = np.array(0.001, dtype=var.dtype)
    epsilon = np.array(1e-8, dtype=var.dtype)
    beta1_t = constant_op.constant(beta1, self._toType(var.dtype), [])
    beta2_t = constant_op.constant(beta2, self._toType(var.dtype), [])
    beta1_power_t = variables.Variable(beta1_power)
    beta2_power_t = variables.Variable(beta2_power)
    lr_t = constant_op.constant(lr, self._toType(var.dtype), [])
    epsilon_t = constant_op.constant(epsilon, self._toType(var.dtype), [])
    variables.initialize_all_variables().run()
    self.assertAllCloseAccordingToType(var, var_t.eval())
    new_var, _, _ = self._adamUpdateNumpy(var, grad, t, m, v, lr, beta1,
                                          beta2, epsilon)
    apply_adam = training_ops.apply_adam(var_t, m_t, v_t, beta1_power_t,
                                         beta2_power_t, lr_t, beta1_t,
                                         beta2_t, epsilon_t, grad)
    out = apply_adam.eval()
    self.assertShapeEqual(out, apply_adam)
    self.assertAllCloseAccordingToType(new_var, out)
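The helper self._adamUpdateNumpy is not shown on this page. As a plausible numpy reference (my sketch, under the assumption that the test checks against the standard Adam update with bias correction):

import numpy as np

def adam_update_numpy(param, g, t, m, v,
                      lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
  lr_t = lr * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)  # bias-corrected step
  m_t = beta1 * m + (1 - beta1) * g      # first-moment estimate
  v_t = beta2 * v + (1 - beta2) * g * g  # second-moment estimate
  param_t = param - lr_t * m_t / (np.sqrt(v_t) + epsilon)
  return param_t, m_t, v_t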
Example 5: _CheckDecay
def _CheckDecay(self, ema, actual_decay, dim):
  tens = _Repeat(10.0, dim)
  thirties = _Repeat(30.0, dim)
  var0 = variables.Variable(tens, name="v0")
  var1 = variables.Variable(thirties, name="v1")
  variables.initialize_all_variables().run()
  # Note that tensor2 is not a Variable but just a plain Tensor resulting
  # from the sum operation.
  tensor2 = var0 + var1
  update = ema.apply([var0, var1, tensor2])
  avg0 = ema.average(var0)
  avg1 = ema.average(var1)
  avg2 = ema.average(tensor2)
  self.assertFalse(avg0 in variables.trainable_variables())
  self.assertFalse(avg1 in variables.trainable_variables())
  self.assertFalse(avg2 in variables.trainable_variables())
  variables.initialize_all_variables().run()
  self.assertEqual("v0/ExponentialMovingAverage:0", avg0.name)
  self.assertEqual("v1/ExponentialMovingAverage:0", avg1.name)
  self.assertEqual("add/ExponentialMovingAverage:0", avg2.name)
  # Check initial values.
  self.assertAllClose(tens, var0.eval())
  self.assertAllClose(thirties, var1.eval())
  self.assertAllClose(_Repeat(10.0 + 30.0, dim), tensor2.eval())
  # Check that averages are initialized correctly.
  self.assertAllClose(tens, avg0.eval())
  self.assertAllClose(thirties, avg1.eval())
  # Note that averages of Tensors initialize to zeros_like since no value
  # of the Tensor is known because the Op has not been run (yet).
  self.assertAllClose(_Repeat(0.0, dim), avg2.eval())
  # Update the averages and check.
  update.run()
  dk = actual_decay
  expected = _Repeat(10.0 * dk + 10.0 * (1 - dk), dim)
  self.assertAllClose(expected, avg0.eval())
  expected = _Repeat(30.0 * dk + 30.0 * (1 - dk), dim)
  self.assertAllClose(expected, avg1.eval())
  expected = _Repeat(0.0 * dk + (10.0 + 30.0) * (1 - dk), dim)
  self.assertAllClose(expected, avg2.eval())
  # Again, update the averages and check.
  update.run()
  expected = _Repeat((10.0 * dk + 10.0 * (1 - dk)) * dk + 10.0 * (1 - dk),
                     dim)
  self.assertAllClose(expected, avg0.eval())
  expected = _Repeat((30.0 * dk + 30.0 * (1 - dk)) * dk + 30.0 * (1 - dk),
                     dim)
  self.assertAllClose(expected, avg1.eval())
  expected = _Repeat(((0.0 * dk + (10.0 + 30.0) * (1 - dk)) * dk +
                      (10.0 + 30.0) * (1 - dk)),
                     dim)
  self.assertAllClose(expected, avg2.eval())
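All of the expected values in this example follow the one-line exponential-moving-average recurrence. A quick sketch that reproduces avg2's two updates (the decay value here is illustrative; actual_decay is a test parameter):

def ema_step(average, value, decay):
  return decay * average + (1.0 - decay) * value

decay = 0.999  # assumed for illustration
avg2 = 0.0     # averages of plain Tensors start at zeros_like
avg2 = ema_step(avg2, 40.0, decay)  # first update; tensor2 = 10 + 30
avg2 = ema_step(avg2, 40.0, decay)  # second update, matching the last check above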
Example 6: testWithExistingEnsembleAndShrinkage
def testWithExistingEnsembleAndShrinkage(self):
  with self.test_session():
    # Add shrinkage config.
    learning_rate = 0.0001
    tree_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
    # Add 5 trees with some weights.
    for i in range(0, 5):
      tree = tree_ensemble.trees.add()
      _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
      tree_ensemble.tree_weights.append(i + 1)
      meta = tree_ensemble.tree_metadata.add()
      meta.num_tree_weight_updates = 1
    tree_ensemble_handle = model_ops.tree_ensemble_variable(
        stamp_token=0,
        tree_ensemble_config=tree_ensemble.SerializeToString(),
        name="existing")
    # Create non-zero feature importance.
    feature_usage_counts = variables.Variable(
        initial_value=np.array([4, 7], np.int64),
        name="feature_usage_counts",
        trainable=False)
    feature_gains = variables.Variable(
        initial_value=np.array([0.2, 0.8], np.float32),
        name="feature_gains",
        trainable=False)
    resources.initialize_resources(resources.shared_resources()).run()
    variables.initialize_all_variables().run()
    output_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
    with ops.control_dependencies([
        ensemble_optimizer_ops.add_trees_to_ensemble(
            tree_ensemble_handle,
            self._ensemble_to_add.SerializeToString(),
            feature_usage_counts, [1, 2],
            feature_gains, [0.5, 0.3], [[], []],
            learning_rate=learning_rate)
    ]):
      output_ensemble.ParseFromString(
          model_ops.tree_ensemble_serialize(tree_ensemble_handle)[1].eval())
    # The weights of the previous trees stayed the same; the new (last) tree
    # is added with the shrinkage weight.
    self.assertAllClose([1.0, 2.0, 3.0, 4.0, 5.0, learning_rate],
                        output_ensemble.tree_weights)
    # Check that all numbers of updates are equal to 1 (i.e., no old tree
    # weight got adjusted).
    for i in range(0, 6):
      self.assertEqual(
          1, output_ensemble.tree_metadata[i].num_tree_weight_updates)
    # Ensure feature importance was aggregated correctly.
    self.assertAllEqual([5, 9], feature_usage_counts.eval())
    self.assertArrayNear(
        [0.2 + 0.5 * learning_rate, 0.8 + 0.3 * learning_rate],
        feature_gains.eval(), 1e-6)
Example 7: _testTypes
def _testTypes(self, x, alpha, delta, use_gpu=None):
  self.setUp()
  with self.test_session(use_gpu=use_gpu):
    var = variables.Variable(x)
    variables.initialize_all_variables().run()
    self.assertAllCloseAccordingToType(x, var.eval())
    apply_sgd = training_ops.apply_gradient_descent(var, alpha, delta)
    out = apply_sgd.eval()
    self.assertShapeEqual(out, apply_sgd)
    self.assertAllCloseAccordingToType(x - alpha * delta, out)
Example 8: testAssignMovingAverage
def testAssignMovingAverage(self):
  with self.test_session():
    var = variables.Variable([10.0, 11.0])
    val = constant_op.constant([1.0, 2.0], types.float32)
    decay = 0.25
    assign = moving_averages.assign_moving_average(var, val, decay)
    variables.initialize_all_variables().run()
    self.assertAllClose([10.0, 11.0], var.eval())
    assign.op.run()
    self.assertAllClose([10.0 * 0.25 + 1.0 * (1.0 - 0.25),
                         11.0 * 0.25 + 2.0 * (1.0 - 0.25)],
                        var.eval())
Example 9: _testTypesForAdagrad
def _testTypesForAdagrad(self, x, y, lr, grad, use_gpu=None):
  self.setUp()
  with self.test_session(use_gpu=use_gpu):
    var = variables.Variable(x)
    accum = variables.Variable(y)
    variables.initialize_all_variables().run()
    self.assertAllCloseAccordingToType(x, var.eval())
    apply_adagrad = training_ops.apply_adagrad(var, accum, lr, grad)
    out = apply_adagrad.eval()
    self.assertShapeEqual(out, apply_adagrad)
    self.assertAllCloseAccordingToType(
        x - lr * grad * (y + grad * grad) ** (-0.5), out)
    self.assertAllCloseAccordingToType(y + grad * grad, accum.eval())
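The Adagrad closed form asserted above, restated in numpy for readability (my own sketch):

import numpy as np

def adagrad_reference(var, accum, lr, grad):
  accum_new = accum + grad * grad                 # accumulate squared gradients
  var_new = var - lr * grad / np.sqrt(accum_new)  # per-coordinate damped step
  return var_new, accum_new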
Example 10: testDenseLayerJitScopeUndefinedShape
def testDenseLayerJitScopeUndefinedShape(self):
  """Tests that the dense layer node is properly compiled in jit scope.

  Dense layer uses shape op to get shape of input tensor if its shape is not
  fully defined. XLA does not cluster shape op with other operators. But in
  experimental_jit_scope, XLA is forced to compile shape op into its own
  cluster, causing dense layer to be split into TWO XlaLaunch ops.
  """
  with self.test_session() as sess:
    x = array_ops.placeholder(shape=[None, None, 3], dtype=np.float32)
    with jit_scope():
      y = layers.dense(x, 3)
    sess.run(variables.initialize_all_variables())
    run_metadata = config_pb2.RunMetadata()
    sess.run(
        y, {x: np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])},
        run_metadata=run_metadata,
        options=config_pb2.RunOptions(
            trace_level=config_pb2.RunOptions.FULL_TRACE))
    labels = GetRunMetadataLabels(run_metadata)
    self.assertEqual(2, XlaLaunchOpCount(labels))
    self.assertFalse(InLabels(labels, "ListDiff"))
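The page omits this test module's imports; in TF 1.x, jit_scope in XLA tests of this kind is typically bound as below. This is an assumption on my part, based on the contrib API of that era rather than this page's source:

from tensorflow.contrib.compiler import jit

jit_scope = jit.experimental_jit_scope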
Example 11: testDenseLayerAutoJit
def testDenseLayerAutoJit(self):
  """Tests dense layer compilation in auto-jit mode.

  Dense layer should be compiled into a single XlaLaunch op in auto-jit mode.
  """
  os.environ["TF_XLA_FLAGS"] = "--tf_xla_cpu_global_jit"
  config = config_pb2.ConfigProto()
  config.graph_options.optimizer_options.global_jit_level = (
      config_pb2.OptimizerOptions.ON_1)
  with self.test_session(config=config) as sess:
    x = array_ops.placeholder(shape=[None, None, 3], dtype=np.float32)
    y = layers.dense(x, 3)
    sess.run(variables.initialize_all_variables())
    run_metadata = config_pb2.RunMetadata()
    sess.run(
        y, {x: np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])},
        run_metadata=run_metadata,
        options=config_pb2.RunOptions(
            trace_level=config_pb2.RunOptions.FULL_TRACE))
    labels = GetRunMetadataLabels(run_metadata)
    self.assertEqual(1, XlaLaunchOpCount(labels))
    self.assertFalse(InLabels(labels, "ListDiff"))
Example 12: test_train_worker_monitor
def test_train_worker_monitor(self):
  # We need to explicitly set device due to check on non-chief workers
  # requiring all variables to have a device assigned.
  with tf.Graph().as_default() as g, g.device('/cpu:0'):
    global_step = tf.contrib.framework.create_global_step(g)
    train_op = tf.assign_add(global_step, 1)
    loss_op = tf.constant(2.0)
    tf.scalar_summary('loss', loss_op)
    # Add explicit "local" init op to initialize all variables
    # as there's no chief to init here.
    init_op = variables.initialize_all_variables()
    ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, init_op)
    # Create worker monitors where one should be active on the worker
    # and the other chief exclusive.
    chief_exclusive_monitor = _BaseMonitorWrapper(False)
    all_workers_monitor = _BaseMonitorWrapper(True)
    with self.test_session(g):
      loss = learn.graph_actions.train(
          g, output_dir=self._output_dir,
          global_step_tensor=global_step,
          train_op=train_op, loss_op=loss_op,
          supervisor_is_chief=False, steps=1,
          monitors=[chief_exclusive_monitor, all_workers_monitor])
    self.assertEqual(2.0, loss)
    self.assertTrue(not chief_exclusive_monitor.is_active and
                    all_workers_monitor.is_active,
                    'Only non-chief runnable monitor must have been active.')
    self.assertTrue(not chief_exclusive_monitor.has_step and
                    all_workers_monitor.has_step,
                    'Only non-chief runnable monitor must have a step.')
Example 13: testReadWrite
def testReadWrite(self):
  """Tests initialization, reading, and writing a resource variable."""
  with self.test_session() as session:
    with self.test_scope():
      with variable_scope.variable_scope("ascope", use_resource=True):
        x = variable_scope.get_variable(
            "x",
            shape=[],
            dtype=dtypes.float32,
            initializer=init_ops.constant_initializer(2))
        a = x.read_value()
        with ops.control_dependencies([a]):
          b = state_ops.assign(x, 47)
        with ops.control_dependencies([b]):
          c = x.read_value()
        with ops.control_dependencies([c]):
          d = state_ops.assign_add(x, 3)
        with ops.control_dependencies([d]):
          e = x.read_value()
        session.run(variables.initialize_all_variables())
        v1, v2, v3 = session.run([a, c, e])
        self.assertAllClose(2.0, v1)
        self.assertAllClose(47.0, v2)
        self.assertAllClose(50.0, v3)
Example 14: testKernelStateTensor
def testKernelStateTensor(self):
  """Test that transition kernel works with tensor input to `state`."""
  loc = variable_scope.get_variable("loc", initializer=0.)

  def target_log_prob_fn(loc):
    return normal_lib.Normal(loc=0.0, scale=0.1).log_prob(loc)

  new_state, _ = mh.kernel(
      target_log_prob_fn=target_log_prob_fn,
      proposal_fn=mh.proposal_normal(scale=0.05),
      current_state=loc,
      seed=231251)
  loc_update = loc.assign(new_state)

  init = variables.initialize_all_variables()
  with self.test_session() as sess:
    sess.run(init)
    loc_samples = []
    for _ in range(2500):
      loc_sample = sess.run(loc_update)
      loc_samples.append(loc_sample)
  loc_samples = loc_samples[500:]  # drop samples for burn-in

  self.assertAllClose(np.mean(loc_samples), 0.0, rtol=1e-5, atol=1e-1)
  self.assertAllClose(np.std(loc_samples), 0.1, rtol=1e-5, atol=1e-1)
Example 15: testAcceptsRefs
def testAcceptsRefs(self):
  var = variables.Variable(10)
  result = math_ops.scalar_mul(3, var)
  init = variables.initialize_all_variables()
  with self.test_session() as sess:
    sess.run(init)
    self.assertEqual(30, result.eval())