This article collects typical usage examples of the tensorflow.global_variables_initializer function in Python. If you are wondering what global_variables_initializer does, how to call it, or what real code that uses it looks like, the curated examples below may help.
The following shows 15 code examples of global_variables_initializer, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
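Before the collected examples, here is a minimal, self-contained sketch (hypothetical variable names, TF 1.x graph mode) of the pattern every example below relies on: create tf.Variable objects while building the graph, then run the op returned by tf.global_variables_initializer() inside a session before evaluating anything that reads those variables.

import tensorflow as tf

# Variables start out uninitialized in TF 1.x graph mode.
weights = tf.Variable(tf.zeros([2, 3]), name="weights")
bias = tf.Variable(0.0, name="bias")
total = tf.reduce_sum(weights) + bias

with tf.Session() as sess:
    # Run the initializer op once, before any read of the variables.
    sess.run(tf.global_variables_initializer())
    print(sess.run(total))  # 0.0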
Example 1: testAdagradDAWithL1
def testAdagradDAWithL1(self):
    for dtype in [tf.float64, tf.float32]:
        with self.test_session() as sess:
            global_step = tf.Variable(0, dtype=tf.int64)
            var0 = tf.Variable([1.0, 2.0], dtype=dtype)
            var1 = tf.Variable([4.0, 3.0], dtype=dtype)
            grads0 = tf.constant([0.1, 0.2], dtype=dtype)
            grads1 = tf.constant([0.01, 0.02], dtype=dtype)
            opt = tf.train.AdagradDAOptimizer(
                3.0,
                global_step,
                initial_gradient_squared_accumulator_value=0.1,
                l1_regularization_strength=0.001,
                l2_regularization_strength=0.0)
            update = opt.apply_gradients(
                zip([grads0, grads1], [var0, var1]), global_step=global_step)
            tf.global_variables_initializer().run()

            v0_val, v1_val = sess.run([var0, var1])
            self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
            self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)

            # Run a step of AdagradDA
            update.run()

            v0_val, v1_val = sess.run([var0, var1])
            self.assertAllCloseAccordingToType(
                np.array([-0.895489, -1.59555]), v0_val)
            self.assertAllCloseAccordingToType(
                np.array([-0.085339, -0.17989]), v1_val)
Example 2: testYesShuffle
def testYesShuffle(self):
    id_source = rs.ReaderSource(reader_cls=tf.IdentityReader,
                                work_units=self.work_units,
                                batch_size=1,
                                shuffle=True,
                                num_threads=10,
                                seed=1234)
    index_column, value_column = id_source()
    cache = {}
    index_tensor = index_column.build(cache)
    value_tensor = value_column.build(cache)
    self.assertEqual([1], index_tensor.get_shape().as_list())
    self.assertEqual([1], value_tensor.get_shape().as_list())
    seen = set([])
    with self.test_session() as sess:
        tf.global_variables_initializer().run()
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for _ in range(500):
            index, value = sess.run([index_tensor, value_tensor])
            self.assertEqual(index, value)
            self.assertNotIn(int(value[0]), seen)
            seen.add(int(value[0]))
        coord.request_stop()
        coord.join(threads)
Example 3: init_training_graph
def init_training_graph(self):
    with tf.name_scope('Evaluation'):
        # self.logits = self.conv_layer_f(self.last, self.logits_weight, strides=[1,1,1,1], scope_name="logits/")
        with tf.name_scope("logits/"):
            self.logits2 = tf.nn.conv2d(self.last, self.logits_weight, strides=[1, 1, 1, 1], padding="VALID")
            self.logits = tf.nn.bias_add(self.logits2, self.logits_biases)
        self.predictions = self.logits
        # self.predictions = tf.squeeze(self.logits, [3])
        # softmax = tf.nn.softmax(self.logits)
        # print softmax.get_shape()
        # self.predictions = tf.slice(softmax, [0, 0, 0, 0], [-1, -1, -1, 1])

        with tf.name_scope('Loss'):
            self.loss = tf.reduce_mean(tf.losses.mean_squared_error(self.logits, self.train_labels_node))
            # self.loss = tf.reduce_mean(tf.losses.mean_squared_error(self.predictions, self.train_labels_node))
            tf.summary.scalar("mean_squared_error", self.loss)

        self.predictions = tf.squeeze(self.predictions, [3])
        self.train_prediction = self.predictions
        self.test_prediction = self.predictions

    tf.global_variables_initializer().run()
    print('Computational graph initialised')
Example 4: testRasterScanKernel
def testRasterScanKernel(self):
    kernel_size = 5
    input_depth = 1
    output_depth = 1
    kernel_shape = [kernel_size, kernel_size, input_depth, output_depth]
    # pylint: disable=bad-whitespace
    kernel_feed = [[ 1.0,  2.0,  3.0,  4.0,  5.0],
                   [ 6.0,  7.0,  8.0,  9.0, 10.0],
                   [11.0, 12.0, 13.0, 14.0, 15.0],
                   [16.0, 17.0, 18.0, 19.0, 20.0],
                   [21.0, 22.0, 23.0, 24.0, 25.0]]
    kernel_feed = np.reshape(kernel_feed, kernel_shape)
    kernel_expected = [[ 1.0,  2.0,  3.0,  4.0,  5.0],
                       [ 6.0,  7.0,  8.0,  9.0, 10.0],
                       [11.0, 12.0,  0.0,  0.0,  0.0],
                       [ 0.0,  0.0,  0.0,  0.0,  0.0],
                       [ 0.0,  0.0,  0.0,  0.0,  0.0]]
    kernel_expected = np.reshape(kernel_expected, kernel_shape)
    # pylint: enable=bad-whitespace
    init_kernel = lambda s, t: tf.constant(kernel_feed, dtype=t, shape=s)
    masked_conv2d = blocks_masked_conv2d.RasterScanConv2D(
        output_depth, [kernel_size] * 2, [1] * 2, 'SAME',
        initializer=init_kernel)
    x = tf.placeholder(dtype=tf.float32, shape=[10] * 3 + [input_depth])
    _ = masked_conv2d(x)

    with self.test_session():
        tf.global_variables_initializer().run()
        kernel_value = masked_conv2d._kernel.eval()
        self.assertAllEqual(kernel_expected, kernel_value)
Example 5: basic_operation
def basic_operation():
    v1 = tf.Variable(10)
    v2 = tf.Variable(5)
    addv = v1 + v2
    print(addv)
    print(type(addv))
    print(type(v1))

    c1 = tf.constant(10)
    c2 = tf.constant(5)
    addc = c1 + c2
    print(addc)
    print(type(addc))
    print(type(c1))

    # The object/instance used to run the computational graph.
    # A session is a runtime.
    sess = tf.Session()

    # Variable -> initialization -> a tensor that holds a concrete value
    tf.global_variables_initializer().run(session=sess)

    print('Variables must be initialized before use')
    print('add(v1, v2) = ', addv.eval(session=sess))
    print('add(v1, v2) = ', sess.run(addv))
    print('add(c1, c2) = ', addc.eval(session=sess))
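As a complement to the example above, the short sketch below (not part of the original example) shows why the initializer call matters: reading an uninitialized tf.Variable in TF 1.x graph mode raises tf.errors.FailedPreconditionError, and the same read succeeds once tf.global_variables_initializer() has been run.

import tensorflow as tf

v = tf.Variable(10)
with tf.Session() as sess:
    try:
        sess.run(v)  # the initializer has not been run yet
    except tf.errors.FailedPreconditionError:
        print('Reading v before running the initializer fails')
    sess.run(tf.global_variables_initializer())
    print(sess.run(v))  # 10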
Example 6: testMultipleDequeue
def testMultipleDequeue(self):
    with self.test_session() as sess:
        batch_size = 10
        image_size = 32
        num_batches = 4
        zero64 = tf.constant(0, dtype=tf.int64)
        examples = tf.Variable(zero64)
        counter = examples.count_up_to(num_batches * batch_size)
        image = tf.random_normal([image_size, image_size, 3], dtype=tf.float32, name="images")
        label = tf.random_uniform([1], 0, 10, dtype=tf.int32, name="labels")
        batches = tf.train.batch([counter, image, label], batch_size=batch_size, num_threads=4)
        batcher = slim.prefetch_queue.prefetch_queue(batches)
        batches_list = [batcher.dequeue() for _ in range(2)]
        tf.global_variables_initializer().run()
        threads = tf.train.start_queue_runners()

        value_counter = []
        for _ in range(int(num_batches / 2)):
            for batches in batches_list:
                results = sess.run(batches)
                value_counter.append(results[0])
                self.assertEquals(results[1].shape, (batch_size, image_size, image_size, 3))
                self.assertEquals(results[2].shape, (batch_size, 1))

        self.assertAllEqual(np.sort(np.concatenate(value_counter)), np.arange(0, num_batches * batch_size))
        # Reached the limit.
        with self.assertRaises(tf.errors.OutOfRangeError):
            sess.run(batches)
        for thread in threads:
            thread.join()
Example 7: train
def train(data_dir, checkpoint_path, config):
    """Trains the model with the given data.

    Args:
        data_dir: path to the data for the model (see data_utils for data
            format)
        checkpoint_path: the path to save the trained model checkpoints
        config: one of the above configs that specify the model and how it
            should be run and trained

    Returns:
        None
    """
    # Prepare Name data.
    print("Reading Name data in %s" % data_dir)
    names, counts = data_utils.read_names(data_dir)

    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            m = NamignizerModel(is_training=True, config=config)

        tf.global_variables_initializer().run()

        for i in range(config.max_max_epoch):
            lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
            m.assign_lr(session, config.learning_rate * lr_decay)

            print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
            train_perplexity = run_epoch(session, m, names, counts, config.epoch_size, m.train_op,
                                         verbose=True)
            print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))

            m.saver.save(session, checkpoint_path, global_step=i)
Example 8: testDenseFeaturesSeparableWithinMargins
def testDenseFeaturesSeparableWithinMargins(self):
    with self._single_threaded_test_session():
        examples, variables = make_dense_examples_and_variables_dicts(
            dense_features_values=[[[1.0, 0.5], [1.0, -0.5]]],
            weights=[1.0, 1.0],
            labels=[1.0, 0.0])
        options = dict(symmetric_l2_regularization=1.0,
                       symmetric_l1_regularization=0,
                       loss_type='hinge_loss')
        model = SdcaModel(examples, variables, options)
        tf.global_variables_initializer().run()
        predictions = model.predictions(examples)
        binary_predictions = get_binary_predictions_for_hinge(predictions)
        train_op = model.minimize()
        for _ in range(_MAX_ITERATIONS):
            train_op.run()
        model.update_weights(train_op).run()

        # (1.0, 0.5) and (1.0, -0.5) are separable by x-axis but the datapoints
        # are within the margins so there is unregularized loss (1/2 per example).
        # For these datapoints, optimal weights are w_1~=0.0 and w_2~=1.0 which
        # gives an L2 loss of ~0.25.
        self.assertAllClose([0.5, -0.5], predictions.eval(), rtol=0.05)
        self.assertAllEqual([1, 0], binary_predictions.eval())
        unregularized_loss = model.unregularized_loss(examples)
        regularized_loss = model.regularized_loss(examples)
        self.assertAllClose(0.5, unregularized_loss.eval(), atol=0.02)
        self.assertAllClose(0.75, regularized_loss.eval(), atol=0.02)
Example 9: testDenseFeaturesWeightedExamples
def testDenseFeaturesWeightedExamples(self):
    with self._single_threaded_test_session():
        examples, variables = make_dense_examples_and_variables_dicts(
            dense_features_values=[[[1.0], [1.0]], [[0.5], [-0.5]]],
            weights=[3.0, 1.0],
            labels=[1.0, 0.0])
        options = dict(symmetric_l2_regularization=1.0,
                       symmetric_l1_regularization=0,
                       loss_type='hinge_loss')
        model = SdcaModel(examples, variables, options)
        tf.global_variables_initializer().run()
        predictions = model.predictions(examples)
        binary_predictions = get_binary_predictions_for_hinge(predictions)
        train_op = model.minimize()
        for _ in range(_MAX_ITERATIONS):
            train_op.run()
        model.update_weights(train_op).run()

        # Point (1.0, 0.5) has higher weight than (1.0, -0.5) so the model will
        # try to increase the margin from (1.0, 0.5). Due to regularization,
        # (1.0, -0.5) will be within the margin. For these points and example
        # weights, the optimal weights are w_1~=0.4 and w_2~=1.2 which give an L2
        # loss of 0.5 * 0.25 * 0.25 * 1.6 = 0.2. The binary predictions will be
        # correct, but the boundary will be much closer to the 2nd point than the
        # first one.
        self.assertAllClose([1.0, -0.2], predictions.eval(), atol=0.05)
        self.assertAllEqual([1, 0], binary_predictions.eval())
        unregularized_loss = model.unregularized_loss(examples)
        regularized_loss = model.regularized_loss(examples)
        self.assertAllClose(0.2, unregularized_loss.eval(), atol=0.02)
        self.assertAllClose(0.4, regularized_loss.eval(), atol=0.02)
Example 10: testDenseFeaturesWithArbitraryWeights
def testDenseFeaturesWithArbitraryWeights(self):
    with self._single_threaded_test_session():
        examples, variables = make_dense_examples_and_variables_dicts(
            dense_features_values=[[[1.0, 0.0], [0.0, 1.0]]],
            weights=[20.0, 10.0],
            labels=[10.0, -5.0])
        options = dict(symmetric_l2_regularization=5.0,
                       symmetric_l1_regularization=0,
                       loss_type='squared_loss')
        lr = SdcaModel(examples, variables, options)
        tf.global_variables_initializer().run()
        predictions = lr.predictions(examples)
        train_op = lr.minimize()
        for _ in range(_MAX_ITERATIONS):
            train_op.run()
        lr.update_weights(train_op).run()

        # The loss function for these particular features is given by:
        #   1/2 s_1 (label_1 - w_1)^2 + 1/2 s_2 (label_2 - w_2)^2 +
        #   \lambda/2 (w_1^2 + w_2^2), where s_1, s_2 are the example weights.
        # It turns out that the optimal (variable) weights are given by:
        #   w_1* = label_1 * s_1 / (\lambda + s_1) = 8.0 and
        #   w_2* = label_2 * s_2 / (\lambda + s_2) = -10/3.
        # In this case the (unnormalized regularized) loss will be:
        #   s_1/2 (8 - 10)^2 + s_2/2 (5 - 10/3)^2 + 5.0/2 (8^2 + (10/3)^2) = 2175.0/9.
        # The actual loss should be further normalized by the sum of example weights.
        self.assertAllClose([8.0, -10.0 / 3],
                            predictions.eval(),
                            rtol=0.01)
        loss = lr.regularized_loss(examples)
        self.assertAllClose(2175.0 / 270.0, loss.eval(), atol=0.01)
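As a quick sanity check of the closed-form solution quoted in the comments above, a short NumPy-only snippet (independent of the SdcaModel test harness, with the test's constants repeated here for illustration) reproduces the optimal weights and the weight-normalized loss:

import numpy as np

s = np.array([20.0, 10.0])       # example weights s_1, s_2
labels = np.array([10.0, -5.0])
lam = 5.0                        # symmetric_l2_regularization

w_opt = labels * s / (lam + s)   # [8.0, -10/3], matching the asserted predictions
loss = 0.5 * np.sum(s * (labels - w_opt) ** 2) + 0.5 * lam * np.sum(w_opt ** 2)
print(w_opt, loss / np.sum(s))   # normalized loss = 2175/270, about 8.06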
Example 11: testDenseFeaturesPerfectlySeparable
def testDenseFeaturesPerfectlySeparable(self):
    with self._single_threaded_test_session():
        examples, variables = make_dense_examples_and_variables_dicts(
            dense_features_values=[[1.0, 1.0], [1.0, -1.0]],
            weights=[1.0, 1.0],
            labels=[1.0, 0.0])
        options = dict(
            symmetric_l2_regularization=1.0,
            symmetric_l1_regularization=0,
            loss_type='hinge_loss')
        model = SdcaModel(examples, variables, options)
        tf.global_variables_initializer().run()
        predictions = model.predictions(examples)
        binary_predictions = get_binary_predictions_for_hinge(predictions)
        train_op = model.minimize()
        for _ in range(_MAX_ITERATIONS):
            train_op.run()
        model.update_weights(train_op).run()

        self.assertAllClose([1.0, -1.0], predictions.eval(), atol=0.05)
        self.assertAllEqual([1, 0], binary_predictions.eval())

        # (1.0, 1.0) and (1.0, -1.0) are perfectly separable by x-axis (that is,
        # the SVM's functional margin >= 1), so the unregularized loss is ~0.0.
        # There is only loss due to l2-regularization. For these datapoints, it
        # turns out that w_1~=0.0 and w_2~=1.0 which means that l2 loss is ~0.25.
        unregularized_loss = model.unregularized_loss(examples)
        regularized_loss = model.regularized_loss(examples)
        self.assertAllClose(0.0, unregularized_loss.eval(), atol=0.02)
        self.assertAllClose(0.25, regularized_loss.eval(), atol=0.02)
Example 12: testDenseFeaturesWithDefaultWeights
def testDenseFeaturesWithDefaultWeights(self):
    with self._single_threaded_test_session():
        examples, variables = make_dense_examples_and_variables_dicts(
            dense_features_values=[[[1.0], [0.0]], [[0.0], [1.0]]],
            weights=[1.0, 1.0],
            labels=[10.0, -5.0])
        options = dict(symmetric_l2_regularization=1.0,
                       symmetric_l1_regularization=0,
                       loss_type='squared_loss')
        lr = SdcaModel(examples, variables, options)
        tf.global_variables_initializer().run()
        predictions = lr.predictions(examples)
        train_op = lr.minimize()
        for _ in range(_MAX_ITERATIONS):
            train_op.run()
        lr.update_weights(train_op).run()

        # The loss function for these particular features is given by:
        #   1/2 (label_1 - w_1)^2 + 1/2 (label_2 - w_2)^2 + \lambda/2 (w_1^2 + w_2^2).
        # Differentiating w.r.t. w_1, w_2 yields the following optimal values:
        #   w_1* = label_1 / (\lambda + 1) = 10/2, w_2* = label_2 / (\lambda + 1) = -5/2.
        # In this case the (unnormalized regularized) loss will be:
        #   1/2 (10 - 5)^2 + 1/2 (5 - 5/2)^2 + 1/2 (5^2 + (5/2)^2) = 125.0/4.
        # The actual loss should be further normalized by the sum of example weights.
        self.assertAllClose([5.0, -2.5],
                            predictions.eval(),
                            rtol=0.01)
        loss = lr.regularized_loss(examples)
        self.assertAllClose(125.0 / 8.0, loss.eval(), atol=0.01)
Example 13: testL1Regularization
def testL1Regularization(self):
    # Setup test data
    example_protos = [
        make_example_proto(
            {'age': [0],
             'gender': [0]}, -10.0),
        make_example_proto(
            {'age': [1],
             'gender': [1]}, 14.0),
    ]
    example_weights = [1.0, 1.0]
    with self._single_threaded_test_session():
        examples = make_example_dict(example_protos, example_weights)
        variables = make_variable_dict(1, 1)
        options = dict(symmetric_l2_regularization=1.0,
                       symmetric_l1_regularization=4.0,
                       loss_type='squared_loss')
        lr = SdcaModel(examples, variables, options)
        tf.global_variables_initializer().run()
        prediction = lr.predictions(examples)
        loss = lr.regularized_loss(examples)

        train_op = lr.minimize()
        for _ in range(_MAX_ITERATIONS):
            train_op.run()
        lr.update_weights(train_op).run()

        # Predictions should be -4.0, 48/5 due to minimizing regularized loss:
        #   (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2 + L1 * 4 * weight
        self.assertAllClose([-4.0, 20.0 / 3.0], prediction.eval(), rtol=0.08)

        # Loss should be the sum of the regularized loss value from above per
        # example after plugging in the optimal weights.
        self.assertAllClose(308.0 / 6.0, loss.eval(), atol=0.01)
Example 14: testFractionalExampleLabel
def testFractionalExampleLabel(self):
    # Setup test data with 1 positive, and 1 mostly-negative example.
    example_protos = [
        make_example_proto(
            {'age': [0],
             'gender': [0]}, 0.1),
        make_example_proto(
            {'age': [1],
             'gender': [1]}, 1),
    ]
    example_weights = [1.0, 1.0]
    for num_shards in _SHARD_NUMBERS:
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=1,
                           symmetric_l1_regularization=0,
                           num_table_shards=num_shards,
                           loss_type='logistic_loss')

            lr = SdcaModel(examples, variables, options)
            tf.global_variables_initializer().run()
            with self.assertRaisesOpError(
                'Only labels of 0.0 or 1.0 are supported right now.'):
                lr.minimize().run()
Example 15: testMultiLabelWithCenteredBias
def testMultiLabelWithCenteredBias(self):
    n_classes = 3
    head = head_lib._multi_label_head(
        n_classes=n_classes, enable_centered_bias=True,
        metric_class_ids=range(n_classes))
    with tf.Graph().as_default(), tf.Session():
        logits = tf.constant([[1., 0., 0.]])
        labels = tf.constant([[0, 0, 1]])
        model_fn_ops = head.head_ops({}, labels,
                                     tf.contrib.learn.ModeKeys.TRAIN,
                                     _noop_train_op, logits=logits)
        _assert_variables(self, expected_global=(
            "centered_bias_weight:0",
            "centered_bias_weight/Adagrad:0",
        ), expected_trainable=(
            "centered_bias_weight:0",
        ))
        tf.global_variables_initializer().run()
        _assert_summary_tags(self, ["loss",
                                    "centered_bias/bias_0",
                                    "centered_bias/bias_1",
                                    "centered_bias/bias_2"])
        expected_loss = .89985204
        _assert_metrics(
            self, expected_loss, self._expected_eval_metrics(expected_loss),
            model_fn_ops)