This article collects typical usage examples of the Python function tensorflow.initialize_all_variables. If you are unsure how initialize_all_variables is used in practice, or are looking for concrete examples of it, the selected code samples below should help.
Fifteen code examples of initialize_all_variables are shown below, ordered by popularity by default.
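For orientation, here is a minimal, self-contained sketch (not taken from any of the examples below) of the basic pattern they all rely on. Note that initialize_all_variables was deprecated in favor of tf.global_variables_initializer() from TensorFlow 0.12 onward; the call itself does nothing until the returned op is run in a session.

import tensorflow as tf

# Nothing is assigned to a variable until the initialization op actually runs.
v = tf.Variable(0, name="counter")
w = tf.Variable([1.0, 2.0], name="weights")

init_op = tf.initialize_all_variables()  # tf.global_variables_initializer() in TF >= 0.12

with tf.Session() as sess:
    sess.run(init_op)          # assign every variable its initial value
    print(sess.run([v, w]))    # -> [0, array([1., 2.], dtype=float32)]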
Example 1: testWhileUpdateVariable_6
def testWhileUpdateVariable_6(self):
    with self.test_session():
        # Create some variables.
        var_a = tf.Variable(0, name="a")
        var_b = tf.Variable(0, name="b")
        c = tf.constant(0)
        tf.initialize_all_variables().run()

        # Loop condition
        def pred(i):
            return tf.less(i, 10)

        # Loop body
        def loop_body(i):
            asn1 = tf.assign_add(var_a, 1, name="a_add")
            with tf.control_dependencies([asn1]):
                asn2 = tf.assign_add(var_b, var_a, name="b_add")
            with tf.control_dependencies([asn2]):
                ni = tf.add(i, 1, name="i_add")
            return ni

        lpa = control_flow_ops.While(pred, loop_body, [c], 1, name="loop")

        self.assertEqual(0, var_b.eval())
        lpa.eval()  # Run the loop
        self.assertEqual(55, var_b.eval())
        self.assertEqual(10, var_a.eval())
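control_flow_ops.While is an internal symbol; the public equivalent in TensorFlow 1.x is tf.while_loop. As a hedged sketch (my rewrite, not part of the original test), the same counting loop could look like this:

import tensorflow as tf

var_a = tf.Variable(0, name="a")
var_b = tf.Variable(0, name="b")

def pred(i):
    return tf.less(i, 10)

def loop_body(i):
    asn1 = tf.assign_add(var_a, 1, name="a_add")
    with tf.control_dependencies([asn1]):
        asn2 = tf.assign_add(var_b, var_a, name="b_add")
    with tf.control_dependencies([asn2]):
        return tf.add(i, 1, name="i_add")

# parallel_iterations=1 mirrors the "1" argument passed to While above.
loop = tf.while_loop(pred, loop_body, [tf.constant(0)], parallel_iterations=1)

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    sess.run(loop)
    print(sess.run([var_a, var_b]))  # expected: [10, 55]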
Example 2: testBlockGRUToGRUCellSingleStep
def testBlockGRUToGRUCellSingleStep(self):
    with self.test_session(use_gpu=self._use_gpu, graph=tf.Graph()) as sess:
        batch_size = 4
        cell_size = 5
        input_size = 6

        seed = 1994
        initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=seed)

        # Inputs
        x = tf.zeros([batch_size, input_size])
        h = tf.zeros([batch_size, cell_size])

        # Values for the inputs.
        x_value = np.random.rand(batch_size, input_size)
        h_value = np.random.rand(batch_size, cell_size)

        # Output from the basic GRU cell implementation.
        with tf.variable_scope("basic", initializer=initializer):
            output = tf.nn.rnn_cell.GRUCell(cell_size)(x, h)
            sess.run([tf.initialize_all_variables()])
            basic_res = sess.run([output], {x: x_value, h: h_value})

        # Output from the block GRU cell implementation.
        with tf.variable_scope("block", initializer=initializer):
            output = gru_ops.GRUBlockCell(cell_size)(x, h)
            sess.run([tf.initialize_all_variables()])
            block_res = sess.run([output], {x: x_value, h: h_value})

        self.assertEqual(len(block_res), len(basic_res))
        for block, basic in zip(block_res, basic_res):
            self.assertAllClose(block, basic)
Example 3: testInt64
def testInt64(self):
    save_path = os.path.join(self.get_temp_dir(), "int64")
    with self.test_session() as sess:
        # Build a graph with 1 node, and save and restore for them.
        v = tf.Variable(np.int64(15), name="v")
        save = tf.train.Saver({"v": v}, restore_sequentially=True)
        tf.initialize_all_variables().run()

        # Save the initialized values in the file at "save_path"
        val = save.save(sess, save_path)
        self.assertTrue(isinstance(val, six.string_types))
        self.assertEqual(save_path, val)

    with self.test_session() as sess:
        v = tf.Variable(np.int64(-1), name="v")
        save = tf.train.Saver({"v": v})

        with self.assertRaisesWithPredicateMatch(
                tf.OpError, lambda e: "uninitialized value v" in e.message):
            sess.run(v)

        # Restore the saved values in the parameter nodes.
        save.restore(sess, save_path)
        # Check that the parameter nodes have been restored.
        self.assertEqual(np.int64(15), v.eval())
Example 4: main
def main(argv):
    mapDict = getKanjiMap()
    with tf.Session() as sess:
        tf.initialize_all_variables().run()

        # Restore variables from the training process.
        saver = tf.train.Saver(loadParam)
        saver.restore(sess, MODEL_NAME)

        for argc in range(1, len(sys.argv)):
            fName = sys.argv[argc]
            if os.path.isfile(fName):
                img = cv2.imread(fName, 0)
                img = prepareImage(img)
                # Ensure the image has zero mean and values in [-1, 1].
                img = (img - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
                img = img.reshape([1, IMAGE_SIZE, IMAGE_SIZE, 1])
                predictions = sess.run(
                    eval_prediction,
                    feed_dict={eval_data_node: img})
                labelID = np.argmax(predictions)
                print("labelID: %d; Recognized Kanji: %s" % (labelID, mapDict[str(labelID)]))
            else:
                print("%s does not exist\n" % (fName))
                continue
Example 5: enlargeDataset
def enlargeDataset(images, byte_data, names, labels, is_hard):
    extendListEightTimes(labels)
    extendListEightTimes(names)
    extendListEightTimes(is_hard)
    with tf.Session() as sess:
        tf.initialize_all_variables().run()
        l = len(images)
        for j in range(7):
            print(l)
            train_data2 = []
            start = time.time()
            for i in range(l):
                imageTensor = tf.image.random_contrast(images[i], 0.2, 1.8)
                imageTensor = tf.image.random_flip_left_right(imageTensor)
                imageTensor = tf.image.random_flip_up_down(imageTensor)
                imageTensor = tf.image.random_brightness(imageTensor, max_delta=50 / 255.0)
                imageTensor = tf.image.random_saturation(imageTensor, 0.2, 1.8)
                train_data2.append(imageTensor)
            print(time.time() - start)

            start = time.time()
            train_data2 = sess.run(train_data2)
            print(type(train_data2))
            print('time2:', time.time() - start)
            print(train_data2[0][16])

            for i in range(l):
                byte_data.extend(train_data2[i].flatten())
    return byte_data, names, labels, is_hard
Example 6: applyOptimizer
def applyOptimizer(self, opt, steps=5, is_sparse=False):
    if is_sparse:
        var0 = tf.Variable([[0.0], [0.0]])
        var1 = tf.Variable([[0.0], [0.0]])
        grads0 = tf.IndexedSlices(tf.constant([0.1], shape=[1, 1]),
                                  tf.constant([0]),
                                  tf.constant([2, 1]))
        grads1 = tf.IndexedSlices(tf.constant([0.02], shape=[1, 1]),
                                  tf.constant([1]),
                                  tf.constant([2, 1]))
    else:
        var0 = tf.Variable([0.0, 0.0])
        var1 = tf.Variable([0.0, 0.0])
        grads0 = tf.constant([0.1, 0.2])
        grads1 = tf.constant([0.01, 0.02])
    update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    tf.initialize_all_variables().run()

    sess = tf.get_default_session()
    v0_val, v1_val = sess.run([var0, var1])
    if is_sparse:
        self.assertAllClose([[0.0], [0.0]], v0_val)
        self.assertAllClose([[0.0], [0.0]], v1_val)
    else:
        self.assertAllClose([0.0, 0.0], v0_val)
        self.assertAllClose([0.0, 0.0], v1_val)

    # Run Ftrl for a few steps
    for _ in range(steps):
        update.run()

    v0_val, v1_val = sess.run([var0, var1])
    return v0_val, v1_val
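applyOptimizer assumes a default session is already installed, since it calls tf.get_default_session() and the .run() methods of ops and tensors. A hedged usage sketch (my addition; the helper is assumed to live on a tf.test.TestCase subclass, and the optimizer arguments are purely illustrative):

# Inside a test method, so that self.test_session() provides the default
# session that applyOptimizer relies on.
with self.test_session():
    opt = tf.train.FtrlOptimizer(3.0, initial_accumulator_value=0.1)
    v0_val, v1_val = self.applyOptimizer(opt, steps=10)
    # v0_val and v1_val hold the dense variable values after 10 FTRL updates.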
Example 7: testFtrlWithL1
def testFtrlWithL1(self):
    with self.test_session() as sess:
        var0 = tf.Variable([1.0, 2.0])
        var1 = tf.Variable([4.0, 3.0])
        grads0 = tf.constant([0.1, 0.2])
        grads1 = tf.constant([0.01, 0.02])

        opt = tf.train.FtrlOptimizer(3.0,
                                     initial_accumulator_value=0.1,
                                     l1_regularization_strength=0.001,
                                     l2_regularization_strength=0.0)
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        tf.initialize_all_variables().run()

        v0_val, v1_val = sess.run([var0, var1])
        self.assertAllClose([1.0, 2.0], v0_val)
        self.assertAllClose([4.0, 3.0], v1_val)

        # Run 10 steps of FTRL.
        for _ in range(10):
            update.run()

        v0_val, v1_val = sess.run([var0, var1])
        self.assertAllClose(np.array([-7.66718769, -10.91273689]), v0_val)
        self.assertAllClose(np.array([-0.93460727, -1.86147261]), v1_val)
Example 8: testImbalancedWithExampleWeights
def testImbalancedWithExampleWeights(self):
    # Setup test data with 1 positive, and 1 negative example.
    example_protos = [
        make_example_proto({"age": [0], "gender": [0]}, 0),
        make_example_proto({"age": [1], "gender": [1]}, 1),
    ]
    example_weights = [3.0, 1.0]
    for num_shards in _SHARD_NUMBERS:
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(
                symmetric_l2_regularization=1,
                symmetric_l1_regularization=0,
                num_table_shards=num_shards,
                loss_type="logistic_loss",
            )

            lr = SdcaModel(examples, variables, options)
            tf.initialize_all_variables().run()
            unregularized_loss = lr.unregularized_loss(examples)
            loss = lr.regularized_loss(examples)
            predictions = lr.predictions(examples)

            train_op = lr.minimize()
            for _ in range(_MAX_ITERATIONS):
                train_op.run()

            self.assertAllClose(0.284860, unregularized_loss.eval(), atol=0.08)
            self.assertAllClose(0.408044, loss.eval(), atol=0.012)
            predicted_labels = get_binary_predictions_for_logistic(predictions)
            self.assertAllEqual([0, 1], predicted_labels.eval())
            self.assertAllClose(0.0, lr.approximate_duality_gap().eval(),
                                rtol=2e-2, atol=1e-2)
Example 9: testInstancesOfOneClassOnly
def testInstancesOfOneClassOnly(self):
    # Setup test data with 1 positive (ignored), and 1 negative example.
    example_protos = [
        make_example_proto({"age": [0], "gender": [0]}, 0),
        make_example_proto({"age": [1], "gender": [0]}, 1),  # Shares gender with the instance above.
    ]
    example_weights = [1.0, 0.0]  # Second example "omitted" from training.
    for num_shards in _SHARD_NUMBERS:
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(
                symmetric_l2_regularization=1,
                symmetric_l1_regularization=0,
                num_table_shards=num_shards,
                loss_type="logistic_loss",
            )

            lr = SdcaModel(examples, variables, options)
            tf.initialize_all_variables().run()
            unregularized_loss = lr.unregularized_loss(examples)
            loss = lr.regularized_loss(examples)
            predictions = lr.predictions(examples)

            train_op = lr.minimize()
            for _ in range(_MAX_ITERATIONS):
                train_op.run()

            self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
            self.assertAllClose(0.525457, loss.eval(), atol=0.01)
            predicted_labels = get_binary_predictions_for_logistic(predictions)
            self.assertAllEqual([0, 0], predicted_labels.eval())
            self.assertAllClose(0.01, lr.approximate_duality_gap().eval(),
                                rtol=1e-2, atol=1e-2)
Example 10: testSparseBasic
def testSparseBasic(self):
    for dtype in [tf.half, tf.float32, tf.float64]:
        with self.test_session():
            var0 = tf.Variable([[1.0], [2.0]], dtype=dtype)
            var1 = tf.Variable([[3.0], [4.0]], dtype=dtype)
            grads0 = tf.IndexedSlices(
                tf.constant([0.1], shape=[1, 1], dtype=dtype),
                tf.constant([0]),
                tf.constant([2, 1]))
            grads1 = tf.IndexedSlices(
                tf.constant([0.01], shape=[1, 1], dtype=dtype),
                tf.constant([1]),
                tf.constant([2, 1]))
            ada_opt = tf.train.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
            ada_update = ada_opt.apply_gradients(zip(
                [grads0, grads1], [var0, var1]))
            tf.initialize_all_variables().run()

            # Fetch params to validate initial values.
            self.assertAllClose([[1.0], [2.0]], var0.eval())
            self.assertAllClose([[3.0], [4.0]], var1.eval())

            # Run 3 steps of Adagrad.
            for _ in range(3):
                ada_update.run()

            # Validate updated params.
            self.assertAllCloseAccordingToType(
                np.array([[-1.6026098728179932], [2.0]]), var0.eval())
            self.assertAllCloseAccordingToType(
                np.array([[3.0], [3.715679168701172]]), var1.eval())
Example 11: testLSTMBasicToBlockPeeping
def testLSTMBasicToBlockPeeping(self):
    with self.test_session(use_gpu=self._use_gpu) as sess:
        batch_size = 2
        input_size = 3
        cell_size = 4
        sequence_length = 5

        inputs = []
        for _ in range(sequence_length):
            inp = tf.convert_to_tensor(
                np.random.randn(batch_size, input_size),
                dtype=tf.float32)
            inputs.append(inp)

        initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=19890212)
        with tf.variable_scope("basic", initializer=initializer):
            cell = tf.nn.rnn_cell.LSTMCell(cell_size,
                                           use_peepholes=True,
                                           state_is_tuple=True)
            outputs, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)

            sess.run([tf.initialize_all_variables()])
            basic_outputs = sess.run(outputs)
            basic_grads = sess.run(tf.gradients(outputs, inputs))
            basic_wgrads = sess.run(tf.gradients(outputs, tf.trainable_variables()))

        with tf.variable_scope("block", initializer=initializer):
            w = tf.get_variable("w",
                                shape=[input_size + cell_size, cell_size * 4],
                                dtype=tf.float32)
            b = tf.get_variable("b",
                                shape=[cell_size * 4],
                                dtype=tf.float32,
                                initializer=tf.zeros_initializer)

            wci = tf.get_variable("wci", shape=[cell_size], dtype=tf.float32)
            wcf = tf.get_variable("wcf", shape=[cell_size], dtype=tf.float32)
            wco = tf.get_variable("wco", shape=[cell_size], dtype=tf.float32)

            _, _, _, _, _, _, outputs = fused_lstm(
                tf.convert_to_tensor(sequence_length,
                                     dtype=tf.int64),
                inputs,
                w,
                b,
                wci=wci,
                wcf=wcf,
                wco=wco,
                cell_clip=0,
                use_peephole=True)

            sess.run([tf.initialize_all_variables()])
            block_outputs = sess.run(outputs)
            block_grads = sess.run(tf.gradients(outputs, inputs))
            block_wgrads = sess.run(tf.gradients(outputs, [w, b, wci, wcf, wco]))

        self.assertAllClose(basic_outputs, block_outputs)
        self.assertAllClose(basic_grads, block_grads)
        for basic, block in zip(basic_wgrads, block_wgrads):
            self.assertAllClose(basic, block, rtol=1e-2, atol=1e-2)
Example 12: testSharing
def testSharing(self):
    for dtype in [tf.half, tf.float32, tf.float64]:
        with self.test_session():
            var0 = tf.Variable([1.0, 2.0], dtype=dtype)
            var1 = tf.Variable([3.0, 4.0], dtype=dtype)
            grads0 = tf.constant([0.1, 0.1], dtype=dtype)
            grads1 = tf.constant([0.01, 0.01], dtype=dtype)
            ada_opt = tf.train.AdagradOptimizer(3.0)

            # Apply the optimizer twice. Both applications will use
            # the same accums.
            ada_update1 = ada_opt.apply_gradients(zip(
                [grads0, grads1], [var0, var1]))
            ada_update2 = ada_opt.apply_gradients(zip(
                [grads0, grads1], [var0, var1]))
            self.assertEqual(["accumulator"], ada_opt.get_slot_names())
            slot0 = ada_opt.get_slot(var0, "accumulator")
            self.assertEquals(slot0.get_shape(), var0.get_shape())
            slot1 = ada_opt.get_slot(var1, "accumulator")
            self.assertEquals(slot1.get_shape(), var1.get_shape())
            tf.initialize_all_variables().run()

            # Fetch params to validate initial values.
            self.assertAllClose([1.0, 2.0], var0.eval())
            self.assertAllClose([3.0, 4.0], var1.eval())

            # Mix the first and the second adagrad for 3 steps.
            ada_update1.run()
            ada_update2.run()
            ada_update1.run()

            # Validate updated params (the same as with only 1 Adagrad).
            self.assertAllCloseAccordingToType(
                np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
            self.assertAllCloseAccordingToType(
                np.array([2.715679168701172, 3.715679168701172]), var1.eval())
Example 13: train_model
def train_model(args):
    data_loader = InputHandler(args.data_dir, args.batch_size, args.result_length)
    args.vocabulary_size = data_loader.vocabulary_size

    # Save the original files, so that we can load the model when sampling
    with open(os.path.join(args.snapshots_dir, CONFIGURATION_FILE), 'wb') as f:
        cPickle.dump(args, f)
    with open(os.path.join(args.snapshots_dir, WORDS_VOCABULARY_FILE), 'wb') as f:
        cPickle.dump((data_loader.words, data_loader.vocabulary), f)

    model = RNNModel(args.rnn_size, args.network_depth, args.batch_size, args.result_length,
                     args.vocabulary_size, args.gradient)

    with tf.Session() as session:
        tf.initialize_all_variables().run()
        saver = tf.train.Saver(tf.all_variables())

        for e in range(args.num_epochs):
            session.run(tf.assign(model.lr, args.training_rate * (args.decay_rate ** e)))
            data_loader.set_batch_pointer_to_zero()
            state = model.initial_state.eval()

            for b in range(data_loader.num_batches):
                x, y = data_loader.get_next_batch()
                feed = {model.input_data: x, model.targets: y, model.initial_state: state}
                train_loss, state, _ = session.run([model.cost, model.final_state, model.train_op], feed)
                if (e * data_loader.num_batches + b) % args.snapshot == 0 \
                        or (e == args.num_epochs - 1 and b == data_loader.num_batches - 1):  # save for the last result
                    snapshot_path = os.path.join(args.snapshots_dir, 'model.ckpt')
                    saver.save(session, snapshot_path, global_step=e * data_loader.num_batches + b)
                    print("Model snapshot was taken to {}".format(snapshot_path))
Example 14: testBasicLSTMCell
def testBasicLSTMCell(self):
    with self.test_session() as sess:
        with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
            x = tf.zeros([1, 2])
            m = tf.zeros([1, 8])
            g, out_m = tf.nn.rnn_cell.MultiRNNCell(
                [tf.nn.rnn_cell.BasicLSTMCell(2)] * 2)(x, m)
            sess.run([tf.initialize_all_variables()])
            res = sess.run([g, out_m], {x.name: np.array([[1., 1.]]),
                                        m.name: 0.1 * np.ones([1, 8])})
            self.assertEqual(len(res), 2)
            # The numbers in results were not calculated, this is just a smoke test.
            self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
            expected_mem = np.array([[0.68967271, 0.68967271,
                                      0.44848421, 0.44848421,
                                      0.39897051, 0.39897051,
                                      0.24024698, 0.24024698]])
            self.assertAllClose(res[1], expected_mem)

        with tf.variable_scope("other", initializer=tf.constant_initializer(0.5)):
            x = tf.zeros([1, 3])  # Test BasicLSTMCell with input_size != num_units.
            m = tf.zeros([1, 4])
            g, out_m = tf.nn.rnn_cell.BasicLSTMCell(2, input_size=3)(x, m)
            sess.run([tf.initialize_all_variables()])
            res = sess.run([g, out_m], {x.name: np.array([[1., 1., 1.]]),
                                        m.name: 0.1 * np.ones([1, 4])})
            self.assertEqual(len(res), 2)
Example 15: testDenseFeaturesWeightedExamples
def testDenseFeaturesWeightedExamples(self):
    with self._single_threaded_test_session():
        examples, variables = make_dense_examples_and_variables_dicts(
            dense_features_values=[[[1.0], [1.0]], [[0.5], [-0.5]]],
            weights=[3.0, 1.0],
            labels=[1.0, 0.0])
        options = dict(symmetric_l2_regularization=1.0,
                       symmetric_l1_regularization=0,
                       loss_type="hinge_loss")
        model = SdcaModel(examples, variables, options)
        tf.initialize_all_variables().run()
        predictions = model.predictions(examples)
        binary_predictions = get_binary_predictions_for_hinge(predictions)

        train_op = model.minimize()
        for _ in range(_MAX_ITERATIONS):
            train_op.run()

        # Point (1.0, 0.5) has higher weight than (1.0, -0.5) so the model will
        # try to increase the margin from (1.0, 0.5). Due to regularization,
        # (1.0, -0.5) will be within the margin. For these points and example
        # weights, the optimal weights are w_1~=0.4 and w_2~=1.2 which give an L2
        # loss of 0.5 * 0.25 * 0.25 * 1.6 = 0.2. The binary predictions will be
        # correct, but the boundary will be much closer to the 2nd point than the
        # first one.
        self.assertAllClose([1.0, -0.2], predictions.eval(), atol=0.05)
        self.assertAllEqual([1, 0], binary_predictions.eval())
        unregularized_loss = model.unregularized_loss(examples)
        regularized_loss = model.regularized_loss(examples)
        self.assertAllClose(0.2, unregularized_loss.eval(), atol=0.02)
        self.assertAllClose(0.4, regularized_loss.eval(), atol=0.02)