This page collects typical usage examples of the tensorflow.initialize_all_tables function in Python. If you have been wondering what exactly initialize_all_tables does, how to call it, or what real-world usages look like, the hand-picked code examples below should help.
15 code examples of initialize_all_tables are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python samples.
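Before turning to the individual examples, here is a minimal, self-contained sketch of the typical pattern. It is an illustration only, assuming a TF 1.x runtime where tf.contrib and tf.initialize_all_tables are still available (later 1.x releases deprecate the function in favor of tf.tables_initializer):

import tensorflow as tf

# Build a lookup table that maps strings to integer ids.
keys = tf.constant(["brain", "salad", "surgery"])
values = tf.constant([0, 1, 2], tf.int64)
table = tf.contrib.lookup.HashTable(
    tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
    default_value=-1)

with tf.Session() as sess:
  # Run the initializer of every lookup table in the default graph.
  sess.run(tf.initialize_all_tables())
  print(sess.run(table.lookup(tf.constant(["salad", "tank"]))))  # [ 1 -1]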
Example 1: testMultipleHashTables
def testMultipleHashTables(self):
  with self.test_session() as sess:
    shared_name = ''
    default_val = -1
    table1 = tf.HashTable(tf.string, tf.int64, default_val, shared_name)
    table2 = tf.HashTable(tf.string, tf.int64, default_val, shared_name)
    table3 = tf.HashTable(tf.string, tf.int64, default_val, shared_name)
    keys = tf.constant(['brain', 'salad', 'surgery'])
    values = tf.constant([0, 1, 2], tf.int64)
    table1.initialize_from(keys, values)
    table2.initialize_from(keys, values)
    table3.initialize_from(keys, values)
    tf.initialize_all_tables().run()
    self.assertAllEqual(3, table1.size().eval())
    self.assertAllEqual(3, table2.size().eval())
    self.assertAllEqual(3, table3.size().eval())
    input_string = tf.constant(['brain', 'salad', 'tank'])
    output1 = table1.lookup(input_string)
    output2 = table2.lookup(input_string)
    output3 = table3.lookup(input_string)
    out1, out2, out3 = sess.run([output1, output2, output3])
    self.assertAllEqual([0, 1, -1], out1)
    self.assertAllEqual([0, 1, -1], out2)
    self.assertAllEqual([0, 1, -1], out3)
Example 2: testMultipleHashTables
def testMultipleHashTables(self):
  with self.test_session() as sess:
    default_val = -1
    keys = tf.constant(["brain", "salad", "surgery"])
    values = tf.constant([0, 1, 2], tf.int64)
    table1 = tf.contrib.lookup.HashTable(
        tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
        default_val)
    table2 = tf.contrib.lookup.HashTable(
        tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
        default_val)
    table3 = tf.contrib.lookup.HashTable(
        tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
        default_val)
    tf.initialize_all_tables().run()
    self.assertAllEqual(3, table1.size().eval())
    self.assertAllEqual(3, table2.size().eval())
    self.assertAllEqual(3, table3.size().eval())
    input_string = tf.constant(["brain", "salad", "tank"])
    output1 = table1.lookup(input_string)
    output2 = table2.lookup(input_string)
    output3 = table3.lookup(input_string)
    out1, out2, out3 = sess.run([output1, output2, output3])
    self.assertAllEqual([0, 1, -1], out1)
    self.assertAllEqual([0, 1, -1], out2)
    self.assertAllEqual([0, 1, -1], out3)
Example 3: test_duplicate_entries
def test_duplicate_entries(self):
  with self.test_session():
    mapping_strings = tf.constant(["hello", "hello"])
    indices = tf.constant([0, 1, 4], tf.int64)
    feats = tf.contrib.lookup.index_to_string(indices,
                                              mapping=mapping_strings)
    tf.initialize_all_tables().run()
    self.assertAllEqual((b"hello", b"hello", b"UNK"), feats.eval())
    self.assertRaises(tf.OpError, tf.initialize_all_tables().run)
Example 4: test_index_to_string
def test_index_to_string(self):
  with self.test_session():
    mapping_strings = tf.constant(["brain", "salad", "surgery"])
    indices = tf.constant([0, 1, 2, 3], tf.int64)
    feats = tf.contrib.lookup.index_to_string(indices,
                                              mapping=mapping_strings)
    self.assertRaises(tf.OpError, feats.eval)
    tf.initialize_all_tables().run()
    self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"), feats.eval())
Example 5: test_string_to_index_with_default_value
def test_string_to_index_with_default_value(self):
  default_value = -42
  with self.test_session():
    mapping_strings = tf.constant(["brain", "salad", "surgery"])
    feats = tf.constant(["salad", "surgery", "tarkus"])
    indices = tf.contrib.lookup.string_to_index(feats,
                                                mapping=mapping_strings,
                                                default_value=default_value)
    self.assertRaises(tf.OpError, indices.eval)
    tf.initialize_all_tables().run()
    self.assertAllEqual((1, 2, default_value), indices.eval())
Example 6: test_index_to_string_with_default_value
def test_index_to_string_with_default_value(self):
  default_value = b"NONE"
  with self.test_session():
    mapping_strings = tf.constant(["brain", "salad", "surgery"])
    indices = tf.constant([1, 2, 4], tf.int64)
    feats = tf.contrib.lookup.index_to_string(indices,
                                              mapping=mapping_strings,
                                              default_value=default_value)
    self.assertRaises(tf.OpError, feats.eval)
    tf.initialize_all_tables().run()
    self.assertAllEqual((b"salad", b"surgery", default_value), feats.eval())
Example 7: apply_model
def apply_model(self, x):
  x = x.data
  tmp = np.zeros((1, 1))
  with tf.Session(graph=self._graph) as sess:
    # Initialize any lookup tables in the graph (a no-op if there are none).
    tf.initialize_all_tables().run()
    # Feed the trained weights and bias in explicitly instead of
    # restoring variables.
    feed_dict = {self._x: x,
                 self._W: self._result_W,
                 self._b: self._result_b}
    tmp = sess.run(self._y, feed_dict=feed_dict)
  ret = BrewPipeDataFrame('y')
  ret.data = tmp
  return ret
Example 8: testInitializeSameTableWithMultipleNodes
def testInitializeSameTableWithMultipleNodes(self):
  vocabulary_file = self._createVocabFile("one_column_5.txt")
  with self.test_session() as sess:
    shared_name = "shared-one-columm"
    default_value = -1
    table1 = tf.contrib.lookup.HashTable(
        tf.contrib.lookup.TextFileInitializer(
            vocabulary_file, tf.string,
            tf.contrib.lookup.TextFileIndex.WHOLE_LINE, tf.int64,
            tf.contrib.lookup.TextFileIndex.LINE_NUMBER),
        default_value,
        shared_name=shared_name)
    table2 = tf.contrib.lookup.HashTable(
        tf.contrib.lookup.TextFileInitializer(
            vocabulary_file, tf.string,
            tf.contrib.lookup.TextFileIndex.WHOLE_LINE, tf.int64,
            tf.contrib.lookup.TextFileIndex.LINE_NUMBER),
        default_value,
        shared_name=shared_name)
    table3 = tf.contrib.lookup.HashTable(
        tf.contrib.lookup.TextFileInitializer(
            vocabulary_file, tf.string,
            tf.contrib.lookup.TextFileIndex.WHOLE_LINE, tf.int64,
            tf.contrib.lookup.TextFileIndex.LINE_NUMBER),
        default_value,
        shared_name=shared_name)
    tf.initialize_all_tables().run()
    input_string = tf.constant(["brain", "salad", "tank"])
    output1 = table1.lookup(input_string)
    output2 = table2.lookup(input_string)
    output3 = table3.lookup(input_string)
    out1, out2, out3 = sess.run([output1, output2, output3])
    self.assertAllEqual([0, 1, -1], out1)
    self.assertAllEqual([0, 1, -1], out2)
    self.assertAllEqual([0, 1, -1], out3)
Example 9: testGetModelInput
def testGetModelInput(self):
  initial_state, sequence_input = self._rnn_estimator._get_model_input(
      self._columns_to_tensors)
  self.assertIsNone(initial_state)
  with self.test_session() as sess:
    sess.run(tf.initialize_all_variables())
    sess.run(tf.initialize_all_tables())
    sequence_input_val = sess.run(sequence_input)
  expected_shape = np.array([
      3,         # expected batch size
      2,         # padded sequence length
      3 + 8 + 2  # location keys + embedding dim + measurement dimension
  ])
  self.assertAllEqual(expected_shape, sequence_input_val.shape)
Example 10: testBuildSequenceInputInput
def testBuildSequenceInputInput(self):
  sequence_input = dynamic_rnn_estimator.build_sequence_input(
      self.columns_to_tensors,
      self.sequence_feature_columns,
      self.context_feature_columns)
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.initialize_all_tables())
    sequence_input_val = sess.run(sequence_input)
  expected_shape = np.array([
      3,         # expected batch size
      2,         # padded sequence length
      3 + 8 + 2  # location keys + embedding dim + measurement dimension
  ])
  self.assertAllEqual(expected_shape, sequence_input_val.shape)
Example 11: testConstructRNN
def testConstructRNN(self):
  """Test `DynamicRNNEstimator._construct_rnn`."""
  initial_state, sequence_input = self._rnn_estimator._get_model_input(
      self._columns_to_tensors)
  activations_t, final_state_t = self._rnn_estimator._construct_rnn(
      initial_state, sequence_input)
  # Obtain values of activations and final state.
  with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    sess.run(tf.initialize_all_tables())
    activations, final_state = sess.run([activations_t, final_state_t])
  expected_activations_shape = np.array([3, 2, self.NUM_LABEL_COLUMNS])
  self.assertAllEqual(expected_activations_shape, activations.shape)
  expected_state_shape = np.array([3, self.NUM_RNN_CELL_UNITS])
  self.assertAllEqual(expected_state_shape, final_state.shape)
Example 12: testConstructRNN
def testConstructRNN(self):
  initial_state = None
  sequence_input = dynamic_rnn_estimator.build_sequence_input(
      self.columns_to_tensors,
      self.sequence_feature_columns,
      self.context_feature_columns)
  activations_t, final_state_t = dynamic_rnn_estimator.construct_rnn(
      initial_state,
      sequence_input,
      self.rnn_cell,
      self.mock_target_column.num_label_columns)
  # Obtain values of activations and final state.
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.initialize_all_tables())
    activations, final_state = sess.run([activations_t, final_state_t])
  expected_activations_shape = np.array([3, 2, self.NUM_LABEL_COLUMNS])
  self.assertAllEqual(expected_activations_shape, activations.shape)
  expected_state_shape = np.array([3, self.NUM_RNN_CELL_UNITS])
  self.assertAllEqual(expected_state_shape, final_state.shape)
Example 13: main
# ......... part of the code omitted here .........
    exit(1)
  logging.info("Export the saved model to {}".format(
      FLAGS.saved_model_path))
  export_path_base = FLAGS.saved_model_path
  export_path = os.path.join(
      compat.as_bytes(export_path_base),
      compat.as_bytes(str(FLAGS.model_version)))
  model_signature = signature_def_utils.build_signature_def(
      inputs={
          "keys": utils.build_tensor_info(keys_placeholder),
          "indexs": utils.build_tensor_info(sparse_index),
          "ids": utils.build_tensor_info(sparse_ids),
          "values": utils.build_tensor_info(sparse_values),
          "shape": utils.build_tensor_info(sparse_shape)
      },
      outputs={
          "keys": utils.build_tensor_info(keys),
          "softmax": utils.build_tensor_info(inference_softmax),
          "prediction": utils.build_tensor_info(inference_op)
      },
      method_name=signature_constants.PREDICT_METHOD_NAME)
  try:
    builder = saved_model_builder.SavedModelBuilder(export_path)
    builder.add_meta_graph_and_variables(
        sess,
        [tag_constants.SERVING],
        clear_devices=True,
        signature_def_map={
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                model_signature,
        },
        # legacy_init_op=legacy_init_op)
        legacy_init_op=tf.group(tf.initialize_all_tables(),
                                name="legacy_init_op"))
    builder.save()
  except Exception as e:
    logging.error("Fail to export saved model, exception: {}".format(e))
elif MODE == "inference":
  if not restore_session_from_checkpoint(sess, saver, LATEST_CHECKPOINT):
    logging.error("No checkpoint found, exit now")
    exit(1)
  # Load inference test data
  inference_result_file_name = "./inference_result.txt"
  inference_test_file_name = "./data/a8a_test.libsvm"
  labels = []
  feature_ids = []
  feature_values = []
  feature_index = []
  ins_num = 0
  for line in open(inference_test_file_name, "r"):
    tokens = line.split(" ")
    labels.append(int(tokens[0]))
    feature_num = 0
    for feature in tokens[1:]:
      feature_id, feature_value = feature.split(":")
      feature_ids.append(int(feature_id))
      feature_values.append(float(feature_value))
      feature_index.append([ins_num, feature_num])
      feature_num += 1
    ins_num += 1
  # Run inference
  start_time = datetime.datetime.now()
  prediction, prediction_softmax = sess.run(
      [inference_op, inference_softmax],
      feed_dict={sparse_index: feature_index,
                 sparse_ids: feature_ids,
                 sparse_values: feature_values,
                 sparse_shape: [ins_num, FEATURE_SIZE]})
  end_time = datetime.datetime.now()
  # Compute accuracy
  label_number = len(labels)
  correct_label_number = 0
  for i in range(label_number):
    if labels[i] == prediction[i]:
      correct_label_number += 1
  accuracy = float(correct_label_number) / label_number
  # Compute AUC
  expected_labels = np.array(labels)
  predict_labels = prediction_softmax[:, 0]
  fpr, tpr, thresholds = metrics.roc_curve(expected_labels,
                                           predict_labels,
                                           pos_label=0)
  auc = metrics.auc(fpr, tpr)
  logging.info("[{}] Inference accuracy: {}, auc: {}".format(
      end_time - start_time, accuracy, auc))
  # Save result into the file
  np.savetxt(inference_result_file_name, prediction_softmax, delimiter=",")
  logging.info("Save result to file: {}".format(
      inference_result_file_name))
Example 14: export
# ......... part of the code omitted here .........
with tf.Graph().as_default():
  # Build inference model.
  # Please refer to the Tensorflow inception model for details.

  # Input transformation.
  serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
  feature_configs = {
      'image/encoded': tf.FixedLenFeature(
          shape=[], dtype=tf.string),
  }
  tf_example = tf.parse_example(serialized_tf_example, feature_configs)
  jpegs = tf_example['image/encoded']
  images = tf.map_fn(preprocess_image, jpegs, dtype=tf.float32)

  # Run inference.
  logits, _ = inception_model.inference(images, NUM_CLASSES + 1)

  # Transform output to topK result.
  values, indices = tf.nn.top_k(logits, NUM_TOP_CLASSES)

  # Create a constant string Tensor where the i'th element is
  # the human readable class description for the i'th index.
  # Note that the 0th index is an unused background class
  # (see inception model definition code).
  class_descriptions = ['unused background']
  for s in synsets:
    class_descriptions.append(texts[s])
  class_tensor = tf.constant(class_descriptions)
  classes = tf.contrib.lookup.index_to_string(
      tf.to_int64(indices), mapping=class_tensor)

  # Restore variables from training checkpoint.
  variable_averages = tf.train.ExponentialMovingAverage(
      inception_model.MOVING_AVERAGE_DECAY)
  variables_to_restore = variable_averages.variables_to_restore()
  saver = tf.train.Saver(variables_to_restore)
  with tf.Session() as sess:
    # Restore variables from training checkpoints.
    ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
    if ckpt and ckpt.model_checkpoint_path:
      saver.restore(sess, ckpt.model_checkpoint_path)
      # Assuming model_checkpoint_path looks something like:
      #   /my-favorite-path/imagenet_train/model.ckpt-0,
      # extract global_step from it.
      global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
      print('Successfully loaded model from %s at step=%s.' %
            (ckpt.model_checkpoint_path, global_step))
    else:
      print('No checkpoint file found at %s' % FLAGS.checkpoint_dir)
      return

    # Export inference model.
    output_path = os.path.join(
        compat.as_bytes(FLAGS.output_dir),
        compat.as_bytes(str(FLAGS.model_version)))
    print('Exporting trained model to %s' % output_path)
    builder = saved_model_builder.SavedModelBuilder(output_path)

    # Build the signature_def_map.
    classify_inputs_tensor_info = utils.build_tensor_info(
        serialized_tf_example)
    classes_output_tensor_info = utils.build_tensor_info(classes)
    scores_output_tensor_info = utils.build_tensor_info(values)
    classification_signature = signature_def_utils.build_signature_def(
        inputs={
            signature_constants.CLASSIFY_INPUTS: classify_inputs_tensor_info
        },
        outputs={
            signature_constants.CLASSIFY_OUTPUT_CLASSES:
                classes_output_tensor_info,
            signature_constants.CLASSIFY_OUTPUT_SCORES:
                scores_output_tensor_info
        },
        method_name=signature_constants.CLASSIFY_METHOD_NAME)

    predict_inputs_tensor_info = utils.build_tensor_info(jpegs)
    prediction_signature = signature_def_utils.build_signature_def(
        inputs={'images': predict_inputs_tensor_info},
        outputs={
            'classes': classes_output_tensor_info,
            'scores': scores_output_tensor_info
        },
        method_name=signature_constants.PREDICT_METHOD_NAME)

    legacy_init_op = tf.group(
        tf.initialize_all_tables(), name='legacy_init_op')
    builder.add_meta_graph_and_variables(
        sess, [tag_constants.SERVING],
        signature_def_map={
            'predict_images':
                prediction_signature,
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                classification_signature,
        },
        legacy_init_op=legacy_init_op)
    builder.save()
    print('Successfully exported model to %s' % FLAGS.output_dir)
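On later TF 1.x releases the same export wiring is usually spelled with the renamed initializer. The following is a sketch under the assumption that tf.tables_initializer and the main_op argument of SavedModelBuilder.add_meta_graph_and_variables are available (both appeared during the 1.x series); the export path and the toy graph are illustrative:

import tensorflow as tf
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants

# A toy graph with one variable and one lookup table.
v = tf.Variable(1.0, name="v")
table = tf.contrib.lookup.HashTable(
    tf.contrib.lookup.KeyValueTensorInitializer(
        tf.constant(["a", "b"]), tf.constant([0, 1], tf.int64)),
    default_value=-1)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  builder = saved_model_builder.SavedModelBuilder("/tmp/lookup_export/1")
  builder.add_meta_graph_and_variables(
      sess, [tag_constants.SERVING],
      # main_op runs when the SavedModel is loaded and supersedes
      # legacy_init_op; tf.tables_initializer() is the renamed
      # tf.initialize_all_tables().
      main_op=tf.tables_initializer())
  builder.save()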
Example 15: operation
'''
Args:
  logits: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    2-D with shape `[batch_size, num_classes]`.
  name: A name for the operation (optional).

Returns:
  A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
'''
# To implement cross-entropy, add a placeholder to input the correct answers.
y_ = tf.placeholder(tf.float32, [None, mnist.train.labels.shape[1]])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y),
                                              reduction_indices=[1]))

# Training setup, with learning rate 0.5.
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

# Initialize the lookup tables in the graph. Note that, despite the
# original comment here, tf.initialize_all_tables() does NOT initialize
# variables; tf.initialize_all_variables() does that (see the sketch below).
init = tf.initialize_all_tables()

# Launch the model in a Session.
sess = tf.Session()
sess.run(init)
for i in range(1000):
  batch_xs, batch_ys = mnist.train.next_batch(100)
  sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
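A caveat on this last example: tf.initialize_all_tables() initializes only lookup tables, so any variables the model creates would remain uninitialized. Below is a sketch of the initialization most likely intended, using the TF 1.x-era names (tf.initialize_all_variables was itself later renamed tf.global_variables_initializer):

import tensorflow as tf

# ... define model variables and any lookup tables here ...

# Group variable and table initialization into a single op.
init = tf.group(tf.initialize_all_variables(),  # variables (weights, biases, ...)
                tf.initialize_all_tables())     # lookup tables, if any

sess = tf.Session()
sess.run(init)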