This article collects typical usage examples of the Python method tensorflow.python.training.monitored_session.ChiefSessionCreator. If you are wondering what monitored_session.ChiefSessionCreator does, how to call it, or how it is used in practice, the curated code examples below may help. You can also read further about its containing module, tensorflow.python.training.monitored_session.
The following shows 9 code examples of monitored_session.ChiefSessionCreator, sorted by popularity by default.
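Before the examples, here is a minimal sketch of the pattern they all share: a ChiefSessionCreator is pointed at a checkpoint file, and a MonitoredSession built from it restores the variables before the first run call. This is only an illustration under assumed names; the variable name and checkpoint path ('/tmp/model.ckpt-12345') are placeholders and are not taken from the examples below.

# Minimal usage sketch (TF 1.x graph mode). The variable name and checkpoint
# path are illustrative placeholders, not part of the examples that follow.
import tensorflow as tf
from tensorflow.python.training import monitored_session

with tf.Graph().as_default():
  # Build (or import) the inference graph here; `output` stands in for any
  # tensor to evaluate once the checkpoint has been restored.
  output = tf.get_variable('some_variable', shape=[10])

  # ChiefSessionCreator restores variables from the given checkpoint file and
  # initializes/finalizes the graph via its (default) Scaffold.
  session_creator = monitored_session.ChiefSessionCreator(
      checkpoint_filename_with_path='/tmp/model.ckpt-12345')  # placeholder path

  with monitored_session.MonitoredSession(
      session_creator=session_creator) as sess:
    value = sess.run(output)

The examples below follow the same structure, differing mainly in how the Scaffold, checkpoint path, and session configuration are supplied.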
Example 1: test_moving_variables_properly_loaded_from_a_checkpoint
# Required import: from tensorflow.python.training import monitored_session [as alias]
# Or: from tensorflow.python.training.monitored_session import ChiefSessionCreator [as alias]
def test_moving_variables_properly_loaded_from_a_checkpoint(self):
  batch_size = 32
  dataset_name = 'fsns'
  images_placeholder, endpoints = demo_inference.create_model(
      batch_size, dataset_name)
  image_path_pattern = 'testdata/fsns_train_%02d.png'
  images_data = demo_inference.load_images(
      image_path_pattern, batch_size, dataset_name)
  tensor_name = 'AttentionOcr_v1/conv_tower_fn/INCE/InceptionV3/Conv2d_2a_3x3/BatchNorm/moving_mean'
  moving_mean_tf = tf.get_default_graph().get_tensor_by_name(
      tensor_name + ':0')
  reader = tf.train.NewCheckpointReader(_CHECKPOINT)
  moving_mean_expected = reader.get_tensor(tensor_name)

  session_creator = monitored_session.ChiefSessionCreator(
      checkpoint_filename_with_path=_CHECKPOINT)
  with monitored_session.MonitoredSession(
      session_creator=session_creator) as sess:
    moving_mean_np = sess.run(moving_mean_tf,
                              feed_dict={images_placeholder: images_data})

  self.assertAllEqual(moving_mean_expected, moving_mean_np)
Example 2: _infer_model
# Required import: from tensorflow.python.training import monitored_session [as alias]
# Or: from tensorflow.python.training.monitored_session import ChiefSessionCreator [as alias]
def _infer_model(self,
                 input_fn,
                 feed_fn=None,
                 outputs=None,
                 as_iterable=True,
                 iterate_batches=False):
  # Check that model has been trained.
  checkpoint_path = saver.latest_checkpoint(self._model_dir)
  if not checkpoint_path:
    raise NotFittedError(
        "Couldn't find trained model at %s." % self._model_dir)

  with ops.Graph().as_default() as g:
    random_seed.set_random_seed(self._config.tf_random_seed)
    contrib_framework.create_global_step(g)
    features = self._get_features_from_input_fn(input_fn)
    infer_ops = self._get_predict_ops(features)
    predictions = self._filter_predictions(infer_ops.predictions, outputs)
    mon_sess = monitored_session.MonitoredSession(
        session_creator=monitored_session.ChiefSessionCreator(
            checkpoint_filename_with_path=checkpoint_path,
            scaffold=infer_ops.scaffold,
            config=self._session_config))
    if not as_iterable:
      with mon_sess:
        if not mon_sess.should_stop():
          return mon_sess.run(predictions, feed_fn() if feed_fn else None)
    else:
      return self._predict_generator(
          mon_sess, predictions, feed_fn, iterate_batches)
Example 3: _infer_model
# Required import: from tensorflow.python.training import monitored_session [as alias]
# Or: from tensorflow.python.training.monitored_session import ChiefSessionCreator [as alias]
def _infer_model(self,
                 input_fn,
                 feed_fn=None,
                 outputs=None,
                 as_iterable=True,
                 iterate_batches=False):
  # Check that model has been trained.
  checkpoint_path = saver.latest_checkpoint(self._model_dir)
  if not checkpoint_path:
    raise NotFittedError(
        "Couldn't find trained model at %s." % self._model_dir)

  with ops.Graph().as_default() as g:
    random_seed.set_random_seed(self._config.tf_random_seed)
    contrib_framework.create_global_step(g)
    features = self._get_features_from_input_fn(input_fn)
    infer_ops = self._call_legacy_get_predict_ops(features)
    predictions = self._filter_predictions(infer_ops.predictions, outputs)
    mon_sess = monitored_session.MonitoredSession(
        session_creator=monitored_session.ChiefSessionCreator(
            checkpoint_filename_with_path=checkpoint_path))
    if not as_iterable:
      with mon_sess:
        if not mon_sess.should_stop():
          return mon_sess.run(predictions, feed_fn() if feed_fn else None)
    else:
      return self._predict_generator(
          mon_sess, predictions, feed_fn, iterate_batches)
Example 4: run
# Required import: from tensorflow.python.training import monitored_session [as alias]
# Or: from tensorflow.python.training.monitored_session import ChiefSessionCreator [as alias]
def run(checkpoint, batch_size, dataset_name, image_path_pattern):
  images_placeholder, endpoints = create_model(batch_size, dataset_name)
  images_data = load_images(image_path_pattern, batch_size, dataset_name)
  session_creator = monitored_session.ChiefSessionCreator(
      checkpoint_filename_with_path=checkpoint)
  with monitored_session.MonitoredSession(
      session_creator=session_creator) as sess:
    predictions = sess.run(endpoints.predicted_text,
                           feed_dict={images_placeholder: images_data})
  return predictions.tolist()
Example 5: correlation_matrix
# Required import: from tensorflow.python.training import monitored_session [as alias]
# Or: from tensorflow.python.training.monitored_session import ChiefSessionCreator [as alias]
def correlation_matrix(nb_batches, checkpoint_dir):
  """Computes logits and labels of the input posts and saves them as numpy files.

  Parameters:
    nb_batches: Number of batches to run through the model.
    checkpoint_dir: Checkpoint of the saved model during training.
  """
  with tf.Graph().as_default():
    config = _CONFIG.copy()
    config['mode'] = 'validation'
    model = DeepSentiment(config)

    # Load model
    checkpoint_path = tf_saver.latest_checkpoint(checkpoint_dir)
    scaffold = monitored_session.Scaffold(
        init_op=None, init_feed_dict=None,
        init_fn=None, saver=None)
    session_creator = monitored_session.ChiefSessionCreator(
        scaffold=scaffold,
        checkpoint_filename_with_path=checkpoint_path,
        master='',
        config=None)

    posts_logits = []
    posts_labels = []
    with monitored_session.MonitoredSession(  # Generate queue
        session_creator=session_creator, hooks=None) as session:
      for i in range(nb_batches):
        np_logits, np_labels = session.run([model.logits, model.labels])
        posts_logits.append(np_logits)
        posts_labels.append(np_labels)

    posts_logits, posts_labels = np.vstack(posts_logits), np.hstack(posts_labels)
    np.save('data/posts_logits.npy', posts_logits)
    np.save('data/posts_labels.npy', posts_labels)
    return posts_logits, posts_labels
Example 6: run
# Required import: from tensorflow.python.training import monitored_session [as alias]
# Or: from tensorflow.python.training.monitored_session import ChiefSessionCreator [as alias]
def run(checkpoint, batch_size, dataset_name, image_path_pattern):
  images_placeholder, endpoints = create_model(batch_size, dataset_name)
  images_data = load_images(image_path_pattern, batch_size, dataset_name)
  session_creator = monitored_session.ChiefSessionCreator(
      checkpoint_filename_with_path=checkpoint)
  with monitored_session.MonitoredSession(
      session_creator=session_creator) as sess:
    predictions = sess.run(endpoints.predicted_text,
                           feed_dict={images_placeholder: images_data})
  return [pr_bytes.decode('utf-8') for pr_bytes in predictions.tolist()]
Example 7: _predict
# Required import: from tensorflow.python.training import monitored_session [as alias]
# Or: from tensorflow.python.training.monitored_session import ChiefSessionCreator [as alias]
def _predict(self, run_ctx, step):
  var_name_to_value = run_ctx.session.run(self._var_name_to_train_var)
  logging.info('Building placeholders.')
  placeholder_to_value = {
      self._var_name_to_placeholder[v_name]: var_name_to_value[v_name]
      for v_name in var_name_to_value
  }

  def feed_variables(scaffold, session):
    del scaffold
    session.run(self._var_feed_op, feed_dict=placeholder_to_value)

  logging.info('Building scaffold.')
  scaffold = training.Scaffold(init_fn=feed_variables)

  with self._graph.as_default():
    session_creator = monitored_session.ChiefSessionCreator(
        scaffold=scaffold,
        checkpoint_filename_with_path=None,
        master=run_ctx.session.sess_str)
    self._handler.setup(step)
    logging.info('Setup done.')
    with monitored_session.MonitoredSession(
        session_creator=session_creator,
        hooks=self._all_hooks) as predict_session:
      while not predict_session.should_stop():
        logging.info('Predicting.... %s', self._predictions)
        preds_evaluated = predict_session.run(self._predictions)
        if not isinstance(self._predictions, dict):
          for pred in preds_evaluated:
            self._handler.handle_prediction(pred)
        else:
          for i in range(self._estimator._extract_batch_length(preds_evaluated)):
            self._handler.handle_prediction({
                key: value[i]
                for key, value in six.iteritems(preds_evaluated)
            })
      logging.info('Finalizing.')
      self._handler.finalize(step)

  logging.info('Done with prediction.')
  self._timer.update_last_triggered_step(step)
Example 8: outliers_detection
# Required import: from tensorflow.python.training import monitored_session [as alias]
# Or: from tensorflow.python.training.monitored_session import ChiefSessionCreator [as alias]
def outliers_detection(checkpoint_dir):
  """Finds outliers using the Euclidean distance in the last dense layer.

  Parameters:
    checkpoint_dir: Checkpoint of the saved model during training.
  """
  with tf.Graph().as_default():
    config = _CONFIG.copy()
    config['mode'] = 'validation'
    model = DeepSentiment(config)

    # Load model
    checkpoint_path = tf_saver.latest_checkpoint(checkpoint_dir)
    scaffold = monitored_session.Scaffold(
        init_op=None, init_feed_dict=None,
        init_fn=None, saver=None)
    session_creator = monitored_session.ChiefSessionCreator(
        scaffold=scaffold,
        checkpoint_filename_with_path=checkpoint_path,
        master='',
        config=None)

    im_features_size = config['im_features_size']
    rnn_size = config['rnn_size']
    dense_mean = np.zeros((im_features_size + rnn_size))
    with monitored_session.MonitoredSession(  # Generate queue
        session_creator=session_creator, hooks=None) as session:
      batch_size = config['batch_size']
      # Integer division so that range() receives an int (also correct in Python 3).
      nb_batches = model.dataset.num_samples // batch_size
      for i in range(nb_batches):
        current_dense = session.run(model.concat_features)
        weight = float(i) * batch_size / ((i + 1) * batch_size)
        dense_mean = weight * dense_mean + (1 - weight) * current_dense.mean(axis=0)

      # Now look at outliers
      max_norms = np.zeros((batch_size))
      max_post_ids = np.zeros((batch_size))
      max_logits = np.zeros((batch_size, model.dataset.num_classes))
      for i in range(nb_batches):
        current_dense, np_post_ids, current_logits = session.run(
            [model.concat_features, model.post_ids, model.logits])
        current_diff = np.linalg.norm(current_dense - dense_mean, axis=1)
        for k in range(batch_size):
          if current_diff[k] > max_norms[k]:
            max_norms[k] = current_diff[k]
            max_post_ids[k] = np_post_ids[k]
            max_logits[k] = current_logits[k]

    np.save('data/max_norms.npy', max_norms)
    np.save('data/max_post_ids.npy', max_post_ids)
    np.save('data/max_logits.npy', max_logits)
    return max_norms, max_post_ids, max_logits
Example 9: day_of_week_trend
# Required import: from tensorflow.python.training import monitored_session [as alias]
# Or: from tensorflow.python.training.monitored_session import ChiefSessionCreator [as alias]
def day_of_week_trend(checkpoint_dir):
  """Computes the day-of-week trend.

  Parameters:
    checkpoint_dir: Checkpoint of the saved model during training.
  """
  with tf.Graph().as_default():
    config = _CONFIG.copy()
    config['mode'] = 'validation'
    model = DeepSentiment(config)

    # Load model
    checkpoint_path = tf_saver.latest_checkpoint(checkpoint_dir)
    scaffold = monitored_session.Scaffold(
        init_op=None, init_feed_dict=None,
        init_fn=None, saver=None)
    session_creator = monitored_session.ChiefSessionCreator(
        scaffold=scaffold,
        checkpoint_filename_with_path=checkpoint_path,
        master='',
        config=None)

    posts_logits = []
    posts_labels = []
    posts_days = []
    posts_ids = []
    with monitored_session.MonitoredSession(  # Generate queue
        session_creator=session_creator, hooks=None) as session:
      batch_size = config['batch_size']
      # Integer division so that range() receives an int (also correct in Python 3).
      nb_batches = model.dataset.num_samples // batch_size
      for i in range(nb_batches):
        np_logits, np_labels, np_days, np_post_ids = session.run(
            [model.logits, model.labels, model.days, model.post_ids])
        posts_logits.append(np_logits)
        posts_labels.append(np_labels)
        posts_days.append(np_days)
        posts_ids.append(np_post_ids)

    posts_logits, posts_labels = np.vstack(posts_logits), np.hstack(posts_labels)
    posts_days, posts_ids = np.hstack(posts_days), np.hstack(posts_ids)
    np.save('data/posts_logits_week.npy', posts_logits)
    np.save('data/posts_labels_week.npy', posts_labels)
    np.save('data/posts_days_week.npy', posts_days)
    np.save('data/posts_ids_week.npy', posts_ids)
    return posts_logits, posts_labels, posts_days, posts_ids