This article collects typical usage examples of the Python method tensorflow.compat.v1.Session. If you are wondering what v1.Session does, how it is used, or want to see concrete examples, the curated code samples below may help. You can also explore further usage examples from the enclosing module, tensorflow.compat.v1.
Below are 15 code examples of v1.Session, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
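Before the examples, here is a minimal, self-contained sketch of the basic tf.compat.v1.Session pattern (build a graph, initialize variables, run ops). The tensor and variable names below are purely illustrative and not taken from any of the examples; the tf.disable_eager_execution() call is only needed when running under TensorFlow 2.x.

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # Only needed under TensorFlow 2.x.

with tf.Graph().as_default():
  x = tf.constant([1.0, 2.0, 3.0])     # Illustrative input tensor.
  w = tf.get_variable("w", shape=[3])  # Illustrative variable.
  total = tf.reduce_sum(x + w)         # Op to evaluate inside the session.
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(total))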
Example 1: evaluate
# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import Session [as an alias]
def evaluate(self, env_fn, hparams, sampling_temp):
  with tf.Graph().as_default():
    with tf.name_scope("rl_eval"):
      eval_env = env_fn(in_graph=True)
      (collect_memory, _, collect_init) = _define_collect(
          eval_env,
          hparams,
          "ppo_eval",
          eval_phase=True,
          frame_stack_size=self.frame_stack_size,
          force_beginning_resets=False,
          sampling_temp=sampling_temp,
          distributional_size=self._distributional_size,
      )
      model_saver = tf.train.Saver(
          tf.global_variables(hparams.policy_network + "/.*")
          # tf.global_variables("clean_scope.*")  # Needed for sharing params.
      )

      with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        collect_init(sess)
        trainer_lib.restore_checkpoint(self.agent_model_dir, model_saver,
                                       sess)
        sess.run(collect_memory)
Example 2: __init__
# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import Session [as an alias]
def __init__(self, hparams, action_space, observation_space, policy_dir):
  assert hparams.base_algo == "ppo"
  ppo_hparams = trainer_lib.create_hparams(hparams.base_algo_params)

  frame_stack_shape = (1, hparams.frame_stack_size) + observation_space.shape
  self._frame_stack = np.zeros(frame_stack_shape, dtype=np.uint8)

  with tf.Graph().as_default():
    self.obs_t = tf.placeholder(shape=self.frame_stack_shape, dtype=np.uint8)
    self.logits_t, self.value_function_t = get_policy(
        self.obs_t, ppo_hparams, action_space
    )
    model_saver = tf.train.Saver(
        tf.global_variables(scope=ppo_hparams.policy_network + "/.*")  # pylint: disable=unexpected-keyword-arg
    )
    self.sess = tf.Session()
    self.sess.run(tf.global_variables_initializer())
    trainer_lib.restore_checkpoint(policy_dir, model_saver,
                                   self.sess)
Example 3: __init__
# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import Session [as an alias]
def __init__(
    self, batch_size, observation_space, action_space, policy_hparams,
    policy_dir, sampling_temp
):
  super(PolicyAgent, self).__init__(
      batch_size, observation_space, action_space
  )
  self._sampling_temp = sampling_temp
  with tf.Graph().as_default():
    self._observations_t = tf.placeholder(
        shape=((batch_size,) + self.observation_space.shape),
        dtype=self.observation_space.dtype
    )
    (logits, self._values_t) = rl.get_policy(
        self._observations_t, policy_hparams, self.action_space
    )
    actions = common_layers.sample_with_temperature(logits, sampling_temp)
    self._probs_t = tf.nn.softmax(logits / sampling_temp)
    self._actions_t = tf.cast(actions, tf.int32)
    model_saver = tf.train.Saver(
        tf.global_variables(policy_hparams.policy_network + "/.*")  # pylint: disable=unexpected-keyword-arg
    )
    self._sess = tf.Session()
    self._sess.run(tf.global_variables_initializer())
    trainer_lib.restore_checkpoint(policy_dir, model_saver, self._sess)
Example 4: __init__
# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import Session [as an alias]
def __init__(self, *args, **kwargs):
  with tf.Graph().as_default():
    self._batch_env = SimulatedBatchEnv(*args, **kwargs)

    self._actions_t = tf.placeholder(shape=(self.batch_size,), dtype=tf.int32)
    self._rewards_t, self._dones_t = self._batch_env.simulate(self._actions_t)
    with tf.control_dependencies([self._rewards_t]):
      self._obs_t = self._batch_env.observ
    self._indices_t = tf.placeholder(shape=(self.batch_size,), dtype=tf.int32)
    self._reset_op = self._batch_env.reset(
        tf.range(self.batch_size, dtype=tf.int32)
    )

    self._sess = tf.Session()
    self._sess.run(tf.global_variables_initializer())
    self._batch_env.initialize(self._sess)
Example 5: test_invertibility
# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import Session [as an alias]
def test_invertibility(self, op, name, dropout=0.0):
  with tf.Graph().as_default():
    tf.set_random_seed(42)
    x = tf.random_uniform(shape=(16, 32, 32, 4))

    if op in [glow_ops.affine_coupling, glow_ops.additive_coupling]:
      with arg_scope([glow_ops.get_dropout], init=False):
        x_inv, _ = op(name, x, reverse=False, dropout=dropout)
        x_inv_inv, _ = op(name, x_inv, reverse=True, dropout=dropout)
    else:
      x_inv, _ = op(name, x, reverse=False)
      x_inv_inv, _ = op(name, x_inv, reverse=True)

    with tf.Session() as session:
      session.run(tf.global_variables_initializer())
      diff = session.run(x - x_inv_inv)
      self.assertTrue(np.allclose(diff, 0.0, atol=1e-5))
Example 6: test_temperature_normal
# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import Session [as an alias]
def test_temperature_normal(self, temperature):
  with tf.Graph().as_default():
    rng = np.random.RandomState(0)
    # Draw loc and scale in numpy so that repeated calls do not produce
    # different random numbers.
    loc_t = tf.convert_to_tensor(rng.randn(5, 5))
    scale_t = tf.convert_to_tensor(rng.rand(5, 5))
    tempered_normal = glow_ops.TemperedNormal(
        loc=loc_t, scale=scale_t, temperature=temperature)
    # Smoke test for a single sample.
    smoke_sample = tempered_normal.sample()
    samples = tempered_normal.sample((10000,), seed=0)

    with tf.Session() as sess:
      ops = [samples, loc_t, scale_t, smoke_sample]
      samples_np, loc_exp, scale_exp, _ = sess.run(ops)
      scale_exp *= temperature
      loc_act = np.mean(samples_np, axis=0)
      scale_act = np.std(samples_np, axis=0)
      self.assertTrue(np.allclose(loc_exp, loc_act, atol=1e-2))
      self.assertTrue(np.allclose(scale_exp, scale_act, atol=1e-2))
Example 7: linear_interpolate_rank
# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import Session [as an alias]
def linear_interpolate_rank(self):
  with tf.Graph().as_default():
    # Since rank is 1, the first channel should remain 1.0, and the second
    # channel should be interpolated between 1.0 and 6.0.
    z1 = np.ones(shape=(4, 4, 2))
    z2 = np.copy(z1)
    z2[:, :, 0] += 0.01
    z2[:, :, 1] += 5.0
    coeffs = np.linspace(0.0, 1.0, 11)
    z1 = np.expand_dims(z1, axis=0)
    z2 = np.expand_dims(z2, axis=0)
    tensor1 = tf.convert_to_tensor(z1, dtype=tf.float32)
    tensor2 = tf.convert_to_tensor(z2, dtype=tf.float32)
    lin_interp_max = glow_ops.linear_interpolate_rank(
        tensor1, tensor2, coeffs)

    with tf.Session() as sess:
      lin_interp_np_max = sess.run(lin_interp_max)
      for lin_interp_np, coeff in zip(lin_interp_np_max, coeffs):
        exp_val = 1.0 + coeff * (6.0 - 1.0)
        self.assertTrue(np.allclose(lin_interp_np[:, :, 0], 1.0))
        self.assertTrue(np.allclose(lin_interp_np[:, :, 1], exp_val))
Example 8: testSpectralNorm
# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import Session [as an alias]
def testSpectralNorm(self):
  # Test that after 20 calls to apply_spectral_norm, the spectral
  # norm of the normalized matrix is close to 1.0.
  with tf.Graph().as_default():
    weights = tf.get_variable("w", dtype=tf.float32, shape=[2, 3, 50, 100])
    weights = tf.multiply(weights, 10.0)
    normed_weight, assign_op = common_layers.apply_spectral_norm(weights)

    with tf.Session() as sess:
      sess.run(tf.global_variables_initializer())
      for _ in range(20):
        sess.run(assign_op)
        normed_weight, assign_op = common_layers.apply_spectral_norm(
            weights)
      normed_weight = sess.run(normed_weight).reshape(-1, 100)
      _, s, _ = np.linalg.svd(normed_weight)
      self.assertTrue(np.allclose(s[0], 1.0, rtol=0.1))
Example 9: testDatasetPacking
# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import Session [as an alias]
def testDatasetPacking(self):
  dataset = tf.data.Dataset.from_generator(
      example_generator,
      output_types={"inputs": tf.int64, "targets": tf.int64},
      output_shapes={"inputs": tf.TensorShape((None,)),
                     "targets": tf.TensorShape((None,))}
  )
  dataset = generator_utils.pack_dataset(
      dataset, length=5, keys=("inputs", "targets"), use_custom_ops=False)

  with tf.Session().as_default() as sess:
    batch = dataset.make_one_shot_iterator().get_next()
    for reference in reference_packing():
      example = sess.run(batch)
      self.assertAllEqual(set(example.keys()), set(reference.keys()))
      for k in reference:
        self.assertAllEqual(example[k], reference[k])
Example 10: __init__
# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import Session [as an alias]
def __init__(self, batch_size, *args, **kwargs):
  self._store_rollouts = kwargs.pop("store_rollouts", True)

  super(T2TEnv, self).__init__(*args, **kwargs)

  self.batch_size = batch_size
  self._rollouts_by_epoch_and_split = collections.OrderedDict()
  self.current_epoch = None
  self._should_preprocess_on_reset = True

  with tf.Graph().as_default() as tf_graph:
    self._tf_graph = _Noncopyable(tf_graph)
    self._decoded_image_p = _Noncopyable(
        tf.placeholder(dtype=tf.uint8, shape=(None, None, None))
    )
    self._encoded_image_t = _Noncopyable(
        tf.image.encode_png(self._decoded_image_p.obj)
    )
    self._encoded_image_p = _Noncopyable(tf.placeholder(tf.string))
    self._decoded_image_t = _Noncopyable(
        tf.image.decode_png(self._encoded_image_p.obj)
    )
    self._session = _Noncopyable(tf.Session())
Example 11: generate_samples
# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import Session [as an alias]
def generate_samples(self, data_dir, tmp_dir, dataset_split):
  with tf.Graph().as_default():
    # The train and eval sets are generated on the fly; the test set is the
    # official test set.
    if dataset_split == problem.DatasetSplit.TEST:
      moving_ds = self.get_test_iterator(tmp_dir)
    else:
      moving_ds = self.get_train_iterator()

    next_video = moving_ds.get_next()
    with tf.Session() as sess:
      sess.run(moving_ds.initializer)

      n_samples = SPLIT_TO_SIZE[dataset_split]
      for _ in range(n_samples):
        next_video_np = sess.run(next_video)
        for frame_number, frame in enumerate(next_video_np):
          yield {
              "frame_number": [frame_number],
              "frame": frame,
          }
Example 12: export_module_spec_with_checkpoint
# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import Session [as an alias]
def export_module_spec_with_checkpoint(module_spec,
                                       checkpoint_path,
                                       export_path,
                                       scope_prefix=""):
  """Exports the given checkpoint as a tfhub module with the given spec."""

  # The main requirement is that it is possible to know how to map from
  # module variable name to checkpoint variable name.
  # This is trivial if the original code used variable scopes,
  # but can be messy if the variables to export are intertwined
  # with variables that are not exported.
  with tf.Graph().as_default():
    m = hub.Module(module_spec)
    assign_map = {
        scope_prefix + name: value for name, value in m.variable_map.items()
    }
    tf.train.init_from_checkpoint(checkpoint_path, assign_map)
    init_op = tf.initializers.global_variables()

    with tf.Session() as session:
      session.run(init_op)
      m.export(export_path, session)
Example 13: __init__
# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import Session [as an alias]
def __init__(self):
  # Create a single Session to run all image coding calls.
  self._sess = tf.Session()

  # Initializes function that converts PNG to JPEG data.
  self._png_data = tf.placeholder(dtype=tf.string)
  image = tf.image.decode_png(self._png_data, channels=3)
  self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)

  # Initializes function that converts CMYK JPEG data to RGB JPEG data.
  self._cmyk_data = tf.placeholder(dtype=tf.string)
  image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
  self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)

  # Initializes function that decodes RGB JPEG data.
  self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
  self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
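The constructor above only builds the conversion graphs; the example does not show the calls that actually run them. As a hedged sketch, hypothetical helper methods like the ones below (the method names and arguments are assumptions, not part of the original snippet) illustrate how the single Session created in __init__ would typically execute those ops:

def png_to_jpeg(self, image_data):
  # Feed raw PNG bytes through the pre-built PNG-to-JPEG conversion graph.
  return self._sess.run(self._png_to_jpeg,
                        feed_dict={self._png_data: image_data})

def decode_jpeg(self, image_data):
  # Decode JPEG bytes into an RGB uint8 array of shape [height, width, 3].
  image = self._sess.run(self._decode_jpeg,
                         feed_dict={self._decode_jpeg_data: image_data})
  assert len(image.shape) == 3 and image.shape[2] == 3
  return image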
Example 14: _init_graph
# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import Session [as an alias]
def _init_graph(self):
  """Initialize computation graph for tensorflow."""
  with self.graph.as_default():
    self.refiner = im.ImNet(dim=self.dim,
                            in_features=self.codelen,
                            out_features=self.out_features,
                            num_filters=self.num_filters)
    self.global_step = tf.get_variable('global_step', shape=[],
                                       dtype=tf.int64)

    self.pts_ph = tf.placeholder(tf.float32, shape=[self.point_batch, 3])
    self.lat_ph = tf.placeholder(tf.float32, shape=[self.codelen])

    lat = tf.broadcast_to(self.lat_ph[tf.newaxis],
                          [self.point_batch, self.codelen])
    code = tf.concat((self.pts_ph, lat), axis=-1)  # [pb, 3+c]
    vals = self.refiner(code, training=False)  # [pb, 1]
    self.vals = tf.squeeze(vals, axis=1)  # [pb]

    self.saver = tf.train.Saver()
    self.sess = tf.Session()
    self.saver.restore(self.sess, self.ckpt)
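The restored Session above is presumably used afterwards to evaluate self.vals for a given latent code and batch of points. A hedged sketch of such an evaluation helper (the method name eval_points is an assumption, not part of the original example):

def eval_points(self, points, latent_code):
  # points: float32 array of shape [point_batch, 3].
  # latent_code: float32 array of shape [codelen].
  return self.sess.run(self.vals,
                       feed_dict={self.pts_ph: points,
                                  self.lat_ph: latent_code})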
Example 15: testIndexedSlicesGradIsClippedCorrectly
# Required import: from tensorflow.compat import v1 [as an alias]
# Or: from tensorflow.compat.v1 import Session [as an alias]
def testIndexedSlicesGradIsClippedCorrectly(self):
  sparse_grad_indices = np.array([0, 1, 4])
  sparse_grad_dense_shape = [self._grad_vec.size]

  values = tf.constant(self._grad_vec, dtype=tf.float32)
  indices = tf.constant(sparse_grad_indices, dtype=tf.int32)
  dense_shape = tf.constant(sparse_grad_dense_shape, dtype=tf.int32)
  gradient = ops.IndexedSlices(values, indices, dense_shape)
  variable = variables_lib.Variable(self._zero_vec, dtype=tf.float32)

  gradients_to_variables = (gradient, variable)
  gradients_to_variables = learning.clip_gradient_norms(
      [gradients_to_variables], self._max_norm)[0]

  # Ensure the built IndexedSlices has the right form.
  self.assertEqual(gradients_to_variables[1], variable)
  self.assertEqual(gradients_to_variables[0].indices, indices)
  self.assertEqual(gradients_to_variables[0].dense_shape, dense_shape)

  with tf.Session() as sess:
    actual_gradient = sess.run(gradients_to_variables[0].values)
    np_testing.assert_almost_equal(actual_gradient, self._clipped_grad_vec)