This article collects typical usage examples of the tensorflow.compat.v1.placeholder method in Python. If you have been wondering what v1.placeholder does and how to call it, the curated code examples below may help; you can also explore other members of the containing module, tensorflow.compat.v1.
The sections below show 15 code examples of v1.placeholder, ordered by popularity.
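Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below, and assuming graph-mode execution) of the pattern every example relies on: declare a placeholder as a graph input, build ops on top of it, then supply concrete values through feed_dict when running a session.

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # placeholders only work in graph mode (required under TF2)

# A placeholder declares a graph input whose value is supplied at run time.
x = tf.placeholder(dtype=tf.float32, shape=[None, 3], name="x")
row_sums = tf.reduce_sum(x, axis=-1)

with tf.Session() as sess:
    # feed_dict maps each placeholder to a concrete numpy value.
    print(sess.run(row_sums, feed_dict={x: np.ones((2, 3), dtype=np.float32)}))  # prints [3. 3.]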

Example 1: build
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import placeholder [as alias]
def build(self, input_shape):
    with self._sess.graph.as_default():
        self._placeholders["tokens"] = tf.placeholder(
            dtype=tf.int32, shape=[None, None], name="tokens"
        )

        self._ops["output_logits"] = self.compute_logits(
            self._placeholders["tokens"]
        )
        self._ops["output_probs"] = tf.nn.softmax(self._ops["output_logits"], -1)
        result = self.compute_loss_and_acc(
            rnn_output_logits=self._ops["output_logits"],
            target_token_seq=self._placeholders["tokens"],
        )

        self._ops["loss"] = result.token_ce_loss
        self._ops["num_tokens"] = result.num_predictions
        self._ops["num_correct_tokens"] = result.num_correct_token_predictions
        self._ops["train_step"] = self._make_training_step(self._ops["loss"])

        init_op = tf.variables_initializer(
            self._sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        )
        self._sess.run(init_op)
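The build method above only constructs the graph; at training time the "tokens" placeholder is fed with batches of token IDs. A hypothetical training-step fragment in the same spirit is sketched below; model (an already-built instance of the class above) and token_batch are assumptions, and the real class presumably wraps this in a public method rather than touching private attributes.

import numpy as np

token_batch = np.zeros((32, 50), dtype=np.int32)  # [batch_size, max_seq_len], dummy data
loss_value, _ = model._sess.run(
    [model._ops["loss"], model._ops["train_step"]],
    feed_dict={model._placeholders["tokens"]: token_batch},
)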

Example 2: __init__
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import placeholder [as alias]
def __init__(self, hparams, action_space, observation_space, policy_dir):
  assert hparams.base_algo == "ppo"
  ppo_hparams = trainer_lib.create_hparams(hparams.base_algo_params)

  frame_stack_shape = (1, hparams.frame_stack_size) + observation_space.shape
  self._frame_stack = np.zeros(frame_stack_shape, dtype=np.uint8)

  with tf.Graph().as_default():
    self.obs_t = tf.placeholder(shape=self.frame_stack_shape, dtype=np.uint8)
    self.logits_t, self.value_function_t = get_policy(
        self.obs_t, ppo_hparams, action_space
    )
    model_saver = tf.train.Saver(
        tf.global_variables(scope=ppo_hparams.policy_network + "/.*")  # pylint: disable=unexpected-keyword-arg
    )
    self.sess = tf.Session()
    self.sess.run(tf.global_variables_initializer())
    trainer_lib.restore_checkpoint(policy_dir, model_saver, self.sess)

Example 3: __init__
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import placeholder [as alias]
def __init__(
    self, batch_size, observation_space, action_space, policy_hparams,
    policy_dir, sampling_temp
):
  super(PolicyAgent, self).__init__(
      batch_size, observation_space, action_space
  )
  self._sampling_temp = sampling_temp
  with tf.Graph().as_default():
    self._observations_t = tf.placeholder(
        shape=((batch_size,) + self.observation_space.shape),
        dtype=self.observation_space.dtype
    )
    (logits, self._values_t) = rl.get_policy(
        self._observations_t, policy_hparams, self.action_space
    )
    actions = common_layers.sample_with_temperature(logits, sampling_temp)
    self._probs_t = tf.nn.softmax(logits / sampling_temp)
    self._actions_t = tf.cast(actions, tf.int32)
    model_saver = tf.train.Saver(
        tf.global_variables(policy_hparams.policy_network + "/.*")  # pylint: disable=unexpected-keyword-arg
    )
    self._sess = tf.Session()
    self._sess.run(tf.global_variables_initializer())
    trainer_lib.restore_checkpoint(policy_dir, model_saver, self._sess)

Example 4: __init__
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import placeholder [as alias]
def __init__(self, *args, **kwargs):
  with tf.Graph().as_default():
    self._batch_env = SimulatedBatchEnv(*args, **kwargs)

    self._actions_t = tf.placeholder(shape=(self.batch_size,), dtype=tf.int32)
    self._rewards_t, self._dones_t = self._batch_env.simulate(self._actions_t)
    with tf.control_dependencies([self._rewards_t]):
      self._obs_t = self._batch_env.observ
    self._indices_t = tf.placeholder(shape=(self.batch_size,), dtype=tf.int32)
    self._reset_op = self._batch_env.reset(
        tf.range(self.batch_size, dtype=tf.int32)
    )

    self._sess = tf.Session()
    self._sess.run(tf.global_variables_initializer())
    self._batch_env.initialize(self._sess)

Example 5: get_zipped_dataset_from_predictions
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import placeholder [as alias]
def get_zipped_dataset_from_predictions(predictions):
  """Creates dataset from in-memory predictions."""
  targets = stack_data_given_key(predictions, "targets")
  outputs = stack_data_given_key(predictions, "outputs")
  num_videos, num_steps = targets.shape[:2]

  # Truncate output time-steps to match target time-steps.
  outputs = outputs[:, :num_steps]

  targets_placeholder = tf.placeholder(targets.dtype, targets.shape)
  outputs_placeholder = tf.placeholder(outputs.dtype, outputs.shape)

  dataset = tf.data.Dataset.from_tensor_slices(
      (targets_placeholder, outputs_placeholder))
  iterator = dataset.make_initializable_iterator()
  feed_dict = {targets_placeholder: targets,
               outputs_placeholder: outputs}
  return iterator, feed_dict, num_videos
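The function hands back an uninitialized iterator plus the feed_dict needed to initialize it, so the large prediction arrays are fed through placeholders instead of being baked into the graph as constants. A hypothetical consumer (predictions and the per-video metric computation are assumptions) might look like:

iterator, feed_dict, num_videos = get_zipped_dataset_from_predictions(predictions)
targets_t, outputs_t = iterator.get_next()

with tf.Session() as sess:
  sess.run(iterator.initializer, feed_dict=feed_dict)  # placeholders are fed exactly once here
  for _ in range(num_videos):
    target_frames, output_frames = sess.run([targets_t, outputs_t])
    # ... compute per-video metrics on the resulting numpy arrays ...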

Example 6: __init__
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import placeholder [as alias]
def __init__(self, batch_size, *args, **kwargs):
  self._store_rollouts = kwargs.pop("store_rollouts", True)

  super(T2TEnv, self).__init__(*args, **kwargs)

  self.batch_size = batch_size
  self._rollouts_by_epoch_and_split = collections.OrderedDict()
  self.current_epoch = None
  self._should_preprocess_on_reset = True

  with tf.Graph().as_default() as tf_graph:
    self._tf_graph = _Noncopyable(tf_graph)
    self._decoded_image_p = _Noncopyable(
        tf.placeholder(dtype=tf.uint8, shape=(None, None, None))
    )
    self._encoded_image_t = _Noncopyable(
        tf.image.encode_png(self._decoded_image_p.obj)
    )
    self._encoded_image_p = _Noncopyable(tf.placeholder(tf.string))
    self._decoded_image_t = _Noncopyable(
        tf.image.decode_png(self._encoded_image_p.obj)
    )
    self._session = _Noncopyable(tf.Session())

Example 7: __init__
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import placeholder [as alias]
def __init__(self):
  # Create a single Session to run all image coding calls.
  self._sess = tf.Session()

  # Initializes function that converts PNG to JPEG data.
  self._png_data = tf.placeholder(dtype=tf.string)
  image = tf.image.decode_png(self._png_data, channels=3)
  self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)

  # Initializes function that converts CMYK JPEG data to RGB JPEG data.
  self._cmyk_data = tf.placeholder(dtype=tf.string)
  image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
  self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)

  # Initializes function that decodes RGB JPEG data.
  self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
  self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
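Each op above is later driven by feeding the matching tf.string placeholder with raw encoded bytes. In the original class these calls are typically exposed as instance methods; the standalone helper below is only an illustration, and coder plus the file path are assumptions.

def decode_jpeg(coder, image_data):
  # Run the pre-built decode op, feeding the raw JPEG bytes into the string placeholder.
  image = coder._sess.run(coder._decode_jpeg,
                          feed_dict={coder._decode_jpeg_data: image_data})
  assert len(image.shape) == 3 and image.shape[2] == 3
  return image

with tf.gfile.GFile("/path/to/example.jpg", "rb") as f:
  rgb_image = decode_jpeg(coder, f.read())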

Example 8: _init_graph
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import placeholder [as alias]
def _init_graph(self):
  """Initialize computation graph for tensorflow."""
  with self.graph.as_default():
    self.refiner = im.ImNet(dim=self.dim,
                            in_features=self.codelen,
                            out_features=self.out_features,
                            num_filters=self.num_filters)
    self.global_step = tf.get_variable('global_step', shape=[],
                                       dtype=tf.int64)

    self.pts_ph = tf.placeholder(tf.float32, shape=[self.point_batch, 3])
    self.lat_ph = tf.placeholder(tf.float32, shape=[self.codelen])

    lat = tf.broadcast_to(self.lat_ph[tf.newaxis],
                          [self.point_batch, self.codelen])
    code = tf.concat((self.pts_ph, lat), axis=-1)  # [pb, 3+c]
    vals = self.refiner(code, training=False)  # [pb, 1]
    self.vals = tf.squeeze(vals, axis=1)  # [pb]

    self.saver = tf.train.Saver()
    self.sess = tf.Session()
    self.saver.restore(self.sess, self.ckpt)

Example 9: build_squad_serving_input_fn
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import placeholder [as alias]
def build_squad_serving_input_fn(seq_length):
  """Builds a serving input fn for raw input."""

  def _seq_serving_input_fn():
    """Serving input fn for raw images."""
    input_ids = tf.placeholder(
        shape=[1, seq_length], name="input_ids", dtype=tf.int32)
    input_mask = tf.placeholder(
        shape=[1, seq_length], name="input_mask", dtype=tf.int32)
    segment_ids = tf.placeholder(
        shape=[1, seq_length], name="segment_ids", dtype=tf.int32)

    inputs = {
        "input_ids": input_ids,
        "input_mask": input_mask,
        "segment_ids": segment_ids
    }
    return tf.estimator.export.ServingInputReceiver(features=inputs,
                                                    receiver_tensors=inputs)

  return _seq_serving_input_fn
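The returned closure is what a TF1 Estimator expects when exporting a SavedModel for serving. A hypothetical export call is sketched below; the estimator instance, the sequence length, and the export directory are assumptions.

serving_input_fn = build_squad_serving_input_fn(seq_length=384)
estimator.export_saved_model(  # spelled export_savedmodel on older TF1 releases
    export_dir_base="/tmp/squad_export",
    serving_input_receiver_fn=serving_input_fn)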

Example 10: serving_input_receiver_fn
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import placeholder [as alias]
def serving_input_receiver_fn():
  """Creates an input function for serving."""
  seq_len = FLAGS.max_seq_length
  serialized_example = tf.placeholder(
      dtype=tf.string, shape=[None], name="serialized_example")
  features = {
      "input_ids": tf.FixedLenFeature([seq_len], dtype=tf.int64),
      "input_mask": tf.FixedLenFeature([seq_len], dtype=tf.int64),
      "segment_ids": tf.FixedLenFeature([seq_len], dtype=tf.int64),
  }
  feature_map = tf.parse_example(serialized_example, features=features)
  feature_map["is_real_example"] = tf.constant(1, dtype=tf.int32)
  feature_map["label_ids"] = tf.constant(0, dtype=tf.int32)

  # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
  # So cast all int64 to int32.
  for name in feature_map.keys():
    t = feature_map[name]
    if t.dtype == tf.int64:
      t = tf.to_int32(t)
    feature_map[name] = t

  return tf.estimator.export.ServingInputReceiver(
      features=feature_map, receiver_tensors=serialized_example)

Example 11: _ValidateProvideBatchPlaceholder
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import placeholder [as alias]
def _ValidateProvideBatchPlaceholder(self,
                                     truncated_length,
                                     batch_size,
                                     lengths,
                                     expected_num_inputs):
  examples, expected_inputs = self._CreateExamplesAndExpectedInputs(
      truncated_length, lengths, expected_num_inputs)

  examples_ph = tf.placeholder(tf.string, [None])
  feed_dict = {examples_ph: [e.SerializeToString() for e in examples]}

  self._ValidateProvideBatch(
      examples_ph,
      truncated_length,
      batch_size,
      expected_inputs,
      feed_dict=feed_dict)

Example 12: testTfUnsliced
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import placeholder [as alias]
def testTfUnsliced(self):
  converter = self.converter_class(steps_per_quarter=1, slice_bars=None)
  with self.test_session() as sess:
    sequence = tf.placeholder(tf.string)
    input_tensors_, output_tensors_, _, lengths_ = data.convert_to_tensors_op(
        sequence, converter)
    input_tensors, output_tensors, lengths = sess.run(
        [input_tensors_, output_tensors_, lengths_],
        feed_dict={sequence: self.sequence.SerializeToString()})

  actual_input_tensors = [t[:l] for t, l in zip(input_tensors, lengths)]
  actual_unsliced_labels = [
      np.argmax(t, axis=-1)[:l] for t, l in zip(output_tensors, lengths)]

  self.assertArraySetsEqual(
      self.labels_to_inputs(self.expected_unsliced_labels, converter),
      actual_input_tensors)
  self.assertArraySetsEqual(
      self.expected_unsliced_labels, actual_unsliced_labels)

Example 13: testTfSliced
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import placeholder [as alias]
def testTfSliced(self):
  converter = self.converter_class(
      steps_per_quarter=1, slice_bars=2, max_tensors_per_notesequence=None)
  with self.test_session() as sess:
    sequence = tf.placeholder(tf.string)
    input_tensors_, output_tensors_, _, lengths_ = data.convert_to_tensors_op(
        sequence, converter)
    input_tensors, output_tensors, lengths = sess.run(
        [input_tensors_, output_tensors_, lengths_],
        feed_dict={sequence: self.sequence.SerializeToString()})

  actual_sliced_labels = [
      np.argmax(t, axis=-1)[:l] for t, l in zip(output_tensors, lengths)]

  self.assertArraySetsEqual(
      self.labels_to_inputs(self.expected_sliced_labels, converter),
      input_tensors)
  self.assertArraySetsEqual(self.expected_sliced_labels, actual_sliced_labels)

Example 14: testTfUnslicedChordConditioned
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import placeholder [as alias]
def testTfUnslicedChordConditioned(self):
  converter = self.converter_class(
      steps_per_quarter=1,
      slice_bars=None,
      chord_encoding=note_seq.MajorMinorChordOneHotEncoding())
  with self.test_session() as sess:
    sequence = tf.placeholder(tf.string)
    input_tensors_, output_tensors_, control_tensors_, lengths_ = (
        data.convert_to_tensors_op(sequence, converter))
    input_tensors, output_tensors, control_tensors, lengths = sess.run(
        [input_tensors_, output_tensors_, control_tensors_, lengths_],
        feed_dict={sequence: self.sequence.SerializeToString()})

  actual_input_tensors = [t[:l] for t, l in zip(input_tensors, lengths)]
  actual_unsliced_labels = [
      np.argmax(t, axis=-1)[:l] for t, l in zip(output_tensors, lengths)]
  actual_unsliced_chord_labels = [
      np.argmax(t, axis=-1)[:l] for t, l in zip(control_tensors, lengths)]

  self.assertArraySetsEqual(
      self.labels_to_inputs(self.expected_unsliced_labels, converter),
      actual_input_tensors)
  self.assertArraySetsEqual(
      self.expected_unsliced_labels, actual_unsliced_labels)
  self.assertArraySetsEqual(
      self.expected_unsliced_chord_labels, actual_unsliced_chord_labels)

Example 15: testTfSlicedChordConditioned
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import placeholder [as alias]
def testTfSlicedChordConditioned(self):
  converter = self.converter_class(
      steps_per_quarter=1,
      slice_bars=2,
      max_tensors_per_notesequence=None,
      chord_encoding=note_seq.MajorMinorChordOneHotEncoding())
  with self.test_session() as sess:
    sequence = tf.placeholder(tf.string)
    input_tensors_, output_tensors_, control_tensors_, lengths_ = (
        data.convert_to_tensors_op(sequence, converter))
    input_tensors, output_tensors, control_tensors, lengths = sess.run(
        [input_tensors_, output_tensors_, control_tensors_, lengths_],
        feed_dict={sequence: self.sequence.SerializeToString()})

  actual_sliced_labels = [
      np.argmax(t, axis=-1)[:l] for t, l in zip(output_tensors, lengths)]
  actual_sliced_chord_labels = [
      np.argmax(t, axis=-1)[:l] for t, l in zip(control_tensors, lengths)]

  self.assertArraySetsEqual(
      self.labels_to_inputs(self.expected_sliced_labels, converter),
      input_tensors)
  self.assertArraySetsEqual(self.expected_sliced_labels, actual_sliced_labels)
  self.assertArraySetsEqual(
      self.expected_sliced_chord_labels, actual_sliced_chord_labels)