This page collects typical code examples of the tensorflow.compat.v1.int64 method in Python. If you are unsure what v1.int64 is for or how to use it, the curated examples below should help. You can also explore further usage examples from its parent module, tensorflow.compat.v1.
The following shows 15 code examples of v1.int64, ordered by popularity by default.
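Before the project-specific examples, here is a minimal sketch (not taken from any project below; all tensor and feature names are illustrative) of the most common roles of tf.compat.v1.int64: a dtype for tensors and for tf.Example feature specs, often downcast to int32 later.

import tensorflow.compat.v1 as tf

# int64 constant; tf.train.Example stores integer features as int64.
ids = tf.constant([1, 2, 3], dtype=tf.int64)
# int64 feature spec for parsing serialized tf.Example protos.
spec = {"label": tf.FixedLenFeature([], tf.int64)}
# Many ops (e.g. on TPU) expect int32, so a downcast is common.
ids32 = tf.cast(ids, tf.int32)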
Example 1: encode_knowledge_bottom
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int64 [as alias]
def encode_knowledge_bottom(self, features):
  tf.logging.info("Encoding knowledge " + str(self.triple_num))
  # Make sure this is embeddings for triples
  # <tf.float32>[batch_size, triple_num*max_triple_length, 1, emb_dim]
  fact_embedding = features["encoded_triples"]
  # [batch_size, triple_num*max_triple_length, emb_dim]
  fact_embedding = tf.squeeze(fact_embedding, 2)
  kb_shape = common_layers.shape_list(fact_embedding)
  batch_size = kb_shape[0]
  embed_dim = kb_shape[2]
  # <tf.float32>[batch_size*triple_num, max_triple_length, emb_dim]
  re_fact_embedding = tf.reshape(
      fact_embedding, [batch_size * self.triple_num, -1, embed_dim],
      name="reshape_fact_embedding")
  # <tf.int64>[batch_size, triple_num]
  input_fact_lengths = features["triple_lens"]
  # Stack the fact lengths.
  # <tf.int64>[batch_size*triple_num, 1]
  re_fact_lengths = tf.reshape(
      input_fact_lengths, [batch_size * self.triple_num, 1],
      name="reshape_fact_lengths")
  return re_fact_embedding, re_fact_lengths
Example 2: compute_last_embedding
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int64 [as alias]
def compute_last_embedding(input_embeddings, input_lengths, hparams):
  """Computes average of last K embedding.

  Args:
    input_embeddings: <tf.float32>[bs, max_seq_len, emb_dim]
    input_lengths: <tf.int64>[bs, 1]
    hparams: model hparams

  Returns:
    last_k_embedding: <tf.float32>[bs, emb_dim]
  """
  max_seq_len = tf.shape(input_embeddings)[1]
  # <tf.float32>[bs, 1, max_seq_len]
  mask = tf.sequence_mask(input_lengths, max_seq_len, dtype=tf.float32)
  del_mask = tf.sequence_mask(
      input_lengths - hparams.last_k, max_seq_len, dtype=tf.float32)
  final_mask = mask - del_mask
  # <tf.float32>[bs, 1, emb_dim]
  sum_embedding = tf.matmul(final_mask, input_embeddings)
  # <tf.float32>[bs, 1, emb_dim]
  last_k_embedding = sum_embedding / tf.to_float(
      tf.expand_dims(
          tf.ones([tf.shape(input_embeddings)[0], 1]) * hparams.last_k, 2))
  # <tf.float32>[bs, dim]
  return tf.squeeze(last_k_embedding, 1)
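A hypothetical call of the helper above, using dummy tensors and a stand-in hparams object (last_k=2 is an arbitrary choice); compute_max_pool_embedding and compute_average_embedding in the next two examples take the same tensors, minus hparams:

import collections
import tensorflow.compat.v1 as tf

# Batch of 2 sequences, max length 4, embedding dim 3 (made-up values).
embeddings = tf.random_uniform([2, 4, 3])
lengths = tf.constant([[4], [2]], dtype=tf.int64)  # <tf.int64>[bs, 1]
hparams = collections.namedtuple("HParams", ["last_k"])(last_k=2)
last_k_emb = compute_last_embedding(embeddings, lengths, hparams)  # [2, 3]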
Example 3: compute_max_pool_embedding
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int64 [as alias]
def compute_max_pool_embedding(input_embeddings, input_lengths):
  """Computes max pool embedding.

  Args:
    input_embeddings: <tf.float32>[bs, max_seq_len, emb_dim]
    input_lengths: <tf.int64>[bs, 1]

  Returns:
    max_pool_embedding: <tf.float32>[bs, emb_dim]
  """
  max_seq_len = tf.shape(input_embeddings)[1]
  # <tf.float32>[bs, max_seq_len]
  mask = 1.0 - tf.sequence_mask(input_lengths, max_seq_len, dtype=tf.float32)
  mask = tf.squeeze(mask * (-1e-6), 1)
  mask = tf.expand_dims(mask, 2)
  # <tf.float32>[bs, emb_dim]
  max_pool_embedding = tf.reduce_max(input_embeddings + mask, 1)
  # <tf.float32>[bs, dim]
  return max_pool_embedding
Example 4: compute_average_embedding
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int64 [as alias]
def compute_average_embedding(input_embeddings, input_lengths):
  """Computes bag-of-words embedding.

  Args:
    input_embeddings: <tf.float32>[bs, max_seq_len, emb_dim]
    input_lengths: <tf.int64>[bs, 1]

  Returns:
    bow_embedding: <tf.float32>[bs, emb_dim]
  """
  max_seq_len = tf.shape(input_embeddings)[1]
  # <tf.float32>[bs, 1, max_seq_len]
  mask = tf.sequence_mask(input_lengths, max_seq_len, dtype=tf.float32)
  # <tf.float32>[bs, 1, emb_dim]
  sum_embedding = tf.matmul(mask, input_embeddings)
  # <tf.float32>[bs, 1, emb_dim]
  avg_embedding = sum_embedding / tf.to_float(tf.expand_dims(input_lengths, 2))
  # <tf.float32>[bs, dim]
  return tf.squeeze(avg_embedding, 1)
Example 5: testDatasetPacking
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int64 [as alias]
def testDatasetPacking(self):
  dataset = tf.data.Dataset.from_generator(
      example_generator,
      output_types={"inputs": tf.int64, "targets": tf.int64},
      output_shapes={"inputs": tf.TensorShape((None,)),
                     "targets": tf.TensorShape((None,))}
  )
  dataset = generator_utils.pack_dataset(
      dataset, length=5, keys=("inputs", "targets"), use_custom_ops=False)
  with tf.Session().as_default() as sess:
    batch = dataset.make_one_shot_iterator().get_next()
    for reference in reference_packing():
      example = sess.run(batch)
      self.assertAllEqual(set(example.keys()), set(reference.keys()))
      for k in reference:
        self.assertAllEqual(example[k], reference[k])
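example_generator and reference_packing come from the surrounding test file and are not shown here; a self-contained variant of the int64 generator setup, with a made-up generator, could look like this:

import tensorflow.compat.v1 as tf

def toy_generator():
  # Yields variable-length int64 sequences.
  yield {"inputs": [1, 2, 3], "targets": [4, 5]}
  yield {"inputs": [6], "targets": [7, 8, 9]}

dataset = tf.data.Dataset.from_generator(
    toy_generator,
    output_types={"inputs": tf.int64, "targets": tf.int64},
    output_shapes={"inputs": tf.TensorShape((None,)),
                   "targets": tf.TensorShape((None,))})
with tf.Session() as sess:
  element = dataset.make_one_shot_iterator().get_next()
  print(sess.run(element))  # {'inputs': array([1, 2, 3]), 'targets': array([4, 5])}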
Example 6: example_reading_spec
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int64 [as alias]
def example_reading_spec(self):
  data_fields, data_items_to_decoders = (
      super(ImageVqav2Tokens10kLabels3k, self).example_reading_spec())
  data_fields["image/image_id"] = tf.FixedLenFeature((), tf.int64)
  data_fields["image/question_id"] = tf.FixedLenFeature((), tf.int64)
  data_fields["image/question"] = tf.FixedLenSequenceFeature(
      (), tf.int64, allow_missing=True)
  data_fields["image/answer"] = tf.FixedLenSequenceFeature(
      (), tf.int64, allow_missing=True)
  slim = contrib.slim()
  data_items_to_decoders["question"] = slim.tfexample_decoder.Tensor(
      "image/question")
  data_items_to_decoders["targets"] = slim.tfexample_decoder.Tensor(
      "image/answer")
  return data_fields, data_items_to_decoders
Example 7: process_rewards
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int64 [as alias]
def process_rewards(self, rewards):
  """Clips the rewards, optionally rounds them and casts to integer.

  Args:
    rewards: numpy array of raw (float) rewards.

  Returns:
    processed_rewards: numpy array of np.int64
  """
  min_reward, max_reward = self.reward_range
  # Clips at min and max reward.
  rewards = np.clip(rewards, min_reward, max_reward)
  if self._discrete_rewards:
    # Round to (nearest) int and convert to integral type.
    rewards = np.around(rewards, decimals=0).astype(np.int64)
  return rewards
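The numpy part of this method can be exercised on its own; a small illustration assuming reward_range is (-1, 1) and discrete rewards are enabled:

import numpy as np

raw_rewards = np.array([-3.2, 0.4, 2.7])
clipped = np.clip(raw_rewards, -1, 1)                        # [-1.0, 0.4, 1.0]
discrete = np.around(clipped, decimals=0).astype(np.int64)   # [-1, 0, 1]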
Example 8: example_reading_spec
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int64 [as alias]
def example_reading_spec(self):
  """Return a mix of env and video data fields and decoders."""
  slim = contrib.slim()
  video_fields, video_decoders = (
      video_utils.VideoProblem.example_reading_spec(self))
  env_fields, env_decoders = (
      gym_env_problem.GymEnvProblem.example_reading_spec(self))
  # Remove raw observations field since we want to capture them as videos.
  env_fields.pop(env_problem.OBSERVATION_FIELD)
  env_decoders.pop(env_problem.OBSERVATION_FIELD)
  # Add frame number spec and decoder.
  env_fields[_FRAME_NUMBER_FIELD] = tf.FixedLenFeature((1,), tf.int64)
  env_decoders[_FRAME_NUMBER_FIELD] = slim.tfexample_decoder.Tensor(
      _FRAME_NUMBER_FIELD)
  # Add video fields and decoders.
  env_fields.update(video_fields)
  env_decoders.update(video_decoders)
  return env_fields, env_decoders
Example 9: _init_graph
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int64 [as alias]
def _init_graph(self):
  """Initialize computation graph for tensorflow."""
  with self.graph.as_default():
    self.refiner = im.ImNet(dim=self.dim,
                            in_features=self.codelen,
                            out_features=self.out_features,
                            num_filters=self.num_filters)
    self.global_step = tf.get_variable('global_step', shape=[],
                                       dtype=tf.int64)
    self.pts_ph = tf.placeholder(tf.float32, shape=[self.point_batch, 3])
    self.lat_ph = tf.placeholder(tf.float32, shape=[self.codelen])
    lat = tf.broadcast_to(self.lat_ph[tf.newaxis],
                          [self.point_batch, self.codelen])
    code = tf.concat((self.pts_ph, lat), axis=-1)  # [pb, 3+c]
    vals = self.refiner(code, training=False)  # [pb, 1]
    self.vals = tf.squeeze(vals, axis=1)  # [pb]
    self.saver = tf.train.Saver()
    self.sess = tf.Session()
    self.saver.restore(self.sess, self.ckpt)
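The int64 usage here is the conventional global_step counter; a minimal standalone version of just that piece (without the project-specific ImNet parts) might be:

import tensorflow.compat.v1 as tf

graph = tf.Graph()
with graph.as_default():
  global_step = tf.get_variable(
      'global_step', shape=[], dtype=tf.int64,
      initializer=tf.zeros_initializer(), trainable=False)
  increment = tf.assign_add(global_step, 1)
with tf.Session(graph=graph) as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(increment))  # 1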
Example 10: testDecodeExampleWithInt64Tensor
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int64 [as alias]
def testDecodeExampleWithInt64Tensor(self):
  np_array = np.random.randint(1, 10, size=(2, 3, 1))
  example = tf.train.Example(
      features=tf.train.Features(feature={
          'array': self._EncodedInt64Feature(np_array),
      }))
  serialized_example = example.SerializeToString()
  with self.cached_session():
    serialized_example = array_ops.reshape(serialized_example, shape=[])
    keys_to_features = {
        'array': parsing_ops.FixedLenFeature(np_array.shape, tf.int64)
    }
    items_to_handlers = {
        'array': tfexample_decoder.Tensor('array'),
    }
    decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
                                                 items_to_handlers)
    [tf_array] = decoder.decode(serialized_example, ['array'])
    self.assertAllEqual(tf_array.eval(), np_array)
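_EncodedInt64Feature is a test helper that is not shown on this page; presumably it flattens the array into an Int64List feature. A hypothetical equivalent:

import numpy as np
import tensorflow.compat.v1 as tf

def encoded_int64_feature(ndarray):
  # Wraps a numpy integer array in a tf.train.Feature holding an Int64List.
  return tf.train.Feature(
      int64_list=tf.train.Int64List(value=ndarray.flatten().tolist()))

feature = encoded_int64_feature(np.array([[1, 2], [3, 4]]))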
Example 11: testDecodeExampleWithVarLenTensor
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int64 [as alias]
def testDecodeExampleWithVarLenTensor(self):
  np_array = np.array([[[1], [2], [3]], [[4], [5], [6]]])
  example = tf.train.Example(
      features=tf.train.Features(feature={
          'labels': self._EncodedInt64Feature(np_array),
      }))
  serialized_example = example.SerializeToString()
  with self.cached_session():
    serialized_example = array_ops.reshape(serialized_example, shape=[])
    keys_to_features = {
        'labels': parsing_ops.VarLenFeature(dtype=tf.int64),
    }
    items_to_handlers = {
        'labels': tfexample_decoder.Tensor('labels'),
    }
    decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
                                                 items_to_handlers)
    [tf_labels] = decoder.decode(serialized_example, ['labels'])
    labels = tf_labels.eval()
    self.assertAllEqual(labels, np_array.flatten())
Example 12: testDecodeExampleWithFixLenTensorWithShape
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int64 [as alias]
def testDecodeExampleWithFixLenTensorWithShape(self):
  np_array = np.array([[1, 2, 3], [4, 5, 6]])
  example = tf.train.Example(
      features=tf.train.Features(feature={
          'labels': self._EncodedInt64Feature(np_array),
      }))
  serialized_example = example.SerializeToString()
  with self.cached_session():
    serialized_example = array_ops.reshape(serialized_example, shape=[])
    keys_to_features = {
        'labels': parsing_ops.FixedLenFeature(np_array.shape, dtype=tf.int64),
    }
    items_to_handlers = {
        'labels': tfexample_decoder.Tensor('labels', shape=np_array.shape),
    }
    decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
                                                 items_to_handlers)
    [tf_labels] = decoder.decode(serialized_example, ['labels'])
    labels = tf_labels.eval()
    self.assertAllEqual(labels, np_array)
Example 13: testDecodeExampleWithVarLenTensorToDense
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int64 [as alias]
def testDecodeExampleWithVarLenTensorToDense(self):
  np_array = np.array([[1, 2, 3], [4, 5, 6]])
  example = tf.train.Example(
      features=tf.train.Features(feature={
          'labels': self._EncodedInt64Feature(np_array),
      }))
  serialized_example = example.SerializeToString()
  with self.cached_session():
    serialized_example = array_ops.reshape(serialized_example, shape=[])
    keys_to_features = {
        'labels': parsing_ops.VarLenFeature(dtype=tf.int64),
    }
    items_to_handlers = {
        'labels': tfexample_decoder.Tensor('labels', shape=np_array.shape),
    }
    decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
                                                 items_to_handlers)
    [tf_labels] = decoder.decode(serialized_example, ['labels'])
    labels = tf_labels.eval()
    self.assertAllEqual(labels, np_array)
Example 14: serving_input_receiver_fn
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int64 [as alias]
def serving_input_receiver_fn():
  """Creates an input function for serving."""
  seq_len = FLAGS.max_seq_length
  serialized_example = tf.placeholder(
      dtype=tf.string, shape=[None], name="serialized_example")
  features = {
      "input_ids": tf.FixedLenFeature([seq_len], dtype=tf.int64),
      "input_mask": tf.FixedLenFeature([seq_len], dtype=tf.int64),
      "segment_ids": tf.FixedLenFeature([seq_len], dtype=tf.int64),
  }
  feature_map = tf.parse_example(serialized_example, features=features)
  feature_map["is_real_example"] = tf.constant(1, dtype=tf.int32)
  feature_map["label_ids"] = tf.constant(0, dtype=tf.int32)
  # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
  # So cast all int64 to int32.
  for name in feature_map.keys():
    t = feature_map[name]
    if t.dtype == tf.int64:
      t = tf.to_int32(t)
    feature_map[name] = t
  return tf.estimator.export.ServingInputReceiver(
      features=feature_map, receiver_tensors=serialized_example)
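Outside of an Estimator, the same parse-then-downcast pattern can be tried directly; seq_len=4 and the token values below are arbitrary:

import tensorflow.compat.v1 as tf

seq_len = 4
example = tf.train.Example(features=tf.train.Features(feature={
    "input_ids": tf.train.Feature(
        int64_list=tf.train.Int64List(value=[101, 7, 8, 102])),
}))
parsed = tf.parse_example(
    tf.constant([example.SerializeToString()]),
    {"input_ids": tf.FixedLenFeature([seq_len], dtype=tf.int64)})
input_ids = tf.to_int32(parsed["input_ids"])  # int64 -> int32
with tf.Session() as sess:
  print(sess.run(input_ids))  # [[101   7   8 102]]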
Example 15: categorical_sample
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int64 [as alias]
def categorical_sample(logits, dtype=tf.int32,
                       sample_shape=(), seed=None):
  """Samples from categorical distribution."""
  logits = tf.convert_to_tensor(logits, name="logits")
  event_size = tf.shape(logits)[-1]
  batch_shape_tensor = tf.shape(logits)[:-1]

  def _sample_n(n):
    """Sample vector of categoricals."""
    if logits.shape.ndims == 2:
      logits_2d = logits
    else:
      logits_2d = tf.reshape(logits, [-1, event_size])
    sample_dtype = tf.int64 if logits.dtype.size > 4 else tf.int32
    draws = tf.multinomial(
        logits_2d, n, seed=seed, output_dtype=sample_dtype)
    draws = tf.reshape(
        tf.transpose(draws),
        tf.concat([[n], batch_shape_tensor], 0))
    return tf.cast(draws, dtype)

  return _call_sampler(_sample_n, sample_shape)
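_call_sampler is defined elsewhere in the same module; the int64-relevant detail is the output_dtype argument of tf.multinomial, which can be exercised on its own:

import tensorflow.compat.v1 as tf

logits = tf.log([[0.1, 0.9]])  # a single 2-way categorical
draws = tf.multinomial(logits, num_samples=5, output_dtype=tf.int64)
with tf.Session() as sess:
  print(sess.run(draws))  # e.g. [[1 1 0 1 1]], dtype int64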