This article collects typical usage examples of the Python method tensorflow.compat.v1.int32. If you are wondering what v1.int32 does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help; you can also explore further uses of the containing module, tensorflow.compat.v1.
The following presents 15 code examples of the v1.int32 method, ordered by popularity by default.
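As a warm-up, here is a minimal, self-contained sketch of the dtype itself (assuming a TF 1.x-compatible environment; the tensor values are purely illustrative):

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

# tf.int32 is a DType object; it is most often used to cast tensors or to
# declare the element type of placeholders, constants, and random ops.
x = tf.constant([1.7, 2.3, -0.4], dtype=tf.float32)
y = tf.cast(x, tf.int32)  # truncates toward zero -> [1, 2, 0]

with tf.Session() as sess:
  print(sess.run(y))  # [1 2 0]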
Example 1: loss_function
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int32 [as alias]
def loss_function(self, inputs, build_network_result):
  logits = build_network_result.logits
  # Unpack model output back to locations and confidence scores of predictions
  # Shape of pred_loc: [batch_size, NUM_SSD_BOXES, 4]
  # Shape of pred_label: [batch_size, NUM_SSD_BOXES, label_num]
  pred_loc, pred_label = tf.split(logits, [4, self.label_num], 2)
  # Shape of gt_loc: [batch_size, NUM_SSD_BOXES, 4]
  # Shape of gt_label: [batch_size, NUM_SSD_BOXES, 1]
  # Shape of num_gt: [batch_size]
  _, gt_loc, gt_label, num_gt = inputs
  gt_label = tf.cast(gt_label, tf.int32)
  box_loss = self._localization_loss(pred_loc, gt_loc, gt_label, num_gt)
  class_loss = self._classification_loss(pred_label, gt_label, num_gt)
  tf.summary.scalar('box_loss', tf.reduce_mean(box_loss))
  tf.summary.scalar('class_loss', tf.reduce_mean(class_loss))
  return class_loss + box_loss
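The tf.split call above does the shape bookkeeping: it slices the last axis into 4 box coordinates plus label_num class scores. A toy shape check (the batch size, box count, and class count here are illustrative, not the model's real values):

logits = tf.zeros([8, 100, 4 + 81])  # [batch, boxes, 4 coords + 81 classes]
pred_loc, pred_label = tf.split(logits, [4, 81], 2)
print(pred_loc.shape)    # (8, 100, 4)
print(pred_label.shape)  # (8, 100, 81)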
Example 2: get_synthetic_inputs
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int32 [as alias]
def get_synthetic_inputs(self, input_name, nclass):
  """Returns the ops to generate synthetic inputs and labels."""
  def users_init_val():
    return tf.random_uniform((self.batch_size, 1), minval=0,
                             maxval=_NUM_USERS_20M, dtype=tf.int32)
  users = tf.Variable(users_init_val, dtype=tf.int32, trainable=False,
                      collections=[tf.GraphKeys.LOCAL_VARIABLES],
                      name='synthetic_users')
  def items_init_val():
    return tf.random_uniform((self.batch_size, 1), minval=0,
                             maxval=_NUM_ITEMS_20M, dtype=tf.int32)
  items = tf.Variable(items_init_val, dtype=tf.int32, trainable=False,
                      collections=[tf.GraphKeys.LOCAL_VARIABLES],
                      name='synthetic_items')
  def labels_init_val():
    return tf.random_uniform((self.batch_size,), minval=0, maxval=2,
                             dtype=tf.int32)
  labels = tf.Variable(labels_init_val, dtype=tf.int32, trainable=False,
                       collections=[tf.GraphKeys.LOCAL_VARIABLES],
                       name='synthetic_labels')
  return [users, items, labels]
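Note the pattern used three times above: a callable initializer plus collections=[tf.GraphKeys.LOCAL_VARIABLES], so the synthetic int32 data lives in local, non-checkpointed variables. A stripped-down reproduction of that pattern (shapes and bounds are illustrative):

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

def init_val():
  return tf.random_uniform((4, 1), minval=0, maxval=10, dtype=tf.int32)

v = tf.Variable(init_val, dtype=tf.int32, trainable=False,
                collections=[tf.GraphKeys.LOCAL_VARIABLES],
                name='synthetic_demo')

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())  # not the *global* initializer
  print(sess.run(v))  # e.g. [[3] [7] [0] [9]]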
Example 3: get_synthetic_inputs
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int32 [as alias]
def get_synthetic_inputs(self, input_name, nclass):
  # Synthetic input should be within [0, 255].
  image_shape, label_shape = self.get_input_shapes('train')
  inputs = tf.truncated_normal(
      image_shape,
      dtype=self.data_type,
      mean=127,
      stddev=60,
      name=self.model_name + '_synthetic_inputs')
  inputs = variables_module.VariableV1(
      inputs, trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES],
      name=input_name)
  labels = tf.random_uniform(
      label_shape,
      minval=0,
      maxval=nclass - 1,
      dtype=tf.int32,
      name=self.model_name + '_synthetic_labels')
  return (inputs, labels)
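One subtlety worth flagging: tf.random_uniform's maxval bound is exclusive, so maxval=nclass - 1 above draws labels from {0, ..., nclass - 2}. A tiny demonstration (nclass = 10 is an assumed value for illustration):

nclass = 10
labels = tf.random_uniform([4], minval=0, maxval=nclass - 1, dtype=tf.int32)
# Possible values are 0..8 only; class nclass - 1 = 9 is never sampled.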
Example 4: mask_from_lengths
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int32 [as alias]
def mask_from_lengths(lengths, max_length=None, dtype=None, name=None):
  """Converts a vector of lengths to a matrix of binary masks.

  E.g. [2, 4, 3] will become [[1, 1, 0, 0], [1, 1, 1, 1], [1, 1, 1, 0]].

  Args:
    lengths: a d-dimensional vector of integers corresponding to lengths.
    max_length: an optional (default: None) scalar-like or 0-dimensional tensor
      indicating the maximum length of the masks. If not provided, the maximum
      length will be inferred from the lengths vector.
    dtype: the dtype of the returned mask, if specified. If None, the dtype of
      the lengths will be used.
    name: a name for the operation (optional).

  Returns:
    A d x max_length tensor of binary masks, cast to the requested dtype.
  """
  with tf.name_scope(name, 'mask_from_lengths'):
    dtype = lengths.dtype if dtype is None else dtype
    max_length = tf.reduce_max(lengths) if max_length is None else max_length
    indexes = tf.range(max_length, dtype=lengths.dtype)
    mask = tf.less(tf.expand_dims(indexes, 0), tf.expand_dims(lengths, 1))
    cast_mask = tf.cast(mask, dtype)
  return tf.stop_gradient(cast_mask)
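A quick usage check of mask_from_lengths, mirroring the example in its docstring (the mask dtype defaults to the lengths dtype, int32 here):

lengths = tf.constant([2, 4, 3], dtype=tf.int32)
mask = mask_from_lengths(lengths)

with tf.Session() as sess:
  print(sess.run(mask))
  # [[1 1 0 0]
  #  [1 1 1 1]
  #  [1 1 1 0]]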
Example 5: build
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int32 [as alias]
def build(self, input_shape):
    with self._sess.graph.as_default():
        self._placeholders["tokens"] = tf.placeholder(
            dtype=tf.int32, shape=[None, None], name="tokens"
        )
        self._ops["output_logits"] = self.compute_logits(
            self._placeholders["tokens"]
        )
        self._ops["output_probs"] = tf.nn.softmax(self._ops["output_logits"], -1)
        result = self.compute_loss_and_acc(
            rnn_output_logits=self._ops["output_logits"],
            target_token_seq=self._placeholders["tokens"],
        )
        self._ops["loss"] = result.token_ce_loss
        self._ops["num_tokens"] = result.num_predictions
        self._ops["num_correct_tokens"] = result.num_correct_token_predictions
        self._ops["train_step"] = self._make_training_step(self._ops["loss"])
        init_op = tf.variables_initializer(
            self._sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        )
        self._sess.run(init_op)
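Once build has run, the graph can be driven through the stored placeholders and ops. A hypothetical training call from inside the same class (the token ids below are made up for illustration):

batch = [[12, 7, 3, 0], [5, 9, 2, 8]]  # int32 token ids, shape [B, T]
loss, _ = self._sess.run(
    [self._ops["loss"], self._ops["train_step"]],
    feed_dict={self._placeholders["tokens"]: batch})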
Example 6: compute_logits
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int32 [as alias]
def compute_logits(self, token_ids: tf.Tensor) -> tf.Tensor:
    """
    Implements a language model, where each output is conditional on the current
    input and inputs processed so far.

    Args:
        token_ids: int32 tensor of shape [B, T], storing integer IDs of tokens.

    Returns:
        tf.float32 tensor of shape [B, T, V], storing the distribution over output
        symbols for each timestep for each batch element.
    """
    # TODO 5# 1) Embed tokens
    # TODO 5# 2) Run RNN on embedded tokens
    # TODO 5# 3) Project RNN outputs onto the vocabulary to obtain logits.
    return rnn_output_logits
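The body above is an exercise skeleton: the three TODOs must produce rnn_output_logits before the return statement can run. One hedged way to fill them in (the sizes and layer choices below are assumptions for illustration, not the repository's actual solution):

def compute_logits(self, token_ids: tf.Tensor) -> tf.Tensor:
    # Assumed hyperparameters; a real implementation would read them from config.
    vocab_size, embed_dim, hidden_dim = 10000, 128, 256
    # 1) Embed tokens: int32 ids of shape [B, T] -> floats of shape [B, T, D].
    embedding = tf.get_variable(
        "token_embedding", shape=[vocab_size, embed_dim], dtype=tf.float32)
    embedded = tf.nn.embedding_lookup(embedding, token_ids)
    # 2) Run an RNN over the embedded tokens.
    cell = tf.nn.rnn_cell.LSTMCell(hidden_dim)
    rnn_output, _ = tf.nn.dynamic_rnn(cell, embedded, dtype=tf.float32)
    # 3) Project RNN outputs onto the vocabulary to obtain logits: [B, T, V].
    rnn_output_logits = tf.layers.dense(rnn_output, vocab_size)
    return rnn_output_logits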
Example 7: __init__
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int32 [as alias]
def __init__(
    self, batch_size, observation_space, action_space, policy_hparams,
    policy_dir, sampling_temp
):
  super(PolicyAgent, self).__init__(
      batch_size, observation_space, action_space
  )
  self._sampling_temp = sampling_temp
  with tf.Graph().as_default():
    self._observations_t = tf.placeholder(
        shape=((batch_size,) + self.observation_space.shape),
        dtype=self.observation_space.dtype
    )
    (logits, self._values_t) = rl.get_policy(
        self._observations_t, policy_hparams, self.action_space
    )
    actions = common_layers.sample_with_temperature(logits, sampling_temp)
    self._probs_t = tf.nn.softmax(logits / sampling_temp)
    self._actions_t = tf.cast(actions, tf.int32)
    model_saver = tf.train.Saver(
        tf.global_variables(policy_hparams.policy_network + "/.*")  # pylint: disable=unexpected-keyword-arg
    )
    self._sess = tf.Session()
    self._sess.run(tf.global_variables_initializer())
    trainer_lib.restore_checkpoint(policy_dir, model_saver, self._sess)
Example 8: __init__
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int32 [as alias]
def __init__(self, *args, **kwargs):
  with tf.Graph().as_default():
    self._batch_env = SimulatedBatchEnv(*args, **kwargs)

    self._actions_t = tf.placeholder(shape=(self.batch_size,), dtype=tf.int32)
    self._rewards_t, self._dones_t = self._batch_env.simulate(self._actions_t)
    with tf.control_dependencies([self._rewards_t]):
      self._obs_t = self._batch_env.observ
    self._indices_t = tf.placeholder(shape=(self.batch_size,), dtype=tf.int32)
    self._reset_op = self._batch_env.reset(
        tf.range(self.batch_size, dtype=tf.int32)
    )

    self._sess = tf.Session()
    self._sess.run(tf.global_variables_initializer())
    self._batch_env.initialize(self._sess)
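A hedged sketch of how these ops would typically be driven (the driver code below is an assumption for illustration; only the tensors and ops come from the snippet, and env stands for the wrapper instance):

# Hypothetical usage; fetching obs together with rewards respects the
# control dependency that orders the observation read after simulate().
env._sess.run(env._reset_op)  # reset every environment in the batch
rewards, dones, obs = env._sess.run(
    [env._rewards_t, env._dones_t, env._obs_t],
    feed_dict={env._actions_t: [0] * env.batch_size})  # one action per env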
Example 9: testAccuracyTopKMetric
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int32 [as alias]
def testAccuracyTopKMetric(self):
  predictions = np.random.randint(1, 5, size=(12, 12, 12, 1))
  targets = np.random.randint(1, 5, size=(12, 12, 12, 1))
  expected = np.mean((predictions == targets).astype(float))
  with self.test_session() as session:
    predicted = tf.one_hot(predictions, depth=5, dtype=tf.float32)
    scores1, _ = metrics.padded_accuracy_topk(
        predicted, tf.constant(targets, dtype=tf.int32), k=1)
    scores2, _ = metrics.padded_accuracy_topk(
        predicted, tf.constant(targets, dtype=tf.int32), k=7)
    a1 = tf.reduce_mean(scores1)
    a2 = tf.reduce_mean(scores2)
    session.run(tf.global_variables_initializer())
    actual1, actual2 = session.run([a1, a2])
  self.assertAlmostEqual(actual1, expected)
  self.assertAlmostEqual(actual2, 1.0)
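Both assertions can be reasoned out directly: with k=1, top-k accuracy reduces to exact-match accuracy, which is exactly what the numpy expression for expected computes; with k=7 and a depth of only 5 classes, the top-7 set necessarily contains every class, so every target is trivially "within the top k" and the metric is exactly 1.0.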
Example 10: testSequenceEditDistanceMetric
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int32 [as alias]
def testSequenceEditDistanceMetric(self):
  predictions = np.array([[3, 4, 5, 1, 0, 0],
                          [2, 1, 3, 4, 0, 0],
                          [2, 1, 3, 4, 0, 0]])
  # Targets are just a bit different:
  #  - first sequence has a different prediction
  #  - second sequence has a different prediction and one extra step
  #  - third sequence is identical
  targets = np.array([[5, 4, 5, 1, 0, 0],
                      [2, 5, 3, 4, 1, 0],
                      [2, 1, 3, 4, 0, 0]])
  # Reshape to match expected input format by metric fns.
  predictions = np.reshape(predictions, [3, 6, 1, 1])
  targets = np.reshape(targets, [3, 6, 1, 1])
  with self.test_session() as session:
    scores, weight = metrics.sequence_edit_distance(
        tf.one_hot(predictions, depth=6, dtype=tf.float32),
        tf.constant(targets, dtype=tf.int32))
    session.run(tf.global_variables_initializer())
    actual_scores, actual_weight = session.run([scores, weight])
  self.assertAlmostEqual(actual_scores, 3.0 / 13)
  self.assertEqual(actual_weight, 13)
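The expected values check out by hand: the first sequence needs one substitution (3 -> 5), the second needs one substitution (1 -> 5) plus one insertion (the extra trailing 1), and the third needs no edits, giving 3 edits in total; the reference sequences contain 4 + 5 + 4 = 13 non-padding tokens, so the score is 3/13 with weight 13.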
Example 11: testNegativeLogPerplexityMaskedAssert
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int32 [as alias]
def testNegativeLogPerplexityMaskedAssert(self):
  predictions = np.random.randint(4, size=(12, 12, 12, 1))
  targets = np.random.randint(4, size=(12, 12, 12, 1))
  features = {}

  with self.assertRaisesRegexp(
      ValueError,
      'masked_neg_log_perplexity requires targets_mask feature'):
    with self.test_session() as session:
      scores, _ = metrics.padded_neg_log_perplexity_with_masking(
          tf.one_hot(predictions, depth=4, dtype=tf.float32),
          tf.constant(targets, dtype=tf.int32),
          features)
      a = tf.reduce_mean(scores)
      session.run(tf.global_variables_initializer())
      _ = session.run(a)
Example 12: testMultilabelMatch3
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int32 [as alias]
def testMultilabelMatch3(self):
  predictions = np.random.randint(1, 5, size=(100, 1, 1, 1))
  targets = np.random.randint(1, 5, size=(100, 10, 1, 1))
  weights = np.random.randint(0, 2, size=(100, 1, 1, 1))
  targets *= weights
  predictions_repeat = np.repeat(predictions, 10, axis=1)
  expected = (predictions_repeat == targets).astype(float)
  expected = np.sum(expected, axis=(1, 2, 3))
  expected = np.minimum(expected / 3.0, 1.)
  expected = np.sum(expected * weights[:, 0, 0, 0]) / weights.shape[0]
  with self.test_session() as session:
    scores, weights_ = metrics.multilabel_accuracy_match3(
        tf.one_hot(predictions, depth=5, dtype=tf.float32),
        tf.constant(targets, dtype=tf.int32))
    a, a_op = tf.metrics.mean(scores, weights_)
    session.run(tf.local_variables_initializer())
    session.run(tf.global_variables_initializer())
    _ = session.run(a_op)
    actual = session.run(a)
  self.assertAlmostEqual(actual, expected, places=6)
Example 13: testRougeLMetricE2E
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int32 [as alias]
def testRougeLMetricE2E(self):
  vocab_size = 4
  batch_size = 12
  seq_length = 12
  predictions = tf.one_hot(
      np.random.randint(vocab_size, size=(batch_size, seq_length, 1, 1)),
      depth=4,
      dtype=tf.float32)
  targets = np.random.randint(4, size=(12, 12, 1, 1))
  with self.test_session() as session:
    scores, _ = rouge.rouge_l_fscore(
        predictions,
        tf.constant(targets, dtype=tf.int32))
    a = tf.reduce_mean(scores)
    session.run(tf.global_variables_initializer())
    session.run(a)
Example 14: pad_batch
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int32 [as alias]
def pad_batch(features, batch_multiple):
  """Pad batch dim of features to nearest multiple of batch_multiple."""
  feature = list(features.items())[0][1]
  batch_size = tf.shape(feature)[0]
  mod = batch_size % batch_multiple
  has_mod = tf.cast(tf.cast(mod, tf.bool), tf.int32)
  batch_padding = batch_multiple * has_mod - mod

  padded_features = {}
  for k, feature in features.items():
    rank = len(feature.shape)
    paddings = [[0, 0] for _ in range(rank)]
    paddings[0][1] = batch_padding
    padded_feature = tf.pad(feature, paddings)
    padded_features[k] = padded_feature
  return padded_features
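A worked example of the padding arithmetic: for batch_size 10 and batch_multiple 4, mod = 2, has_mod = 1, and batch_padding = 4 * 1 - 2 = 2, so the batch is padded up to 12; when mod is 0, has_mod is 0 and nothing is added. A runnable check (the feature name and shape are illustrative):

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

features = {'inputs': tf.placeholder(tf.int32, shape=[None, 3])}
padded = pad_batch(features, batch_multiple=4)

with tf.Session() as sess:
  out = sess.run(padded['inputs'],
                 {features['inputs']: np.ones((10, 3), np.int32)})
  print(out.shape)  # (12, 3): two all-zero rows were appended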
Example 15: _create_slots
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import int32 [as alias]
def _create_slots(self, var_list):
  """Create slot variables for Adam with accumulated gradients."""
  first_var = min(var_list, key=lambda x: x.name)
  self._create_non_slot_variable(
      initial_value=self._beta1, name="beta1_power", colocate_with=first_var)
  self._create_non_slot_variable(
      initial_value=self._beta2, name="beta2_power", colocate_with=first_var)
  # If `iter` were initialized as an int32 variable, this optimizer could not
  # run with tensorflow_hub under a tensorflow-gpu build, so it is kept float.
  self._create_non_slot_variable(
      initial_value=0.0 if self._n == 1 else 1.0,
      name="iter",
      colocate_with=first_var)

  # Create slots for the first and second moments, as well as grad_acc.
  for v in var_list:
    self._zeros_slot(v, "m", self._name)
    self._zeros_slot(v, "v", self._name)
    self._zeros_slot(v, "grad_acc", self._name)