This page collects typical usage examples of the Python method tensorflow.compat.v1.bool. If you have been wondering what v1.bool is, what it is for, or how it is used in practice, the curated code examples below may help; you can also explore the other members of the tensorflow.compat.v1 module for context.
The following shows 15 code examples of v1.bool, ordered by popularity.
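Before the examples, a minimal orientation sketch (not taken from any of the projects below; names and values are chosen for illustration): tf.bool is TensorFlow's boolean dtype, and it most often appears when casting numeric tensors into masks, declaring boolean placeholders, or carrying done/finished flags through loops.
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # the snippets on this page assume graph-mode TF1 semantics

values = tf.constant([0.0, 1.5, 0.0, 2.0])
mask = tf.cast(values, tf.bool)        # non-zero values become True
kept = tf.boolean_mask(values, mask)   # keep only the True positions

with tf.Session() as sess:
  print(sess.run(mask))  # [False  True False  True]
  print(sess.run(kept))  # [1.5 2. ]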
Example 1: _rollout_metadata
# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import bool
def _rollout_metadata(batch_env, distributional_size=1):
"""Metadata for rollouts."""
batch_env_shape = batch_env.observ.get_shape().as_list()
batch_size = [batch_env_shape[0]]
value_size = batch_size
if distributional_size > 1:
value_size = batch_size + [distributional_size]
shapes_types_names = [
# TODO(piotrmilos): possibly retrieve the observation type for batch_env
(batch_size + batch_env_shape[1:], batch_env.observ_dtype, "observation"),
(batch_size, tf.float32, "reward"),
(batch_size, tf.bool, "done"),
(batch_size + list(batch_env.action_shape), batch_env.action_dtype,
"action"),
(batch_size, tf.float32, "pdf"),
(value_size, tf.float32, "value_function"),
]
return shapes_types_names
Example 2: set_recall
# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import bool
def set_recall(predictions, labels, weights_fn=common_layers.weights_nonzero):
"""Recall of set predictions.
Args:
predictions: A Tensor of scores of shape [batch, nlabels].
labels: A Tensor of int32s giving true set elements,
of shape [batch, seq_length].
weights_fn: A function to weight the elements.
Returns:
hits: A Tensor of shape [batch, nlabels].
weights: A Tensor of shape [batch, nlabels].
"""
with tf.variable_scope("set_recall", values=[predictions, labels]):
labels = tf.squeeze(labels, [2, 3])
weights = weights_fn(labels)
labels = tf.one_hot(labels, predictions.shape[-1])
labels = tf.reduce_max(labels, axis=1)
labels = tf.cast(labels, tf.bool)
return tf.to_float(tf.equal(labels, predictions)), weights
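A minimal sketch of the one_hot → reduce_max → cast(tf.bool) pattern used above to turn each row of label ids into a boolean set-membership vector (the padding/weighting handled by weights_fn is omitted; the values are illustrative):
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

labels = tf.constant([[1, 3, 0], [2, 2, 0]])    # two examples, padded label ids
one_hot = tf.one_hot(labels, 4)                 # [batch, seq_length, nlabels]
multi_hot = tf.reduce_max(one_hot, axis=1)      # [batch, nlabels]
membership = tf.cast(multi_hot, tf.bool)        # True where the class id occurs

with tf.Session() as sess:
  print(sess.run(membership))
  # [[ True  True False  True]
  #  [ True False  True False]]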
Example 3: pad_batch
# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import bool
def pad_batch(features, batch_multiple):
"""Pad batch dim of features to nearest multiple of batch_multiple."""
feature = list(features.items())[0][1]
batch_size = tf.shape(feature)[0]
mod = batch_size % batch_multiple
has_mod = tf.cast(tf.cast(mod, tf.bool), tf.int32)
batch_padding = batch_multiple * has_mod - mod
padded_features = {}
for k, feature in features.items():
rank = len(feature.shape)
paddings = [[0, 0] for _ in range(rank)]
paddings[0][1] = batch_padding
padded_feature = tf.pad(feature, paddings)
padded_features[k] = padded_feature
return padded_features
# TODO(lukaszkaiser): refactor the API to not be just a list of self params
# but make sense for other uses too.
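A hedged usage sketch of the pad_batch defined above, with a concrete batch of 5 padded up to a multiple of 4; the cast through tf.bool in has_mod is what keeps already-aligned batches untouched:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

features = {"x": tf.random_uniform([5, 3])}      # batch of 5 examples
padded = pad_batch(features, batch_multiple=4)
# mod = 5 % 4 = 1, has_mod = int32(bool(1)) = 1, padding = 4 * 1 - 1 = 3 rows.
# If batch_size were already a multiple of 4: mod = 0, has_mod = 0, padding = 0.

with tf.Session() as sess:
  print(sess.run(tf.shape(padded["x"])))  # [8 3]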
Example 4: revnet
# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import bool
def revnet(name, x, hparams, reverse=True):
"""'hparams.depth' steps of generative flow.
Args:
name: variable scope for the revnet block.
x: 4-D Tensor, shape=(NHWC).
hparams: HParams.
reverse: bool, forward or backward pass.
Returns:
x: 4-D Tensor, shape=(NHWC).
objective: float.
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
steps = np.arange(hparams.depth)
if reverse:
steps = steps[::-1]
objective = 0.0
for step in steps:
x, curr_obj = revnet_step(
"revnet_step_%d" % step, x, hparams, reverse=reverse)
objective += curr_obj
return x, objective
Example 5: lengths_to_area_mask
# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import bool
def lengths_to_area_mask(feature_length, length, max_area_size):
"""Generates a non-padding mask for areas based on lengths.
Args:
feature_length: a tensor of [batch_size]
length: the length of the batch
max_area_size: the maximum area size considered
Returns:
mask: a tensor in shape of [batch_size, num_areas]
"""
paddings = tf.cast(tf.expand_dims(
tf.logical_not(
tf.sequence_mask(feature_length, maxlen=length)), 2), tf.float32)
_, _, area_sum, _, _ = compute_area_features(paddings,
max_area_width=max_area_size)
mask = tf.squeeze(tf.logical_not(tf.cast(area_sum, tf.bool)), [2])
return mask
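Only the boolean-masking half of the function is sketched below (compute_area_features belongs to the surrounding area-attention utilities and is not reproduced here): tf.sequence_mask produces a tf.bool validity mask and tf.logical_not flips it into a padding indicator.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

feature_length = tf.constant([2, 4])                  # valid length per example
valid = tf.sequence_mask(feature_length, maxlen=4)    # tf.bool, True where valid
padding = tf.logical_not(valid)                       # True where padded

with tf.Session() as sess:
  print(sess.run(valid))
  # [[ True  True False False]
  #  [ True  True  True  True]]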
Example 6: __init__
# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import bool
def __init__(self, regularizers_to_group):
"""Creates an instance.
Args:
regularizers_to_group: A list of generic_regularizers.OpRegularizer
objects. Their regularization_vectors (alive_vectors) are expected to be
of the same length.
Raises:
ValueError: regularizers_to_group is not of length at least 2.
"""
if len(regularizers_to_group) < 2:
raise ValueError('Groups must be of at least size 2.')
self._regularization_vector = tf.add_n(
[r.regularization_vector for r in regularizers_to_group])
self._alive_vector = tf.cast(
tf.ones(self._regularization_vector.get_shape()[-1]), tf.bool)
Example 7: _flat_reconstruction_loss
# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import bool
def _flat_reconstruction_loss(self, flat_x_target, flat_rnn_output):
b_enc, b_dec = tf.split(
flat_rnn_output,
[self._nade.num_hidden, self._output_depth], axis=1)
ll, cond_probs = self._nade.log_prob(
flat_x_target, b_enc=b_enc, b_dec=b_dec)
r_loss = -ll
flat_truth = tf.cast(flat_x_target, tf.bool)
flat_predictions = tf.greater_equal(cond_probs, 0.5)
metric_map = {
'metrics/accuracy':
tf.metrics.mean(
tf.reduce_all(tf.equal(flat_truth, flat_predictions), axis=-1)),
'metrics/recall':
tf.metrics.recall(flat_truth, flat_predictions),
'metrics/precision':
tf.metrics.precision(flat_truth, flat_predictions),
}
return r_loss, metric_map
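A minimal sketch of the boolean comparison feeding those metrics: targets are cast to tf.bool, probabilities are thresholded at 0.5, and an example only counts as an exact match if every position agrees (values are illustrative):
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

targets = tf.constant([[1.0, 0.0, 1.0]])
cond_probs = tf.constant([[0.9, 0.2, 0.4]])
truth = tf.cast(targets, tf.bool)                   # [[ True False  True]]
preds = tf.greater_equal(cond_probs, 0.5)           # [[ True False False]]
exact = tf.reduce_all(tf.equal(truth, preds), axis=-1)

with tf.Session() as sess:
  print(sess.run(exact))  # [False] -- the last position was missed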
Example 8: get_placeholders
# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import bool
def get_placeholders(self):
hparams = self.hparams
return dict(
pianorolls=tf.placeholder(
tf.bool,
[None, None, hparams.num_pitches, hparams.num_instruments],
"pianorolls"),
# The default value is only used for checking if the completion masker
# should be invoked. It can't be used directly because the batch size
# and length of the pianorolls are unknown at graph construction time.
outer_masks=tf.placeholder_with_default(
np.zeros(
(1, 1, hparams.num_pitches, hparams.num_instruments),
dtype=np.float32),
[None, None, hparams.num_pitches, hparams.num_instruments],
"outer_masks"),
sample_steps=tf.placeholder_with_default(0, (), "sample_steps"),
total_gibbs_steps=tf.placeholder_with_default(
0, (), "total_gibbs_steps"),
current_step=tf.placeholder_with_default(0, (), "current_step"),
temperature=tf.placeholder_with_default(0.99, (), "temperature"))
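A hedged sketch of feeding such a tf.bool placeholder with a NumPy boolean array; the 4-D [batch, time, pitches, instruments] layout mirrors the pianorolls placeholder above, but the sizes here are made up:
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

pianorolls = tf.placeholder(tf.bool, [None, None, 4, 2], name="pianorolls")
notes_on = tf.reduce_sum(tf.cast(pianorolls, tf.int32))  # count the True cells

with tf.Session() as sess:
  feed = np.zeros((1, 3, 4, 2), dtype=bool)
  feed[0, 0, 2, 1] = True
  print(sess.run(notes_on, feed_dict={pianorolls: feed}))  # 1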
Example 9: get_signature_def
# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import bool
def get_signature_def(model, use_tf_sampling):
"""Creates a signature def for the SavedModel."""
if use_tf_sampling:
return tf.saved_model.signature_def_utils.predict_signature_def(
inputs={
'pianorolls': model.inputs['pianorolls'],
}, outputs={
'predictions': tf.cast(model.samples, tf.bool),
})
return tf.saved_model.signature_def_utils.predict_signature_def(
inputs={
'pianorolls': model.model.pianorolls,
'masks': model.model.masks,
'lengths': model.model.lengths,
}, outputs={
'predictions': model.model.predictions
})
Example 10: tracks_own_finished
# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import bool
def tracks_own_finished(self):
"""Describes whether the Decoder keeps track of finished states.
Most decoders will emit a true/false `finished` value independently
at each time step. In this case, the `dynamic_decode` function keeps track
of which batch entries are already finished, and performs a logical OR to
insert new batches to the finished set.
Some decoders, however, shuffle batches / beams between time steps and
`dynamic_decode` will mix up the finished state across these entries because
it does not track the reshuffle across time steps. In this case, it is
up to the decoder to declare that it will keep track of its own finished
state by setting this property to `True`.
Returns:
Python bool.
"""
return False
# TODO(scottzhu): Add build/get_config/from_config and other layer methods.
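A small sketch of the default bookkeeping the docstring describes: when tracks_own_finished is False, the decoding driver ORs each step's finished flags into a running tf.bool vector, so an entry can only ever move into the finished set.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

finished = tf.constant([False, True, False])        # accumulated so far
step_finished = tf.constant([True, False, False])   # emitted at this step
finished = tf.logical_or(finished, step_finished)   # the logical OR kept by the driver

with tf.Session() as sess:
  print(sess.run(finished))  # [ True  True False]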
Example 11: __init__
# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import bool
def __init__(self, sample_fn, sample_shape, sample_dtype,
start_inputs, end_fn, next_inputs_fn=None):
"""Initializer.
Args:
sample_fn: A callable that takes `outputs` and emits tensor `sample_ids`.
sample_shape: Either a list of integers, or a 1-D Tensor of type `int32`,
the shape of each sample in the batch returned by `sample_fn`.
sample_dtype: the dtype of the sample returned by `sample_fn`.
start_inputs: The initial batch of inputs.
end_fn: A callable that takes `sample_ids` and emits a `bool` vector
shaped `[batch_size]` indicating whether each sample is an end token.
next_inputs_fn: (Optional) A callable that takes `sample_ids` and returns
the next batch of inputs. If not provided, `sample_ids` is used as the
next batch of inputs.
"""
self._sample_fn = sample_fn
self._end_fn = end_fn
self._sample_shape = tf.TensorShape(sample_shape)
self._sample_dtype = sample_dtype
self._next_inputs_fn = next_inputs_fn
self._batch_size = tf.shape(start_inputs)[0]
self._start_inputs = tf.convert_to_tensor(
start_inputs, name="start_inputs")
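A hedged sketch of an end_fn that satisfies the contract above: it takes sample_ids and emits a tf.bool vector shaped [batch_size]. EOS_ID is a hypothetical end-of-sequence token id, not part of the original code.
import tensorflow.compat.v1 as tf

EOS_ID = 2  # hypothetical end-of-sequence id for this sketch

def end_fn(sample_ids):
  # True for every batch entry whose latest sample is the end token.
  return tf.equal(sample_ids, EOS_ID)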
Example 12: _build
# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import bool
def _build(self, inputs, labels):
def cond(i, unused_attack, success):
# If we are already successful, we break.
return tf.logical_and(i < self._num_restarts,
tf.logical_not(tf.reduce_all(success)))
def body(i, attack, success):
new_attack = self._inner_attack(inputs, labels)
new_success = self._inner_attack.success
# The first iteration always sets the attack.
use_new_values = tf.logical_or(tf.equal(i, 0), new_success)
return (i + 1,
tf.where(use_new_values, new_attack, attack),
tf.logical_or(success, new_success))
_, self._attack, self._success = tf.while_loop(
cond, body, back_prop=False, parallel_iterations=1,
loop_vars=[
tf.constant(0, dtype=tf.int32),
inputs,
tf.zeros([tf.shape(inputs)[0]], dtype=tf.bool),
])
self._logits = self._eval_fn(self._attack, mode='final')
return self._attack
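A self-contained sketch of the same loop shape: a per-example tf.bool success vector is ORed across restarts, and the tf.while_loop stops early once every entry has succeeded. The "attack" here is a trivial stand-in, not the library's inner attack.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

num_restarts = 5
batch = 3

def cond(i, success):
  return tf.logical_and(i < num_restarts,
                        tf.logical_not(tf.reduce_all(success)))

def body(i, success):
  # Stand-in for one attack attempt: restart i "solves" example i.
  new_success = tf.equal(tf.range(batch), i)
  return [i + 1, tf.logical_or(success, new_success)]

_, success = tf.while_loop(
    cond, body,
    loop_vars=[tf.constant(0), tf.zeros([batch], dtype=tf.bool)])

with tf.Session() as sess:
  print(sess.run(success))  # [ True  True  True] after 3 of the 5 restarts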
Example 13: sparse_reduce
# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import bool
def sparse_reduce(sp_tensor, rank, agg_fn="sum", axis=-1):
"""Reduce SparseTensor along the given axis.
Args:
sp_tensor: SparseTensor of arbitrary rank.
rank: Integer rank of the sparse tensor.
agg_fn: Reduce function for aggregation.
axis: Integer specifying axis to sum over.
Returns:
sp_tensor: SparseTensor of one less rank.
"""
if axis < 0:
axis = rank + axis
axes_to_keep = tf.one_hot(
axis, rank, on_value=False, off_value=True, dtype=tf.bool)
indices_to_keep = tf.boolean_mask(sp_tensor.indices, axes_to_keep, axis=1)
new_shape = tf.boolean_mask(sp_tensor.dense_shape, axes_to_keep)
indices_to_keep.set_shape([None, rank - 1])
indices, values = aggregate_sparse_indices(
indices_to_keep, sp_tensor.values, new_shape, agg_fn=agg_fn)
return tf.sparse.reorder(tf.SparseTensor(indices, values, new_shape))
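A small sketch of the tf.one_hot trick above: with on_value=False and off_value=True it builds a tf.bool selector that is False only at the reduced axis, which tf.boolean_mask then uses to drop that column of the sparse indices (values are illustrative):
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

rank, axis = 3, 1
axes_to_keep = tf.one_hot(axis, rank, on_value=False, off_value=True,
                          dtype=tf.bool)                 # [ True False  True]

indices = tf.constant([[0, 2, 1], [1, 0, 3]], dtype=tf.int64)
kept = tf.boolean_mask(indices, axes_to_keep, axis=1)    # drops column 1

with tf.Session() as sess:
  print(sess.run(kept))
  # [[0 1]
  #  [1 3]]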
Example 14: batch_boolean_mask
# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import bool
def batch_boolean_mask(mask):
"""Get indices of true values.
Args:
mask: [batch_size, num_values]
Returns:
true_indices: [batch_size, max_true]
gathered_mask: [batch_size, max_true]
"""
# [batch_size, num_values]
mask = tf.to_int32(mask)
# [batch_size]
num_true = tf.reduce_sum(mask, 1)
# []
max_true = tf.reduce_max(num_true)
# [batch_size, max_true]
gathered_mask, true_indices = tf.nn.top_k(mask, max_true)
gathered_mask = tf.cast(gathered_mask, tf.bool)
return gathered_mask, true_indices
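A hedged usage sketch of the batch_boolean_mask defined above. For ties, tf.nn.top_k keeps lower indices first, so rows with fewer True values are padded with their leftmost False positions, which gathered_mask then marks as False:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

mask = tf.constant([[True, False, True, False],
                    [False, False, True, False]])
gathered_mask, true_indices = batch_boolean_mask(mask)

with tf.Session() as sess:
  print(sess.run(true_indices))   # [[0 2]
                                  #  [2 0]]
  print(sess.run(gathered_mask))  # [[ True  True]
                                  #  [ True False]]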
Example 15: load_boolq_file
# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import bool
def load_boolq_file(filename, num_par=2):
"""Build a tf.data.Data from a file of boolq examples."""
tokenizer = tokenization.NltkTokenizer()
examples = []
with tf.gfile.Open(filename) as f:
for line in f:
obj = json.loads(line)
context = tokenizer.tokenize(obj["passage"])
if FLAGS.max_passage_len:
context = context[:FLAGS.max_passage_len]
question = tokenizer.tokenize(obj["question"])
examples.append((question, context, obj["answer"]))
def get_data():
out = list(examples)
np.random.shuffle(out)
return out
ds = tf.data.Dataset.from_generator(get_data, (tf.string, tf.string, tf.bool),
([None], [None], []))
def to_dict(p, h, label):
return {"hypothesis": p, "premise": h, "label": label}
return ds.map(to_dict, num_parallel_calls=num_par)
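A trimmed, self-contained sketch of the same tf.data pattern, with the file reading and NLTK tokenization replaced by a couple of hypothetical in-memory examples; the point is the (tf.string, tf.string, tf.bool) output signature used for the yes/no answers:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

examples = [(["is", "it", "true"], ["some", "passage", "tokens"], True),
            (["another", "question"], ["more", "tokens"], False)]

def gen():
  for question, context, answer in examples:
    yield question, context, answer

ds = tf.data.Dataset.from_generator(
    gen, (tf.string, tf.string, tf.bool), ([None], [None], []))
ds = ds.map(lambda p, h, label: {"hypothesis": p, "premise": h, "label": label})

batch = tf.data.make_one_shot_iterator(ds).get_next()
with tf.Session() as sess:
  print(sess.run(batch["label"]))  # True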