This article collects typical usage examples of the tensorflow.logical_or method in Python. If you have been wondering what tensorflow.logical_or does, how to call it, or what real uses of it look like, the curated code examples below should help. You can also explore further usage examples from the tensorflow module that this method belongs to.
The following presents 15 code examples of tensorflow.logical_or, ordered by popularity by default.
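Before diving into the examples, here is a minimal sketch of the API itself (assuming TensorFlow 2.x with eager execution; the values are made up): tf.logical_or computes the element-wise OR of two bool tensors, broadcasts like other binary ops, and also backs the `|` operator on tensors.

import tensorflow as tf

a = tf.constant([True, True, False, False])
b = tf.constant([True, False, True, False])

# Element-wise OR; the result is a bool tensor of the broadcast shape.
print(tf.logical_or(a, b))  # [ True  True  True False]

# The same op is bound to the `|` operator on bool tensors.
print(a | b)                # [ True  True  True False]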
Example 1: simulate
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import logical_or [as alias]
def simulate(self, action):
  with tf.name_scope("environment/simulate"):  # Do we need this?
    initializer = (tf.zeros_like(self._observ),
                   tf.fill((len(self),), 0.0), tf.fill((len(self),), False))

    def not_done_step(a, _):
      reward, done = self._batch_env.simulate(action)
      with tf.control_dependencies([reward, done]):
        # TODO(piotrmilos): possibly ignore envs with done
        r0 = tf.maximum(a[0], self._batch_env.observ)
        r1 = tf.add(a[1], reward)
        r2 = tf.logical_or(a[2], done)
        return (r0, r1, r2)

    simulate_ret = tf.scan(not_done_step, tf.range(self.skip),
                           initializer=initializer, parallel_iterations=1,
                           infer_shape=False)
    simulate_ret = [ret[-1, ...] for ret in simulate_ret]

    with tf.control_dependencies([self._observ.assign(simulate_ret[0])]):
      return tf.identity(simulate_ret[1]), tf.identity(simulate_ret[2])
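In this example tf.logical_or makes the done flag sticky across the self.skip sub-steps: once any sub-step reports done, the accumulated flag stays True. A standalone sketch of the same fold, with made-up step results (assuming TensorFlow 2.x eager execution):

import tensorflow as tf

# done flags from 4 consecutive sub-steps for a batch of 3 envs (made-up data)
step_dones = tf.constant([[False, False, False],
                          [False, True,  False],
                          [False, False, False],
                          [True,  False, False]])

# tf.scan folds logical_or over the steps, so a True is never un-set.
sticky = tf.scan(tf.logical_or, step_dones, initializer=tf.fill([3], False))
print(sticky[-1])  # [ True  True False]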
Example 2: _filter_input_rows
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import logical_or [as alias]
def _filter_input_rows(self, *row_parts) -> tf.bool:
    row_parts = self.model_input_tensors_former.from_model_input_form(row_parts)

    # assert all(tensor.shape == (self.config.MAX_CONTEXTS,) for tensor in
    #            {row_parts.path_source_token_indices, row_parts.path_indices,
    #             row_parts.path_target_token_indices, row_parts.context_valid_mask})

    # FIXME: Does "valid" here mean just "no padding" or "neither padding nor OOV"? I assumed just "no padding".
    any_word_valid_mask_per_context_part = [
        tf.not_equal(tf.reduce_max(row_parts.path_source_token_indices, axis=0),
                     self.vocabs.token_vocab.word_to_index[self.vocabs.token_vocab.special_words.PAD]),
        tf.not_equal(tf.reduce_max(row_parts.path_target_token_indices, axis=0),
                     self.vocabs.token_vocab.word_to_index[self.vocabs.token_vocab.special_words.PAD]),
        tf.not_equal(tf.reduce_max(row_parts.path_indices, axis=0),
                     self.vocabs.path_vocab.word_to_index[self.vocabs.path_vocab.special_words.PAD])]
    any_contexts_is_valid = reduce(tf.logical_or, any_word_valid_mask_per_context_part)  # scalar

    if self.estimator_action.is_evaluate:
        cond = any_contexts_is_valid  # scalar
    else:  # training
        word_is_valid = tf.greater(
            row_parts.target_index,
            self.vocabs.target_vocab.word_to_index[self.vocabs.target_vocab.special_words.OOV])  # scalar
        cond = tf.logical_and(word_is_valid, any_contexts_is_valid)  # scalar

    return cond  # scalar
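The functools.reduce(tf.logical_or, ...) idiom above folds a Python list of boolean masks into a single "any of these is valid" mask. A minimal sketch with made-up masks (TensorFlow 2.x eager execution assumed):

from functools import reduce
import tensorflow as tf

masks = [tf.constant([True, False, False]),
         tf.constant([False, False, True]),
         tf.constant([False, False, False])]

# Equivalent to masks[0] | masks[1] | masks[2]
any_valid = reduce(tf.logical_or, masks)
print(any_valid)  # [ True False  True]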
Example 3: Uniform
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import logical_or [as alias]
def Uniform(name=None):
    X = tf.placeholder(config.dtype, name=name)
    Distribution.logp = tf.fill(tf.shape(X), config.dtype(0))

    def integral(lower, upper):
        return tf.cond(
            tf.logical_or(
                tf.is_inf(tf.cast(lower, config.dtype)),
                tf.is_inf(tf.cast(upper, config.dtype))
            ),
            lambda: tf.constant(1, dtype=config.dtype),
            lambda: tf.cast(upper, config.dtype) - tf.cast(lower, config.dtype),
        )
    Distribution.integral = integral

    return X
Example 4: UniformInt
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import logical_or [as alias]
def UniformInt(name=None):
    X = tf.placeholder(config.int_dtype, name=name)
    Distribution.logp = tf.fill(tf.shape(X), config.dtype(0))

    def integral(lower, upper):
        val = tf.cond(
            tf.logical_or(
                tf.is_inf(tf.ceil(tf.cast(lower, config.dtype))),
                tf.is_inf(tf.floor(tf.cast(upper, config.dtype)))
            ),
            lambda: tf.constant(1, dtype=config.dtype),
            lambda: tf.cast(upper, config.dtype) - tf.cast(lower, config.dtype),
        )
        return val
    Distribution.integral = integral

    return X
Example 5: assert_binary
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import logical_or [as alias]
def assert_binary(tensor, name=None):
  """Asserts that all the values in the tensor are zeros or ones.

  Args:
    tensor: A tensor of shape `[A1, ..., An]` containing the values we want to
      check.
    name: A name for this op. Defaults to "assert_binary".

  Returns:
    The input tensor, with dependence on the assertion operator in the graph.

  Raises:
    tf.errors.InvalidArgumentError: If any of the values in the tensor is not
      zero or one.
  """
  if not FLAGS[tfg_flags.TFG_ADD_ASSERTS_TO_GRAPH].value:
    return tensor

  with tf.compat.v1.name_scope(name, 'assert_binary', [tensor]):
    tensor = tf.convert_to_tensor(value=tensor)
    condition = tf.reduce_all(
        input_tensor=tf.logical_or(tf.equal(tensor, 0), tf.equal(tensor, 1)))

    with tf.control_dependencies([tf.Assert(condition, data=[tensor])]):
      return tf.identity(tensor)
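Stripped of the flag and name-scope machinery, the assertion condition is simply "every element equals 0 or equals 1", built by OR-ing two element-wise comparisons and reducing. A self-contained sketch (TensorFlow 2.x eager execution assumed; is_binary is a hypothetical helper, not part of tensorflow_graphics):

import tensorflow as tf

def is_binary(tensor):
    # True iff every element is exactly 0 or exactly 1.
    return tf.reduce_all(tf.logical_or(tf.equal(tensor, 0),
                                       tf.equal(tensor, 1)))

print(is_binary(tf.constant([0., 1., 1.])))  # True
print(is_binary(tf.constant([0., 0.5])))     # False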
Example 6: _check_batch_beam
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import logical_or [as alias]
def _check_batch_beam(t, batch_size, beam_width):
  """Returns an Assert operation checking that the elements of the stacked
  TensorArray can be reshaped to [batch_size, beam_size, -1]. At this point,
  the TensorArray elements have a known rank of at least 1.
  """
  error_message = ("TensorArray reordering expects elements to be "
                   "reshapable to [batch_size, beam_size, -1] which is "
                   "incompatible with the dynamic shape of %s elements. "
                   "Consider setting reorder_tensor_arrays to False to disable "
                   "TensorArray reordering during the beam search."
                   % (t.name))
  rank = t.shape.ndims
  shape = tf.shape(t)
  if rank == 2:
    condition = tf.equal(shape[1], batch_size * beam_width)
  else:
    condition = tf.logical_or(
        tf.equal(shape[1], batch_size * beam_width),
        tf.logical_and(
            tf.equal(shape[1], batch_size),
            tf.equal(shape[2], beam_width)))
  return tf.Assert(condition, [error_message])
Example 7: Attention
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import logical_or [as alias]
def Attention(Q, K, V, monotonic_attention=False, prev_max_attentions=None):
    '''
    Args:
      Q: Queries. (B, T/r, d)
      K: Keys. (B, N, d)
      V: Values. (B, N, d)
      monotonic_attention: A boolean. At training, it is False.
      prev_max_attentions: (B,). At training, it is set to None.

    Returns:
      R: [Context Vectors; Q]. (B, T/r, 2d)
      alignments: (B, N, T/r)
      max_attentions: (B, T/r)
    '''
    A = tf.matmul(Q, K, transpose_b=True) * tf.rsqrt(tf.to_float(hp.d))
    if monotonic_attention:  # for inference
        key_masks = tf.sequence_mask(prev_max_attentions, hp.max_N)
        reverse_masks = tf.sequence_mask(hp.max_N - hp.attention_win_size - prev_max_attentions, hp.max_N)[:, ::-1]
        masks = tf.logical_or(key_masks, reverse_masks)
        masks = tf.tile(tf.expand_dims(masks, 1), [1, hp.max_T, 1])
        paddings = tf.ones_like(A) * (-2 ** 32 + 1)  # (B, T/r, N)
        A = tf.where(tf.equal(masks, False), A, paddings)
    A = tf.nn.softmax(A)  # (B, T/r, N)
    max_attentions = tf.argmax(A, -1)  # (B, T/r)
    R = tf.matmul(A, V)
    R = tf.concat((R, Q), -1)
    alignments = tf.transpose(A, [0, 2, 1])  # (B, N, T/r)
    return R, alignments, max_attentions
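The two tf.sequence_mask calls build a "before the window" mask and a reversed "after the window" mask; their logical_or marks every key position outside a sliding window starting at the previous attention peak. A minimal sketch with hypothetical sizes (N=8, window of 3, batch of 1; TensorFlow 2.x eager execution assumed):

import tensorflow as tf

max_N, win = 8, 3
prev_max = tf.constant([2])  # previous attention position per batch element

before = tf.sequence_mask(prev_max, max_N)                        # positions < 2
after = tf.sequence_mask(max_N - win - prev_max, max_N)[:, ::-1]  # positions >= 2 + 3
outside_window = tf.logical_or(before, after)
print(outside_window)
# [[ True  True False False False  True  True  True]] -> only positions 2..4 stay attendable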
Example 8: _get_values_from_start_and_end
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import logical_or [as alias]
def _get_values_from_start_and_end(self, input_tensor, num_start_samples,
                                   num_end_samples, total_num_samples):
  """Slices num_start_samples and last num_end_samples from input_tensor.

  Args:
    input_tensor: An int32 tensor of shape [N] to be sliced.
    num_start_samples: Number of examples to be sliced from the beginning
      of the input tensor.
    num_end_samples: Number of examples to be sliced from the end of the
      input tensor.
    total_num_samples: Sum of num_start_samples and num_end_samples. This
      should be a scalar.

  Returns:
    A tensor containing the first num_start_samples and last num_end_samples
    from input_tensor.
  """
  input_length = tf.shape(input_tensor)[0]
  start_positions = tf.less(tf.range(input_length), num_start_samples)
  end_positions = tf.greater_equal(
      tf.range(input_length), input_length - num_end_samples)
  selected_positions = tf.logical_or(start_positions, end_positions)
  selected_positions = tf.cast(selected_positions, tf.float32)
  indexed_positions = tf.multiply(tf.cumsum(selected_positions),
                                  selected_positions)
  one_hot_selector = tf.one_hot(tf.cast(indexed_positions, tf.int32) - 1,
                                total_num_samples,
                                dtype=tf.float32)
  return tf.cast(tf.tensordot(tf.cast(input_tensor, tf.float32),
                              one_hot_selector, axes=[0, 0]), tf.int32)
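Here tf.logical_or unions a "first num_start_samples positions" mask with a "last num_end_samples positions" mask. The selection step in isolation, with made-up sizes (TensorFlow 2.x eager execution assumed):

import tensorflow as tf

length, num_start, num_end = 6, 2, 1
idx = tf.range(length)

start_positions = tf.less(idx, num_start)                # [ True  True False False False False]
end_positions = tf.greater_equal(idx, length - num_end)  # [False False False False False  True]
selected = tf.logical_or(start_positions, end_positions)
print(selected)  # [ True  True False False False  True]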
Example 9: next_inputs
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import logical_or [as alias]
def next_inputs(self, time, outputs, state, sample_ids, name=None,
                reach_max_time=None):
    """Gets the inputs for next step."""
    finished = math_ops.equal(sample_ids, self._end_token)
    all_finished = math_ops.reduce_all(finished)
    if reach_max_time is not None:
        all_finished = tf.logical_or(all_finished, reach_max_time)

    if self._embedding_args_cnt == 1:
        del time, outputs  # unused by next_inputs_fn
        next_inputs = control_flow_ops.cond(
            all_finished,
            # If we're finished, the next_inputs value doesn't matter
            lambda: self._start_inputs,
            lambda: self._embedding_fn(sample_ids))
    elif self._embedding_args_cnt == 2:
        del outputs
        # Prepare the position embedding of the next step
        times = tf.ones(self._batch_size, dtype=tf.int32) * (time + 1)
        next_inputs = control_flow_ops.cond(
            all_finished,
            # If we're finished, the next_inputs value doesn't matter
            lambda: self._start_inputs,
            lambda: self._embedding_fn(sample_ids, times))

    return finished, next_inputs, state
Example 10: next_inputs
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import logical_or [as alias]
def next_inputs(self, time, outputs, state, sample_ids, name=None,
                reach_max_time=None):
    if self._use_finish:
        hard_ids = tf.argmax(sample_ids, axis=-1, output_type=tf.int32)
        finished = tf.equal(hard_ids, self._end_token)
    else:
        finished = tf.tile([False], [self._batch_size])
    all_finished = tf.reduce_all(finished)
    if reach_max_time is not None:
        all_finished = tf.logical_or(all_finished, reach_max_time)

    if self._stop_gradient:
        sample_ids = tf.stop_gradient(sample_ids)

    if self._embedding_args_cnt == 1:
        del time, outputs  # unused by next_inputs_fn
        next_inputs = tf.cond(
            all_finished,
            # If we're finished, the next_inputs value doesn't matter
            lambda: self._start_inputs,
            lambda: self._embedding_fn(soft_ids=sample_ids))
    elif self._embedding_args_cnt == 2:
        # Prepare the position embedding of the next step
        times = tf.ones(self._batch_size, dtype=tf.int32) * (time + 1)
        next_inputs = tf.cond(
            all_finished,
            # If we're finished, the next_inputs value doesn't matter
            lambda: self._start_inputs,
            lambda: self._embedding_fn(soft_ids=sample_ids, times=times))

    return (finished, next_inputs, state)
Example 11: __or__
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import logical_or [as alias]
def __or__(self, other):
    return tf.logical_or(self, other)
Example 12: __ror__
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import logical_or [as alias]
def __ror__(self, other):
    return tf.logical_or(other, self)
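Examples 11 and 12 mirror how TensorFlow wires Python's `|` operator to tf.logical_or on bool tensors: __or__ handles tensor | other, and __ror__ handles the reflected case where the left operand is not a tensor. A quick check (TensorFlow 2.x eager execution assumed):

import tensorflow as tf

t = tf.constant([True, False])
print(t | [False, True])  # __or__  -> [ True  True]
print([False, True] | t)  # __ror__ -> [ True  True]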
Example 13: nearest3
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import logical_or [as alias]
def nearest3(grid, idx, clip=False):
    with tf.variable_scope('NearestInterp'):
        _, h, w, d, f = grid.get_shape().as_list()
        x, y, z = idx[:, 1], idx[:, 2], idx[:, 3]
        g_val = tf.gather_nd(grid, tf.cast(tf.round(idx), 'int32'))
        if clip:
            x_inv = tf.logical_or(x < 0, x > h - 1)
            y_inv = tf.logical_or(y < 0, y > w - 1)
            z_inv = tf.logical_or(z < 0, z > d - 1)
            valid_idx = 1 - tf.to_float(tf.logical_or(tf.logical_or(x_inv, y_inv), z_inv))
            g_val = g_val * valid_idx[tf.newaxis, ...]
        return g_val
Example 14: set_logp_to_neg_inf
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import logical_or [as alias]
def set_logp_to_neg_inf(X, logp, bounds):
    """Set `logp` to negative infinity when `X` is outside the allowed bounds.

    # Arguments
        X: tensorflow.Tensor
            The variable to apply the bounds to
        logp: tensorflow.Tensor
            The log probability corresponding to `X`
        bounds: list of `Region` objects
            The regions corresponding to allowed regions of `X`

    # Returns
        logp: tensorflow.Tensor
            The newly bounded log probability
    """
    conditions = []
    for l, u in bounds:
        lower_is_neg_inf = not isinstance(l, tf.Tensor) and np.isneginf(l)
        upper_is_pos_inf = not isinstance(u, tf.Tensor) and np.isposinf(u)

        if not lower_is_neg_inf and upper_is_pos_inf:
            conditions.append(tf.greater(X, l))
        elif lower_is_neg_inf and not upper_is_pos_inf:
            conditions.append(tf.less(X, u))
        elif not (lower_is_neg_inf or upper_is_pos_inf):
            conditions.append(tf.logical_and(tf.greater(X, l), tf.less(X, u)))

    if len(conditions) > 0:
        is_inside_bounds = conditions[0]
        for condition in conditions[1:]:
            is_inside_bounds = tf.logical_or(is_inside_bounds, condition)

        logp = tf.where(
            is_inside_bounds,
            logp,
            tf.fill(tf.shape(X), config.dtype(-np.inf))
        )

    return logp
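The loop ORs together one "inside this region" condition per allowed interval, then masks logp to -inf wherever none of them holds. A minimal sketch with made-up bounds (TensorFlow 2.x eager execution assumed):

import numpy as np
import tensorflow as tf

X = tf.constant([-1.0, 0.5, 2.0])
logp = tf.constant([0.1, 0.2, 0.3])

# Inside either (0, 1) or (1.5, 3): OR of two interval tests.
in_first = tf.logical_and(tf.greater(X, 0.0), tf.less(X, 1.0))
in_second = tf.logical_and(tf.greater(X, 1.5), tf.less(X, 3.0))
inside = tf.logical_or(in_first, in_second)

bounded_logp = tf.where(inside, logp, tf.fill(tf.shape(X), -np.inf))
print(bounded_logp)  # [-inf  0.2  0.3]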
Example 15: version_10
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import logical_or [as alias]
def version_10(cls, node, **kwargs):
  x = kwargs["tensor_dict"][node.inputs[0]]
  x_shape = tf_shape(x)
  scales = kwargs["tensor_dict"][node.inputs[1]]

  n_in_scales_is_one = tf.equal(scales[0], 1)
  c_in_scales_is_one = tf.logical_or(tf.equal(scales[1], 1),
                                     tf.equal(scales[3], 1))
  assert_n_c_in_scales_are_ones = tf.Assert(
      tf.logical_and(n_in_scales_is_one, c_in_scales_is_one), [scales])

  with tf.control_dependencies([assert_n_c_in_scales_are_ones]):
    x_in_NCHW_format = tf.equal(scales[1], 1)
    h_w_scale = tf.where(x_in_NCHW_format, scales[2:], scales[1:3])
    h_w_shape = tf.where(x_in_NCHW_format, x_shape[2:], x_shape[1:3])
    new_h_w_shape = tf.cast(h_w_scale * tf.cast(h_w_shape, scales.dtype),
                            tf.int32)

    mode = node.attrs.get("mode", "nearest")
    if mode.lower() == "linear":
      mode = tf.image.ResizeMethod.BILINEAR
    else:
      mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR

    def process_NCHW_format(x):
      x_t = tf.transpose(x, perm=[0, 2, 3, 1])
      y = tf.image.resize(x_t, size=new_h_w_shape, method=mode)
      y_t = tf.transpose(y, perm=[0, 3, 1, 2])
      return y_t

    def process_NHWC_format(x):
      y = tf.image.resize(x, size=new_h_w_shape, method=mode)
      return y

    output = tf.cond(x_in_NCHW_format, lambda: process_NCHW_format(x),
                     lambda: process_NHWC_format(x))

    return [output]