This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.logical_or. If you have been wondering what exactly logical_or does, how to call it, or what it looks like in real code, the hand-picked examples below may help.
A total of 15 code examples of logical_or are shown below, sorted by popularity by default.
Example 1: body
def body(time, outputs_ta, state, inputs, finished, sequence_lengths):
"""Internal while_loop body.
Args:
time: scalar int32 tensor.
outputs_ta: structure of TensorArray.
state: (structure of) state tensors and TensorArrays.
inputs: (structure of) input tensors.
finished: bool tensor (keeping track of what's finished).
sequence_lengths: int32 tensor (keeping track of time of finish).
Returns:
`(time + 1, outputs_ta, next_state, next_inputs, next_finished,
next_sequence_lengths)`.
"""
(next_outputs, decoder_state, next_inputs,
decoder_finished) = decoder.step(time, inputs, state)
next_finished = math_ops.logical_or(decoder_finished, finished)
if maximum_iterations is not None:
next_finished = math_ops.logical_or(
next_finished, time + 1 >= maximum_iterations)
next_sequence_lengths = array_ops.where(
math_ops.logical_and(math_ops.logical_not(finished), next_finished),
array_ops.fill(array_ops.shape(sequence_lengths), time + 1),
sequence_lengths)
nest.assert_same_structure(state, decoder_state)
nest.assert_same_structure(outputs_ta, next_outputs)
nest.assert_same_structure(inputs, next_inputs)
# Zero out output values past finish
if impute_finished:
emit = nest.map_structure(
lambda out, zero: array_ops.where(finished, zero, out),
next_outputs,
zero_outputs)
else:
emit = next_outputs
# Copy through states past finish
def _maybe_copy_state(new, cur):
# TensorArrays and scalar states get passed through.
if isinstance(cur, tensor_array_ops.TensorArray):
pass_through = True
else:
new.set_shape(cur.shape)
pass_through = (new.shape.ndims == 0)
return new if pass_through else array_ops.where(finished, cur, new)
if impute_finished:
next_state = nest.map_structure(
_maybe_copy_state, decoder_state, state)
else:
next_state = decoder_state
outputs_ta = nest.map_structure(lambda ta, out: ta.write(time, out),
outputs_ta, emit)
return (time + 1, outputs_ta, next_state, next_inputs, next_finished,
next_sequence_lengths)
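A minimal, self-contained sketch of the `logical_or` pattern in Example 1: the running `finished` mask is OR-ed with the decoder's own finished flag and with a `maximum_iterations` cutoff, and the finishing time is recorded only for entries that finished on this step. The tensors below are invented for illustration, and the public TF API (`tf.logical_or`, `tf.fill`, `tf.where`) stands in for the internal `math_ops`/`array_ops` modules.
```python
import tensorflow as tf

# Hypothetical stand-ins for the loop variables in Example 1.
time = tf.constant(2)
maximum_iterations = 3
finished = tf.constant([False, True, False])           # already finished last step
decoder_finished = tf.constant([True, False, False])   # finished this step
sequence_lengths = tf.constant([0, 2, 0])

next_finished = tf.logical_or(decoder_finished, finished)
# Force everything to finish once the iteration budget is exhausted.
next_finished = tf.logical_or(next_finished, time + 1 >= maximum_iterations)

# Record the finishing time only for entries that finished on this step.
just_finished = tf.logical_and(tf.logical_not(finished), next_finished)
next_sequence_lengths = tf.where(
    just_finished, tf.fill(tf.shape(sequence_lengths), time + 1), sequence_lengths)
```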
Example 2: _decode
def _decode(self, image_buffer, image_format):
"""Decodes the image buffer.
Args:
image_buffer: The tensor representing the encoded image tensor.
image_format: The image format for the image in `image_buffer`.
Returns:
A decoded image.
"""
def decode_png():
return image_ops.decode_png(image_buffer, self._channels)
def decode_raw():
return parsing_ops.decode_raw(image_buffer, dtypes.uint8)
def decode_jpg():
return image_ops.decode_jpeg(image_buffer, self._channels)
image = control_flow_ops.case({
math_ops.logical_or(math_ops.equal(image_format, 'png'),
math_ops.equal(image_format, 'PNG')): decode_png,
math_ops.logical_or(math_ops.equal(image_format, 'raw'),
math_ops.equal(image_format, 'RAW')): decode_raw,
}, default=decode_jpg, exclusive=True)
image.set_shape([None, None, self._channels])
if self._shape is not None:
image = array_ops.reshape(image, self._shape)
return image
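The same dispatch can be sketched with the public API: `tf.logical_or` makes the format comparison accept both spellings, and `tf.case` picks the matching decoder. The byte string and the 4x4 RGB shape below are assumptions made purely for illustration, not part of the original example.
```python
import tensorflow as tf

# Illustrative inputs: a 4x4 RGB image stored as raw bytes, plus its format string.
image_format = tf.constant('RAW')
image_buffer = tf.constant(bytes(4 * 4 * 3))  # 48 zero bytes standing in for raw pixel data

is_png = tf.logical_or(tf.equal(image_format, 'png'), tf.equal(image_format, 'PNG'))
is_raw = tf.logical_or(tf.equal(image_format, 'raw'), tf.equal(image_format, 'RAW'))

image = tf.case(
    [(is_png, lambda: tf.image.decode_png(image_buffer, channels=3)),
     (is_raw, lambda: tf.reshape(tf.io.decode_raw(image_buffer, tf.uint8), [4, 4, 3]))],
    default=lambda: tf.image.decode_jpeg(image_buffer, channels=3),
    exclusive=True)
print(image.shape)  # (4, 4, 3)
```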
Example 3: _decode
def _decode(self, image_buffer, image_format):
"""Decodes the image buffer.
Args:
image_buffer: The tensor representing the encoded image tensor.
image_format: The image format for the image in `image_buffer`.
Returns:
A tensor that represents decoded image of self._shape, or
(?, ?, self._channels) if self._shape is not specified.
"""
def decode_png():
return image_ops.decode_png(
image_buffer, self._channels, dtype=self._dtype)
def decode_raw():
return parsing_ops.decode_raw(image_buffer, out_type=self._dtype)
def decode_jpg():
if self._dtype != dtypes.uint8:
raise ValueError(
'jpeg decoder can only be used to decode to tf.uint8 but %s was '
'requested for a jpeg image.' % self._dtype)
return image_ops.decode_jpeg(image_buffer, self._channels)
# For RGBA images JPEG is not a valid decoder option.
if self._channels > 3:
pred_fn_pairs = {
math_ops.logical_or(
math_ops.equal(image_format, 'raw'),
math_ops.equal(image_format, 'RAW')): decode_raw,
}
default_decoder = decode_png
else:
pred_fn_pairs = {
math_ops.logical_or(
math_ops.equal(image_format, 'png'),
math_ops.equal(image_format, 'PNG')): decode_png,
math_ops.logical_or(
math_ops.equal(image_format, 'raw'),
math_ops.equal(image_format, 'RAW')): decode_raw,
}
default_decoder = decode_jpg
image = control_flow_ops.case(
pred_fn_pairs, default=default_decoder, exclusive=True)
image.set_shape([None, None, self._channels])
if self._shape is not None:
image = array_ops.reshape(image, self._shape)
return image
Example 4: _decode
def _decode(self, image_buffer, image_format):
"""Decodes the image buffer.
Args:
image_buffer: The tensor representing the encoded image tensor.
image_format: The image format for the image in `image_buffer`. If image
format is `raw`, all images are expected to be in this format, otherwise
this op can decode a mix of `jpg` and `png` formats.
Returns:
A tensor that represents decoded image of self._shape, or
(?, ?, self._channels) if self._shape is not specified.
"""
def decode_image():
"""Decodes a png or jpg based on the headers."""
return image_ops.decode_image(image_buffer, self._channels)
def decode_raw():
"""Decodes a raw image."""
return parsing_ops.decode_raw(image_buffer, out_type=self._dtype)
pred_fn_pairs = {
math_ops.logical_or(
math_ops.equal(image_format, 'raw'),
math_ops.equal(image_format, 'RAW')): decode_raw,
}
image = control_flow_ops.case(
pred_fn_pairs, default=decode_image, exclusive=True)
image.set_shape([None, None, self._channels])
if self._shape is not None:
image = array_ops.reshape(image, self._shape)
return image
Example 5: body
def body(time, outputs_ta, state, inputs, finished):
"""Internal while_loop body.
Args:
time: scalar int32 tensor.
outputs_ta: structure of TensorArray.
state: (structure of) state tensors and TensorArrays.
inputs: (structure of) input tensors.
finished: 1-D bool tensor.
Returns:
`(time + 1, outputs_ta, next_state, next_inputs, next_finished)`.
"""
(next_outputs, decoder_state, next_inputs, decoder_finished) = decoder.step(
time, inputs, state)
next_finished = math_ops.logical_or(decoder_finished, finished)
nest.assert_same_structure(state, decoder_state)
nest.assert_same_structure(outputs_ta, next_outputs)
nest.assert_same_structure(inputs, next_inputs)
# Zero out output values past finish
emit = nest.map_structure(
lambda out, zero: array_ops.where(finished, zero, out), next_outputs,
zero_outputs)
# Copy through states past finish
def _maybe_copy_state(new, cur):
return (new if isinstance(cur, tensor_array_ops.TensorArray) else
array_ops.where(finished, cur, new))
next_state = nest.map_structure(_maybe_copy_state, decoder_state, state)
outputs_ta = nest.map_structure(lambda ta, out: ta.write(time, out),
outputs_ta, emit)
return (time + 1, outputs_ta, next_state, next_inputs, next_finished)
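A small sketch of the "zero out past finish" step in Example 5: once an entry's `finished` flag is set (accumulated with `logical_or`), its emitted output is replaced by zeros so later iterations contribute nothing. Values are invented; the public `tf.where` replaces `array_ops.where`, with the mask expanded so it selects whole rows.
```python
import tensorflow as tf

finished = tf.constant([True, False])
decoder_finished = tf.constant([False, True])
next_outputs = tf.constant([[1.0, 2.0], [3.0, 4.0]])
zero_outputs = tf.zeros_like(next_outputs)

next_finished = tf.logical_or(decoder_finished, finished)
# Entries that were already finished before this step emit zeros.
emit = tf.where(finished[:, tf.newaxis], zero_outputs, next_outputs)
# emit == [[0., 0.], [3., 4.]]
```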
Example 6: pdf
def pdf(self, x, name="pdf"):
"""The PDF of observations in `x` under these Uniform distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `a` and `b`.
name: The name to give this op.
Returns:
pdf: tensor of dtype `dtype`, the pdf values of `x`. If `x` is `nan`, will
return `nan`.
"""
with ops.name_scope(self.name):
with ops.op_scope([self.a, self.b, x], name):
x = ops.convert_to_tensor(x, name="x")
if x.dtype != self.dtype:
raise TypeError("Input x dtype does not match dtype: %s vs. %s" %
(x.dtype, self.dtype))
broadcasted_x = x * self._ones()
return math_ops.select(
math_ops.is_nan(broadcasted_x), broadcasted_x, math_ops.select(
math_ops.logical_or(broadcasted_x < self.a,
broadcasted_x > self.b),
array_ops.zeros_like(broadcasted_x),
(1.0 / self.range()) * array_ops.ones_like(broadcasted_x)))
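The support check in Example 6 can be verified numerically with the public API: points outside `[a, b]` (detected with a single `logical_or`) get density 0, points inside get `1 / (b - a)`, and NaNs pass through unchanged. The constants are illustrative.
```python
import tensorflow as tf

a, b = 2.0, 4.0
x = tf.constant([1.0, 3.0, 5.0, float('nan')])

outside = tf.logical_or(x < a, x > b)
pdf = tf.where(tf.math.is_nan(x), x,
               tf.where(outside, tf.zeros_like(x), tf.ones_like(x) / (b - a)))
# pdf == [0.0, 0.5, 0.0, nan]
```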
Example 7: body
def body(time, elements_finished, current_input, emit_ta, state, loop_state):
"""Internal while loop body for raw_rnn.
Args:
time: time scalar.
elements_finished: batch-size vector.
current_input: possibly nested tuple of input tensors.
emit_ta: possibly nested tuple of output TensorArrays.
state: possibly nested tuple of state tensors.
loop_state: possibly nested tuple of loop state tensors.
Returns:
Tuple having the same size as Args but with updated values.
"""
(next_output, cell_state) = cell(current_input, state)
nest.assert_same_structure(state, cell_state)
nest.assert_same_structure(cell.output_size, next_output)
next_time = time + 1
(next_finished, next_input, next_state, emit_output, next_loop_state) = loop_fn(
next_time, next_output, cell_state, loop_state
)
nest.assert_same_structure(state, next_state)
nest.assert_same_structure(current_input, next_input)
nest.assert_same_structure(emit_ta, emit_output)
# If loop_fn returns None for next_loop_state, just reuse the
# previous one.
loop_state = loop_state if next_loop_state is None else next_loop_state
def _copy_some_through(current, candidate):
"""Copy some tensors through via array_ops.where."""
current_flat = nest.flatten(current)
candidate_flat = nest.flatten(candidate)
# pylint: disable=g-long-lambda,cell-var-from-loop
result_flat = [
_on_device(
lambda: array_ops.where(elements_finished, current_i, candidate_i), device=candidate_i.op.device
)
for (current_i, candidate_i) in zip(current_flat, candidate_flat)
]
# pylint: enable=g-long-lambda,cell-var-from-loop
return nest.pack_sequence_as(structure=current, flat_sequence=result_flat)
emit_output = _copy_some_through(zero_emit, emit_output)
next_state = _copy_some_through(state, next_state)
emit_output_flat = nest.flatten(emit_output)
emit_ta_flat = nest.flatten(emit_ta)
elements_finished = math_ops.logical_or(elements_finished, next_finished)
emit_ta_flat = [ta.write(time, emit) for (ta, emit) in zip(emit_ta_flat, emit_output_flat)]
emit_ta = nest.pack_sequence_as(structure=emit_structure, flat_sequence=emit_ta_flat)
return (next_time, elements_finished, next_input, emit_ta, next_state, loop_state)
Example 8: _dynamic_rank_in
def _dynamic_rank_in(actual_rank, given_ranks):
if len(given_ranks) < 1:
return ops.convert_to_tensor(False)
result = math_ops.equal(given_ranks[0], actual_rank)
for given_rank in given_ranks[1:]:
result = math_ops.logical_or(
result, math_ops.equal(given_rank, actual_rank))
return result
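Example 8 folds repeated `logical_or`s into a membership test over a dynamic rank. A usage sketch, with the helper renamed (the public-API function and the tensor below are illustrative, not the original private code):
```python
import tensorflow as tf

def dynamic_rank_in(actual_rank, given_ranks):
    """Returns a scalar bool tensor: is `actual_rank` one of `given_ranks`?"""
    if not given_ranks:
        return tf.constant(False)
    result = tf.equal(given_ranks[0], actual_rank)
    for given_rank in given_ranks[1:]:
        result = tf.logical_or(result, tf.equal(given_rank, actual_rank))
    return result

x = tf.zeros([3, 4])
print(dynamic_rank_in(tf.rank(x), [1, 2, 3]))  # tf.Tensor(True, ...)
```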
Example 9: next_inputs
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(name, "ScheduledOutputTrainingHelperNextInputs",
[time, outputs, state, sample_ids]):
(finished, base_next_inputs, state) = (
super(ScheduledOutputTrainingHelper, self).next_inputs(
time=time,
outputs=outputs,
state=state,
sample_ids=sample_ids,
name=name))
sample_ids = math_ops.cast(sample_ids, dtypes.bool)
def maybe_sample():
"""Perform scheduled sampling."""
def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):
"""Concatenate outputs with auxiliary inputs, if they exist."""
if self._auxiliary_input_tas is None:
return outputs_
next_time = time + 1
auxiliary_inputs = nest.map_structure(
lambda ta: ta.read(next_time), self._auxiliary_input_tas)
if indices is not None:
auxiliary_inputs = array_ops.gather_nd(auxiliary_inputs, indices)
return nest.map_structure(
lambda x, y: array_ops.concat((x, y), -1),
outputs_, auxiliary_inputs)
if self._next_inputs_fn is None:
return array_ops.where(
sample_ids, maybe_concatenate_auxiliary_inputs(outputs),
base_next_inputs)
where_sampling = math_ops.cast(
array_ops.where(sample_ids), dtypes.int32)
where_not_sampling = math_ops.cast(
array_ops.where(math_ops.logical_not(sample_ids)), dtypes.int32)
outputs_sampling = array_ops.gather_nd(outputs, where_sampling)
inputs_not_sampling = array_ops.gather_nd(base_next_inputs,
where_not_sampling)
sampled_next_inputs = maybe_concatenate_auxiliary_inputs(
self._next_inputs_fn(outputs_sampling), where_sampling)
base_shape = array_ops.shape(base_next_inputs)
return (array_ops.scatter_nd(indices=where_sampling,
updates=sampled_next_inputs,
shape=base_shape)
+ array_ops.scatter_nd(indices=where_not_sampling,
updates=inputs_not_sampling,
shape=base_shape))
all_finished = math_ops.reduce_all(finished)
no_samples = math_ops.logical_not(math_ops.reduce_any(sample_ids))
next_inputs = control_flow_ops.cond(
math_ops.logical_or(all_finished, no_samples),
lambda: base_next_inputs, maybe_sample)
return (finished, next_inputs, state)
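The final `cond` is the key `logical_or` use in Example 9: sampling is skipped entirely when either every sequence is finished or no position was selected for sampling. A stripped-down sketch of that gate, with placeholder values and a dummy branch standing in for the real `maybe_sample`:
```python
import tensorflow as tf

finished = tf.constant([True, True])
sample_ids = tf.constant([False, False])
base_next_inputs = tf.constant([[1.0], [2.0]])

all_finished = tf.reduce_all(finished)
no_samples = tf.logical_not(tf.reduce_any(sample_ids))
next_inputs = tf.cond(
    tf.logical_or(all_finished, no_samples),
    lambda: base_next_inputs,
    lambda: base_next_inputs * 0.0)  # stand-in for the real `maybe_sample` branch
```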
Example 10: _prob
def _prob(self, x):
broadcasted_x = x * array_ops.ones(self.batch_shape_tensor())
return array_ops.where(
math_ops.is_nan(broadcasted_x),
broadcasted_x,
array_ops.where(
math_ops.logical_or(broadcasted_x < self.low,
broadcasted_x >= self.high),
array_ops.zeros_like(broadcasted_x),
array_ops.ones_like(broadcasted_x) / self.range()))
Example 11: _prob
def _prob(self, x):
broadcasted_x = x * array_ops.ones(self.batch_shape())
return array_ops.where(
math_ops.is_nan(broadcasted_x),
broadcasted_x,
array_ops.where(
math_ops.logical_or(broadcasted_x < self.a,
broadcasted_x > self.b),
array_ops.zeros_like(broadcasted_x),
(1. / self.range()) * array_ops.ones_like(broadcasted_x)))
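Examples 10 and 11 differ only in the right boundary of the support test: Example 10 treats the interval as half-open (`x >= high` is outside), while Example 11 treats it as closed (`x > b` is outside). A quick check of the two `logical_or` conditions, with illustrative values:
```python
import tensorflow as tf

low, high = 0.0, 1.0
x = tf.constant([-0.5, 0.5, 1.0])

outside_half_open = tf.logical_or(x < low, x >= high)  # [True, False, True]
outside_closed = tf.logical_or(x < low, x > high)      # [True, False, False]
```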
Example 12: _decode
def _decode(self, image_buffer, image_format):
"""Decodes the image buffer.
Args:
image_buffer: The tensor representing the encoded image tensor.
image_format: The image format for the image in `image_buffer`. If image
format is `raw`, all images are expected to be in this format, otherwise
this op can decode a mix of `jpg` and `png` formats.
Returns:
A tensor that represents decoded image of self._shape, or
(?, ?, self._channels) if self._shape is not specified.
"""
def decode_image():
"""Decodes a image based on the headers."""
return math_ops.cast(
image_ops.decode_image(image_buffer, channels=self._channels),
self._dtype)
def decode_jpeg():
"""Decodes a jpeg image with specified '_dct_method'."""
return math_ops.cast(
image_ops.decode_jpeg(
image_buffer,
channels=self._channels,
dct_method=self._dct_method), self._dtype)
def check_jpeg():
"""Checks if an image is jpeg."""
# For jpeg, we directly use image_ops.decode_jpeg rather than decode_image
# in order to feed the jpeg-specific parameter 'dct_method'.
return control_flow_ops.cond(
image_ops.is_jpeg(image_buffer),
decode_jpeg,
decode_image,
name='cond_jpeg')
def decode_raw():
"""Decodes a raw image."""
return parsing_ops.decode_raw(image_buffer, out_type=self._dtype)
pred_fn_pairs = {
math_ops.logical_or(
math_ops.equal(image_format, 'raw'),
math_ops.equal(image_format, 'RAW')): decode_raw,
}
image = control_flow_ops.case(
pred_fn_pairs, default=check_jpeg, exclusive=True)
image.set_shape([None, None, self._channels])
if self._shape is not None:
image = array_ops.reshape(image, self._shape)
return image
Example 13: max_reduce_fn
def max_reduce_fn(state, value):
"""Computes the maximum shape to pad to."""
condition = math_ops.reduce_all(
math_ops.logical_or(
math_ops.less_equal(value.dense_shape, padded_shape),
math_ops.equal(padded_shape, -1)))
assert_op = control_flow_ops.Assert(condition, [
"Actual shape greater than padded shape: ", value.dense_shape,
padded_shape
])
with ops.control_dependencies([assert_op]):
return math_ops.maximum(state, value.dense_shape)
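Example 13 uses `logical_or` to let `-1` act as a wildcard in `padded_shape`: a dimension passes either because the actual dense shape fits, or because that dimension of `padded_shape` is `-1`. A minimal sketch of the condition, with made-up shapes:
```python
import tensorflow as tf

dense_shape = tf.constant([5, 7], dtype=tf.int64)
padded_shape = tf.constant([8, -1], dtype=tf.int64)

fits = tf.reduce_all(
    tf.logical_or(tf.less_equal(dense_shape, padded_shape),
                  tf.equal(padded_shape, -1)))
# fits == True: dim 0 fits (5 <= 8) and dim 1 is the -1 wildcard.
```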
Example 14: softplus_inverse
def softplus_inverse(x, name=None):
"""Computes the inverse softplus, i.e., x = softplus_inverse(softplus(x)).
Mathematically this op is equivalent to:
```none
softplus_inverse = log(exp(x) - 1.)
```
Args:
x: `Tensor`. Non-negative (not enforced), floating-point.
name: A name for the operation (optional).
Returns:
`Tensor`. Has the same type/shape as input `x`.
"""
with ops.name_scope(name, "softplus_inverse", values=[x]):
x = ops.convert_to_tensor(x, name="x")
# We begin by deriving a more numerically stable softplus_inverse:
# x = softplus(y) = Log[1 + exp{y}], (which means x > 0).
# ==> exp{x} = 1 + exp{y} (1)
# ==> y = Log[exp{x} - 1] (2)
# = Log[(exp{x} - 1) / exp{x}] + Log[exp{x}]
# = Log[(1 - exp{-x}) / 1] + Log[exp{x}]
# = Log[1 - exp{-x}] + x (3)
# (2) is the "obvious" inverse, but (3) is more stable than (2) for large x.
# For small x (e.g. x = 1e-10), (3) will become -inf since 1 - exp{-x} will
# be zero. To fix this, we use 1 - exp{-x} approx x for small x > 0.
#
# In addition to the numerically stable derivation above, we clamp
# small/large values to be congruent with the logic in:
# tensorflow/core/kernels/softplus_op.h
#
# Finally, we set the input to one whenever the input is too large or too
# small. This ensures that no unchosen codepath is +/- inf. This is
# necessary to ensure the gradient doesn't get NaNs. Recall that the
# gradient of `where` behaves like `pred*pred_true + (1-pred)*pred_false`
# thus an `inf` in an unselected path results in `0*inf=nan`. We are careful
# to overwrite `x` with ones only when we will never actually use this
# value. Note that we use ones and not zeros since `log(expm1(0.)) = -inf`.
threshold = np.log(np.finfo(x.dtype.as_numpy_dtype).eps) + 2.
is_too_small = math_ops.less(x, np.exp(threshold))
is_too_large = math_ops.greater(x, -threshold)
too_small_value = math_ops.log(x)
too_large_value = x
# This `where` will ultimately be a NOP because we won't select this
# codepath whenever we used the surrogate `ones_like`.
x = array_ops.where(math_ops.logical_or(is_too_small, is_too_large),
array_ops.ones_like(x), x)
y = x + math_ops.log(-math_ops.expm1(-x)) # == log(expm1(x))
return array_ops.where(is_too_small, too_small_value,
array_ops.where(is_too_large, too_large_value, y))
Example 15: _maybe_convert_labels
def _maybe_convert_labels(y_true):
"""Converts binary labels into -1/1."""
are_zeros = math_ops.equal(y_true, 0)
are_ones = math_ops.equal(y_true, 1)
is_binary = math_ops.reduce_all(math_ops.logical_or(are_zeros, are_ones))
def _convert_binary_labels():
# Convert the binary labels to -1 or 1.
return 2. * y_true - 1.
updated_y_true = smart_cond.smart_cond(is_binary,
_convert_binary_labels, lambda: y_true)
return updated_y_true
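A usage sketch of Example 15: `reduce_all` over the `logical_or` of the two equality tests tells whether the labels are already 0/1, and only then are they mapped to -1/1. The labels are illustrative, and `tf.cond` stands in for `smart_cond`.
```python
import tensorflow as tf

def maybe_convert_labels(y_true):
    are_zeros = tf.equal(y_true, 0)
    are_ones = tf.equal(y_true, 1)
    is_binary = tf.reduce_all(tf.logical_or(are_zeros, are_ones))
    # Map {0, 1} labels to {-1, 1}; leave anything else untouched.
    return tf.cond(is_binary, lambda: 2.0 * y_true - 1.0, lambda: y_true)

print(maybe_convert_labels(tf.constant([0.0, 1.0, 1.0])))   # [-1.  1.  1.]
print(maybe_convert_labels(tf.constant([-1.0, 1.0, 1.0])))  # unchanged
```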