This article collects typical usage examples of the Python method tensorflow.compat.v2.where. If you are unsure what v2.where does, how to call it, or what it looks like in practice, the curated examples below may help. You can also explore further usage examples of the containing module, tensorflow.compat.v2.
The sections below present 15 code examples of the v2.where method, sorted by popularity by default.
Example 1: tril
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import where [as alias]
def tril(m, k=0):  # pylint: disable=missing-docstring
  m = asarray(m).data
  m_shape = m.shape.as_list()
  if len(m_shape) < 2:
    raise ValueError('Argument to tril must have rank at least 2')
  if m_shape[-1] is None or m_shape[-2] is None:
    raise ValueError('Currently, the last two dimensions of the input array '
                     'need to be known.')
  z = tf.constant(0, m.dtype)
  mask = tri(*m_shape[-2:], k=k, dtype=bool)
  return utils.tensor_to_ndarray(
      tf.where(tf.broadcast_to(mask, tf.shape(m)), m, z))
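For reference, the same mask-and-select pattern can be written with plain TensorFlow ops, without the `asarray`/`tri`/`utils.tensor_to_ndarray` wrappers used above. A minimal sketch (the input values are illustrative, and `np.tri` stands in for the `tri` helper):

```python
import numpy as np
import tensorflow.compat.v2 as tf

m = tf.constant([[[1., 2., 3.],
                  [4., 5., 6.],
                  [7., 8., 9.]]])                   # shape [1, 3, 3]
mask = tf.constant(np.tri(3, 3, k=0, dtype=bool))   # lower-triangular mask
zero = tf.constant(0., m.dtype)
# Broadcast the 2-D mask over the batch dimension and select element-wise.
lower = tf.where(tf.broadcast_to(mask, tf.shape(m)), m, zero)
# lower[0] == [[1., 0., 0.], [4., 5., 0.], [7., 8., 9.]]
```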
Example 2: triu
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import where [as alias]
def triu(m, k=0):  # pylint: disable=missing-docstring
  m = asarray(m).data
  m_shape = m.shape.as_list()
  if len(m_shape) < 2:
    raise ValueError('Argument to triu must have rank at least 2')
  if m_shape[-1] is None or m_shape[-2] is None:
    raise ValueError('Currently, the last two dimensions of the input array '
                     'need to be known.')
  z = tf.constant(0, m.dtype)
  mask = tri(*m_shape[-2:], k=k - 1, dtype=bool)
  return utils.tensor_to_ndarray(
      tf.where(tf.broadcast_to(mask, tf.shape(m)), z, m))
Example 3: _tf_gcd
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import where [as alias]
def _tf_gcd(x1, x2):
  def _gcd_cond_fn(x1, x2):
    return tf.reduce_any(x2 != 0)

  def _gcd_body_fn(x1, x2):
    # tf.math.mod will raise an error when any element of x2 is 0. To avoid
    # that, we change those zeros to ones. Their values don't matter because
    # they won't be used.
    x2_safe = tf.where(x2 != 0, x2, tf.constant(1, x2.dtype))
    x1, x2 = (tf.where(x2 != 0, x2, x1),
              tf.where(x2 != 0, tf.math.mod(x1, x2_safe),
                       tf.constant(0, x2.dtype)))
    return (tf.where(x1 < x2, x2, x1), tf.where(x1 < x2, x1, x2))

  if (not np.issubdtype(x1.dtype.as_numpy_dtype, np.integer) or
      not np.issubdtype(x2.dtype.as_numpy_dtype, np.integer)):
    raise ValueError("Arguments to gcd must be integers.")
  shape = tf.broadcast_static_shape(x1.shape, x2.shape)
  x1 = tf.broadcast_to(x1, shape)
  x2 = tf.broadcast_to(x2, shape)
  gcd, _ = tf.while_loop(_gcd_cond_fn, _gcd_body_fn,
                         (tf.math.abs(x1), tf.math.abs(x2)))
  return gcd
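The `x2_safe` construction above is the usual "safe denominator" idiom: the risky op (`tf.math.mod`) never sees a zero, and a second `tf.where` masks out the entries whose values do not matter. A minimal standalone sketch of the same idiom for element-wise division (values are illustrative):

```python
import tensorflow.compat.v2 as tf

x = tf.constant([6., 3., 2.])
y = tf.constant([2., 0., 4.])
# Replace zeros before the division so it never produces inf/NaN...
y_safe = tf.where(y != 0., y, tf.ones_like(y))
# ...then mask the unused results out with a second tf.where.
ratio = tf.where(y != 0., x / y_safe, tf.zeros_like(x))
# ratio == [3., 0., 0.5], with finite gradients everywhere.
```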
Example 4: _should_stop
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import where [as alias]
def _should_stop(state, stopping_policy_fn):
  """Indicates whether the overall Brent search should continue.

  Args:
    state: A Python `_BrentSearchState` namedtuple.
    stopping_policy_fn: Python `callable` controlling the algorithm
      termination.

  Returns:
    A boolean value indicating whether the overall search should continue.
  """
  return tf.convert_to_tensor(
      stopping_policy_fn(state.finished), name="should_stop", dtype=tf.bool)


# This is a direct translation of the Brent root-finding method.
# Each operation is guarded by a call to `tf.where` to avoid performing
# unnecessary calculations.
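A minimal sketch of the guarded-update idiom described in the comment above, using a toy Newton iteration rather than the actual Brent state (the values and update rule are illustrative):

```python
import tensorflow.compat.v2 as tf

x = tf.constant([2.0, 10.0, 0.5])
finished = tf.constant([False, True, False])
proposed = 0.5 * (x + 4.0 / x)        # one Newton step toward sqrt(4) = 2
# Finished entries keep their value; only active entries take the step.
x = tf.where(finished, x, proposed)
# x == [2.0, 10.0, 4.25]
```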
Example 5: _get_forward_rate
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import where [as alias]
def _get_forward_rate(self, valuation_date, market):
  """Returns the relevant forward rates from the market data."""
  forward_rates = market.reference_curve.get_forward_rate(
      self._accrual_start_dates,
      self._accrual_end_dates,
      self._daycount_fractions)
  forward_rates = tf.where(self._daycount_fractions > 0.0, forward_rates,
                           tf.zeros_like(forward_rates))
  libor_rate = rc.get_rate_index(
      market, self._start_date, rc.RateIndexType.LIBOR, dtype=self._dtype)
  libor_rate = tf.repeat(
      tf.convert_to_tensor(libor_rate, dtype=self._dtype), self._num_caplets)
  forward_rates = tf.where(
      self._accrual_end_dates < valuation_date,
      tf.constant(0., dtype=self._dtype),
      tf.where(self._accrual_start_dates < valuation_date, libor_rate,
               forward_rates))
  return forward_rates
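Nesting two `tf.where` calls, as above, gives an element-wise three-way choice: expired periods get zero, currently accruing periods get the fixed rate, and future periods keep the forward rate. A minimal sketch of the same pattern with plain tensors (all values are illustrative):

```python
import tensorflow.compat.v2 as tf

start = tf.constant([0.0, 0.0, 2.0])   # accrual start times (illustrative)
end = tf.constant([1.0, 3.0, 4.0])     # accrual end times (illustrative)
now = 1.5                              # "valuation" time
rate = tf.where(end < now,                        # period already over -> 0
                tf.zeros_like(end),
                tf.where(start < now,             # currently accruing -> fixing
                         tf.fill(tf.shape(end), 0.02),
                         tf.fill(tf.shape(end), 0.03)))  # future -> forward
# rate == [0.0, 0.02, 0.03]
```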
Example 6: _updated_cashflow
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import where [as alias]
def _updated_cashflow(num_times, exercise_index, exercise_value,
                      expected_continuation, cashflow):
  """Revises the cashflow tensor where options will be exercised earlier."""
  do_exercise_bool = exercise_value > expected_continuation
  do_exercise = tf.cast(do_exercise_bool, exercise_value.dtype)
  # Shape [num_samples, payoff_dim]
  scaled_do_exercise = tf.where(do_exercise_bool, exercise_value,
                                tf.zeros_like(exercise_value))
  # This picks out the samples where we now wish to exercise.
  # Shape [num_samples, payoff_dim, 1]
  new_samp_masked = tf.expand_dims(scaled_do_exercise, axis=2)
  # This should be one on the current time step and zero otherwise.
  # This is an array with nonzero entries showing newly exercised payoffs.
  zeros = tf.zeros_like(cashflow)
  mask = tf.equal(tf.range(0, num_times), exercise_index - 1)
  new_cash = tf.where(mask, new_samp_masked, zeros)
  # Has shape [num_samples, payoff_dim, 1]
  old_mask = tf.expand_dims(1 - do_exercise, axis=2)
  mask = tf.range(0, num_times) >= exercise_index
  old_mask = tf.where(mask, old_mask, zeros)
  # Shape [num_samples, payoff_dim, num_times]
  old_cash = old_mask * cashflow
  return new_cash + old_cash
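The key step above is `tf.where` with a 1-D boolean mask over the time axis broadcast against a rank-3 tensor, which overwrites exactly one time slice. A minimal sketch of that broadcast (shapes and values are illustrative):

```python
import tensorflow.compat.v2 as tf

num_times = 4
cashflow = tf.zeros([2, 1, num_times])            # [samples, payoffs, times]
new_value = tf.constant([[[5.0]], [[7.0]]])       # [samples, payoffs, 1]
exercise_index = 3
# One-hot mask over the time axis; tf.where broadcasts it against rank 3.
mask = tf.equal(tf.range(num_times), exercise_index - 1)
updated = tf.where(mask, new_value, cashflow)
# Only time slice 2 is overwritten: updated[..., 2] == [[5.], [7.]]
```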
Example 7: business_days_between
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import where [as alias]
def business_days_between(self, from_dates, to_dates):
  """Calculates the number of business days between pairs of dates.

  For each pair, the initial date is included in the difference, and the final
  date is excluded. If the final date is the same or earlier than the initial
  date, zero is returned.

  Args:
    from_dates: `DateTensor` of initial dates.
    to_dates: `DateTensor` of final dates, should be broadcastable to
      `from_dates`.

  Returns:
    An int32 Tensor with the number of business days between the
    corresponding pairs of dates.
  """
  from_biz, from_is_bizday = self._to_biz_space(
      dt.convert_to_date_tensor(from_dates).ordinal())
  to_biz, to_is_bizday = self._to_biz_space(
      dt.convert_to_date_tensor(to_dates).ordinal())
  from_biz = tf.where(from_is_bizday, from_biz, from_biz + 1)
  to_biz = tf.where(to_is_bizday, to_biz, to_biz + 1)
  return tf.math.maximum(to_biz - from_biz, 0)
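A sketch of the adjustment above with hand-made stand-ins for the `_to_biz_space` output (the business-day indices and flags below are hypothetical placeholders, not produced by the library): a non-business start date is pushed to the next business day, and the difference is clamped at zero.

```python
import tensorflow.compat.v2 as tf

from_biz = tf.constant([10, 10])            # hypothetical biz-day indices
from_is_bizday = tf.constant([True, False])
to_biz = tf.constant([13, 9])
to_is_bizday = tf.constant([True, True])

from_biz = tf.where(from_is_bizday, from_biz, from_biz + 1)
to_biz = tf.where(to_is_bizday, to_biz, to_biz + 1)
days = tf.math.maximum(to_biz - from_biz, 0)
# days == [3, 0]: the second pair ends before it starts, so it clamps to 0.
```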
Example 8: to_tensor
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import where [as alias]
def to_tensor(self):
  """Packs the dates into a single Tensor.

  The Tensor has shape `date_tensor.shape() + (3,)`, where the last dimension
  represents years, months and days, in this order.

  This can be convenient when the dates are the final result of a computation
  in the graph mode: a `tf.function` can return `date_tensor.to_tensor()`, or,
  if one uses `tf.compat.v1.Session`, they can call
  `session.run(date_tensor.to_tensor())`.

  Returns:
    A Tensor of shape `date_tensor.shape() + (3,)`.

  #### Example

  ```python
  dates = tff.datetime.dates_from_tuples([(2019, 1, 25), (2020, 3, 2)])
  dates.to_tensor()  # tf.Tensor with contents [[2019, 1, 25], [2020, 3, 2]].
  ```
  """
  return tf.stack((self.year(), self.month(), self.day()), axis=-1)
Example 9: build_model
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import where [as alias]
def build_model(self, feature_ph_dict, labels_ph, params=None, context=None):
  """(DEPRECATED) Construct and return a Model.

  Args:
    feature_ph_dict: maps feature names to placeholders that will hold the
      corresponding inputs.
    labels_ph: a placeholder that will hold the target labels.
    params: optional parameters to be passed to the config_* methods.
    context: if provided, use instead of building a fresh context.

  Returns:
    a fully configured Model, where model.context is a
    freshly-built context produced by self.build_context().
  """
  model = Model()
  model.context = context or self.build_context(params=params)
  self.config_model(model, feature_ph_dict, labels_ph, params=params)
  self.check_model_completeness(model)
  return model
Example 10: labels_of_top_ranked_predictions_in_batch
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import where [as alias]
def labels_of_top_ranked_predictions_in_batch(labels, predictions):
  """Applying tf.metrics.mean to this gives precision at 1.

  Args:
    labels: minibatch of dense 0/1 labels, shape [batch_size, num_classes]
    predictions: minibatch of predictions of the same shape

  Returns:
    one-dimensional tensor top_labels, where top_labels[i]=1.0 iff the
    top-scoring prediction for batch element i has label 1.0
  """
  indices_of_top_preds = tf.cast(tf.argmax(input=predictions, axis=1), tf.int32)
  batch_size = tf.reduce_sum(input_tensor=tf.ones_like(indices_of_top_preds))
  row_indices = tf.range(batch_size)
  thresholded_labels = tf.where(labels > 0.0, tf.ones_like(labels),
                                tf.zeros_like(labels))
  label_indices_to_gather = tf.transpose(
      a=tf.stack([row_indices, indices_of_top_preds]))
  return tf.gather_nd(thresholded_labels, label_indices_to_gather)
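Assuming the function above is in scope, a tiny illustrative batch shows how its mean yields precision at 1:

```python
import tensorflow.compat.v2 as tf

labels = tf.constant([[0., 1., 0.],
                      [1., 0., 0.]])
predictions = tf.constant([[0.1, 0.7, 0.2],   # argmax is class 1 -> label 1.0
                           [0.2, 0.5, 0.3]])  # argmax is class 1 -> label 0.0
top_labels = labels_of_top_ranked_predictions_in_batch(labels, predictions)
# top_labels == [1.0, 0.0]; tf.reduce_mean(top_labels) == 0.5 is precision@1.
```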
Example 11: one_hot_numpy_array
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import where [as alias]
def one_hot_numpy_array(self, entity_name,
                        type_name):
  """A one-hot 1-by-N matrix encoding this entity.

  Args:
    entity_name: a string naming an entity.
    type_name: the string type name of the named entity.

  Returns:
    A numpy array with shape (1, n) where n is the number of columns.

  Raises:
    EntityNameError: if entity_name does not map to a legal id.
  """
  index = self.get_id(entity_name, type_name)
  if index is None:
    raise EntityNameError(
        entity_name=entity_name,
        type_name=type_name,
        message='Cannot make a one-hot vector')
  result = self.zeros_numpy_array(type_name, True)
  result[0, index] = 1.0
  return result


# schema operations
Example 12: nonneg_softmax
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import where [as alias]
def nonneg_softmax(expr,
                   replace_nonpositives=-10):
  """A softmax operator that is appropriate for NQL outputs.

  NeuralQueryExpressions often evaluate to sparse vectors of small, nonnegative
  values. Softmax for those is dominated by zeros, so this is a fix. This also
  fixes the problem that minibatches for NQL are one example per column, not
  one example per row.

  Args:
    expr: a Tensorflow expression for some predicted values.
    replace_nonpositives: will replace zeros with this value before computing
      softmax.

  Returns:
    Tensorflow expression for softmax.
  """
  if replace_nonpositives != 0.0:
    ones = tf.ones(tf.shape(input=expr), tf.float32)
    expr = tf.where(expr > 0.0, expr, ones * replace_nonpositives)
  return tf.nn.softmax(expr)
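A tiny illustrative call (assuming `nonneg_softmax` above is in scope): replacing zeros with a large negative value before the softmax keeps them from absorbing probability mass.

```python
import tensorflow.compat.v2 as tf

expr = tf.constant([[0.0, 2.0, 0.0, 1.0]])
probs = nonneg_softmax(expr)   # zeros behave like logits of -10
# probs is approximately [[0.0, 0.73, 0.0, 0.27]].
```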
Example 13: nonneg_crossentropy
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import where [as alias]
def nonneg_crossentropy(expr, target):
  """A cross entropy operator that is appropriate for NQL outputs.

  Query expressions often evaluate to sparse vectors. This evaluates cross
  entropy safely.

  Args:
    expr: a Tensorflow expression for some predicted values.
    target: a Tensorflow expression for target values.

  Returns:
    Tensorflow expression for cross entropy.
  """
  expr_replacing_0_with_1 = \
      tf.where(expr > 0, expr, tf.ones(tf.shape(input=expr), tf.float32))
  cross_entropies = tf.reduce_sum(
      input_tensor=-target * tf.math.log(expr_replacing_0_with_1), axis=1)
  return tf.reduce_mean(input_tensor=cross_entropies, axis=0)
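A tiny illustrative call (assuming `nonneg_crossentropy` above is in scope): the `tf.where` guard means `tf.math.log` is never evaluated at zero, and the replaced entries contribute nothing because the target multiplies the log.

```python
import tensorflow.compat.v2 as tf

expr = tf.constant([[0.0, 0.8, 0.2]])     # predicted, possibly sparse
target = tf.constant([[0.0, 1.0, 0.0]])   # one-hot target
loss = nonneg_crossentropy(expr, target)
# loss == -log(0.8) ~= 0.223, with no log(0) evaluated along the way.
```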
Example 14: __call__
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import where [as alias]
def __call__(self, multi_objectives: tf.Tensor) -> tf.Tensor:
  """Returns a single reward by scalarizing multiple objectives.

  Args:
    multi_objectives: A `Tensor` of shape [batch_size, number_of_objectives],
      where each column represents an objective.

  Returns: A `Tensor` of shape [batch_size] representing scalarized rewards.

  Raises:
    ValueError: if `multi_objectives.shape.rank != 2`.
    ValueError: if
      `multi_objectives.shape.dims[1] != self._num_of_objectives`.
  """
  if multi_objectives.shape.rank != 2:
    raise ValueError('The rank of the input should be 2, but is {}'.format(
        multi_objectives.shape.rank))
  if multi_objectives.shape.dims[1] != self._num_of_objectives:
    raise ValueError(
        'The number of input objectives should be {}, but is {}.'.format(
            self._num_of_objectives, multi_objectives.shape.dims[1]))
  return self.call(multi_objectives)


# Subclasses must implement these methods.
Example 15: _log_prob_with_logsf_and_logcdf
# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import where [as alias]
def _log_prob_with_logsf_and_logcdf(self, y):
  """Compute log_prob(y) using log survival_function and cdf together."""
  # There are two options that would be equal if we had infinite precision:
  # Log[ sf(y - .5) - sf(y + .5) ]
  #   = Log[ exp{logsf(y - .5)} - exp{logsf(y + .5)} ]
  # Log[ cdf(y + .5) - cdf(y - .5) ]
  #   = Log[ exp{logcdf(y + .5)} - exp{logcdf(y - .5)} ]
  logsf_y_plus = self.base.log_survival_function(y + .5)
  logsf_y_minus = self.base.log_survival_function(y - .5)
  logcdf_y_plus = self.base.log_cdf(y + .5)
  logcdf_y_minus = self.base.log_cdf(y - .5)
  # Important: Here we use select in a way such that no input is inf, this
  # prevents the troublesome case where the output of select can be finite,
  # but the output of grad(select) will be NaN.
  # In either case, we are doing Log[ exp{big} - exp{small} ]
  # We want to use the sf items precisely when we are on the right side of the
  # median, which occurs when logsf_y < logcdf_y.
  condition = logsf_y_plus < logcdf_y_plus
  big = tf.where(condition, logsf_y_minus, logcdf_y_plus)
  small = tf.where(condition, logsf_y_plus, logcdf_y_minus)
  return _logsum_expbig_minus_expsmall(big, small)
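`_logsum_expbig_minus_expsmall` is defined elsewhere in the source module. One common, numerically stable way to compute `Log[ exp{big} - exp{small} ]` is the identity `big + log1p(-exp(small - big))`; the sketch below shows that idea as a plausible stand-in, not necessarily the library's implementation.

```python
import tensorflow.compat.v2 as tf

def logsum_expbig_minus_expsmall(big, small):
  """log(exp(big) - exp(small)), assuming big >= small element-wise."""
  # Factoring out exp(big) keeps the subtraction well conditioned.
  return big + tf.math.log1p(-tf.math.exp(small - big))

# Example: log(exp(0.) - exp(-3.)) == log(1 - 0.0498) ~= -0.0511
value = logsum_expbig_minus_expsmall(tf.constant(0.0), tf.constant(-3.0))
```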