This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.floor_div. If you have been wondering what exactly floor_div does, how to use it, and what real code that calls it looks like, the curated examples below should help.
The following presents 5 code examples of floor_div, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
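Note: the snippets below are excerpts from TensorFlow 1.x test and library code, so they are not self-contained. As a rough guide (an assumption on our part, following the TF 1.x internal module layout rather than anything stated on this page), they rely on imports along these lines:

# Imports assumed by the excerpts below (TF 1.x internal module paths).
import numpy as np

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops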
Example 1: testConsistent
def testConsistent(self):
  nums, divs = self.intTestData()
  with self.test_session():
    tf_result = (
        math_ops.floor_div(nums, divs) * divs + math_ops.floormod(nums, divs)
    ).eval()
    tf_nums = array_ops.constant(nums)
    tf_divs = array_ops.constant(divs)
    tf2_result = (tf_nums // tf_divs * tf_divs + tf_nums % tf_divs).eval()
    np_result = (nums // divs) * divs + (nums % divs)
    # Consistency with numpy.
    self.assertAllEqual(tf_result, np_result)
    # Consistency between the two forms of divide.
    self.assertAllEqual(tf_result, tf2_result)
    # Consistency for the truncation form.
    tf3_result = (
        math_ops.truncatediv(nums, divs) * divs
        + math_ops.truncatemod(nums, divs)
    ).eval()
    expanded_nums = np.reshape(np.tile(nums, divs.shape[1]),
                               (nums.shape[0], divs.shape[1]))
    # Consistent with the desire to recover the numerator (truncated form).
    self.assertAllEqual(tf3_result, expanded_nums)
    # Consistent with the desire to recover the numerator (floor form).
    self.assertAllEqual(tf_result, expanded_nums)
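The identity exercised above, x == floor_div(x, y) * y + floormod(x, y), is the same one Python and numpy use for // and %. A minimal numpy sketch (values chosen here purely for illustration) that checks it across sign combinations:

import numpy as np

x = np.array([7, -7, 7, -7])
y = np.array([3, 3, -3, -3])
# The floor-division identity holds for every sign pattern.
assert np.array_equal((x // y) * y + x % y, x)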
Example 2: testConsistent
def testConsistent(self):
  nums, divs = self.intTestData()
  with self.test_session():
    tf_result = (
        math_ops.floor_div(nums, divs) * divs + math_ops.floor_mod(nums, divs)
    ).eval()
    tf_nums = array_ops.constant(nums)
    tf_divs = array_ops.constant(divs)
    tf2_result = (tf_nums // tf_divs * tf_divs + tf_nums % tf_divs).eval()
    np_result = (nums // divs) * divs + (nums % divs)
    self.assertAllEqual(tf_result, np_result)
    self.assertAllEqual(tf_result, tf2_result)
Example 3: _FloorModGrad
def _FloorModGrad(op, grad):
  """Returns grad * (1, -floor(x/y))."""
  x = math_ops.conj(op.inputs[0])
  y = math_ops.conj(op.inputs[1])
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
  floor_xy = math_ops.floor_div(x, y)
  gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx)
  gy = array_ops.reshape(
      math_ops.reduce_sum(grad * math_ops.negative(floor_xy), ry), sy)
  return gx, gy
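This gradient rule reflects that floormod(x, y) = x - floor(x / y) * y, and that floor(x / y) is piecewise constant: away from the discontinuities, the partial derivative with respect to x is 1 and with respect to y is -floor(x / y). A small finite-difference sketch in plain numpy (the floormod helper below is hypothetical, defined only for this check):

import numpy as np

def floormod(x, y):
  # Hypothetical helper mirroring the identity used by _FloorModGrad.
  return x - np.floor(x / y) * y

x, y, eps = 7.3, 2.1, 1e-6
# Central finite difference of floormod with respect to y.
dz_dy = (floormod(x, y + eps) - floormod(x, y - eps)) / (2 * eps)
assert np.isclose(dz_dy, -np.floor(x / y))  # -floor(7.3 / 2.1) == -3.0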
Example 4: testDivideInt
def testDivideInt(self):
  nums, divs = self.intTestData()
  with self.test_session():
    tf_result = math_ops.floor_div(nums, divs).eval()
    np_result = nums // divs
    self.assertAllEqual(tf_result, np_result)
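Note that floor_div rounds toward negative infinity, matching Python's // operator, whereas truncatediv (seen in Example 1) rounds toward zero like C integer division; the two differ whenever the operands have mixed signs. A quick numpy sketch of the difference:

import numpy as np

nums = np.array([-7, 7])
divs = np.array([3, -3])
print(nums // divs)           # [-3 -3]: floor rounds toward -inf
print(np.trunc(nums / divs))  # [-2. -2.]: truncation rounds toward zero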
Example 5: sample
#......... part of the code omitted here .........
      sequence. The entries index into the Halton sequence starting with 0
      and hence, must be whole numbers. For example, sequence_indices=[0, 5, 6]
      will produce the first, sixth and seventh elements of the sequence. If
      this parameter is None, then the `num_results` parameter must be
      specified which gives the number of desired samples starting from the
      first sample.
    dtype: (Optional) The dtype of the sample. One of `float32` or `float64`.
      Default is `float32`.
    randomized: (Optional) bool indicating whether to produce a randomized
      Halton sequence. If True, applies the randomization described in
      Owen (2017) [arXiv:1706.02808].
    seed: (Optional) Python integer to seed the random number generator. Only
      used if `randomized` is True. If not supplied and `randomized` is True,
      no seed is set.
    name: (Optional) Python `str` describing ops managed by this function. If
      not supplied, the name of this function is used.

  Returns:
    halton_elements: Elements of the Halton sequence. `Tensor` of supplied
      dtype and shape `[num_results, dim]` if `num_results` was specified, or
      shape `[s, dim]` where s is the size of `sequence_indices` if
      `sequence_indices` were specified.

  Raises:
    ValueError: if both `sequence_indices` and `num_results` were specified,
      or if dimension `dim` is less than 1 or greater than 1000.
  """
  if dim < 1 or dim > _MAX_DIMENSION:
    raise ValueError(
        'Dimension must be between 1 and {}. Supplied {}'.format(
            _MAX_DIMENSION, dim))
  if (num_results is None) == (sequence_indices is None):
    raise ValueError('Either `num_results` or `sequence_indices` must be'
                     ' specified but not both.')
  dtype = dtype or dtypes.float32
  if not dtype.is_floating:
    raise ValueError('dtype must be of `float`-type')

  with ops.name_scope(name, 'sample', values=[sequence_indices]):
    # Here and in the following, the shape layout is as follows:
    # [sample dimension, event dimension, coefficient dimension]. The
    # coefficient dimension is an intermediate axis which will hold the
    # weights of the starting integer when expressed in the (prime) base for
    # an event dimension.
    indices = _get_indices(num_results, sequence_indices, dtype)
    radixes = array_ops.constant(_PRIMES[0:dim], dtype=dtype, shape=[dim, 1])

    max_sizes_by_axes = _base_expansion_size(math_ops.reduce_max(indices),
                                             radixes)
    max_size = math_ops.reduce_max(max_sizes_by_axes)

    # The powers of the radixes that we will need. Note that there is a bit
    # of an excess here. Suppose we need the place value coefficients of 7
    # in base 2 and 3. For 2, we will have 3 digits but we only need 2 digits
    # for base 3. However, we can only create rectangular tensors, so we
    # store both expansions in a [2, 3] tensor. This leads to the problem
    # that we might end up attempting to raise large numbers to large powers.
    # For example, the base 2 expansion of 1024 has 11 digits. If we were in
    # 10 dimensions, we would end up computing 29^10 (29 being the 10th
    # prime) even though we don't need it. We avoid this by setting the
    # exponents for each axis to 0 beyond the maximum value needed for that
    # dimension.
    exponents_by_axes = array_ops.tile([math_ops.range(max_size)], [dim, 1])

    # The mask is true for those coefficients that are irrelevant.
    weight_mask = exponents_by_axes >= max_sizes_by_axes
    capped_exponents = array_ops.where(
        weight_mask, array_ops.zeros_like(exponents_by_axes),
        exponents_by_axes)
    weights = radixes ** capped_exponents

    # The following computes the base b expansion of the indices. Suppose
    # x = a0 + a1*b + a2*b^2 + ... Then performing a floor div of x with
    # the vector (1, b, b^2, b^3, ...) will produce
    # (a0 + s1*b, a1 + s2*b, ...) where the s_i are coefficients we don't
    # care about. Noting that all a_i < b by definition of place value
    # expansion, we see that taking the elements mod b of the above vector
    # produces the place value expansion coefficients.
    coeffs = math_ops.floor_div(indices, weights)
    coeffs *= 1 - math_ops.cast(weight_mask, dtype)
    coeffs %= radixes
    if not randomized:
      coeffs /= radixes
      return math_ops.reduce_sum(coeffs / weights, axis=-1)

    coeffs = _randomize(coeffs, radixes, seed=seed)
    # Remove the contribution from randomizing the trailing zeros for the
    # axes where max_size_by_axes < max_size. This will be accounted
    # for separately below (using zero_correction).
    coeffs *= 1 - math_ops.cast(weight_mask, dtype)
    coeffs /= radixes
    base_values = math_ops.reduce_sum(coeffs / weights, axis=-1)

    # The randomization used in Owen (2017) does not leave 0 invariant.
    # While we have accounted for the randomization of the first
    # `max_size_by_axes` coefficients, we still need to correct for the
    # trailing zeros. Luckily, this is equivalent to adding a uniform random
    # value scaled so that the first `max_size_by_axes` coefficients are
    # zero. The following statements perform this correction.
    zero_correction = random_ops.random_uniform([dim, 1], seed=seed,
                                                dtype=dtype)
    zero_correction /= (radixes ** max_sizes_by_axes)
    return base_values + array_ops.reshape(zero_correction, [-1])
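The floor_div call in this example implements the digit-extraction trick described in its comments: floor-dividing an index by the vector of radix powers and reducing the result mod the radix recovers the index's place-value digits. A minimal numpy sketch of the same idea (index, base and digit count chosen here for illustration):

import numpy as np

index, base, num_digits = 11, 2, 4
weights = base ** np.arange(num_digits)  # [1, 2, 4, 8]
digits = (index // weights) % base       # least significant digit first
assert np.array_equal(digits, [1, 1, 0, 1])  # 11 == 0b1011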