本文整理汇总了Python中tensorflow.python.ops.math_ops.mod函数的典型用法代码示例。如果您正苦于以下问题:Python mod函数的具体用法?Python mod怎么用?Python mod使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了mod函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: insert_transformed_feature
def insert_transformed_feature(self, columns_to_tensors):
  """Handles sparse column to id conversion."""
  # Bucketize the raw sparse values by taking them modulo the bucket count,
  # then register the resulting SparseTensor under this column object.
  source = columns_to_tensors[self.name]
  bucketized_values = math_ops.mod(source.values, self.bucket_size)
  columns_to_tensors[self] = ops.SparseTensor(
      source.indices, bucketized_values, source.shape)
示例2: testFilteredElementsStats
def testFilteredElementsStats(self, dataset_transformation):
  """Verifies dropped/filtered element counters reported by Dataset.filter.

  Keeps only multiples of 3 out of range(101): 34 elements pass the filter
  and 67 are dropped, and the stats aggregator must report both counts.
  """
  aggregator = stats_aggregator.StatsAggregator()
  dataset = dataset_ops.Dataset.range(101).filter(
      lambda x: math_ops.equal(math_ops.mod(x, 3), 0))
  dataset = dataset_transformation(dataset, aggregator)
  iterator = dataset_ops.make_initializable_iterator(dataset)
  next_element = iterator.get_next()
  summary_t = aggregator.get_summary()
  with self.test_session():
    self.evaluate(iterator.initializer)
    for i in range(34):
      self.assertEqual(i * 3, self.evaluate(next_element))
      # BUG FIX: was `if i is not 0`, which compares object identity rather
      # than numeric equality (implementation-dependent for ints and a
      # SyntaxWarning on Python 3.8+). Use `!=` for value comparison.
      if i != 0:
        # Before element i, the pipeline has dropped the 2 non-multiples
        # between each pair of passing elements.
        self._assertSummaryHasScalarValue(
            self.evaluate(summary_t), "Filter::dropped_elements",
            float(i * 2))
      self._assertSummaryHasScalarValue(
          self.evaluate(summary_t), "Filter::filtered_elements", float(i + 1))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element)
    # Final totals: 67 dropped, 34 passed.
    self._assertSummaryHasScalarValue(
        self.evaluate(summary_t), "Filter::dropped_elements", 67.0)
    self._assertSummaryHasScalarValue(
        self.evaluate(summary_t), "Filter::filtered_elements", 34.0)
示例3: testFilteredElementsStats
def testFilteredElementsStats(self):
  """Checks the dropped/filtered element statistics for a filter transform."""
  aggregator = stats_aggregator.StatsAggregator()
  # Only multiples of 3 in [0, 100] survive: 34 pass, 67 are dropped.
  multiples_of_three = dataset_ops.Dataset.range(101).filter(
      lambda x: math_ops.equal(math_ops.mod(x, 3), 0))
  dataset = self.datasetExperimentalStats(multiples_of_three, aggregator)
  next_element = self.getNext(dataset, requires_initialization=True)
  for step in range(34):
    self.assertEqual(step * 3, self.evaluate(next_element()))
    handle = self.getHandle(aggregator)
    if step:  # Nothing has been dropped before the first passing element.
      self.assertStatisticsHasScalarValue(
          handle,
          self.regexForNodeName("FilterDataset", "dropped_elements"),
          float(step * 2))
    self.assertStatisticsHasScalarValue(
        handle,
        self.regexForNodeName("FilterDataset", "filtered_elements"),
        float(step + 1))
  with self.assertRaises(errors.OutOfRangeError):
    self.evaluate(next_element())
  # Final totals after exhausting the dataset.
  handle = self.getHandle(aggregator)
  self.assertStatisticsHasScalarValue(
      handle, self.regexForNodeName("FilterDataset", "dropped_elements"),
      67.0)
  self.assertStatisticsHasScalarValue(
      handle, self.regexForNodeName("FilterDataset", "filtered_elements"),
      34.0)
示例4: rot90
def rot90(image, k=1, name=None):
  """Rotate an image counter-clockwise by 90 degrees.

  Args:
    image: A 3-D tensor of shape `[height, width, channels]`.
    k: A scalar integer. The number of times the image is rotated by 90
      degrees.
    name: A name for this operation (optional).

  Returns:
    A rotated 3-D tensor of the same type and shape as `image`.
  """
  with ops.name_scope(name, 'rot90', [image, k]) as scope:
    image = ops.convert_to_tensor(image, name='image')
    _Check3DImage(image, require_static=False)
    k = ops.convert_to_tensor(k, dtype=dtypes.int32, name='k')
    k.get_shape().assert_has_rank(0)
    # Only k mod 4 distinct rotations exist.
    k = math_ops.mod(k, 4)

    def _quarter_turn():
      # 90 deg CCW: flip columns, then swap the height/width axes.
      return array_ops.transpose(array_ops.reverse_v2(image, [1]), [1, 0, 2])

    def _half_turn():
      # 180 deg: flip both spatial axes.
      return array_ops.reverse_v2(image, [0, 1])

    def _three_quarter_turn():
      # 270 deg CCW: swap the axes, then flip columns.
      return array_ops.reverse_v2(
          array_ops.transpose(image, [1, 0, 2]), [1])

    result = control_flow_ops.case(
        [(math_ops.equal(k, 1), _quarter_turn),
         (math_ops.equal(k, 2), _half_turn),
         (math_ops.equal(k, 3), _three_quarter_turn)],
        default=lambda: image,
        exclusive=True,
        name=scope)
    # Height/width become dynamic after rotation; channels are preserved.
    result.set_shape([None, None, image.get_shape()[2]])
    return result
示例5: testFilteredElementsStats
def testFilteredElementsStats(self, dataset_transformation):
  """Verifies dropped/filtered element counters reported by Dataset.filter.

  Keeps only multiples of 3 out of range(101): 34 elements pass the filter
  and 67 are dropped, and the stats summary must report both counts.
  """
  aggregator = stats_aggregator.StatsAggregator()
  dataset = dataset_ops.Dataset.range(101).filter(
      lambda x: math_ops.equal(math_ops.mod(x, 3), 0))
  dataset = dataset_transformation(dataset, aggregator)
  next_element = self.getNext(dataset, requires_initialization=True)
  for i in range(34):
    self.assertEqual(i * 3, self.evaluate(next_element()))
    summary_str = self.evaluate(aggregator.get_summary())
    # BUG FIX: was `if i is not 0`, which compares object identity rather
    # than numeric equality (implementation-dependent for ints and a
    # SyntaxWarning on Python 3.8+). Use `!=` for value comparison.
    if i != 0:
      # Two non-multiples are dropped between each pair of passing elements.
      self._assertSummaryHasScalarValue(
          summary_str,
          self.regexForNodeName("FilterDataset", "dropped_elements"),
          float(i * 2))
    self._assertSummaryHasScalarValue(
        summary_str,
        self.regexForNodeName("FilterDataset", "filtered_elements"),
        float(i + 1))
  with self.assertRaises(errors.OutOfRangeError):
    self.evaluate(next_element())
  # Final totals: 67 dropped, 34 passed.
  summary_str = self.evaluate(aggregator.get_summary())
  self._assertSummaryHasScalarValue(
      summary_str, self.regexForNodeName("FilterDataset", "dropped_elements"),
      67.0)
  self._assertSummaryHasScalarValue(
      summary_str, self.regexForNodeName("FilterDataset",
                                         "filtered_elements"), 34.0)
示例6: gcd
def gcd(a, b, name=None):
  """Returns the greatest common divisor via Euclid's algorithm.

  Args:
    a: The dividend. A scalar integer `Tensor`.
    b: The divisor. A scalar integer `Tensor`.
    name: An optional name for the operation.

  Returns:
    A scalar `Tensor` representing the greatest common divisor between `a` and
    `b`.

  Raises:
    ValueError: If `a` or `b` are not scalar integers.
  """
  with ops.name_scope(name, 'gcd', [a, b]):
    a = ops.convert_to_tensor(a)
    b = ops.convert_to_tensor(b)
    a.shape.assert_has_rank(0)
    b.shape.assert_has_rank(0)
    if not a.dtype.is_integer:
      raise ValueError('a must be an integer type. Got: %s' % a.dtype)
    if not b.dtype.is_integer:
      raise ValueError('b must be an integer type. Got: %s' % b.dtype)

    def _still_positive(unused_dividend, divisor):
      # Keep iterating while the remainder is strictly positive.
      return math_ops.greater(divisor, array_ops.zeros_like(divisor))

    def _euclid_step(dividend, divisor):
      # (a, b) -> (b, a mod b), the classic Euclid update.
      return [divisor, math_ops.mod(dividend, divisor)]

    result, _ = control_flow_ops.while_loop(
        _still_positive, _euclid_step, [a, b], back_prop=False)
    return result
示例7: _TileGrad
def _TileGrad(op, grad):
  """Sum reduces grad along the tiled dimensions.

  Args:
    op: The forward `Tile` op; `op.inputs[0]` is the tiled tensor and
      `op.inputs[1]` the per-dimension tile multiples.
    grad: Gradient with respect to the tiled output. May be a dense
      `Tensor` or an `ops.IndexedSlices`.

  Returns:
    A two-element list `[input_grad, None]`: the gradient for the input
    tensor, and no gradient for the integer `multiples` argument.
  """
  input_shape = array_ops.shape(op.inputs[0])
  # We interleave multiples and input_shape to get split_shape,
  # reshape grad to split_shape, and reduce along all even
  # dimensions (the tiled dimensions) to get the result
  # with shape input_shape. For example
  #   input_shape = [20, 30, 40]
  #   multiples = [2, 3, 4]
  #   split_shape = [2, 20, 3, 30, 4, 40]
  #   axes = [0, 2, 4]
  split_shape = array_ops.reshape(
      array_ops.transpose(array_ops.stack([op.inputs[1], input_shape])), [-1])
  axes = math_ops.range(0, array_ops.size(split_shape), 2)
  # Sum reduces grad along the first dimension for IndexedSlices
  if isinstance(grad, ops.IndexedSlices):
    # Slice indices address rows of the tiled output; mod by the original
    # row count folds every tiled copy back onto its source row, and
    # unsorted_segment_sum accumulates the contributions per source row.
    grad = math_ops.unsorted_segment_sum(
        grad.values,
        math_ops.mod(grad.indices, input_shape[0]),
        input_shape[0])
    # The leading tile dimension has already been collapsed above, so the
    # reshape below must use 1 (not multiples[0]) for that dimension.
    split_shape = array_ops.concat([[1], split_shape[1:]], axis=0)
  input_grad = math_ops.reduce_sum(array_ops.reshape(grad, split_shape), axes)
  # Fix shape inference
  if not context.executing_eagerly():
    input_grad.set_shape(op.inputs[0].get_shape())
  return [input_grad, None]
示例8: pack_uint8_r2_to_uint32
def pack_uint8_r2_to_uint32(self, test_input):
  """Packs each group of 4 uint8 column values into one big-endian uint32.

  Args:
    test_input: A rank-2 tensor; values are cast to uint8 before packing.

  Returns:
    A `[num_rows, ceil(num_columns / 4)]` uint32 tensor where each output
    element holds 4 consecutive input bytes (first byte most significant).
  """
  num_rows, num_columns = test_input.get_shape().as_list()
  num_output_columns = int(math.ceil(num_columns / 4.0))
  # Zero-pad on the right so the column count is an exact multiple of 4.
  padded = array_ops.pad(
      math_ops.cast(test_input, dtype=dtypes.uint8),
      constant_op.constant(
          [[0, 0], [0, num_output_columns * 4 - num_columns]]))
  packed = array_ops.zeros([num_rows, num_output_columns],
                           dtype=dtypes.uint32)
  bytes_per_word = 4
  bits_per_byte = 8
  column_ids = math_ops.range(num_output_columns * bytes_per_word)
  for byte_index in range(bytes_per_word):
    # Pick every 4th column starting at byte_index, i.e. the columns that
    # supply this byte position of each output word.
    mask = math_ops.equal(
        math_ops.mod(column_ids, bytes_per_word), byte_index)
    picked_columns = array_ops.boolean_mask(column_ids, mask)
    byte_values = array_ops.gather(padded, picked_columns, axis=1)
    # Earlier bytes land in more significant positions (big-endian order).
    shift = bits_per_byte * (bytes_per_word - byte_index - 1)
    shifted = bitwise_ops.left_shift(
        math_ops.cast(byte_values, dtype=dtypes.uint32), shift)
    packed = bitwise_ops.bitwise_or(packed, shifted)
  return packed
示例9: _shard_indices
def _shard_indices(self, keys):
  """Maps each key to a shard id in `[0, self._num_shards)` via modulo."""
  shape = keys.get_shape()
  if shape.ndims > 1:
    # If keys are a matrix (i.e. a single key is a vector), we use the first
    # element of each key vector to determine the shard.
    first_components = array_ops.slice(keys, [0, 0], [shape[0].value, 1])
    keys = array_ops.reshape(first_components, [-1])
  # abs() keeps the shard index non-negative for signed integer keys.
  shard_ids = math_ops.mod(math_ops.abs(keys), self._num_shards)
  return math_ops.cast(shard_ids, dtypes.int32)
示例10: testFilterRange
def testFilterRange(self):
  """Filtering range(100) by x % 3 != 2 skips every third element."""
  dataset = dataset_ops.Dataset.range(100).filter(
      lambda x: math_ops.not_equal(math_ops.mod(x, 3), 2))
  get_next = dataset.make_one_shot_iterator().get_next()
  with self.test_session() as sess:
    # 2 is the first element congruent to 2 mod 3, so it is skipped.
    for expected in (0, 1, 3):
      self.assertEqual(expected, sess.run(get_next))
示例11: testFloat
def testFloat(self):
  """math_ops.mod on floats should agree with np.fmod."""
  values = [0.5, 0.7, 0.3]
  for dtype in (np.float32, np.double):
    # Exercise both a scalar divisor and an elementwise vector divisor.
    for denom in (values[0], [values[0]] * 3):
      x_np = np.array(values, dtype=dtype)
      with self.test_session(use_gpu=True):
        x_tf = constant_op.constant(x_np, shape=x_np.shape)
        y_tf_np = math_ops.mod(x_tf, denom).eval()
        self.assertAllClose(y_tf_np, np.fmod(x_np, denom), atol=1e-2)
示例12: testFixed
def testFixed(self):
  """math_ops.mod on integers should agree with np.mod."""
  values = [5, 10, 23]
  for dtype in (np.int32, np.int64):
    # Exercise both a scalar divisor and an elementwise vector divisor.
    for denom in (values[0], values):
      x_np = np.array(values, dtype=dtype)
      with self.test_session(use_gpu=True):
        x_tf = constant_op.constant(x_np, shape=x_np.shape)
        y_tf_np = math_ops.mod(x_tf, denom).eval()
        self.assertAllClose(y_tf_np, np.mod(x_np, denom))
示例13: _add_sinusoids_signal
def _add_sinusoids_signal(x, time, min_timescale=1.0, max_timescale=1.0e4):
  """Adds a bunch of sinusoids of different frequencies to a Tensor.

  Each channel of the input Tensor is incremented by a sinusoid of a different
  frequency and phase.

  This allows attention to learn to use absolute and relative positions.
  Timing signals should be added to some precursors of both the query and the
  memory inputs to attention.

  The use of relative position is possible because sin(x+y) and cos(x+y) can be
  expressed in terms of y, sin(x) and cos(x).

  In particular, we use a geometric sequence of timescales starting with
  min_timescale and ending with max_timescale. The number of different
  timescales is equal to channels / 2. For each timescale, we
  generate the two sinusoidal signals sin(timestep/timescale) and
  cos(timestep/timescale). All of these sinusoids are concatenated in
  the channels dimension.

  Args:
    x: a Tensor with shape [batch, length, channels] (rank 3) or
      [batch, channels] (rank 2).
    time: the timestep used as the position when `x` has rank 2; unused
      for rank-3 inputs, where positions 0..length-1 are generated.
    min_timescale: a float
    max_timescale: a float

  Returns:
    a Tensor the same shape as x.

  Raises:
    ValueError: if `x` has a rank other than 2 or 3.
  """
  channels = x.get_shape().as_list()[-1]
  if x.get_shape().ndims == 3:  # [batch_size, timesteps, dim]
    length = array_ops.shape(x)[1]
    position = math_ops.to_float(math_ops.range(length))
  elif x.get_shape().ndims == 2:  # [batch_size, dim]
    length = 1
    position = math_ops.to_float(math_ops.range(time, time + 1))
  else:
    raise ValueError("need a Tensor with rank 2 or 3")
  num_timescales = channels // 2
  # Geometric progression of timescales: consecutive inverse timescales
  # differ by a constant factor in log space.
  log_timescale_increment = (
      math.log(float(max_timescale) / float(min_timescale)) /
      (math_ops.to_float(num_timescales) - 1))
  inv_timescales = min_timescale * math_ops.exp(
      math_ops.to_float(math_ops.range(num_timescales)) *
      -log_timescale_increment)
  # Outer product: [length, 1] * [1, num_timescales] -> [length, num_timescales]
  scaled_time = array_ops.expand_dims(position, 1) * array_ops.expand_dims(
      inv_timescales, 0)
  signal = array_ops.concat(
      [math_ops.sin(scaled_time), math_ops.cos(scaled_time)], axis=1)
  # Pad one zero channel on the right when `channels` is odd.
  signal = array_ops.pad(signal, [[0, 0], [0, math_ops.mod(channels, 2)]])
  if x.get_shape().ndims == 3:
    signal = array_ops.reshape(signal, [1, length, channels])
  else:
    signal = array_ops.reshape(signal, [1, channels])
  # Broadcasts over the batch dimension.
  return x + signal
示例14: do_test
def do_test(count, modulus):
  # Builds a map -> repeat -> filter pipeline and checks that exactly the
  # elements whose mapped first component is divisible by `modulus` come out,
  # repeated `count` times.
  # NOTE(review): this is a nested helper — it closes over `components`,
  # `_map_fn` and `self` from the enclosing test method, which are not
  # visible here.
  dataset = dataset_ops.Dataset.from_tensor_slices(components).map(
      _map_fn).repeat(count).filter(
          lambda x, _y, _z: math_ops.equal(math_ops.mod(x, modulus), 0))
  # Filtering must not alter the per-element output shapes.
  self.assertEqual([c.shape[1:] for c in components],
                   [shape for shape in dataset.output_shapes])
  get_next = self.getNext(dataset)
  for _ in range(count):
    # `x**2 % modulus == 0` mirrors the graph-side predicate, which suggests
    # _map_fn squares each component.
    # NOTE(review): assumes each component has 7 rows indexed 0..6 — confirm
    # against the enclosing test's `components` fixture.
    for i in [x for x in range(7) if x**2 % modulus == 0]:
      result = self.evaluate(get_next())
      for component, result_component in zip(components, result):
        self.assertAllEqual(component[i]**2, result_component)
  # After `count` full passes the iterator must be exhausted.
  with self.assertRaises(errors.OutOfRangeError):
    self.evaluate(get_next())
示例15: adjust_hue
def adjust_hue(image, delta, name=None):
  """Adjust hue of an RGB image.

  This is a convenience method that converts an RGB image to float
  representation, converts it to HSV, add an offset to the hue channel, converts
  back to RGB and then back to the original data type. If several adjustments
  are chained it is advisable to minimize the number of redundant conversions.

  `image` is an RGB image. The image hue is adjusted by converting the
  image to HSV and rotating the hue channel (H) by
  `delta`. The image is then converted back to RGB.

  `delta` must be in the interval `[-1, 1]`.

  Args:
    image: RGB image or images. Size of the last dimension must be 3.
    delta: float. How much to add to the hue channel.
    name: A name for this operation (optional).

  Returns:
    Adjusted image(s), same shape and DType as `image`.
  """
  with ops.name_scope(name, 'adjust_hue', [image]) as name:
    image = ops.convert_to_tensor(image, name='image')
    # Remember original dtype so we can convert back if needed.
    orig_dtype = image.dtype
    flt_image = convert_image_dtype(image, dtypes.float32)
    # TODO(zhengxq): we will switch to the fused version after we add a GPU
    # kernel for that.
    fused = os.environ.get('TF_ADJUST_HUE_FUSED', '')
    fused = fused.lower() in ('true', 't', '1')
    if not fused:
      hsv = gen_image_ops.rgb_to_hsv(flt_image)
      hue = array_ops.slice(hsv, [0, 0, 0], [-1, -1, 1])
      saturation = array_ops.slice(hsv, [0, 0, 1], [-1, -1, 1])
      value = array_ops.slice(hsv, [0, 0, 2], [-1, -1, 1])
      # Adding (delta + 1.) keeps the mod argument non-negative for delta in
      # [-1, 1] (hue is normalized to [0, 1)); mod 1 then wraps the result
      # back into [0, 1). (A previous comment mentioned "adding 2*pi", which
      # was stale: the hue channel here is in fractional units, not radians.)
      hue = math_ops.mod(hue + (delta + 1.), 1.)
      hsv_altered = array_ops.concat_v2([hue, saturation, value], 2)
      rgb_altered = gen_image_ops.hsv_to_rgb(hsv_altered)
    else:
      rgb_altered = gen_image_ops.adjust_hue(flt_image, delta)
    return convert_image_dtype(rgb_altered, orig_dtype)