This page collects typical usage examples of the Python tensorflow.mod method. If you are wondering what tensorflow.mod does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore further usage examples of the tensorflow module it belongs to.
The following presents 15 code examples of tensorflow.mod, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
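Before the examples, a quick refresher on the semantics: tf.mod(x, y) returns the element-wise remainder of division and follows Python's flooring-division convention, so the result takes the sign of the divisor. A minimal sketch, assuming a TensorFlow 1.x environment (in 2.x the op lives at tf.math.mod):

import tensorflow as tf

x = tf.constant([7, -7], dtype=tf.int32)
y = tf.constant([3, 3], dtype=tf.int32)
with tf.Session() as sess:
    print(sess.run(tf.mod(x, y)))  # [1 2] -- the result takes the sign of the divisor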
Example 1: noise_from_step_num
# Required import: import tensorflow [as alias]
# Or: from tensorflow import mod [as alias]
def noise_from_step_num():
  """Quantization noise equal to (phi * (step_num + 1)) mod 1.0.

  Not using random_uniform here due to a problem on TPU in that random seeds
  are not respected, which may cause the parameters on different replicas
  to go out-of-sync.

  Returns:
    a float32 scalar
  """
  step = tf.to_int32(tf.train.get_or_create_global_step()) + 1
  phi = ((5 ** 0.5) - 1) / 2
  # Naive computation tf.mod(phi * step, 1.0) in float32 would be disastrous
  # due to loss of precision when the step number gets large.
  # Computation in doubles does not work on TPU, so we use this complicated
  # alternative computation which does not suffer from these roundoff errors.
  ret = 0.0
  for i in range(30):
    ret += (((phi * (2 ** i)) % 1.0)  # double-precision computation in python
            * tf.to_float(tf.mod(step // (2 ** i), 2)))
  return tf.mod(ret, 1.0)
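The loop decomposes step into its binary digits so that each coefficient (phi * 2**i) mod 1.0 can be precomputed in Python's double precision; only the cheap final sum happens in float32. A quick NumPy check of the underlying identity (a sketch, not part of the original source):

import numpy as np

phi = (5 ** 0.5 - 1) / 2
step = 123456789  # any step number that fits in 30 bits

ret = sum(((phi * (1 << i)) % 1.0) * ((step >> i) & 1) for i in range(30))
assert np.isclose(ret % 1.0, (phi * step) % 1.0)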
Example 2: _finish
# Required import: import tensorflow [as alias]
# Or: from tensorflow import mod [as alias]
def _finish(self, update_ops, name_scope):
  """Updates the beta_power variables every n batches and increments the counter."""
  iter_ = self._get_iter_variable()
  beta1_power, beta2_power = self._get_beta_accumulators()
  with tf.control_dependencies(update_ops):
    with tf.colocate_with(iter_):

      def update_beta_op():
        update_beta1 = beta1_power.assign(
            beta1_power * self._beta1_t,
            use_locking=self._use_locking)
        update_beta2 = beta2_power.assign(
            beta2_power * self._beta2_t,
            use_locking=self._use_locking)
        return tf.group(update_beta1, update_beta2)

      maybe_update_beta = tf.cond(
          tf.equal(iter_, 0), update_beta_op, tf.no_op)
      with tf.control_dependencies([maybe_update_beta]):
        update_iter = iter_.assign(tf.mod(iter_ + 1, self._n_t),
                                   use_locking=self._use_locking)
  return tf.group(
      *update_ops + [update_iter, maybe_update_beta], name=name_scope)
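Here tf.mod(iter_ + 1, self._n_t) keeps the batch counter cycling through 0 .. n-1, so the beta accumulators are refreshed only once every n steps. The same cycle in plain Python (hypothetical n):

n = 4
iter_ = 0
for step in range(10):
    if iter_ == 0:
        pass  # the beta accumulators would be updated here
    iter_ = (iter_ + 1) % n  # equivalent of tf.mod(iter_ + 1, n)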
Example 3: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import mod [as alias]
def __init__(self, position_size, hparams=None):
    EmbedderBase.__init__(self, hparams=hparams)

    dim = self._hparams.dim
    num_timescales = dim // 2
    min_timescale = self._hparams.min_timescale
    max_timescale = self._hparams.max_timescale

    positions = tf.to_float(tf.range(position_size, dtype=tf.int32))
    log_timescale_increment = (
        math.log(float(max_timescale) / float(min_timescale)) /
        (tf.to_float(num_timescales) - 1))
    inv_timescales = min_timescale * tf.exp(
        tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
    scaled_time = tf.expand_dims(positions, 1) \
        * tf.expand_dims(inv_timescales, 0)
    signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
    signal = tf.pad(signal, [[0, 0], [0, tf.mod(dim, 2)]])
    self.signal = signal
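Examples 3, 4, 5, 11, and 15 all build the same sinusoidal position encoding from the Transformer paper; tf.mod(dim, 2) pads one zero column so the output width also matches an odd dim. A self-contained NumPy sketch of the computation:

import math
import numpy as np

def position_encoding_np(position_size, dim, min_timescale=1.0, max_timescale=1.0e4):
    position = np.arange(position_size, dtype=np.float32)
    num_timescales = dim // 2
    log_timescale_increment = (
        math.log(max_timescale / min_timescale) / (num_timescales - 1))
    inv_timescales = min_timescale * np.exp(
        np.arange(num_timescales, dtype=np.float32) * -log_timescale_increment)
    scaled_time = position[:, None] * inv_timescales[None, :]
    signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)
    # NumPy analogue of tf.pad(signal, [[0, 0], [0, tf.mod(dim, 2)]]).
    return np.pad(signal, [(0, 0), (0, dim % 2)])

print(position_encoding_np(5, 7).shape)  # (5, 7): one zero column pads the odd dim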
Example 4: add_timing_signal
# Required import: import tensorflow [as alias]
# Or: from tensorflow import mod [as alias]
def add_timing_signal(x, scope='', min_timescale=1.0, max_timescale=1.0e4):
    with tf.name_scope(scope, values=[x]):
        length = tf.shape(x)[1]
        channels = tf.shape(x)[2]
        position = tf.to_float(tf.range(length))
        num_timescales = channels // 2
        log_timescale_increment = (
            math.log(float(max_timescale) / float(min_timescale)) /
            (tf.to_float(num_timescales) - 1)
        )
        inv_timescales = min_timescale * tf.exp(
            tf.to_float(tf.range(num_timescales)) * -log_timescale_increment
        )
        scaled_time = (tf.expand_dims(position, 1) *
                       tf.expand_dims(inv_timescales, 0))
        signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
        signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]])
        signal = tf.reshape(signal, [1, length, channels])
        return x + signal
Example 5: _position_encoding
# Required import: import tensorflow [as alias]
# Or: from tensorflow import mod [as alias]
def _position_encoding(position_size, dim,
                       min_timescale=1.0,
                       max_timescale=1.0e4):
    position = tf.to_float(tf.range(position_size))
    num_timescales = dim // 2
    log_timescale_increment = (
        math.log(float(max_timescale) / float(min_timescale)) /
        (tf.to_float(num_timescales) - 1))
    inv_timescales = min_timescale * tf.exp(
        tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
    scaled_time = tf.expand_dims(position, 1) \
        * tf.expand_dims(inv_timescales, 0)
    signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
    signal = tf.pad(signal, [[0, 0], [0, tf.mod(dim, 2)]])
    signal = tf.reshape(signal, [1, position_size, dim])
    return signal
Example 6: tf_angle2class
# Required import: import tensorflow [as alias]
# Or: from tensorflow import mod [as alias]
def tf_angle2class(angle):
    '''Convert continuous angle to discrete class and residual.

    Input:
        angle: rad scalar, from 0-2pi (or -pi~pi), class center at
            0, 1*(2pi/N), 2*(2pi/N) ... (N-1)*(2pi/N), where N
            (the number of classes) is cfg.model.angles.num_bins
    Output:
        class_id: int, among 0, 1, ..., N-1
        residual_angle: float, a number such that
            class_id*(2pi/N) + residual_angle = angle
    '''
    twopi = tf.constant(2.0 * np.pi)
    angle = tf.mod(angle, twopi)
    angle_per_class = twopi / tf.to_float(cfg.model.angles.num_bins)
    shifted_angle = tf.mod(angle + angle_per_class / 2.0, twopi)
    class_id = tf.to_int32(shifted_angle / angle_per_class)
    residual_angle = shifted_angle - (
        tf.to_float(class_id) * angle_per_class + angle_per_class / 2.0)
    return class_id[:, 0], residual_angle
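Shifting by half a bin before the second tf.mod centers each class on a multiple of 2pi/N. A NumPy sketch with a worked value, assuming N = 12 bins (30 degrees each):

import numpy as np

def angle2class_np(angle, num_bins=12):
    twopi = 2.0 * np.pi
    angle = angle % twopi
    angle_per_class = twopi / num_bins
    shifted = (angle + angle_per_class / 2.0) % twopi
    class_id = int(shifted // angle_per_class)
    residual = shifted - (class_id * angle_per_class + angle_per_class / 2.0)
    return class_id, residual

class_id, residual = angle2class_np(np.deg2rad(100.0))
print(class_id, np.rad2deg(residual))  # 3 10.0 -- class 3 is centered at 90 degrees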
Example 7: shard
# Required import: import tensorflow [as alias]
# Or: from tensorflow import mod [as alias]
def shard(ds):
    """Convert a dataset to a sharded one; this has the same effect
    as ds.shard(num_shards, index).
    """
    # TODO: allow dataset shard inside a function or dataset api
    # (e.g., map, parallel_interleave)
    num_shards, shard_id = _get_or_create_num_shards_and_shard_id()

    def filter_fn(elem_index, _):
        mod_result = tf.mod(elem_index, num_shards)
        return tf.equal(mod_result, shard_id)

    f = ds._enumerate().filter(filter_fn)
    assert f._predicate.captured_inputs[0] == num_shards
    assert f._predicate.captured_inputs[1] == shard_id
    tf.add_to_collection(SHARD_FILTER_PRED,
                         f._predicate.name)
    return f.map(lambda _, elem: elem)
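The filter keeps exactly the elements whose index is congruent to shard_id modulo num_shards, which is what Dataset.shard does. The same selection in plain Python (hypothetical values):

num_shards, shard_id = 4, 1
elements = list(range(10))
# Keep element i when i % num_shards == shard_id, mirroring filter_fn above.
print([e for i, e in enumerate(elements) if i % num_shards == shard_id])  # [1, 5, 9]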
Example 8: paired_permutations
# Required import: import tensorflow [as alias]
# Or: from tensorflow import mod [as alias]
def paired_permutations(x):
    # Ensure the vector is flattened
    # x = tf.reshape(x, [-1])
    size = tf.shape(x)[0]
    counter = tf.constant(0)
    m0 = tf.zeros(shape=[0, 2], dtype=x.dtype)
    cond = lambda i, m: i < size * size
    body = lambda i, m: [
        i + 1,
        tf.concat([m, tf.expand_dims(tf.stack([x[tf.to_int32(tf.div(i, size))],
                                               x[tf.mod(i, size)]]),
                                     axis=0)],
                  axis=0, name="concat_rows")]
    _, combined_values = tf.while_loop(
        cond, body,
        loop_vars=[counter, m0],
        shape_invariants=[counter.get_shape(), tf.TensorShape([None, None])])
    return combined_values
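The flattened loop index i enumerates the Cartesian product: i // size picks the first element of each pair and i % size the second, so x = [a, b] yields [[a, a], [a, b], [b, a], [b, b]]. A vectorized NumPy sketch of the same result:

import numpy as np

x = np.array([10, 20, 30])
size = len(x)
i = np.arange(size * size)
# Row i is (x[i // size], x[i % size]), as in the while_loop body above.
pairs = np.stack([x[i // size], x[i % size]], axis=1)
print(pairs[:4])  # [[10 10] [10 20] [10 30] [20 10]] ...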
Example 9: testInt32Basic
# Required import: import tensorflow [as alias]
# Or: from tensorflow import mod [as alias]
def testInt32Basic(self):
    x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int32)
    y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int32)
    self._compareBoth(x, y, np.add, tf.add)
    self._compareBoth(x, y, np.subtract, tf.sub)
    self._compareBoth(x, y, np.multiply, tf.mul)
    self._compareBoth(x, y, np.true_divide, tf.truediv)
    self._compareBoth(x, y, np.floor_divide, tf.floordiv)
    self._compareBoth(x, y, np.mod, tf.mod)
    self._compareBoth(x, y, np.add, _ADD)
    self._compareBoth(x, y, np.subtract, _SUB)
    self._compareBoth(x, y, np.multiply, _MUL)
    self._compareBoth(x, y, np.true_divide, _TRUEDIV)
    self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
    self._compareBoth(x, y, np.mod, _MOD)
    # _compareBoth tests on GPU only for floating point types, so test
    # _MOD for int32 on GPU by calling _compareGpu
    self._compareGpu(x, y, np.mod, _MOD)
Example 10: flow_to_color
# Required import: import tensorflow [as alias]
# Or: from tensorflow import mod [as alias]
def flow_to_color(flow, mask=None, max_flow=None):
    """Converts flow to a 3-channel color image.

    Args:
        flow: tensor of shape [num_batch, height, width, 2].
        mask: flow validity mask of shape [num_batch, height, width, 1].
        max_flow: optional scalar used to normalize flow magnitudes.
    """
    n = 8
    num_batch, height, width, _ = tf.unstack(tf.shape(flow))
    mask = tf.ones([num_batch, height, width, 1]) if mask is None else mask
    flow_u, flow_v = tf.unstack(flow, axis=3)
    if max_flow is not None:
        max_flow = tf.maximum(max_flow, 1)
    else:
        max_flow = tf.reduce_max(tf.abs(flow * mask))
    mag = tf.sqrt(tf.reduce_sum(tf.square(flow), 3))
    angle = atan2(flow_v, flow_u)

    im_h = tf.mod(angle / (2 * np.pi) + 1.0, 1.0)
    im_s = tf.clip_by_value(mag * n / max_flow, 0, 1)
    im_v = tf.clip_by_value(n - im_s, 0, 1)
    im_hsv = tf.stack([im_h, im_s, im_v], 3)
    im = tf.image.hsv_to_rgb(im_hsv)
    return im * mask
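tf.mod(angle / (2 * np.pi) + 1.0, 1.0) wraps the flow angle from (-pi, pi] into the [0, 1) hue range, so negative angles land on the upper half of the hue circle. A one-line check in NumPy:

import numpy as np

angle = -np.pi / 2  # flow pointing straight down
print((angle / (2 * np.pi) + 1.0) % 1.0)  # 0.75 -- negative angle wrapped into [0, 1)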
Example 11: position_signal
# Required import: import tensorflow [as alias]
# Or: from tensorflow import mod [as alias]
def position_signal(dimension: int, length: tf.Tensor) -> tf.Tensor:
    # Code simplified and copied from github.com/tensorflow/tensor2tensor
    # TODO write this down on a piece of paper and understand the code and
    # compare it to the paper
    positions = tf.to_float(tf.range(length))
    num_timescales = dimension // 2

    # see: github.com/tensorflow/tensor2tensor/blob/v1.5.5/tensor2tensor/
    # layers/common_attention.py#L425
    log_timescale_increment = math.log(1.0e4) / (num_timescales - 1)
    inv_timescales = tf.exp(tf.range(num_timescales, dtype=tf.float32)
                            * -log_timescale_increment)

    scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(
        inv_timescales, 0)
    signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
    signal = tf.pad(signal, [[0, 0], [0, tf.mod(dimension, 2)]])
    signal = tf.reshape(signal, [1, length, dimension])
    return signal
Example 12: ternary_encoder
# Required import: import tensorflow [as alias]
# Or: from tensorflow import mod [as alias]
def ternary_encoder(input_data):
    """Encoding and compressing the signs."""
    a = tf.sign(input_data)  # -1, 0, 1
    a = tf.add(a, 1)  # shift -1,0,1 to 0,1,2 (2'b00, 2'b01, 2'b10)
    a = tf.reshape(a, [-1])
    pad_size = 4 - tf.mod(tf.size(a), 4)
    pad = tf.range(0.0, pad_size)
    a = tf.concat([a, pad], 0)
    a_split1, a_split2, a_split3, a_split4 = tf.split(a, 4)  # assume the size is divisible by 4
    # encode 4 grads into 1 byte
    sum_1 = tf.add(a_split1, a_split2 * 4)
    sum_2 = tf.add(a_split3 * 16, a_split4 * 64)
    sum_all = tf.add(sum_1, sum_2)
    encoded = tf.cast(sum_all, tf.uint8)
    return encoded
Example 13: ternary_decoder
# Required import: import tensorflow [as alias]
# Or: from tensorflow import mod [as alias]
def ternary_decoder(encoded_data, scaler, shape):
    """Decoding the signs back to float format."""
    a = tf.cast(encoded_data, tf.int32)
    a_split1 = tf.mod(a, 4)
    a_split2 = tf.to_int32(tf.mod(a / 4, 4))
    a_split3 = tf.to_int32(tf.mod(a / 16, 4))
    a_split4 = tf.to_int32(tf.mod(a / 64, 4))
    a = tf.concat([a_split1, a_split2, a_split3, a_split4], 0)
    real_size = tf.reduce_prod(shape)
    a = tf.to_float(a)
    a = tf.gather(a, tf.range(0, real_size))
    a = tf.reshape(a, shape)
    a = tf.subtract(a, 1)
    decoded = a * scaler
    return decoded
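Each byte packs four base-4 digits (values 0-2 after the +1 shift), and tf.mod(a / 4**k, 4) recovers digit k. A NumPy round-trip sketch of the packing scheme:

import numpy as np

signs = np.array([-1, 0, 1, 1], dtype=np.int32)
digits = signs + 1  # shift to 0, 1, 2
byte = digits[0] + digits[1] * 4 + digits[2] * 16 + digits[3] * 64  # 164

# Digit k is (byte // 4**k) % 4, matching tf.mod(a / 4**k, 4) above.
unpacked = np.array([(byte // 4 ** k) % 4 for k in range(4)]) - 1
assert (unpacked == signs).all()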
Example 14: sample_k_fids_for_pid
# Required import: import tensorflow [as alias]
# Or: from tensorflow import mod [as alias]
def sample_k_fids_for_pid(pid, all_fids, all_pids, batch_k):
    """Given a PID, select K FIDs of that specific PID."""
    possible_fids = tf.boolean_mask(all_fids, tf.equal(all_pids, pid))

    # The following simply uses a subset of K of the possible FIDs
    # if more than, or exactly K are available. Otherwise, we first
    # create a padded list of indices which contain a multiple of the
    # original FID count such that all of them will be sampled equally likely.
    count = tf.shape(possible_fids)[0]
    padded_count = tf.cast(
        tf.ceil(batch_k / tf.cast(count, tf.float32)), tf.int32) * count
    full_range = tf.mod(tf.range(padded_count), count)

    # Sampling is always performed by shuffling and taking the first k.
    shuffled = tf.random_shuffle(full_range)
    selected_fids = tf.gather(possible_fids, shuffled[:batch_k])

    return selected_fids, tf.fill([batch_k], pid)
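tf.mod(tf.range(padded_count), count) tiles the indices 0 .. count-1 until at least batch_k entries exist, so every FID appears equally often before shuffling. For example, with count = 3 and batch_k = 4 (hypothetical values):

import numpy as np

count, batch_k = 3, 4
padded_count = int(np.ceil(batch_k / count)) * count  # 6
print(np.arange(padded_count) % count)  # [0 1 2 0 1 2] -- each index appears twice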
Example 15: __call__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import mod [as alias]
def __call__(self,
             input_data,
             input_mask):
    """call sinusoid position layer"""
    with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE), tf.device(self.device_spec):
        input_shape = tf.shape(input_data)
        length = input_shape[-2]
        channel = input_shape[-1]
        num_time_scale = channel // 2
        position = tf.to_float(tf.range(length))
        log_time_scale = (tf.log(float(self.max_time_scale) / float(self.min_time_scale))
                          / (tf.to_float(num_time_scale) - 1))
        inv_time_scale = float(self.min_time_scale) * tf.exp(
            -1.0 * log_time_scale * tf.to_float(tf.range(num_time_scale)))
        scaled_time = (tf.expand_dims(position, axis=1)
                       * tf.expand_dims(inv_time_scale, axis=0))
        signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
        signal = tf.pad(signal, paddings=[[0, 0], [0, tf.mod(channel, 2)]])
        signal = tf.reshape(signal, shape=[1, length, channel])
        output_signal = input_data + signal
        output_mask = input_mask

        return output_signal, output_mask