This page collects representative code examples of the Python method tensorflow.compat.v1.sign. If you are unsure what v1.sign does, how to call it, or what it looks like in practice, the curated examples below may help. You can also explore the containing module, tensorflow.compat.v1, for related usage.
Twelve code examples of v1.sign are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python samples.
Example 1: _quantize
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sign [as alias]
def _quantize(x, params, randomize=True):
  """Quantize x according to params, optionally randomizing the rounding."""
  if not params.quantize:
    return x
  if not randomize:
    return tf.bitcast(
        tf.cast(x / params.quantization_scale, tf.int16), tf.float16)
  abs_x = tf.abs(x)
  sign_x = tf.sign(x)
  y = abs_x / params.quantization_scale
  y = tf.floor(y + tf.random_uniform(common_layers.shape_list(x)))
  y = tf.minimum(y, tf.int16.max) * sign_x
  q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
  return q
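For context, here is a minimal standalone round-trip sketch of the same stochastic-rounding pattern. The `scale` value and the `_dequantize` helper are illustrative, not part of the original module:

import tensorflow.compat.v1 as tf

def _dequantize(q, scale):
  # Reverse of _quantize: bitcast the float16 carrier back to int16, rescale.
  return tf.cast(tf.bitcast(q, tf.int16), tf.float32) * scale

with tf.Graph().as_default(), tf.Session() as sess:
  scale = 10.0 / tf.int16.max          # assumed quantization_scale
  x = tf.constant([0.3, -1.7, 5.0])
  y = tf.floor(tf.abs(x) / scale + tf.random_uniform(tf.shape(x)))
  y = tf.minimum(y, tf.int16.max) * tf.sign(x)
  q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
  print(sess.run(_dequantize(q, scale)))  # close to [0.3, -1.7, 5.0]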
Example 2: _to_bfloat16_unbiased
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sign [as alias]
def _to_bfloat16_unbiased(x, noise):
  """Convert a float32 to a bfloat16 using randomized roundoff.

  Args:
    x: A float32 Tensor.
    noise: a float32 Tensor with values in [0, 1), broadcastable to tf.shape(x)

  Returns:
    A bfloat16 Tensor.
  """
  x_sign = tf.sign(x)
  # Make sure x is positive. If it is zero, the two candidates are identical.
  x = x * x_sign + 1e-30
  cand1 = tf.to_bfloat16(x)
  cand1_f = tf.to_float(cand1)
  # This relies on the fact that for a positive bfloat16 b, b * 1.005 gives
  # you the next higher bfloat16 and b * 0.995 gives you the next lower one.
  # Both 1.005 and 0.995 are ballpark estimates.
  cand2 = tf.to_bfloat16(
      tf.where(tf.greater(x, cand1_f), cand1_f * 1.005, cand1_f * 0.995))
  ret = _randomized_roundoff_to_bfloat16(x, noise, cand1, cand2)
  return ret * tf.to_bfloat16(x_sign)
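The helper `_randomized_roundoff_to_bfloat16` is not shown on this page. A plausible sketch of what it does, assuming it picks between the two candidates with probability equal to x's fractional position between them, so the rounding is unbiased in expectation:

import tensorflow.compat.v1 as tf

def _randomized_roundoff_to_bfloat16(x, noise, cand1, cand2):
  """Stochastically round x to cand1 or cand2 (sketch, not the original)."""
  cand1_f = tf.to_float(cand1)
  cand2_f = tf.to_float(cand2)
  # Fractional position of x between the two candidates, in [0, 1].
  fpart = (x - cand1_f) / (cand2_f - cand1_f)
  # Choose the farther candidate with probability equal to that fraction.
  return tf.where(tf.greater(fpart, noise), cand2, cand1)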
Example 3: mu_law
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sign [as alias]
def mu_law(x, mu=255, int8=False):
  """A TF implementation of Mu-Law encoding.

  Args:
    x: The audio samples to encode.
    mu: The Mu to use in our Mu-Law.
    int8: Whether to cast the output to int8.

  Returns:
    out: The Mu-Law encoded data (int8 if `int8` is True, float otherwise).
  """
  out = tf.sign(x) * tf.log(1 + mu * tf.abs(x)) / np.log(1 + mu)
  out = tf.floor(out * 128)
  if int8:
    out = tf.cast(out, tf.int8)
  return out
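A quick usage sketch (assumed; run inside a tf.compat.v1 session, input samples in (-1, 1)):

import numpy as np
import tensorflow.compat.v1 as tf

with tf.Graph().as_default(), tf.Session() as sess:
  samples = tf.constant(np.linspace(-0.9, 0.9, 5), dtype=tf.float32)
  encoded = mu_law(samples, mu=255, int8=True)
  print(sess.run(encoded))  # int8 codes, roughly in [-128, 127]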
Example 4: hamming_loss
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sign [as alias]
def hamming_loss(preds, targets, sign=False):
  """Implements Hamming loss.

  Args:
    preds: Tensor of predicted values.
    targets: Tensor of target values.
    sign (bool): Set to True if targets are in {-1, 1}, to take the sign of
      preds before calculating the loss.

  Returns:
    A tf.metrics tuple containing the proportion of incorrect predictions and
    an update op for the metric.
  """
  if sign:
    preds = tf.sign(preds)
  equal = tf.equal(preds, tf.cast(targets, preds.dtype))
  proportion_correct, update_op = tf.metrics.mean(tf.cast(equal, tf.float32))
  return 1 - proportion_correct, update_op
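Because this returns a tf.metrics pair, the local metric variables must be initialized and the update op run before reading the loss. A hedged usage sketch:

import tensorflow.compat.v1 as tf

with tf.Graph().as_default(), tf.Session() as sess:
  preds = tf.constant([0.7, -0.2, 1.3])
  targets = tf.constant([1.0, 1.0, 1.0])
  loss, update_op = hamming_loss(preds, targets, sign=True)
  sess.run(tf.local_variables_initializer())  # tf.metrics state is local
  sess.run(update_op)
  print(sess.run(loss))  # 0.333...: one of three signs disagrees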
Example 5: _get_cubic_root
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sign [as alias]
def _get_cubic_root(self):
  """Get the cubic root."""
  # We have the equation x^2 D^2 + (1 - x)^4 * C / h_min^2
  # where x = sqrt(mu).
  # We substitute x, which is sqrt(mu), with x = y + 1.
  # It gives y^3 + py = q
  # where p = (D^2 h_min^2) / (2 * C) and q = -p.
  # We use Vieta's substitution to compute the root.
  # There is only one real solution y (which is in [0, 1]).
  # http://mathworld.wolfram.com/VietasSubstitution.html
  assert_array = [
      tf.Assert(
          tf.logical_not(tf.is_nan(self._dist_to_opt_avg)),
          [self._dist_to_opt_avg]),
      tf.Assert(
          tf.logical_not(tf.is_nan(self._h_min)),
          [self._h_min]),
      tf.Assert(
          tf.logical_not(tf.is_nan(self._grad_var)),
          [self._grad_var]),
      tf.Assert(
          tf.logical_not(tf.is_inf(self._dist_to_opt_avg)),
          [self._dist_to_opt_avg]),
      tf.Assert(
          tf.logical_not(tf.is_inf(self._h_min)),
          [self._h_min]),
      tf.Assert(
          tf.logical_not(tf.is_inf(self._grad_var)),
          [self._grad_var])
  ]
  with tf.control_dependencies(assert_array):
    p = self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var
    w3 = (-tf.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
    w = tf.sign(w3) * tf.pow(tf.abs(w3), 1.0 / 3.0)
    y = w - p / 3.0 / w
    x = y + 1
    return x
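The closed form can be sanity-checked in numpy: with the minus branch of the square root, y = w - p/(3w) solves the depressed cubic y^3 + p*y = -p, and x = y + 1 lands in [0, 1]. The value of p below is illustrative:

import numpy as np

p = 0.37  # assumed positive value of (D^2 h_min^2) / (2 C)
w3 = (-np.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
w = np.sign(w3) * np.abs(w3)**(1.0 / 3.0)
y = w - p / 3.0 / w
print(np.isclose(y**3 + p * y, -p))  # True: y solves the cubic
print(0.0 <= y + 1.0 <= 1.0)         # True: x = sqrt(mu) is in [0, 1]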
Example 6: encode
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sign [as alias]
def encode(self, x, noise):
  x = tf.to_float(x)
  # We can't use tf.pow(..., 8.0) because of a high-error approximation
  # on TPU. Instead we square three times.
  x = tf.sign(x) * tf.square(tf.square(tf.square(tf.abs(x) * 128.0)))
  x = _to_bfloat16_unbiased(x, noise)
  return x
Example 7: decode
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sign [as alias]
def decode(self, x):
  x = tf.to_float(x)
  # We can't use tf.pow(..., 0.125) because of a high-error approximation
  # on TPU. Instead we take the square root three times.
  return tf.sign(x) * (tf.sqrt(tf.sqrt(tf.sqrt(tf.abs(x)))) / 128.0)
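encode and decode are inverses up to the randomized bfloat16 rounding; the eighth-power / eighth-root pair can be checked standalone (a sketch that bypasses `_to_bfloat16_unbiased`):

import tensorflow.compat.v1 as tf

with tf.Graph().as_default(), tf.Session() as sess:
  x = tf.constant([-0.5, 0.25, 0.0])
  enc = tf.sign(x) * tf.square(tf.square(tf.square(tf.abs(x) * 128.0)))
  dec = tf.sign(enc) * (tf.sqrt(tf.sqrt(tf.sqrt(tf.abs(enc)))) / 128.0)
  print(sess.run(dec))  # recovers [-0.5, 0.25, 0.0]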
Example 8: inv_mu_law
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sign [as alias]
def inv_mu_law(x, mu=255):
  """A TF implementation of inverse Mu-Law.

  Args:
    x: The Mu-Law samples to decode.
    mu: The Mu we used to encode these samples.

  Returns:
    out: The decoded data.
  """
  x = tf.cast(x, tf.float32)
  out = (x + 0.5) * 2. / (mu + 1)
  out = tf.sign(out) / mu * ((1 + mu)**tf.abs(out) - 1)
  out = tf.where(tf.equal(x, 0), x, out)
  return out
Example 9: inv_mu_law_numpy
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sign [as alias]
def inv_mu_law_numpy(x, mu=255.0):
  """A numpy implementation of inverse Mu-Law.

  Args:
    x: The Mu-Law samples to decode.
    mu: The Mu we used to encode these samples.

  Returns:
    out: The decoded data.
  """
  x = np.array(x).astype(np.float32)
  out = (x + 0.5) * 2. / (mu + 1)
  out = np.sign(out) / mu * ((1 + mu)**np.abs(out) - 1)
  out = np.where(np.equal(x, 0), x, out)
  return out
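Round-tripping through the numpy pair recovers the signal up to quantization error; `mu_law_numpy` below is an assumed numpy mirror of the TF `mu_law` in Example 3, not part of the original module:

import numpy as np

def mu_law_numpy(x, mu=255.0):
  # Assumed numpy mirror of the TF mu_law above.
  out = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
  return np.floor(out * 128)

x = np.array([-0.9, -0.1, 0.0, 0.3, 0.8], dtype=np.float32)
recovered = inv_mu_law_numpy(mu_law_numpy(x))
print(np.max(np.abs(recovered - x)))  # small quantization error (< 1e-2)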
Example 10: minimize
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sign [as alias]
def minimize(self, loss, x, optim_state):
  """Refer to parent class documentation."""
  lr = self._lr_fn(optim_state.iteration)
  grads = self.gradients(loss, x)
  if self._fgsm:
    grads = [tf.sign(g) for g in grads]
  new_x = [None] * len(x)
  for i in range(len(x)):
    new_x[i] = x[i] - lr * grads[i]
  new_optim_state = self._State(optim_state.iteration + 1)
  return new_x, new_optim_state
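With `_fgsm` set, the step direction depends only on each gradient's sign, so every coordinate moves by exactly `lr`. A standalone illustration of one such signed-gradient descent step (values assumed):

import tensorflow.compat.v1 as tf

with tf.Graph().as_default(), tf.Session() as sess:
  x = tf.constant([2.0, -3.0])
  loss = tf.reduce_sum(tf.square(x))
  (grad,) = tf.gradients(loss, [x])
  new_x = x - 0.1 * tf.sign(grad)  # step magnitude is lr, not lr * |grad|
  print(sess.run(new_x))           # [1.9, -2.9]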
Example 11: test_forward_sign
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sign [as alias]
def test_forward_sign():
    """test Sign"""
    np_data = np.random.uniform(-10, 10, size=(5, 7, 11)).astype(np.float32)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
        tf.sign(in_data, name="sign")
        compare_tf_with_tvm([np_data], ['in_data:0'], 'sign:0')
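The same graph can also be checked against numpy with a plain TF session, independent of TVM (sketch):

import numpy as np
import tensorflow.compat.v1 as tf

with tf.Graph().as_default(), tf.Session() as sess:
  in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
  out = tf.sign(in_data)
  np_data = np.random.uniform(-10, 10, size=(5, 7, 11)).astype(np.float32)
  print(np.array_equal(sess.run(out, {in_data: np_data}), np.sign(np_data)))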
Example 12: optimize_feat_grid
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sign [as alias]
def optimize_feat_grid(self, point_coords, point_vals, steps=10000,
                       print_every_n_steps=1000):
  """Optimize feature grid.

  Args:
    point_coords: [npts, 3] point coordinates.
    point_vals: [npts, 1] point values.
    steps: int, number of steps for gradient descent.
    print_every_n_steps: int, print every n steps.
  """
  print_every_n_steps = int(print_every_n_steps)
  point_coords = point_coords.copy()
  point_vals = np.sign(point_vals.copy())
  if point_coords.ndim == 3:
    point_coords = point_coords[0]
  if point_vals.ndim == 3:
    point_vals = point_vals[0]
  elif point_vals.ndim == 1:
    point_vals = point_vals[:, np.newaxis]
  # Clip coordinates to the grid bounds.
  point_coords = np.clip(point_coords, self.xmin, self.xmax)
  # Shuffle the points.
  seq = np.random.permutation(point_coords.shape[0])
  point_coords = point_coords[seq]
  point_vals = point_vals[seq]
  point_coords = point_coords[np.newaxis]
  point_vals = point_vals[np.newaxis]

  def random_point_sample():
    # Sample a random contiguous window of self.npts points.
    sid = np.random.choice(point_coords.shape[1] - self.npts + 1)
    eid = sid + self.npts
    return point_coords[:, sid:eid], point_vals[:, sid:eid]

  with self.graph.as_default():
    for i in range(steps):
      pc, pv = random_point_sample()
      accu_, loss_, _ = self.sess.run(
          [self.accu, self.loss, self.train_op],
          feed_dict={self.point_coords_ph: pc,
                     self.point_values_ph: pv})
      if i % print_every_n_steps == 0:
        print('Step [{:6d}] Accu: {:5.4f} Loss: {:5.4f}'.format(
            i, accu_, loss_))