This article collects typical usage examples of the tensorflow.pow method in Python. If you have been wondering what exactly tensorflow.pow does and how to use it, the curated code examples below may help. You can also explore further usage examples from the tensorflow module itself.
The following presents 15 code examples of tensorflow.pow, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
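Before the examples, a minimal self-contained sketch of what tf.pow computes: an element-wise power x ** y with the usual broadcasting rules (the tensors below are purely illustrative, and the printed values assume eager execution, e.g. TensorFlow 2.x):

import tensorflow as tf

x = tf.constant([2.0, 3.0])
print(tf.pow(x, 3.0))                      # element-wise power: [8.0, 27.0]
print(tf.pow(x, tf.constant([3.0, 2.0])))  # per-element exponents: [8.0, 9.0]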
Example 1: _apply_gradients
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pow [as alias]
def _apply_gradients(self, grads, x, optim_state):
"""Refer to parent class documentation."""
new_x = [None] * len(x)
new_optim_state = {
"t": optim_state["t"] + 1.,
"m": [None] * len(x),
"u": [None] * len(x)
}
t = new_optim_state["t"]
  for i in range(len(x)):  # xrange in the original Python 2 source
g = grads[i]
m_old = optim_state["m"][i]
u_old = optim_state["u"][i]
new_optim_state["m"][i] = (
self._beta1 * m_old + (1. - self._beta1) * g)
new_optim_state["u"][i] = (
self._beta2 * u_old + (1. - self._beta2) * g * g)
m_hat = new_optim_state["m"][i] / (1. - tf.pow(self._beta1, t))
u_hat = new_optim_state["u"][i] / (1. - tf.pow(self._beta2, t))
new_x[i] = (
x[i] - self._lr * m_hat / (tf.sqrt(u_hat) + self._epsilon))
return new_x, new_optim_state
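The 1. - tf.pow(beta, t) factors are the standard Adam bias corrections. The parent class defines the real contract for optim_state, but a plausible initialization (an assumption, not shown in the source) would be:

# Hypothetical initial state: step count zero, zero-filled moment
# accumulators shaped like each tensor in x.
optim_state = {
    "t": 0.,
    "m": [tf.zeros_like(xi) for xi in x],
    "u": [tf.zeros_like(xi) for xi in x],
}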
Example 2: scaled_dot_product_attention_simple
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pow [as alias]
def scaled_dot_product_attention_simple(q, k, v, bias, name=None):
"""Scaled dot-product attention. One head. One spatial dimension.
Args:
q: a Tensor with shape [batch, length_q, depth_k]
k: a Tensor with shape [batch, length_kv, depth_k]
v: a Tensor with shape [batch, length_kv, depth_v]
bias: optional Tensor broadcastable to [batch, length_q, length_kv]
name: an optional string
Returns:
A Tensor.
"""
with tf.variable_scope(
name, default_name="scaled_dot_product_attention_simple"):
scalar = tf.rsqrt(tf.to_float(common_layers.shape_list(q)[2]))
logits = tf.matmul(q * scalar, k, transpose_b=True)
if bias is not None:
logits += bias
weights = tf.nn.softmax(logits, name="attention_weights")
if common_layers.should_generate_summaries():
tf.summary.image(
"attention", tf.expand_dims(tf.pow(weights, 0.2), 3), max_outputs=1)
return tf.matmul(weights, v)
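The tf.pow(weights, 0.2) above only flattens the contrast of the attention map for the image summary; it does not affect the returned values. A quick shape smoke test (tensor shapes are illustrative):

q = tf.random_normal([2, 5, 8])    # [batch, length_q, depth_k]
k = tf.random_normal([2, 7, 8])    # [batch, length_kv, depth_k]
v = tf.random_normal([2, 7, 16])   # [batch, length_kv, depth_v]
out = scaled_dot_product_attention_simple(q, k, v, bias=None)
# out has shape [2, 5, 16]: one weighted average of v per query position.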
Example 3: locationPE
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pow [as alias]
def locationPE(h, w, dim, outDim = -1, addBias = True):
x = tf.expand_dims(tf.to_float(tf.linspace(-config.locationBias, config.locationBias, w)), axis = -1)
y = tf.expand_dims(tf.to_float(tf.linspace(-config.locationBias, config.locationBias, h)), axis = -1)
i = tf.expand_dims(tf.to_float(tf.range(dim)), axis = 0)
peSinX = tf.sin(x / (tf.pow(10000.0, i / dim)))
peCosX = tf.cos(x / (tf.pow(10000.0, i / dim)))
peSinY = tf.sin(y / (tf.pow(10000.0, i / dim)))
peCosY = tf.cos(y / (tf.pow(10000.0, i / dim)))
peSinX = tf.tile(tf.expand_dims(peSinX, axis = 0), [h, 1, 1])
peCosX = tf.tile(tf.expand_dims(peCosX, axis = 0), [h, 1, 1])
peSinY = tf.tile(tf.expand_dims(peSinY, axis = 1), [1, w, 1])
peCosY = tf.tile(tf.expand_dims(peCosY, axis = 1), [1, w, 1])
grid = tf.concat([peSinX, peCosX, peSinY, peCosY], axis = -1)
dim *= 4
if outDim > 0:
grid = linear(grid, dim, outDim, addBias = addBias, name = "locationPE")
dim = outDim
return grid, dim
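tf.pow(10000.0, i / dim) is the Transformer-style frequency schedule: channel i oscillates with wavelength 10000^(i/dim). A stripped-down 1-D version of the same idea (standalone, with made-up sizes):

pos = tf.expand_dims(tf.to_float(tf.range(10)), axis=-1)    # [10, 1] positions
i = tf.expand_dims(tf.to_float(tf.range(16)), axis=0)       # [1, 16] channels
angles = pos / tf.pow(10000.0, i / 16.0)                    # [10, 16]
pe = tf.concat([tf.sin(angles), tf.cos(angles)], axis=-1)   # [10, 32] encoding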
Example 4: spread_loss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pow [as alias]
def spread_loss(labels, logits, margin, regularizer=None):
"""
Args:
labels: [batch_size, num_label].
logits: [batch_size, num_label].
margin: Integer or 1-D Tensor.
        regularizer: if not None, add the graph's collected regularization losses to the loss.
Returns:
loss: Spread loss.
"""
a_target = cl.reduce_sum(labels * logits, axis=1, keepdims=True)
dist = (1 - labels) * margin - (a_target - logits)
dist = tf.pow(tf.maximum(0., dist), 2)
loss = tf.reduce_mean(tf.reduce_sum(dist, axis=-1))
    if regularizer is not None:
        # Replace the flag with the graph's collected regularization losses.
        regularizer = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        loss += tf.reduce_mean(regularizer)
    return loss
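Note that cl is assumed here to be the capslayer helper module from the source repository, whose reduce_sum mirrors tf.reduce_sum. A hypothetical call with one-hot labels:

labels = tf.one_hot([2, 0], depth=3)    # [batch_size, num_label]
logits = tf.random_uniform([2, 3])      # [batch_size, num_label]
loss = spread_loss(labels, logits, margin=0.2)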
Example 5: margin_loss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pow [as alias]
def margin_loss(labels,
logits,
upper_margin=0.9,
bottom_margin=0.1,
downweight=0.5):
"""
Args:
labels: [batch_size, num_label].
logits: [batch_size, num_label].
"""
    positive_selector = tf.cast(tf.less(logits, upper_margin), tf.float32)
    positive_cost = positive_selector * labels * tf.pow(logits - upper_margin, 2)
    negative_selector = tf.cast(tf.greater(logits, bottom_margin), tf.float32)
    negative_cost = negative_selector * (1 - labels) * tf.pow(logits - bottom_margin, 2)
loss = 0.5 * positive_cost + 0.5 * downweight * negative_cost
return tf.reduce_mean(tf.reduce_sum(loss, axis=-1))
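The selector trick is equivalent to the more common hinge formulation: the cast of tf.less zeroes out every position where logits >= upper_margin, so positive_cost equals labels * max(0, upper_margin - logits)^2, and likewise for the negative term. Written that way (with names bound as in the function above):

positive_cost = labels * tf.pow(tf.maximum(0., upper_margin - logits), 2)
negative_cost = (1 - labels) * tf.pow(tf.maximum(0., logits - bottom_margin), 2)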
Example 6: focal_loss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pow [as alias]
def focal_loss(pred, gt):
    ''' Modified focal loss, exactly the same as the CornerNet version.
    Runs faster but costs a little more memory.
    Arguments:
        pred (batch, h, w, c)
        gt (batch, h, w, c)
    '''
pos_inds = tf.cast(tf.equal(gt,1.0),dtype=tf.float32)
neg_inds = 1.0-pos_inds
neg_weights = tf.pow(1.0 - gt, 4.0)
pred=tf.clip_by_value(pred, 1e-6, 1.0 - 1e-6)
pos_loss = tf.log(pred) * tf.pow(1.0 - pred, 2.0) * pos_inds
neg_loss = tf.log(1.0 - pred) * tf.pow(pred, 2.0) * neg_weights * neg_inds
num_pos = tf.reduce_sum(pos_inds)
pos_loss = tf.reduce_sum(pos_loss)
neg_loss = tf.reduce_sum(neg_loss)
    loss = - (pos_loss + neg_loss) / num_pos
    return loss
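One caveat worth noting (an addition, not in the original): num_pos can be zero for a batch with no positive locations, which makes the division produce NaN. A defensive variant of the final division guards the denominator:

loss = - (pos_loss + neg_loss) / tf.maximum(num_pos, 1.0)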
Example 7: tune
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pow [as alias]
def tune(self, acceptance_rate, fresh_start):
def adapt_stepsize():
new_step = tf.assign(self.step, (1 - fresh_start) * self.step + 1)
rate1 = 1.0 / (new_step + self.t0)
new_h_bar = tf.assign(
self.h_bar, (1 - fresh_start) * (1 - rate1) * self.h_bar +
rate1 * (self.delta - acceptance_rate))
log_epsilon = self.mu - tf.sqrt(new_step) / self.gamma * new_h_bar
rate = tf.pow(new_step, -self.kappa)
new_log_epsilon_bar = tf.assign(
self.log_epsilon_bar,
rate * log_epsilon + (1 - fresh_start) * (1 - rate) *
self.log_epsilon_bar)
with tf.control_dependencies([new_log_epsilon_bar]):
new_log_epsilon = tf.identity(log_epsilon)
return tf.exp(new_log_epsilon)
c = tf.cond(self.adapt_step_size,
adapt_stepsize,
lambda: tf.exp(self.log_epsilon_bar))
return c
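This is the dual-averaging step-size adaptation from the NUTS paper (Hoffman & Gelman, 2014): tf.pow(new_step, -self.kappa) is the decaying averaging weight t^(-kappa) that blends each new log step size into the running estimate self.log_epsilon_bar.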
Example 8: update
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pow [as alias]
def update(self, x):
# x: (chain_dims data_dims)
new_t = tf.assign(self.t, self.t + 1)
weight = (1 - self.decay) / (1 - tf.pow(self.decay, new_t))
# incr: (chain_dims data_dims)
incr = [weight * (q - mean) for q, mean in zip(x, self.mean)]
# mean: (1,...,1 data_dims)
update_mean = [mean.assign_add(
tf.reduce_mean(i, axis=self.chain_axes, keepdims=True))
for mean, i in zip(self.mean, incr)]
# var: (1,...,1 data_dims)
new_var = [
(1 - weight) * var +
tf.reduce_mean(i * (q - mean), axis=self.chain_axes,
keepdims=True)
for var, i, q, mean in zip(self.var, incr, x, update_mean)]
update_var = [tf.assign(var, n_var)
for var, n_var in zip(self.var, new_var)]
return update_var
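The coefficient (1 - decay) / (1 - tf.pow(decay, t)) is a bias-corrected exponential-moving-average weight: at t = 1 it equals exactly 1, so the first update replaces the zero-initialized mean rather than shrinking toward it. A scalar illustration (standalone, hypothetical values):

decay = 0.99
t = tf.constant(1.0)
weight = (1 - decay) / (1 - tf.pow(decay, t))   # == 1.0 at the first step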
Example 9: call
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pow [as alias]
def call(self, inputs):
input_shape = tf.shape(inputs)
batch_size, seq_len = input_shape[0], input_shape[1]
pos_range = tf.range(-seq_len//2, seq_len//2)
if self.transform is None:
pos_feature = pos_range
elif self.transform == 'abs':
pos_feature = tf.math.abs(pos_range)
elif self.transform == 'reversed':
pos_feature = pos_range[::-1]
else:
raise ValueError('Unknown ConcatPosition transform.')
if self.power != 1:
pos_feature = tf.pow(pos_feature, self.power)
pos_feature = tf.expand_dims(pos_feature, axis=0)
pos_feature = tf.expand_dims(pos_feature, axis=-1)
pos_feature = tf.tile(pos_feature, [batch_size, 1, 1])
pos_feature = tf.dtypes.cast(pos_feature, dtype=tf.float32)
return tf.concat([pos_feature, inputs], axis=-1)
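A hypothetical usage sketch (the constructor arguments are assumed from the attributes used above, not taken from the source): prepend a squared absolute-position channel to a [batch, seq_len, features] input.

layer = ConcatPosition(transform='abs', power=2)   # hypothetical signature
augmented = layer(tf.zeros([4, 128, 64]))          # -> shape [4, 128, 65]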
Example 10: _smooth_l1_loss_base
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pow [as alias]
def _smooth_l1_loss_base(bbox_pred, bbox_targets, sigma=1.0):
    '''
    :param bbox_pred: [-1, 4] in RPN. [-1, cls_num+1, 4] or [-1, cls_num+1, 5] in Fast R-CNN
    :param bbox_targets: shape is the same as bbox_pred
    :param sigma: controls where the loss switches from the L2 to the L1 region
    :return: element-wise smooth L1 loss
    '''
sigma_2 = sigma**2
box_diff = bbox_pred - bbox_targets
abs_box_diff = tf.abs(box_diff)
smoothL1_sign = tf.stop_gradient(
tf.to_float(tf.less(abs_box_diff, 1. / sigma_2)))
loss_box = tf.pow(box_diff, 2) * (sigma_2 / 2.0) * smoothL1_sign \
+ (abs_box_diff - (0.5 / sigma_2)) * (1.0 - smoothL1_sign)
return loss_box
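In other words, per element d = bbox_pred - bbox_targets, the loss is 0.5 * sigma^2 * d^2 when |d| < 1 / sigma^2 and |d| - 0.5 / sigma^2 otherwise; tf.stop_gradient keeps the region indicator smoothL1_sign out of backpropagation.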
Example 11: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pow [as alias]
def __init__(self, n_features, lenscale=None, p=1, variational=False,
learn_lenscale=False):
"""Create an instance of an arc cosine kernel layer."""
# Setup random weights
if variational:
kern = RBFVariational(lenscale=lenscale,
learn_lenscale=learn_lenscale)
else:
kern = RBF(lenscale=lenscale, learn_lenscale=learn_lenscale)
super().__init__(n_features=n_features, kernel=kern)
# Kernel order
assert isinstance(p, int) and p >= 0
if p == 0:
self.pfunc = tf.sign
elif p == 1:
self.pfunc = lambda x: x
else:
self.pfunc = lambda x: tf.pow(x, p)
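The three branches follow the arc-cosine kernel orders: p = 0 yields a step activation (tf.sign), p = 1 the identity, and higher orders raise the random features to the p-th power via tf.pow.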
Example 12: create_tensor
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pow [as alias]
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
inputs = self._get_input_tensors(in_layers)
temp = []
subspaces = []
# creates subspaces the same way it was done in AlphaShare
for input_tensor in inputs:
subspace_size = int(input_tensor.get_shape()[-1].value / 2)
subspaces.append(input_tensor[:, :subspace_size])
subspaces.append(input_tensor[:, subspace_size:])
product = tf.matmul(tf.transpose(subspaces[0]), subspaces[1])
subspaces = []
# calculate squared Frobenius norm
temp.append(tf.reduce_sum(tf.pow(product, 2)))
out_tensor = tf.reduce_sum(temp)
self.out_tensor = out_tensor
return out_tensor
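tf.reduce_sum(tf.pow(product, 2)) is the squared Frobenius norm of the cross-subspace product. An equivalent formulation (an aside, not in the original):

frob_sq = tf.square(tf.norm(product, ord='fro', axis=(0, 1)))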
Example 13: add_loss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pow [as alias]
def add_loss(self, global_step):
'''Adds loss to the model. Sets "loss" field. initialize must have been called.'''
with tf.variable_scope('loss') as scope:
hp = self._hparams
self.mel_loss = tf.reduce_mean(tf.abs(self.mel_targets - self.mel_outputs))
l1 = tf.abs(self.linear_targets - self.linear_outputs)
# Prioritize loss for frequencies under 3000 Hz.
n_priority_freq = int(3000 / (hp.sample_rate * 0.5) * hp.num_freq)
self.linear_loss = 0.5 * tf.reduce_mean(l1) + 0.5 * tf.reduce_mean(l1[:,:,0:n_priority_freq])
self.loss = self.mel_loss + self.linear_loss
if hp.use_vae:
      # KL divergence: -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
self.ki_loss = -0.5 * tf.reduce_sum(1 + self.log_var - tf.pow(self.mu, 2) - tf.exp(self.log_var))
vae_loss_weight = vae_weight(global_step)
self.loss += self.ki_loss * vae_loss_weight
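self.ki_loss is the closed-form KL divergence between the approximate posterior N(mu, sigma^2) and the standard normal prior, the usual VAE regularizer; tf.pow(self.mu, 2) supplies the mu^2 term and tf.exp(self.log_var) recovers sigma^2.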
Example 14: _db_to_amp
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pow [as alias]
def _db_to_amp(x):
return tf.pow(tf.ones(tf.shape(x)) * 10.0, x * 0.05)
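Since tf.pow broadcasts a scalar base, the tf.ones(tf.shape(x)) factor is unnecessary; an equivalent, simpler form (a rewrite, not the original code):

def _db_to_amp_simple(x):
    return tf.pow(10.0, x * 0.05)   # amplitude = 10 ** (dB / 20)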
Example 15: db_to_gain
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pow [as alias]
def db_to_gain(tensor):
""" Convert from decibel to gain in tensorflow.
:param tensor_db: Tensor to convert.
:returns: Converted tensor.
"""
return tf.pow(10., (tensor / 20.))
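This is the same 10^(dB/20) conversion as in Example 14. A quick sanity check (illustrative): 20 dB corresponds to a linear gain of 10.

g = db_to_gain(tf.constant(20.0))   # -> 10.0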