This article collects typical usage examples of the tensorflow.reciprocal method in Python. If you are wondering what tensorflow.reciprocal does, how to call it, or want to see it used in context, the curated code samples below should help. You can also explore further usage examples of the tensorflow module it belongs to.
Below are 15 code examples of tensorflow.reciprocal, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
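As a quick orientation before the examples: tf.reciprocal computes the element-wise reciprocal 1/x of a tensor. The minimal sketch below is illustrative only and is not one of the collected examples; it assumes the TensorFlow 1.x graph/session API, where this function lives (in TensorFlow 2.x it was renamed tf.math.reciprocal):

import tensorflow as tf  # TensorFlow 1.x

x = tf.constant([1.0, 2.0, 4.0])
y = tf.reciprocal(x)  # element-wise 1/x -> [1.0, 0.5, 0.25]

with tf.Session() as sess:
    print(sess.run(y))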
Example 1: _get_filter
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reciprocal [as alias]
def _get_filter(self, data, grid, scope=None):
    """ Generate an attention filter """
    with tf.variable_scope(scope, 'filter', [data]):
        x_offset, y_offset, log_stride, log_scale, log_gamma = tf.split(
            layers.linear(data, 5, scope='parameters'), 5, axis=1)
        center = self._get_center(grid, (x_offset, y_offset), tf.exp(log_stride))
        scale = tf.expand_dims(tf.maximum(tf.exp(log_scale), self.epsilon), -1)
        filter_x = 1 + tf.square((self.data_x - center[0]) / tf.maximum(scale, self.epsilon))
        filter_y = 1 + tf.square((self.data_y - center[1]) / tf.maximum(scale, self.epsilon))
        filter_x = tf.reciprocal(tf.maximum(pi * scale * filter_x, self.epsilon))
        filter_y = tf.reciprocal(tf.maximum(pi * scale * filter_y, self.epsilon))
        return filter_x, filter_y, tf.exp(log_gamma)
Example 2: layer_normalize
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reciprocal [as alias]
def layer_normalize(tensor):
    '''Apologies if I've abused this term'''
    in_shape = tf.shape(tensor)
    axes = list(range(1, len(tensor.shape)))

    # Keep batch axis
    t = tf.reduce_sum(tensor, axis=axes)
    t += EPSILON
    t = tf.reciprocal(t)
    t = tf.check_numerics(t, "1/sum")

    tensor = tf.einsum('brc,b->brc', tensor, t)
    tensor = dynamic_assert_shape(tensor, in_shape, "layer_normalize_tensor")
    return tensor
Example 3: planeDepthsModule
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reciprocal [as alias]
def planeDepthsModule(plane_parameters, width, height):
    focalLength = 517.97
    urange = (tf.range(width, dtype=tf.float32) / (width + 1) - 0.5) / focalLength * 641
    urange = tf.tile(tf.reshape(urange, [1, -1]), [height, 1])
    vrange = (tf.range(height, dtype=tf.float32) / (height + 1) - 0.5) / focalLength * 481
    vrange = tf.tile(tf.reshape(vrange, [-1, 1]), [1, width])
    ranges = tf.stack([urange, np.ones([height, width]), -vrange], axis=2)
    ranges = tf.reshape(ranges, [-1, 3])

    planesD = tf.norm(plane_parameters, axis=1, keep_dims=True)
    planesD = tf.clip_by_value(planesD, 1e-5, 10)
    planesNormal = tf.div(tf.negative(plane_parameters), tf.tile(planesD, [1, 3]))

    normalXYZ = tf.matmul(ranges, planesNormal, transpose_b=True)
    normalXYZ = tf.multiply(tf.sign(normalXYZ), tf.clip_by_value(tf.abs(normalXYZ), 1e-4, 1000000))
    normalXYZ = tf.reciprocal(normalXYZ)
    plane_depths = tf.negative(normalXYZ) * tf.reshape(planesD, [-1])
    plane_depths = tf.reshape(plane_depths, [height, width, -1])
    plane_depths = tf.clip_by_value(plane_depths, 0, 10)
    return plane_depths
Example 4: planeDepthsModule
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reciprocal [as alias]
def planeDepthsModule(plane_parameters, width, height, info):
    urange = (tf.range(width, dtype=tf.float32) / (width + 1) * (info[16] + 1) - info[2]) / info[0]
    urange = tf.tile(tf.reshape(urange, [1, -1]), [height, 1])
    vrange = (tf.range(height, dtype=tf.float32) / (height + 1) * (info[17] + 1) - info[6]) / info[5]
    vrange = tf.tile(tf.reshape(vrange, [-1, 1]), [1, width])
    ranges = tf.stack([urange, np.ones([height, width]), -vrange], axis=2)
    ranges = tf.reshape(ranges, [-1, 3])

    planesD = tf.norm(plane_parameters, axis=1, keep_dims=True)
    planesD = tf.clip_by_value(planesD, 1e-5, 10)
    planesNormal = tf.div(tf.negative(plane_parameters), tf.tile(planesD, [1, 3]))

    normalXYZ = tf.matmul(ranges, planesNormal, transpose_b=True)
    normalXYZ = tf.multiply(tf.sign(normalXYZ), tf.clip_by_value(tf.abs(normalXYZ), 1e-4, 1000000))
    normalXYZ = tf.reciprocal(normalXYZ)
    plane_depths = tf.negative(normalXYZ) * tf.reshape(planesD, [-1])
    plane_depths = tf.reshape(plane_depths, [height, width, -1])
    plane_depths = tf.clip_by_value(plane_depths, 0, 10)
    return plane_depths
Example 5: sparse_conv
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reciprocal [as alias]
def sparse_conv(tensor, binary_mask=None, filters=32, kernel_size=3, strides=2, l2_scale=0.0):
    if binary_mask == None:  # first layer has no binary mask
        b, h, w, c = tensor.get_shape()
        channels = tf.split(tensor, c, axis=3)
        # assume that if one channel has no information, all channels have no information
        binary_mask = tf.where(tf.equal(channels[0], 0), tf.zeros_like(channels[0]), tf.ones_like(channels[0]))  # mask should only have the size of (B,H,W,1)

    features = tf.multiply(tensor, binary_mask)
    features = tf.layers.conv2d(features, filters=filters, kernel_size=kernel_size, strides=(strides, strides), trainable=True, use_bias=False, padding="same", kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=l2_scale))

    norm = tf.layers.conv2d(binary_mask, filters=filters, kernel_size=kernel_size, strides=(strides, strides), kernel_initializer=tf.ones_initializer(), trainable=False, use_bias=False, padding="same")
    norm = tf.where(tf.equal(norm, 0), tf.zeros_like(norm), tf.reciprocal(norm))

    _, _, _, bias_size = norm.get_shape()
    b = tf.Variable(tf.constant(0.0, shape=[bias_size]), trainable=True)
    feature = tf.multiply(features, norm) + b
    mask = tf.layers.max_pooling2d(binary_mask, strides=strides, pool_size=kernel_size, padding="same")
    return feature, mask
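A hypothetical usage sketch (the variable names sparse_depth, features and the filter counts below are assumptions for illustration, not from the source repository): the returned mask is meant to be threaded into the next sparse_conv call so that the validity information propagates through the network.

# First layer: no mask yet, it is derived from the zero-valued pixels of the input.
features, mask = sparse_conv(sparse_depth, binary_mask=None, filters=16, strides=2)
# Deeper layers reuse the down-sampled mask returned by the previous layer.
features, mask = sparse_conv(features, binary_mask=mask, filters=32, strides=2)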
Example 6: laplace_coord
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reciprocal [as alias]
def laplace_coord(pred, placeholders, block_id):
    # Discrete Laplacian: each vertex minus the (1/weight-scaled) mean of its gathered neighbours.
    vertex = tf.concat([pred, tf.zeros([1, 3])], 0)
    indices = placeholders['lape_idx'][block_id - 1][:, :8]
    weights = tf.cast(placeholders['lape_idx'][block_id - 1][:, -1], tf.float32)
    weights = tf.tile(tf.reshape(tf.reciprocal(weights), [-1, 1]), [1, 3])
    laplace = tf.reduce_sum(tf.gather(vertex, indices), 1)
    laplace = tf.subtract(pred, tf.multiply(laplace, weights))
    return laplace
Example 7: write
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reciprocal [as alias]
def write(self, data):
    """ Do a filtered write given the data """
    if not self.write_grid:
        raise ValueError('Writing is not supported')

    filter_x, filter_y, gamma = self.get_filter(data, self.write_grid, scope='write/filter')
    filter_y_transpose = tf.transpose(filter_y, [0, 2, 1])

    window = layers.linear(data, reduce_prod(self.write_grid.size))
    window = tf.reshape(window, (-1, self.write_grid.size[1], self.write_grid.size[0]))

    patch = tf.matmul(filter_y_transpose, tf.matmul(window, filter_x))
    return tf.reciprocal(tf.maximum(gamma, self.epsilon)) * layers.flatten(patch)
Example 8: calc_normalized_adjacency
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reciprocal [as alias]
def calc_normalized_adjacency(context, node_state):
    # Aggregate via adjacency matrix with normalisation (that does not include self-edges)
    adj = tf.cast(context.features["kb_adjacency"], tf.float32)
    degree = tf.reduce_sum(adj, -1, keepdims=True)
    inv_degree = tf.reciprocal(degree)
    node_mask = tf.expand_dims(tf.sequence_mask(context.features["kb_nodes_len"], context.args["kb_node_max_len"]), -1)
    inv_degree = tf.where(node_mask, inv_degree, tf.zeros(tf.shape(inv_degree)))
    inv_degree = tf.where(tf.greater(degree, 0), inv_degree, tf.zeros(tf.shape(inv_degree)))
    inv_degree = tf.check_numerics(inv_degree, "inv_degree")
    adj_norm = inv_degree * adj
    adj_norm = tf.cast(adj_norm, node_state.dtype)
    adj_norm = tf.check_numerics(adj_norm, "adj_norm")
    node_incoming = tf.einsum('bnw,bnm->bmw', node_state, adj_norm)
    return node_incoming
Example 9: _compute_tower_grads
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reciprocal [as alias]
def _compute_tower_grads(self, tower_loss, tower_params, use_fp16=False,
                         loss_scale=None, colocate_gradients_with_ops=True):
    """docstring."""
    if use_fp16:
        assert loss_scale
        scaled_loss = tf.multiply(
            tower_loss,
            tf.convert_to_tensor(loss_scale, dtype=tower_loss.dtype),
            name="scaling_loss")
    else:
        scaled_loss = tower_loss

    grads = tf.gradients(
        scaled_loss, tower_params,
        colocate_gradients_with_ops=colocate_gradients_with_ops)
    assert grads
    for g in grads:
        assert g.dtype == tf.float32, "grad.dtype isn't fp32: %s" % g.name

    # Downscale grads
    for var, grad in zip(tower_params, grads):
        if grad is None:
            misc_utils.print_out("%s gradient is None!" % var.name)
    if use_fp16:
        grads = [
            grad * tf.reciprocal(loss_scale) for grad in grads
        ]
    return tower_params, grads
Example 10: lossfunction
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reciprocal [as alias]
def lossfunction(self, tweightmat, tindicator, tembeddings):
    with tf.variable_scope('loss_computation') as scope:
        # tembeddings: #pts x 64
        sqrvals = tf.reduce_sum(tf.square(tembeddings), 1, keep_dims=True)
        # sqrvals: #pts x 1
        sqrvalsmat = tf.tile(sqrvals, [1, tf.shape(sqrvals)[0]])
        sqrvalsmat2 = tf.add(sqrvalsmat, tf.transpose(sqrvalsmat))
        distmat = tf.add(sqrvalsmat2, tf.scalar_mul(-2.0, tf.matmul(tembeddings, tf.transpose(tembeddings)))) / 64.0
        sigmamat = tf.scalar_mul(2.0, tf.reciprocal(1.0 + tf.exp(distmat)))
        posnegmapping = tf.log(tf.add(tf.scalar_mul(0.5, 1.0 - tindicator), tf.multiply(tindicator, sigmamat)))
        wcrossentropy = tf.multiply(tf.negative(tindicator + 2.0), posnegmapping)
        lossval = tf.reduce_mean(wcrossentropy)
        return lossval
Example 11: power
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reciprocal [as alias]
def power(x, p):
    if p == 1:
        return x
    if p == -1:
        return tf.reciprocal(x)
    return tf.pow(x, p)
Example 12: _apply_dropout_mask
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reciprocal [as alias]
def _apply_dropout_mask(tensor_shape, keep_prob=1.0, normalize=True):
    # Inverted dropout: each unit is kept with probability keep_prob; with
    # normalize=True the kept units are scaled by 1/keep_prob so the expected
    # activation is unchanged.
    random_tensor = keep_prob + tf.random_uniform(tensor_shape, dtype=tf.float32)
    binary_mask = tf.floor(random_tensor)
    if normalize:
        binary_mask = tf.reciprocal(keep_prob) * binary_mask
    return binary_mask
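A hypothetical usage sketch (the activations tensor and the 0.8 keep probability are assumptions for illustration): the mask is simply multiplied into the activations, and because it is already normalized, no extra rescaling is needed at inference time.

mask = _apply_dropout_mask(tf.shape(activations), keep_prob=0.8)
dropped = activations * mask  # zeroes ~20% of units, scales the rest by 1/0.8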
Example 13: get_mixture_coef
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reciprocal [as alias]
def get_mixture_coef(self, args, output):
    # returns the tf slices containing mdn dist params
    # ie, eq 18 -> 23 of http://arxiv.org/abs/1308.0850
    z = output

    # get the remaining parameters
    last = args.nroutputvars_raw - args.nrClassOutputVars

    z_eos = z[:, 0]
    z_eos = tf.sigmoid(z_eos)  # eos: sigmoid, eq 18
    z_eod = z[:, 1]
    z_eod = tf.sigmoid(z_eod)  # eod: sigmoid

    z_pi, z_mu1, z_mu2, z_sigma1, z_sigma2, z_corr = tf.split(z[:, 2:last], 6, 1)  # eq 20: mu1, mu2: no transformation required

    # process output z's into MDN parameters

    # softmax all the pi's:
    max_pi = tf.reduce_max(z_pi, 1, keep_dims=True)
    z_pi = tf.subtract(z_pi, max_pi)  # EdJ: subtract max pi for numerical stabilization
    z_pi = tf.exp(z_pi)  # eq 19
    normalize_pi = tf.reciprocal(tf.reduce_sum(z_pi, 1, keep_dims=True))
    z_pi = tf.multiply(normalize_pi, z_pi)  # eq 19

    # exponentiate the sigmas and also make corr between -1 and 1.
    z_sigma1 = tf.exp(z_sigma1)  # eq 21
    z_sigma2 = tf.exp(z_sigma2)
    z_corr_tanh = tf.tanh(z_corr)  # eq 22
    z_corr_tanh = .95 * z_corr_tanh  # avoid -1 and 1
    z_corr_tanh_adj = z_corr_tanh

    return [z_pi, z_mu1, z_mu2, z_sigma1, z_sigma2, z_corr_tanh_adj, z_eos, z_eod]
Example 14: test_basic
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reciprocal [as alias]
def test_basic(self):
    with tf.Graph().as_default(), self.test_session() as sess:
        rnd = np.random.RandomState(0)
        x = self.get_random_tensor([18, 12], rnd=rnd)
        y = tf.reciprocal(x)
        self.assert_bw_fw(sess, x, y, rnd=rnd)
Example 15: inv
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reciprocal [as alias]
def inv(self):
    # Woodbury-style inverse: (diag(d) + W Wᵀ)⁻¹, presumably represented by
    # LowRankMatNeg as diag(di) − V Vᵀ with L Lᵀ = I + Wᵀ diag(1/d) W.
    di = tf.reciprocal(self.d)
    d_col = tf.expand_dims(self.d, 1)
    DiW = self.W / d_col
    M = tf.eye(tf.shape(self.W)[1], dtype=float_type) + tf.matmul(tf.transpose(DiW), self.W)
    L = tf.cholesky(M)
    V = tf.transpose(tf.matrix_triangular_solve(L, tf.transpose(DiW), lower=True))
    return LowRankMatNeg(di, V)