This article collects typical usage examples of the Python method tensorflow.reduce_prod. If you are wondering what tensorflow.reduce_prod does, how to use it, or want concrete worked examples, the snippets curated below should help. You can also explore the other methods of the tensorflow module this method belongs to.
The following presents 15 code examples of tensorflow.reduce_prod, sorted by popularity by default.
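Before the examples, a minimal sketch of what tf.reduce_prod itself does (assuming TensorFlow 2.x eager execution; under TF 1.x the same calls build graph ops instead):

import tensorflow as tf

x = tf.constant([[1., 2., 3.],
                 [4., 5., 6.]])
tf.reduce_prod(x)           # 720.0 -- product over all elements
tf.reduce_prod(x, axis=0)   # [4., 10., 18.] -- product down each column
tf.reduce_prod(x, axis=1)   # [6., 120.] -- product across each row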
Example 1: _create_autosummary_var
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_prod [as alias]
def _create_autosummary_var(name, value_expr):
    assert not _autosummary_finalized
    v = tf.cast(value_expr, tf.float32)
    if v.shape.ndims == 0:
        v = [v, np.float32(1.0)]
    elif v.shape.ndims == 1:
        v = [tf.reduce_sum(v), tf.cast(tf.shape(v)[0], tf.float32)]
    else:
        v = [tf.reduce_sum(v), tf.reduce_prod(tf.cast(tf.shape(v), tf.float32))]
    v = tf.cond(tf.is_finite(v[0]), lambda: tf.stack(v), lambda: tf.zeros(2))
    with tf.control_dependencies(None):
        var = tf.Variable(tf.zeros(2))  # [numerator, denominator]
        update_op = tf.cond(tf.is_variable_initialized(var), lambda: tf.assign_add(var, v), lambda: tf.assign(var, v))
    if name in _autosummary_vars:
        _autosummary_vars[name].append(var)
    else:
        _autosummary_vars[name] = [var]
    return update_op

#----------------------------------------------------------------------------
# Call filewriter.add_summary() with all summaries in the default graph,
# automatically finalizing and merging them on the first call.
Example 2: gather_indices_2d
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_prod [as alias]
def gather_indices_2d(x, block_shape, block_stride):
    """Getting gather indices."""
    # making an identity matrix kernel
    kernel = tf.eye(block_shape[0] * block_shape[1])
    kernel = reshape_range(kernel, 0, 1, [block_shape[0], block_shape[1], 1])
    # making indices [1, h, w, 1] to apply convs
    x_shape = common_layers.shape_list(x)
    indices = tf.range(x_shape[2] * x_shape[3])
    indices = tf.reshape(indices, [1, x_shape[2], x_shape[3], 1])
    indices = tf.nn.conv2d(
        tf.cast(indices, tf.float32),
        kernel,
        strides=[1, block_stride[0], block_stride[1], 1],
        padding="VALID")
    # making indices [num_blocks, dim] to gather
    dims = common_layers.shape_list(indices)[:3]
    if all([isinstance(dim, int) for dim in dims]):
        num_blocks = functools.reduce(operator.mul, dims, 1)
    else:
        num_blocks = tf.reduce_prod(dims)
    indices = tf.reshape(indices, [num_blocks, -1])
    return tf.cast(indices, tf.int32)
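A hedged side note on the static-vs-dynamic branch above: when every entry of dims is a plain Python int, the product can be computed eagerly with functools.reduce; otherwise tf.reduce_prod builds the product as a graph op. A minimal sketch, with shapes invented for illustration:

import functools
import operator
import tensorflow as tf

dims = [4, 8, 8]                          # fully static shape
functools.reduce(operator.mul, dims, 1)   # 256, an ordinary Python int

dims = tf.constant([4, 8, 8])             # dynamic shape tensor
tf.reduce_prod(dims)                      # tf.Tensor(256, dtype=int32)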
Example 3: build
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_prod [as alias]
def build(self, input_shape=None):
    self.layer.input_spec = InputSpec(shape=input_shape)
    if hasattr(self.layer, 'built') and not self.layer.built:
        self.layer.build(input_shape)
        self.layer.built = True
    # initialise p
    self.p_logit = self.add_weight(name='p_logit', shape=(1,),
                                   initializer=initializers.RandomUniform(self.init_min, self.init_max),
                                   dtype=tf.float32, trainable=True)
    self.p = tf.nn.sigmoid(self.p_logit)
    tf.compat.v1.add_to_collection("LAYER_P", self.p)
    # initialise regularizer / prior KL term
    input_dim = tf.reduce_prod(input_shape[1:])  # we drop only last dim
    weight = self.layer.kernel
    kernel_regularizer = self.weight_regularizer * tf.reduce_sum(tf.square(weight)) / (1. - self.p)
    dropout_regularizer = self.p * tf.math.log(self.p)
    dropout_regularizer += (1. - self.p) * tf.math.log(1. - self.p)
    dropout_regularizer *= self.dropout_regularizer * tf.cast(input_dim, tf.float32)
    regularizer = tf.reduce_sum(kernel_regularizer + dropout_regularizer)
    self.layer.add_loss(regularizer)
    # Add the regularisation loss to collection.
    tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES, regularizer)
Example 4: intpow_avx2
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_prod [as alias]
def intpow_avx2(x, n):
    """
    Calculate an integer power of a float (including negative bases), even when Tensorflow
    is compiled with AVX2: the --fast-math compiler flag commonly used with AVX2 aggressively
    optimizes float operations and can break pow() for negative bases
    :param x: input tensor
    :type x: tf.Tensor
    :param n: an integer power (a float will be cast to an integer!!)
    :type n: int
    :return: powered float(s)
    :rtype: tf.Tensor
    :History: 2018-Aug-13 - Written - Henry Leung (University of Toronto)
    """
    import tensorflow as tf
    # expand inputs to prepare to be tiled
    expanded_inputs = tf.expand_dims(x, 1)
    # we want [1, self.n]
    return tf.reduce_prod(tf.tile(expanded_inputs, [1, n]), axis=-1)
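A usage sketch for the function above (TF 2.x eager assumed; the input values are made up). It handles negative bases, which is exactly where --fast-math builds of pow() can misbehave:

import tensorflow as tf

x = tf.constant([2.0, -3.0])
intpow_avx2(x, 3)   # [8., -27.]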
Example 5: tf_ms_ssim
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_prod [as alias]
def tf_ms_ssim(img1, img2, mean_metric=True, level=5):
    weight = tf.constant([0.0448, 0.2856, 0.3001, 0.2363, 0.1333], dtype=tf.float32)
    mssim = []
    mcs = []
    for l in range(level):
        ssim_map, cs_map = tf_ssim(img1, img2, cs_map=True, mean_metric=False)
        mssim.append(tf.reduce_mean(ssim_map))
        mcs.append(tf.reduce_mean(cs_map))
        filtered_im1 = tf.nn.avg_pool(img1, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        filtered_im2 = tf.nn.avg_pool(img2, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        img1 = filtered_im1
        img2 = filtered_im2
    # list to tensor of dim D+1
    mssim = tf.stack(mssim, axis=0)
    mcs = tf.stack(mcs, axis=0)
    value = (tf.reduce_prod(mcs[0:level-1]**weight[0:level-1]) *
             (mssim[level-1]**weight[level-1]))
    if mean_metric:
        value = tf.reduce_mean(value)
    return value
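A hedged numeric illustration of the final combination step above: MS-SSIM multiplies the per-level contrast terms, each raised to its weight, and then multiplies in the weighted top-level SSIM. The values below are invented just to show the arithmetic:

import tensorflow as tf

mcs = tf.constant([0.9, 0.8, 0.7, 0.6])   # contrast terms for levels 0..3
mssim_top = tf.constant(0.5)              # SSIM at the coarsest level
w = tf.constant([0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
tf.reduce_prod(mcs ** w[0:4]) * (mssim_top ** w[4])   # scalar MS-SSIM value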
Example 6: dmi_loss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_prod [as alias]
def dmi_loss(config, logits, labels, **kargs):
    # N x C
    probs = tf.exp(tf.nn.log_softmax(logits, axis=-1))
    input_shape_list = bert_utils.get_shape_list(logits, expected_rank=[2])
    # N x C
    one_hot_labels = tf.one_hot(labels, depth=kargs.get('num_classes', 2), dtype=tf.float32)
    # C x N matmul N x C
    mat = tf.matmul(tf.stop_gradient(one_hot_labels), probs, transpose_a=True)
    print('==mutual information shape==', mat.get_shape())
    per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits,
        labels=tf.stop_gradient(labels))
    # |det(mat)| via the product of singular values
    mat_det = tf.reduce_prod(tf.abs(tf.linalg.svd(mat, compute_uv=False)))
    loss = -tf.reduce_sum(tf.log(1e-10 + mat_det))
    return loss, per_example_loss
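A hedged sketch of the determinant trick in the loss above: the absolute determinant of a square matrix equals the product of its singular values, so tf.reduce_prod over tf.linalg.svd(..., compute_uv=False) yields |det(mat)| without its sign:

import tensorflow as tf

m = tf.constant([[2., 0.],
                 [0., 3.]])
tf.reduce_prod(tf.abs(tf.linalg.svd(m, compute_uv=False)))   # 6.0 == |det(m)|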
Example 7: __call__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_prod [as alias]
def __call__(self, input_var, name=None, **kwargs):
    if input_var.shape.ndims > 2:
        dims = tf.reduce_prod(tf.shape(input_var)[1:])
        input_var = tf.reshape(input_var, [-1, dims])

    def _init():
        v_norm = tf.nn.l2_normalize(self.v, axis=0)
        t = tf.matmul(input_var, v_norm)
        mu, var = tf.nn.moments(t, axes=[0])
        std = tf.sqrt(var + self.epsilon)
        return [tf.assign(self.g, 1 / std), tf.assign(self.b, -1. * mu / std)]

    require_init = tf.reduce_any(tf.is_nan(self.g))
    init_ops = tf.cond(require_init, _init, lambda: [self.g, self.b])

    with tf.control_dependencies(init_ops):
        w = tf.expand_dims(self.g, axis=0) * tf.nn.l2_normalize(self.v, axis=0)
        return tf.matmul(input_var, w) + self.b
Example 8: additive_walk_embedding
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_prod [as alias]
def additive_walk_embedding(predicate_embeddings):
    """
    Takes a walk, represented by a 3D Tensor with shape (batch_size, walk_length, embedding_length),
    and computes its embedding using a simple additive model.
    This method is roughly equivalent to:
    > walk_embedding = tf.reduce_sum(predicate_embeddings, axis=1)
    :param predicate_embeddings: 3D Tensor containing the embedding of the predicates in the walk.
    :return: 2D tensor of size (batch_size, embedding_length) containing the walk embeddings.
    """
    batch_size, embedding_len = tf.shape(predicate_embeddings)[0], tf.shape(predicate_embeddings)[2]
    # Transpose the (batch_size, walk_length, n) Tensor into a (walk_length, batch_size, n) Tensor
    transposed_embedding_matrix = tf.transpose(predicate_embeddings, perm=[1, 0, 2])
    # Define the initializer of the scan procedure - an all-zeros matrix,
    # since zero is the neutral element wrt. the element-wise sum
    initializer = tf.zeros((batch_size, embedding_len), dtype=predicate_embeddings.dtype)
    # The walk embeddings are given by the sum of the predicate embeddings
    walk_embedding = tf.scan(lambda x, y: x + y, transposed_embedding_matrix, initializer=initializer)
    # Add the initializer as the first step in the scan sequence, in case the walk has zero length
    return tf.concat(values=[tf.expand_dims(initializer, 0), walk_embedding], axis=0)[-1]
Example 9: bilinear_diagonal_walk_embedding
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_prod [as alias]
def bilinear_diagonal_walk_embedding(predicate_embeddings):
    """
    Takes a walk, represented by a 3D Tensor with shape (batch_size, walk_length, embedding_length),
    and computes its embedding using a simple bilinear diagonal model.
    This method is roughly equivalent to:
    > walk_embedding = tf.reduce_prod(predicate_embeddings, axis=1)
    :param predicate_embeddings: 3D Tensor containing the embedding of the predicates in the walk.
    :return: 2D tensor of size (batch_size, embedding_length) containing the walk embeddings.
    """
    batch_size, embedding_len = tf.shape(predicate_embeddings)[0], tf.shape(predicate_embeddings)[2]
    # Transpose the (batch_size, walk_length, n) Tensor into a (walk_length, batch_size, n) Tensor
    transposed_embedding_matrix = tf.transpose(predicate_embeddings, perm=[1, 0, 2])
    # Define the initializer of the scan procedure - an all-ones matrix,
    # since one is the neutral element wrt. the element-wise product
    initializer = tf.ones((batch_size, embedding_len), dtype=predicate_embeddings.dtype)
    # The walk embeddings are given by the element-wise product of the predicate embeddings
    walk_embedding = tf.scan(lambda x, y: x * y, transposed_embedding_matrix, initializer=initializer)
    # Add the initializer as the first step in the scan sequence, in case the walk has zero length
    return tf.concat(values=[tf.expand_dims(initializer, 0), walk_embedding], axis=0)[-1]
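A hedged consistency check for the docstring's claim above (TF 2.x eager assumed; the shapes are made up): the last step of the scan should match a direct tf.reduce_prod over the walk axis, up to float rounding.

import tensorflow as tf

p = tf.random.normal([5, 3, 4])   # (batch, walk_length, embedding)
a = bilinear_diagonal_walk_embedding(p)
b = tf.reduce_prod(p, axis=1)
tf.reduce_max(tf.abs(a - b))      # ~0, i.e. the two agree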
Example 10: hypervolume
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_prod [as alias]
def hypervolume(self, reference):
    """
    Autoflow method to calculate the hypervolume indicator
    The hypervolume indicator is the volume of the dominated region.
    :param reference: reference point to use
        Should be equal to or bigger than the anti-ideal point of the Pareto set.
        For comparing results across runs, the same reference point must be used.
    :return: hypervolume indicator (the higher the better)
    """
    min_pf = tf.reduce_min(self.front, 0, keep_dims=True)
    R = tf.expand_dims(reference, 0)
    pseudo_pf = tf.concat((min_pf, self.front, R), 0)
    D = tf.shape(pseudo_pf)[1]
    N = tf.shape(self.bounds.ub)[0]
    idx = tf.tile(tf.expand_dims(tf.range(D), -1), [1, N])
    ub_idx = tf.reshape(tf.stack([tf.transpose(self.bounds.ub), idx], axis=2), [N * D, 2])
    lb_idx = tf.reshape(tf.stack([tf.transpose(self.bounds.lb), idx], axis=2), [N * D, 2])
    ub = tf.reshape(tf.gather_nd(pseudo_pf, ub_idx), [D, N])
    lb = tf.reshape(tf.gather_nd(pseudo_pf, lb_idx), [D, N])
    hv = tf.reduce_sum(tf.reduce_prod(ub - lb, 0))
    return tf.reduce_prod(R - min_pf) - hv
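A hedged one-point illustration of the quantities involved (values invented): for a single Pareto point p and a reference r that dominates it, the dominated hypervolume is just the box volume prod(r - p), which is the role tf.reduce_prod plays above:

import tensorflow as tf

p = tf.constant([1., 2.])
r = tf.constant([4., 4.])
tf.reduce_prod(r - p)   # 6.0 == (4-1) * (4-2)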
Example 11: _call_sampler
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_prod [as alias]
def _call_sampler(sample_n_fn, sample_shape, name=None):
    """Reshapes vector of samples."""
    with tf.name_scope(name or "call_sampler"):
        sample_shape = tf.convert_to_tensor(
            sample_shape, dtype=tf.int32, name="sample_shape"
        )
        # Ensure sample_shape is a vector (vs just a scalar).
        pad = tf.cast(tf.equal(tf.rank(sample_shape), 0), tf.int32)
        sample_shape = tf.reshape(
            sample_shape,
            tf.pad(tf.shape(sample_shape), paddings=[[pad, 0]], constant_values=1),
        )
        samples = sample_n_fn(tf.reduce_prod(sample_shape))
        batch_event_shape = tf.shape(samples)[1:]
        final_shape = tf.concat([sample_shape, batch_event_shape], 0)
        return tf.reshape(samples, final_shape)
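A usage sketch for the sampler wrapper above (TF 2.x eager assumed; the normal sampler is an arbitrary stand-in): requesting a [2, 4] sample shape from a sampler of 3-dimensional vectors draws 2 * 4 = 8 samples flat, then reshapes them to [2, 4, 3].

import tensorflow as tf

samples = _call_sampler(lambda n: tf.random.normal([n, 3]), [2, 4])
samples.shape   # TensorShape([2, 4, 3])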
Example 12: tf_ms_ssim
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_prod [as alias]
def tf_ms_ssim(img1, img2, mean_metric=True, level=5):
    weight = tf.constant([0.0448, 0.2856, 0.3001, 0.2363, 0.1333], dtype=tf.float32)
    mssim = []
    mcs = []
    for l in range(level):
        ssim_map, cs_map = tf_ssim(img1, img2, cs_map=True, mean_metric=False)
        mssim.append(tf.reduce_mean(ssim_map))
        mcs.append(tf.reduce_mean(cs_map))
        filtered_im1 = tf.nn.avg_pool(img1, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        filtered_im2 = tf.nn.avg_pool(img2, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        img1 = filtered_im1
        img2 = filtered_im2
    # list to tensor of dim D+1
    # (tf.pack is the pre-1.0 name of tf.stack; this snippet targets old TensorFlow)
    mssim = tf.pack(mssim, axis=0)
    mcs = tf.pack(mcs, axis=0)
    value = (tf.reduce_prod(
        mcs[0:level-1]**weight[0:level-1]) * (mssim[level-1]**weight[level-1]))
    if mean_metric:
        value = tf.reduce_mean(value)
    return value
Example 13: f_inter_box
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_prod [as alias]
def f_inter_box(top_left_a, bot_right_a, top_left_b, bot_right_b):
    """Computes intersection area with boxes.

    Args:
        top_left_a: [B, T, 2] or [B, 2]
        bot_right_a: [B, T, 2] or [B, 2]
        top_left_b: [B, T, 2] or [B, 2]
        bot_right_b: [B, T, 2] or [B, 2]

    Returns:
        area: [B, T]
    """
    top_left_max = tf.maximum(top_left_a, top_left_b)
    bot_right_min = tf.minimum(bot_right_a, bot_right_b)
    ndims = tf.shape(tf.shape(top_left_a))
    # Check if the resulting box is valid.
    overlap = tf.to_float(top_left_max < bot_right_min)
    overlap = tf.reduce_prod(overlap, ndims - 1)
    area = tf.reduce_prod(bot_right_min - top_left_max, ndims - 1)
    area = overlap * tf.abs(area)
    return area
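A hedged numeric check for the function above (TF 1.x assumed, because of tf.to_float; the boxes are invented): the 2 x 2 boxes [[0,0],[2,2]] and [[1,1],[3,3]] overlap in a 1 x 1 square.

import tensorflow as tf

tl_a = tf.constant([[0., 0.]]); br_a = tf.constant([[2., 2.]])
tl_b = tf.constant([[1., 1.]]); br_b = tf.constant([[3., 3.]])
area = f_inter_box(tl_a, br_a, tl_b, br_b)   # [1.0]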
Example 14: f_iou_box_old
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_prod [as alias]
def f_iou_box_old(top_left_a, bot_right_a, top_left_b, bot_right_b):
    """Computes IoU of boxes.

    Args:
        top_left_a: [B, T, 2] or [B, 2]
        bot_right_a: [B, T, 2] or [B, 2]
        top_left_b: [B, T, 2] or [B, 2]
        bot_right_b: [B, T, 2] or [B, 2]

    Returns:
        iou: [B, T]
    """
    inter_area = f_inter_box(top_left_a, bot_right_a, top_left_b, bot_right_b)
    inter_area = tf.maximum(inter_area, 1e-6)
    ndims = tf.shape(tf.shape(top_left_a))
    # area_a = tf.reduce_prod(bot_right_a - top_left_a, ndims - 1)
    # area_b = tf.reduce_prod(bot_right_b - top_left_b, ndims - 1)
    check_a = tf.reduce_prod(tf.to_float(top_left_a < bot_right_a), ndims - 1)
    area_a = check_a * tf.reduce_prod(bot_right_a - top_left_a, ndims - 1)
    check_b = tf.reduce_prod(tf.to_float(top_left_b < bot_right_b), ndims - 1)
    area_b = check_b * tf.reduce_prod(bot_right_b - top_left_b, ndims - 1)
    union_area = (area_a + area_b - inter_area + 1e-5)
    union_area = tf.maximum(union_area, 1e-5)
    iou = inter_area / union_area
    iou = tf.maximum(iou, 1e-5)
    iou = tf.minimum(iou, 1.0)
    return iou
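Continuing the hedged sketch after Example 13 with the same invented boxes: each box has area 4 and they share area 1, so IoU = 1 / (4 + 4 - 1) ≈ 0.1429.

iou = f_iou_box_old(tl_a, br_a, tl_b, br_b)   # ~[0.1429]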
Example 15: get_filled_box_idx
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_prod [as alias]
def get_filled_box_idx(idx, top_left, bot_right):
    """Fill a box with top left and bottom right coordinates.

    Args:
        idx: [B, T, H, W, 2] or [B, H, W, 2] or [H, W, 2]
        top_left: [B, T, 2] or [B, 2] or [2]
        bot_right: [B, T, 2] or [B, 2] or [2]
    """
    ss = tf.shape(idx)
    ndims = tf.shape(ss)
    batch = tf.slice(ss, [0], ndims - 3)
    # Note: tf.concat(0, values) is the pre-1.0 argument order; newer
    # TensorFlow expects tf.concat(values, axis=0).
    coord_shape = tf.concat(0, [batch, tf.constant([1, 1, 2])])
    top_left = tf.reshape(top_left, coord_shape)
    bot_right = tf.reshape(bot_right, coord_shape)
    lower = tf.reduce_prod(tf.to_float(idx >= top_left), ndims - 1)
    upper = tf.reduce_prod(tf.to_float(idx <= bot_right), ndims - 1)
    box = lower * upper
    return box