This article collects typical usage examples of the tensorflow.reduce_sum method in Python. If you have been wondering what exactly tensorflow.reduce_sum does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the tensorflow module itself.
A total of 15 code examples of tensorflow.reduce_sum are shown below, sorted by popularity by default.
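As a quick refresher before the examples (this snippet is not taken from any of the projects below): tf.reduce_sum adds up a tensor's elements, either across every axis or only along the axes you name, and the keepdims flag (spelled keep_dims in the older TF 1.x code used by several examples) controls whether the reduced axes are kept with size 1.

import tensorflow as tf

x = tf.constant([[1., 2., 3.],
                 [4., 5., 6.]])

tf.reduce_sum(x)                          # 21.0, the sum of every element
tf.reduce_sum(x, axis=0)                  # [5., 7., 9.], column sums
tf.reduce_sum(x, axis=1, keepdims=True)   # [[6.], [15.]], row sums with rank preserved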
Example 1: call
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_sum [as alias]
def call(self, inputs, **kwargs):
    if K.ndim(inputs) != 3:
        raise ValueError(
            "Unexpected inputs dimensions %d, expect to be 3 dimensions"
            % (K.ndim(inputs)))

    concated_embeds_value = inputs

    square_of_sum = tf.square(tf.reduce_sum(
        concated_embeds_value, axis=1, keep_dims=True))
    sum_of_square = tf.reduce_sum(
        concated_embeds_value * concated_embeds_value, axis=1, keep_dims=True)
    cross_term = square_of_sum - sum_of_square
    cross_term = 0.5 * tf.reduce_sum(cross_term, axis=2, keep_dims=False)

    return cross_term
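This FM layer computes the second-order feature interactions via the identity sum over pairs of dot products = 0.5 * ((sum of embeddings)^2 - sum of squared embeddings), which is exactly what the two reduce_sum calls over axis 1 implement. A minimal standalone sketch of the same computation (shapes are illustrative; keepdims is the modern spelling of keep_dims):

import tensorflow as tf

embeds = tf.random.normal([2, 3, 4])                                      # [batch, fields, embedding_dim]
square_of_sum = tf.square(tf.reduce_sum(embeds, axis=1, keepdims=True))   # [2, 1, 4]
sum_of_square = tf.reduce_sum(tf.square(embeds), axis=1, keepdims=True)   # [2, 1, 4]
cross_term = 0.5 * tf.reduce_sum(square_of_sum - sum_of_square, axis=2)   # [2, 1]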
Example 2: _create_autosummary_var
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_sum [as alias]
def _create_autosummary_var(name, value_expr):
    assert not _autosummary_finalized
    v = tf.cast(value_expr, tf.float32)
    if v.shape.ndims == 0:
        v = [v, np.float32(1.0)]
    elif v.shape.ndims == 1:
        v = [tf.reduce_sum(v), tf.cast(tf.shape(v)[0], tf.float32)]
    else:
        v = [tf.reduce_sum(v), tf.reduce_prod(tf.cast(tf.shape(v), tf.float32))]
    v = tf.cond(tf.is_finite(v[0]), lambda: tf.stack(v), lambda: tf.zeros(2))
    with tf.control_dependencies(None):
        var = tf.Variable(tf.zeros(2))  # [numerator, denominator]
    update_op = tf.cond(tf.is_variable_initialized(var), lambda: tf.assign_add(var, v), lambda: tf.assign(var, v))
    if name in _autosummary_vars:
        _autosummary_vars[name].append(var)
    else:
        _autosummary_vars[name] = [var]
    return update_op

#----------------------------------------------------------------------------
# Call filewriter.add_summary() with all summaries in the default graph,
# automatically finalizing and merging them on the first call.
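The autosummary variable holds a running [numerator, denominator] pair: reduce_sum collapses the summarized value to a scalar total while the second slot counts how many elements contributed, so dividing the two later recovers the mean. A minimal sketch of the same accumulation idea in eager TF 2.x (the variable and function names here are illustrative, not part of the original code):

import tensorflow as tf

acc = tf.Variable([0.0, 0.0])    # [numerator, denominator]

def accumulate(value):
    value = tf.cast(value, tf.float32)
    acc.assign_add(tf.stack([tf.reduce_sum(value),
                             tf.cast(tf.size(value), tf.float32)]))

accumulate(tf.constant([1.0, 2.0, 3.0]))
accumulate(tf.constant([[4.0, 5.0]]))
mean = acc[0] / acc[1]           # 15.0 / 5.0 = 3.0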
Example 3: set_input_shape
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_sum [as alias]
def set_input_shape(self, input_shape):
    batch_size, rows, cols, input_channels = input_shape
    kernel_shape = tuple(self.kernel_shape) + (input_channels,
                                               self.output_channels)
    assert len(kernel_shape) == 4
    assert all(isinstance(e, int) for e in kernel_shape), kernel_shape
    init = tf.random_normal(kernel_shape, dtype=tf.float32)
    init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init),
                                               axis=(0, 1, 2)))
    self.kernels = tf.Variable(init)
    self.b = tf.Variable(
        np.zeros((self.output_channels,)).astype('float32'))
    input_shape = list(input_shape)
    input_shape[0] = 1
    dummy_batch = tf.zeros(input_shape)
    dummy_output = self.fprop(dummy_batch)
    output_shape = [int(e) for e in dummy_output.get_shape()]
    output_shape[0] = batch_size
    self.output_shape = tuple(output_shape)
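The reduce_sum over axes (0, 1, 2) yields one squared L2 norm per output channel, so the division gives every convolution filter a roughly unit norm (the 1e-7 guards against division by zero); Example 4 below applies the same trick per column of a dense weight matrix with axis=0 and keep_dims=True. A standalone sketch, assuming a [height, width, in_channels, out_channels] kernel:

import tensorflow as tf

kernel = tf.random.normal([3, 3, 16, 32])                     # [h, w, in, out]
sq_norms = tf.reduce_sum(tf.square(kernel), axis=(0, 1, 2))   # one value per output channel, shape [32]
kernel = kernel / tf.sqrt(1e-7 + sq_norms)                    # broadcasts over the last axis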
Example 4: set_input_shape
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_sum [as alias]
def set_input_shape(self, input_shape):
    batch_size, dim = input_shape
    self.input_shape = [batch_size, dim]
    self.output_shape = [batch_size, self.num_hid]
    if self.init_mode == "norm":
        init = tf.random_normal([dim, self.num_hid], dtype=tf.float32)
        init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init), axis=0,
                                                   keep_dims=True))
        init = init * self.init_scale
    elif self.init_mode == "uniform_unit_scaling":
        scale = np.sqrt(3. / dim)
        init = tf.random_uniform([dim, self.num_hid], dtype=tf.float32,
                                 minval=-scale, maxval=scale)
    else:
        raise ValueError(self.init_mode)
    self.W = PV(init)
    if self.use_bias:
        self.b = PV((np.zeros((self.num_hid,))
                     + self.init_b).astype('float32'))
Example 5: compute_column_softmax
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_sum [as alias]
def compute_column_softmax(self, column_controller_vector, time_step):
    # compute softmax over all the columns using column controller vector
    column_controller_vector = tf.tile(
        tf.expand_dims(column_controller_vector, 1),
        [1, self.num_cols + self.num_word_cols, 1])  # max_cols * bs * d
    column_controller_vector = nn_utils.apply_dropout(
        column_controller_vector, self.utility.FLAGS.dropout, self.mode)
    self.full_column_hidden_vectors = tf.concat(
        axis=1, values=[self.column_hidden_vectors, self.word_column_hidden_vectors])
    self.full_column_hidden_vectors += self.summary_text_entry_embeddings
    self.full_column_hidden_vectors = nn_utils.apply_dropout(
        self.full_column_hidden_vectors, self.utility.FLAGS.dropout, self.mode)
    column_logits = tf.reduce_sum(
        column_controller_vector * self.full_column_hidden_vectors, 2) + (
            self.params["word_match_feature_column_name"] *
            self.batch_column_exact_match) + self.full_column_mask
    column_softmax = tf.nn.softmax(column_logits)  # batch_size * max_cols
    return column_softmax
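The logits line is a batched dot product: multiplying the tiled controller vector elementwise with each column's hidden vector and applying reduce_sum over the last axis produces one score per column, which the softmax then turns into attention weights. A minimal sketch with illustrative shapes:

import tensorflow as tf

controller = tf.random.normal([4, 1, 8])                # [batch, 1, d]
columns = tf.random.normal([4, 6, 8])                   # [batch, num_cols, d]
logits = tf.reduce_sum(controller * columns, axis=2)    # dot product per column -> [4, 6]
attention = tf.nn.softmax(logits)                       # softmax over the columns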
Example 6: get_hash_slots
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_sum [as alias]
def get_hash_slots(self, query):
    """Gets hashed-to buckets for batch of queries.

    Args:
      query: 2-d Tensor of query vectors.

    Returns:
      A list of hashed-to buckets for each hash function.
    """
    binary_hash = [
        tf.less(tf.matmul(query, self.hash_vecs[i], transpose_b=True), 0)
        for i in xrange(self.num_libraries)]
    hash_slot_idxs = [
        tf.reduce_sum(
            tf.to_int32(binary_hash[i]) *
            tf.constant([[2 ** i for i in xrange(self.num_hashes)]],
                        dtype=tf.int32), 1)
        for i in xrange(self.num_libraries)]
    return hash_slot_idxs
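Each bucket index is built by reading the sign pattern of the query's random projections as a binary number: the 0/1 bits are multiplied by powers of two and reduce_sum over the hash axis yields the integer slot. A tiny sketch with 4 hash bits (the bit values are made up):

import tensorflow as tf

bits = tf.constant([[1, 0, 1, 1],
                    [0, 1, 0, 0]], dtype=tf.int32)     # [batch, num_hashes]
powers = tf.constant([[1, 2, 4, 8]], dtype=tf.int32)   # 2 ** i for each bit position
bucket = tf.reduce_sum(bits * powers, axis=1)          # [13, 2]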
Example 7: build_cross_entropy_loss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_sum [as alias]
def build_cross_entropy_loss(logits, gold):
    """Constructs a cross entropy from logits and one-hot encoded gold labels.

    Supports skipping rows where the gold label is the magic -1 value.

    Args:
      logits: float Tensor of scores.
      gold: int Tensor of one-hot labels.

    Returns:
      cost, correct, total: the total cost, the total number of correctly
          predicted labels, and the total number of valid labels.
    """
    valid = tf.reshape(tf.where(tf.greater(gold, -1)), [-1])
    gold = tf.gather(gold, valid)
    logits = tf.gather(logits, valid)
    correct = tf.reduce_sum(tf.to_int32(tf.nn.in_top_k(logits, gold, 1)))
    total = tf.size(gold)
    cost = tf.reduce_sum(
        tf.contrib.nn.deprecated_flipped_sparse_softmax_cross_entropy_with_logits(
            logits, tf.cast(gold, tf.int64))) / tf.cast(total, tf.float32)
    return cost, correct, total
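reduce_sum is used twice here: once to count correct predictions by summing a 0/1 indicator derived from tf.nn.in_top_k, and once to total the per-example cross-entropy before dividing by the number of valid labels. A minimal sketch of the counting pattern (toy logits and labels):

import tensorflow as tf

logits = tf.constant([[2.0, 0.1], [0.3, 1.5], [0.9, 0.2]])
gold = tf.constant([0, 1, 1])
hits = tf.nn.in_top_k(targets=gold, predictions=logits, k=1)   # [True, True, False]
correct = tf.reduce_sum(tf.cast(hits, tf.int32))               # 2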
Example 8: l1_regularizer
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_sum [as alias]
def l1_regularizer(weight=1.0, scope=None):
    """Define a L1 regularizer.

    Args:
      weight: scale the loss by this factor.
      scope: Optional scope for name_scope.

    Returns:
      a regularizer function.
    """
    def regularizer(tensor):
        with tf.name_scope(scope, 'L1Regularizer', [tensor]):
            l1_weight = tf.convert_to_tensor(weight,
                                             dtype=tensor.dtype.base_dtype,
                                             name='weight')
            return tf.multiply(l1_weight, tf.reduce_sum(tf.abs(tensor)), name='value')
    return regularizer
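The returned regularizer is simply weight * sum(|tensor|): calling reduce_sum with no axis argument collapses the whole tensor to a scalar. The l1_l2_regularizer in the next example combines the same L1 term with tf.nn.l2_loss. A quick sketch of the raw penalties (the weights and values are arbitrary):

import tensorflow as tf

weights = tf.constant([[0.5, -1.0], [2.0, 0.0]])
l1_penalty = 0.01 * tf.reduce_sum(tf.abs(weights))   # 0.01 * 3.5
l2_penalty = 0.001 * tf.nn.l2_loss(weights)          # 0.001 * 2.625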
Example 9: l1_l2_regularizer
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_sum [as alias]
def l1_l2_regularizer(weight_l1=1.0, weight_l2=1.0, scope=None):
    """Define a L1L2 regularizer.

    Args:
      weight_l1: scale the L1 loss by this factor.
      weight_l2: scale the L2 loss by this factor.
      scope: Optional scope for name_scope.

    Returns:
      a regularizer function.
    """
    def regularizer(tensor):
        with tf.name_scope(scope, 'L1L2Regularizer', [tensor]):
            weight_l1_t = tf.convert_to_tensor(weight_l1,
                                               dtype=tensor.dtype.base_dtype,
                                               name='weight_l1')
            weight_l2_t = tf.convert_to_tensor(weight_l2,
                                               dtype=tensor.dtype.base_dtype,
                                               name='weight_l2')
            reg_l1 = tf.multiply(weight_l1_t, tf.reduce_sum(tf.abs(tensor)),
                                 name='value_l1')
            reg_l2 = tf.multiply(weight_l2_t, tf.nn.l2_loss(tensor),
                                 name='value_l2')
            return tf.add(reg_l1, reg_l2, name='value')
    return regularizer
Example 10: compute_mfcc
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_sum [as alias]
def compute_mfcc(audio, **kwargs):
    """
    Compute the MFCC for a given audio waveform. This is
    identical to how DeepSpeech does it, but does it all in
    TensorFlow so that we can differentiate through it.
    """
    batch_size, size = audio.get_shape().as_list()
    audio = tf.cast(audio, tf.float32)

    # 1. Pre-emphasizer, a high-pass filter
    audio = tf.concat((audio[:, :1], audio[:, 1:] - 0.97 * audio[:, :-1],
                       np.zeros((batch_size, 1000), dtype=np.float32)), 1)

    # 2. windowing into frames of 320 samples, overlapping
    windowed = tf.stack([audio[:, i:i + 400] for i in range(0, size - 320, 160)], 1)

    # 3. Take the FFT to convert to frequency space
    ffted = tf.spectral.rfft(windowed, [512])
    ffted = 1.0 / 512 * tf.square(tf.abs(ffted))

    # 4. Compute the Mel windowing of the FFT
    energy = tf.reduce_sum(ffted, axis=2) + 1e-30
    filters = np.load("filterbanks.npy").T
    feat = tf.matmul(ffted, np.array([filters] * batch_size, dtype=np.float32)) + 1e-30

    # 5. Take the DCT again, because why not
    feat = tf.log(feat)
    feat = tf.spectral.dct(feat, type=2, norm='ortho')[:, :, :26]

    # 6. Amplify high frequencies for some reason
    _, nframes, ncoeff = feat.get_shape().as_list()
    n = np.arange(ncoeff)
    lift = 1 + (22 / 2.) * np.sin(np.pi * n / 22)
    feat = lift * feat
    width = feat.get_shape().as_list()[1]

    # 7. And now stick the energy next to the features
    feat = tf.concat((tf.reshape(tf.log(energy), (-1, width, 1)), feat[:, :, 1:]), axis=2)

    return feat
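In step 4, reduce_sum over the last axis collapses each frame's power spectrum into a single frame energy, and the added 1e-30 keeps the later tf.log finite. A standalone sketch with assumed shapes (a 512-point real FFT gives 257 frequency bins):

import tensorflow as tf

power_spec = tf.random.uniform([1, 10, 257])          # [batch, frames, freq_bins]
energy = tf.reduce_sum(power_spec, axis=2) + 1e-30    # [batch, frames]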
Example 11: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_sum [as alias]
def __init__(self, config):
    entity_total = config.entity
    relation_total = config.relation
    batch_size = config.batch_size
    size = config.hidden_size
    margin = config.margin

    self.pos_h = tf.placeholder(tf.int32, [None])
    self.pos_t = tf.placeholder(tf.int32, [None])
    self.pos_r = tf.placeholder(tf.int32, [None])
    self.neg_h = tf.placeholder(tf.int32, [None])
    self.neg_t = tf.placeholder(tf.int32, [None])
    self.neg_r = tf.placeholder(tf.int32, [None])

    with tf.name_scope("embedding"):
        self.ent_embeddings = tf.get_variable(name="ent_embedding", shape=[entity_total, size], initializer=tf.contrib.layers.xavier_initializer(uniform=False))
        self.rel_embeddings = tf.get_variable(name="rel_embedding", shape=[relation_total, size], initializer=tf.contrib.layers.xavier_initializer(uniform=False))
        pos_h_e = tf.nn.embedding_lookup(self.ent_embeddings, self.pos_h)
        pos_t_e = tf.nn.embedding_lookup(self.ent_embeddings, self.pos_t)
        pos_r_e = tf.nn.embedding_lookup(self.rel_embeddings, self.pos_r)
        neg_h_e = tf.nn.embedding_lookup(self.ent_embeddings, self.neg_h)
        neg_t_e = tf.nn.embedding_lookup(self.ent_embeddings, self.neg_t)
        neg_r_e = tf.nn.embedding_lookup(self.rel_embeddings, self.neg_r)

    if config.L1_flag:
        pos = tf.reduce_sum(abs(pos_h_e + pos_r_e - pos_t_e), 1, keep_dims=True)
        neg = tf.reduce_sum(abs(neg_h_e + neg_r_e - neg_t_e), 1, keep_dims=True)
        self.predict = pos
    else:
        pos = tf.reduce_sum((pos_h_e + pos_r_e - pos_t_e) ** 2, 1, keep_dims=True)
        neg = tf.reduce_sum((neg_h_e + neg_r_e - neg_t_e) ** 2, 1, keep_dims=True)
        self.predict = pos

    with tf.name_scope("output"):
        self.loss = tf.reduce_sum(tf.maximum(pos - neg + margin, 0))
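This is the TransE scoring scheme: a triple (h, r, t) is scored by the L1 or squared L2 distance of h + r - t, computed with reduce_sum over the embedding dimension, and training minimizes a margin ranking loss summed over the batch. A small eager-style sketch of the L1 branch (random embeddings and a margin of 1.0 are assumed):

import tensorflow as tf

h = tf.random.normal([5, 50])       # head embeddings for a batch of 5 triples
r = tf.random.normal([5, 50])       # relation embeddings
t = tf.random.normal([5, 50])       # tail embeddings
t_neg = tf.random.normal([5, 50])   # corrupted tails

pos = tf.reduce_sum(tf.abs(h + r - t), axis=1, keepdims=True)       # score per positive triple
neg = tf.reduce_sum(tf.abs(h + r - t_neg), axis=1, keepdims=True)   # score per negative triple
loss = tf.reduce_sum(tf.maximum(pos - neg + 1.0, 0.0))              # margin ranking loss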
Example 12: calc
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_sum [as alias]
def calc(self, e, t, r):
    return tf.nn.l2_normalize(e + tf.reduce_sum(e * t, 1, keep_dims=True) * r, 1)
Example 13: calc
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_sum [as alias]
def calc(self, e, n):
    norm = tf.nn.l2_normalize(n, 1)
    return e - tf.reduce_sum(e * norm, 1, keep_dims=True) * norm
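Both calc methods use reduce_sum over axis 1 as a row-wise dot product. Example 13 is the TransH-style projection onto the hyperplane orthogonal to a normal vector, e - (e . n) n with n normalized to unit length; Example 12 instead adds a dot-product-scaled relation vector and re-normalizes the result. A minimal sketch of the projection:

import tensorflow as tf

e = tf.random.normal([4, 20])                                 # entity embeddings
n = tf.math.l2_normalize(tf.random.normal([4, 20]), axis=1)   # unit normal vectors

dot = tf.reduce_sum(e * n, axis=1, keepdims=True)             # row-wise dot product, shape [4, 1]
projected = e - dot * n                                       # component of e orthogonal to n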
Example 14: contrastive_loss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_sum [as alias]
def contrastive_loss(self, y, d, batch_size):
    tmp = y * tf.square(d)
    # tmp = tf.mul(y, tf.square(d))
    tmp2 = (1 - y) * tf.square(tf.maximum((1 - d), 0))
    return tf.reduce_sum(tmp + tmp2) / batch_size / 2
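The contrastive loss sums y * d^2 for similar pairs and (1 - y) * max(1 - d, 0)^2 for dissimilar pairs, with reduce_sum doing the summation before dividing by 2 * batch_size. A tiny worked sketch (the labels and distances are made up):

import tensorflow as tf

y = tf.constant([1.0, 0.0])   # 1 = similar pair, 0 = dissimilar pair
d = tf.constant([0.2, 0.3])   # predicted distances in [0, 1]
batch_size = 2

loss = tf.reduce_sum(y * tf.square(d) +
                     (1 - y) * tf.square(tf.maximum(1 - d, 0))) / batch_size / 2
# (0.04 + 0.49) / 2 / 2 = 0.1325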
Example 15: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_sum [as alias]
def __init__(
        self, sequence_length, vocab_size, embedding_size, hidden_units, l2_reg_lambda, batch_size, trainableEmbeddings):
    # Placeholders for input, output and dropout
    self.input_x1 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x1")
    self.input_x2 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x2")
    self.input_y = tf.placeholder(tf.float32, [None], name="input_y")
    self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

    # Keeping track of l2 regularization loss (optional)
    l2_loss = tf.constant(0.0, name="l2_loss")

    # Embedding layer
    with tf.name_scope("embedding"):
        self.W = tf.Variable(
            tf.constant(0.0, shape=[vocab_size, embedding_size]),
            trainable=trainableEmbeddings, name="W")
        self.embedded_words1 = tf.nn.embedding_lookup(self.W, self.input_x1)
        self.embedded_words2 = tf.nn.embedding_lookup(self.W, self.input_x2)
    print(self.embedded_words1)

    # Build the two stacked-RNN towers and the normalized distance between them
    with tf.name_scope("output"):
        self.out1 = self.stackedRNN(self.embedded_words1, self.dropout_keep_prob, "side1", embedding_size, sequence_length, hidden_units)
        self.out2 = self.stackedRNN(self.embedded_words2, self.dropout_keep_prob, "side2", embedding_size, sequence_length, hidden_units)
        self.distance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(self.out1, self.out2)), 1, keep_dims=True))
        self.distance = tf.div(self.distance, tf.add(tf.sqrt(tf.reduce_sum(tf.square(self.out1), 1, keep_dims=True)),
                                                     tf.sqrt(tf.reduce_sum(tf.square(self.out2), 1, keep_dims=True))))
        self.distance = tf.reshape(self.distance, [-1], name="distance")

    with tf.name_scope("loss"):
        self.loss = self.contrastive_loss(self.input_y, self.distance, batch_size)

    #### Accuracy computation is outside of this class.
    with tf.name_scope("accuracy"):
        self.temp_sim = tf.subtract(tf.ones_like(self.distance), tf.rint(self.distance), name="temp_sim")  # auto threshold 0.5
        correct_predictions = tf.equal(self.temp_sim, self.input_y)
        self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
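The distance here is the Euclidean distance between the two tower outputs, computed with reduce_sum over the feature axis, then divided by the sum of the two output norms so it lands in [0, 1] before being fed to the contrastive_loss from Example 14. A minimal sketch of that normalized distance (the tower outputs are random stand-ins):

import tensorflow as tf

out1 = tf.random.normal([8, 50])   # left tower output
out2 = tf.random.normal([8, 50])   # right tower output

dist = tf.sqrt(tf.reduce_sum(tf.square(out1 - out2), axis=1, keepdims=True))
norms = tf.sqrt(tf.reduce_sum(tf.square(out1), axis=1, keepdims=True)) + \
        tf.sqrt(tf.reduce_sum(tf.square(out2), axis=1, keepdims=True))
dist = tf.reshape(dist / norms, [-1])   # normalized distance per pair, shape [8]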