This article collects typical usage examples of the Python method tensorflow.python.keras.backend.sum. If you are wondering what backend.sum does and how to use it, the curated code examples below may help. You can also explore further usage examples from its containing module, tensorflow.python.keras.backend.
The following presents 7 code examples of the backend.sum method, sorted by popularity by default.
Example 1: _create_A_L
# Required import: from tensorflow.python.keras import backend [as alias]
# Or: from tensorflow.python.keras.backend import sum [as alias]
# Also requires: import scipy.sparse as sp
def _create_A_L(self, graph, node2idx):
    node_size = graph.number_of_nodes()
    A_data = []
    A_row_index = []
    A_col_index = []

    for edge in graph.edges():
        v1, v2 = edge
        edge_weight = graph[v1][v2].get('weight', 1)
        A_data.append(edge_weight)
        A_row_index.append(node2idx[v1])
        A_col_index.append(node2idx[v2])

    A = sp.csr_matrix((A_data, (A_row_index, A_col_index)), shape=(node_size, node_size))
    # symmetrize: register every edge in both directions, so A_ = A + A^T
    A_ = sp.csr_matrix((A_data + A_data, (A_row_index + A_col_index, A_col_index + A_row_index)),
                       shape=(node_size, node_size))
    # unnormalized graph Laplacian L = D - A_, where D is the diagonal degree matrix
    D = sp.diags(A_.sum(axis=1).flatten().tolist()[0])
    L = D - A_
    return A, L
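As a sanity check of what this helper computes, here is a minimal standalone sketch (the toy graph and node2idx mapping are made up for illustration; assumes networkx and scipy are installed):

import networkx as nx
import numpy as np
import scipy.sparse as sp

# tiny weighted path graph: a -- b -- c
graph = nx.Graph()
graph.add_edge('a', 'b', weight=2.0)
graph.add_edge('b', 'c')  # weight defaults to 1

node2idx = {n: i for i, n in enumerate(graph.nodes())}
node_size = graph.number_of_nodes()

data, rows, cols = [], [], []
for v1, v2 in graph.edges():
    data.append(graph[v1][v2].get('weight', 1))
    rows.append(node2idx[v1])
    cols.append(node2idx[v2])

# same symmetrization and Laplacian construction as _create_A_L
A_ = sp.csr_matrix((data + data, (rows + cols, cols + rows)), shape=(node_size, node_size))
L = sp.diags(A_.sum(axis=1).flatten().tolist()[0]) - A_

# every row of a graph Laplacian sums to zero
assert np.allclose(L.toarray().sum(axis=1), 0)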
Example 2: call
# Required import: from tensorflow.python.keras import backend [as alias]
# Or: from tensorflow.python.keras.backend import sum [as alias]
def call(self, inputs, **kwargs):
    inputs = inputs if isinstance(inputs, list) else [inputs]
    if len(inputs) < 1 or len(inputs) > 2:
        raise ValueError("AttentionLayer expects one or two inputs.")
    actual_input = inputs[0]
    mask = inputs[1] if len(inputs) > 1 else None
    if mask is not None and not (((len(mask.shape) == 3 and mask.shape[2] == 1) or len(mask.shape) == 2)
                                 and mask.shape[1] == self.input_length):
        raise ValueError("`mask` should be of shape (batch, input_length) or (batch, input_length, 1) "
                         "when calling an AttentionLayer.")
    assert actual_input.shape[-1] == self.attention_param.shape[0]

    # (batch, input_length, input_dim) x (input_dim, 1) ==> (batch, input_length, 1)
    attention_weights = K.dot(actual_input, self.attention_param)
    if mask is not None:
        if len(mask.shape) == 2:
            mask = K.expand_dims(mask, axis=2)  # (batch, input_length, 1)
        # log(1) = 0 leaves real positions untouched; log(0) = -inf removes padded ones
        mask = K.log(mask)
        attention_weights += mask
    attention_weights = K.softmax(attention_weights, axis=1)  # (batch, input_length, 1)
    # (batch, input_length, input_dim) * (batch, input_length, 1) broadcasts, then sums over time
    result = K.sum(actual_input * attention_weights, axis=1)  # (batch, input_dim)
    return result, attention_weights
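The masking trick above relies on log(1) = 0 and log(0) = -inf, so padded positions receive exactly zero weight after the softmax. A minimal numeric sketch of just that step (made-up scores; assumes TF 2.x eager execution):

import tensorflow as tf
from tensorflow.python.keras import backend as K

scores = tf.constant([[[0.5], [1.0], [2.0]]])  # (batch=1, input_length=3, 1)
mask = tf.constant([[[1.0], [1.0], [0.0]]])    # last position is padding

weights = K.softmax(scores + K.log(mask), axis=1)
print(weights.numpy().squeeze())  # third weight is exactly 0.0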
Example 3: l_2nd
# Required import: from tensorflow.python.keras import backend [as alias]
# Or: from tensorflow.python.keras.backend import sum [as alias]
def l_2nd(beta):
    def loss_2nd(y_true, y_pred):
        # B weights the reconstruction error: beta on non-zero entries of y_true, 1 elsewhere
        # (backend ops instead of numpy, since y_true is a symbolic tensor inside a Keras loss)
        b_ = K.ones_like(y_true) + (beta - 1) * K.cast(K.not_equal(y_true, 0), K.floatx())
        x = K.square((y_true - y_pred) * b_)
        t = K.sum(x, axis=-1)
        return K.mean(t)
    return loss_2nd
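Using l_2nd as defined above, a quick toy evaluation (hypothetical numbers; assumes TF 2.x eager execution) shows that errors on the non-zero entries of y_true are penalized beta times harder:

import tensorflow as tf

loss_fn = l_2nd(beta=5.0)
y_true = tf.constant([[0.0, 1.0, 0.0, 1.0]])
y_pred = tf.constant([[0.5, 0.5, 0.0, 1.0]])

# weights b_ = [1, 5, 1, 5]; squared weighted errors: 0.25 + 6.25 + 0 + 0 = 6.5
print(float(loss_fn(y_true, y_pred)))  # 6.5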
Example 4: keep_predict_loss
# Required import: from tensorflow.python.keras import backend [as alias]
# Or: from tensorflow.python.keras.backend import sum [as alias]
# Also requires: from tensorflow.python.framework import ops
def keep_predict_loss(y_true, y_pred):
    y_pred = ops.convert_to_tensor(y_pred)
    return K.sum(y_true * y_pred)
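Because this loss is just the sum of the elementwise product, y_true can act as a per-position weight (or reward) on y_pred. A toy evaluation (made-up values; TF 2.x eager):

import tensorflow as tf

y_true = tf.constant([1.0, 0.0, 2.0])
y_pred = tf.constant([0.3, 0.9, 0.5])

# 1*0.3 + 0*0.9 + 2*0.5 = 1.3
print(float(keep_predict_loss(y_true, y_pred)))  # 1.3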
Example 5: call
# Required import: from tensorflow.python.keras import backend [as alias]
# Or: from tensorflow.python.keras.backend import sum [as alias]
# Also requires: import tensorflow as tf
def call(self, x, mask=None):
    """Masked mean pooling over the time axis.

    1. mask is a bool tensor and needs casting before the computation.
    2. mask is 2-D: (batch_size, time_steps).
    """
    if mask is not None:
        # (batch, time) -> (batch, features, time) -> (batch, time, features)
        mask = K.repeat(mask, x.shape[-1])
        mask = tf.transpose(mask, [0, 2, 1])
        mask = tf.cast(mask, tf.float32)
        x = x * mask
        # average only over the unmasked time steps
        return K.sum(x, axis=1) / K.sum(mask, axis=1)
    else:
        return K.mean(x, axis=1)
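A numeric check of the masked mean (made-up tensors; TF 2.x eager): the padded time step is excluded from both the numerator and the denominator, so it cannot skew the average.

import tensorflow as tf
from tensorflow.python.keras import backend as K

x = tf.constant([[[1.0, 2.0], [3.0, 4.0], [9.0, 9.0]]])  # (batch=1, time=3, features=2)
mask = tf.constant([[True, True, False]])                 # last step is padding

m = tf.cast(tf.transpose(K.repeat(mask, x.shape[-1]), [0, 2, 1]), tf.float32)
pooled = K.sum(x * m, axis=1) / K.sum(m, axis=1)
print(pooled.numpy())  # [[2. 3.]] -- the padded step [9, 9] is ignored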
Example 6: call
# Required import: from tensorflow.python.keras import backend [as alias]
# Or: from tensorflow.python.keras.backend import sum [as alias]
def call(self, x, training=None):
    if len(x) != 2:
        raise Exception('input layers must be a list: mean and logvar')
    if len(x[0].shape) != 2 or len(x[1].shape) != 2:
        raise Exception('input shape is not a vector [batchSize, latentSize]')

    mean = x[0]
    logvar = x[1]

    # trick to allow setting the batch size at train/eval time
    if mean.shape[0].value is None or logvar.shape[0].value is None:
        return mean + 0 * logvar  # Keras needs the * 0 so the gradient is not None

    if self.reg is not None:
        # KL divergence between N(mean, exp(logvar)) and the standard normal N(0, 1)
        latent_loss = -0.5 * (1 + logvar
                              - K.square(mean)
                              - K.exp(logvar))
        latent_loss = K.sum(latent_loss, axis=-1)  # sum over the latent dimension
        latent_loss = K.mean(latent_loss, axis=0)  # average over the batch
        # use beta to force less usage of the vector space:
        latent_loss = self.beta * latent_loss
        self.add_loss(latent_loss, x)

    def reparameterization_trick():
        epsilon = K.random_normal(shape=logvar.shape, mean=0., stddev=1.)
        stddev = K.exp(logvar * 0.5)
        return mean + stddev * epsilon

    return K.in_train_phase(reparameterization_trick, mean + 0 * logvar,
                            training=training)  # TODO: figure out why this does not work in the targeted TF version
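The latent_loss above is the closed-form KL divergence between N(mean, exp(logvar)) and a standard normal; it vanishes exactly when mean = 0 and logvar = 0. A small numeric sketch (made-up values; TF 2.x eager):

import tensorflow as tf
from tensorflow.python.keras import backend as K

mean = tf.constant([[0.0, 0.5]])
logvar = tf.constant([[0.0, -1.0]])

kl = K.sum(-0.5 * (1 + logvar - K.square(mean) - K.exp(logvar)), axis=-1)
# first latent unit contributes 0 (exactly standard normal); second ~0.309
print(float(K.mean(kl, axis=0)))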
Example 7: call
# Required import: from tensorflow.python.keras import backend [as alias]
# Or: from tensorflow.python.keras.backend import sum [as alias]
def call(self, x, **kwargs):
    # similarity = exp(-||x0 - x1||_1), the Manhattan-distance kernel used in Siamese networks
    self.result = K.exp(-K.sum(K.abs(x[0] - x[1]), axis=1, keepdims=True))
    return self.result
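This is the exponentiated negative Manhattan distance commonly used as the similarity head of a Siamese network: identical inputs score 1.0, and the score decays toward 0 as the vectors move apart. A standalone check of the formula (toy vectors; TF 2.x eager):

import tensorflow as tf
from tensorflow.python.keras import backend as K

def manhattan_similarity(u, v):
    return K.exp(-K.sum(K.abs(u - v), axis=1, keepdims=True))

a = tf.constant([[1.0, 2.0, 3.0]])
b = tf.constant([[2.0, 0.0, 3.0]])

print(float(manhattan_similarity(a, a)))  # 1.0 -- identical vectors
print(float(manhattan_similarity(a, b)))  # exp(-3) ~= 0.0498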