This article collects typical usage examples of the Python method keras.backend.logsumexp. If you are wondering how to use backend.logsumexp, or what it does in practice, the curated code examples below may help. You can also explore further usage examples from its containing module, keras.backend.
Below are 10 code examples of the backend.logsumexp method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
Example 1: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import logsumexp [as alias]
# This example additionally requires: import tensorflow as tf (for tf.transpose / tf.concat)
def call(self, x, reconstruction=False):
    self.reconstruction = reconstruction
    self.output_dim = K.int_shape(x)[-1]
    cross_channel_interp = self.cross_channel_interp
    y = x[:, :self.d_dim, :]
    w = x[:, self.d_dim:2*self.d_dim, :]
    intensity = K.exp(w)
    y = tf.transpose(y, perm=[0, 2, 1])
    w = tf.transpose(w, perm=[0, 2, 1])
    w2 = w
    w = K.tile(w[:, :, :, None], (1, 1, 1, self.d_dim))
    den = K.logsumexp(w, axis=2)
    w = K.exp(w2 - den)
    mean = K.mean(y, axis=1)
    mean = K.tile(mean[:, None, :], (1, self.output_dim, 1))
    w2 = K.dot(w*(y - mean), cross_channel_interp) + mean
    rep1 = tf.transpose(w2, perm=[0, 2, 1])
    if reconstruction is False:
        y_trans = x[:, 2*self.d_dim:3*self.d_dim, :]
        y_trans = y_trans - rep1  # subtract the smooth part from the transient part
        rep1 = tf.concat([rep1, intensity, y_trans], 1)
    return rep1
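The pair of lines `den = K.logsumexp(w, axis=2)` / `w = K.exp(w2 - den)` above is a numerically stable softmax computed in log space: subtracting the logsumexp before exponentiating keeps every intermediate value bounded. A minimal NumPy sketch of the same identity (the function name and test values are illustrative, not from the source):

import numpy as np

def softmax_via_logsumexp(w, axis):
    # logsumexp with the max-shift trick, so np.exp never overflows
    m = np.max(w, axis=axis, keepdims=True)
    lse = m + np.log(np.sum(np.exp(w - m), axis=axis, keepdims=True))
    return np.exp(w - lse)

w = np.array([[1000.0, 1001.0, 1002.0]])   # naive np.exp(w) would overflow to inf
print(softmax_via_logsumexp(w, axis=1))    # [[0.09003057 0.24472847 0.66524096]]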
Example 2: step
# Required import: from keras import backend [as alias]
# Or: from keras.backend import logsumexp [as alias]
def step(self, input_energy_t, states, return_logZ=True):
    # Note: in the following, `prev_target_val` has shape = (B, F)
    # where B = batch_size, F = output feature dim
    # Note: `i` is of float32, due to the behavior of `K.rnn`
    prev_target_val, i, chain_energy = states[:3]
    t = K.cast(i[0, 0], dtype='int32')
    if len(states) > 3:
        if K.backend() == 'theano':
            m = states[3][:, t:(t + 2)]
        else:
            m = K.tf.slice(states[3], [0, t], [-1, 2])
        input_energy_t = input_energy_t * K.expand_dims(m[:, 0])
        chain_energy = chain_energy * K.expand_dims(K.expand_dims(m[:, 0] * m[:, 1]))  # (1, F, F)*(B, 1, 1) -> (B, F, F)
    if return_logZ:
        energy = chain_energy + K.expand_dims(input_energy_t - prev_target_val, 2)  # shapes: (1, F, F) + (B, F, 1) -> (B, F, F)
        new_target_val = K.logsumexp(-energy, 1)  # shape: (B, F)
        return new_target_val, [new_target_val, i + 1]
    else:
        energy = chain_energy + K.expand_dims(input_energy_t + prev_target_val, 2)
        min_energy = K.min(energy, 1)
        argmin_table = K.cast(K.argmin(energy, 1), K.floatx())  # cast for tf-version `K.rnn`
        return argmin_table, [min_energy, i + 1]
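The same step serves two passes: with return_logZ=True it is one forward (sum-product) step of the CRF recursion in log space, otherwise one Viterbi (min-sum) step that also emits argmin backpointers. A sketch of the two reductions over a shared energy tensor, in plain NumPy with illustrative names:

import numpy as np
from scipy.special import logsumexp

B, F = 2, 4
energy = np.random.randn(B, F, F)         # energy[b, i, j]: cost of moving from label i to j

log_z_update = logsumexp(-energy, axis=1)  # forward pass, shape (B, F)
min_energy = energy.min(axis=1)            # Viterbi pass, shape (B, F)
backpointers = energy.argmin(axis=1)       # best previous label for each target label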
Example 3: free_energy0
# Required import: from keras import backend [as alias]
# Or: from keras.backend import logsumexp [as alias]
def free_energy0(x, U, mask=None):
    '''Free energy without boundary potential handling.'''
    initial_states = [x[:, 0, :]]
    last_alpha, _ = _forward(x,
                             lambda B: [K.logsumexp(B, axis=1)],
                             initial_states,
                             U,
                             mask)
    return last_alpha[:, 0]
Example 4: step
# Required import: from keras import backend [as alias]
# Or: from keras.backend import logsumexp [as alias]
def step(self, input_energy_t, states, return_logZ=True):
    # Note: in the following, `prev_target_val` has shape = (B, F)
    # where B = batch_size, F = output feature dim
    # Note: `i` is of float32, due to the behavior of `K.rnn`
    prev_target_val, i, chain_energy = states[:3]
    t = K.cast(i[0, 0], dtype='int32')
    if len(states) > 3:
        if K.backend() == 'theano':
            m = states[3][:, t:(t + 2)]
        else:
            m = K.slice(states[3], [0, t], [-1, 2])
        input_energy_t = input_energy_t * K.expand_dims(m[:, 0])
        # (1, F, F) * (B, 1, 1) -> (B, F, F)
        chain_energy = chain_energy * K.expand_dims(
            K.expand_dims(m[:, 0] * m[:, 1]))
    if return_logZ:
        # shapes: (1, F, F) + (B, F, 1) -> (B, F, F)
        energy = chain_energy + K.expand_dims(input_energy_t - prev_target_val, 2)
        new_target_val = K.logsumexp(-energy, 1)  # shape: (B, F)
        return new_target_val, [new_target_val, i + 1]
    else:
        energy = chain_energy + K.expand_dims(input_energy_t + prev_target_val, 2)
        min_energy = K.min(energy, 1)
        # cast for tf-version `K.rnn`
        argmin_table = K.cast(K.argmin(energy, 1), K.floatx())
        return argmin_table, [min_energy, i + 1]
Example 5: test_logsumexp
# Required import: from keras import backend [as alias]
# Or: from keras.backend import logsumexp [as alias]
# This test also uses: import numpy as np; from numpy.testing import assert_allclose
# (`BACKENDS` comes from the Keras backend test suite)
def test_logsumexp(self, x_np, axis, keepdims):
    '''
    Check if K.logsumexp works properly for values close to one.
    '''
    for k in BACKENDS:
        x = k.variable(x_np)
        assert_allclose(k.eval(k.logsumexp(x, axis=axis, keepdims=keepdims)),
                        np.log(np.sum(np.exp(x_np), axis=axis, keepdims=keepdims)),
                        rtol=1e-5)
Example 6: test_logsumexp_optim
# Required import: from keras import backend [as alias]
# Or: from keras.backend import logsumexp [as alias]
# This test also uses: import numpy as np; from numpy.testing import assert_allclose
# (`KTF` is the Keras TensorFlow backend, as imported in the Keras test suite)
def test_logsumexp_optim(self):
    '''
    Check if optimization works.
    '''
    for k in [KTF]:
        x_np = np.array([1e+4, 1e-4])
        assert_allclose(k.eval(k.logsumexp(k.variable(x_np), axis=0)),
                        1e4,
                        rtol=1e-5)
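This test pins down why the max-shift optimization matters: for x = [1e4, 1e-4], a naive log(sum(exp(x))) overflows float64 and yields inf, while the shifted form returns the correct value of about 1e4. A standalone NumPy illustration:

import numpy as np

x = np.array([1e4, 1e-4])

naive = np.log(np.sum(np.exp(x)))            # np.exp(1e4) overflows float64 -> inf
m = x.max()
stable = m + np.log(np.sum(np.exp(x - m)))   # -> 10000.0
print(naive, stable)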
Example 7: free_energy0
# Required import: from keras import backend [as alias]
# Or: from keras.backend import logsumexp [as alias]
def free_energy0(x, U, mask=None):
    """Free energy without boundary potential handling."""
    initial_states = [x[:, 0, :]]
    last_alpha, _ = _forward(x,
                             lambda B: [K.logsumexp(B, axis=1)],
                             initial_states,
                             U,
                             mask)
    return last_alpha[:, 0]
Example 8: log_norm_step
# Required import: from keras import backend [as alias]
# Or: from keras.backend import logsumexp [as alias]
def log_norm_step(self, inputs, states):
    """Recursively compute the normalization factor (the log partition function).
    Key points: 1. compute it recursively; 2. use logsumexp to avoid overflow.
    Trick: align the tensors via expand_dims.
    """
    states = K.expand_dims(states[0], 2)     # (batch_size, output_dim, 1)
    trans = K.expand_dims(self.trans, 0)     # (1, output_dim, output_dim)
    output = K.logsumexp(states + trans, 1)  # (batch_size, output_dim)
    return output + inputs, [output + inputs]
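The broadcasting in this step can be checked in plain NumPy: states of shape (B, F, 1) plus trans of shape (1, F, F) broadcasts to (B, F, F), and the logsumexp over axis 1 marginalizes out the previous label. A small sketch with a brute-force check (names and sizes are illustrative):

import numpy as np
from scipy.special import logsumexp

B, F = 2, 3
states = np.random.randn(B, F)   # log-scores of paths ending at each label
trans = np.random.randn(F, F)    # trans[i, j]: transition score from label i to j
inputs = np.random.randn(B, F)   # emission scores for the current step

out = logsumexp(states[:, :, None] + trans[None], axis=1) + inputs  # (B, F)

# brute-force check for one batch element and one target label
b, j = 0, 1
expected = np.log(sum(np.exp(states[b, i] + trans[i, j]) for i in range(F))) + inputs[b, j]
assert np.isclose(out[b, j], expected)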
Example 9: loss
# Required import: from keras import backend [as alias]
# Or: from keras.backend import logsumexp [as alias]
def loss(self, y_true, y_pred):  # the target must be in one-hot form
    mask = 1 - y_true[:, 1:, -1] if self.ignore_last_label else None
    y_true, y_pred = y_true[:, :, :self.num_labels], y_pred[:, :, :self.num_labels]
    init_states = [y_pred[:, 0]]  # initial state
    log_norm, _, _ = K.rnn(self.log_norm_step, y_pred[:, 1:], init_states, mask=mask)  # the Z vector (in log space)
    log_norm = K.logsumexp(log_norm, 1, keepdims=True)  # Z (in log space)
    path_score = self.path_score(y_pred, y_true)  # the numerator (in log space)
    return log_norm - path_score  # i.e. -log(numerator / denominator)
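Examples 8 and 9 come from the same CRF layer: K.rnn drives log_norm_step across the timesteps to accumulate log Z, and loss subtracts the log-score of the true path. A hedged usage sketch; the CRF class name, its constructor, and the surrounding model are assumptions for illustration, not taken from the source:

# minimal sketch, assuming a Keras CRF layer exposing the `loss` method above
from keras.models import Sequential
from keras.layers import Embedding, Bidirectional, LSTM

num_labels = 5
crf = CRF(num_labels)  # hypothetical layer wrapping log_norm_step / loss / path_score

model = Sequential()
model.add(Embedding(10000, 128, mask_zero=True))
model.add(Bidirectional(LSTM(64, return_sequences=True)))
model.add(crf)  # emits per-timestep label scores
model.compile(optimizer='adam', loss=crf.loss)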
Example 10: step
# Required import: from keras import backend [as alias]
# Or: from keras.backend import logsumexp [as alias]
def step(self, input_energy_t, states, return_logZ=True):
    # Note: in the following, `prev_target_val` has shape = (B, F)
    # where B = batch_size, F = output feature dim
    # Note: `i` is of float32, due to the behavior of `K.rnn`
    prev_target_val, i, chain_energy = states[:3]
    t = K.cast(i[0, 0], dtype='int32')
    if len(states) > 3:
        if K.backend() == 'theano':
            m = states[3][:, t:(t + 2)]
        else:
            m = K.tf.slice(states[3], [0, t], [-1, 2])
        input_energy_t = input_energy_t * K.expand_dims(m[:, 0])
        # (1, F, F) * (B, 1, 1) -> (B, F, F)
        chain_energy = chain_energy * K.expand_dims(
            K.expand_dims(m[:, 0] * m[:, 1]))
    if return_logZ:
        # shapes: (1, F, F) + (B, F, 1) -> (B, F, F)
        energy = chain_energy + K.expand_dims(input_energy_t - prev_target_val, 2)
        new_target_val = K.logsumexp(-energy, 1)  # shape: (B, F)
        return new_target_val, [new_target_val, i + 1]
    else:
        energy = chain_energy + K.expand_dims(input_energy_t + prev_target_val, 2)
        min_energy = K.min(energy, 1)
        # cast for tf-version `K.rnn`
        argmin_table = K.cast(K.argmin(energy, 1), K.floatx())
        return argmin_table, [min_energy, i + 1]