This article collects typical usage examples of the Python method keras.backend.floatx. If you are wondering what exactly backend.floatx does and how to use it, the curated code examples below may help; you can also explore the other methods of the keras.backend module.
The following shows 15 code examples of backend.floatx, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
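As background, keras.backend.floatx() returns the default Keras float type as a string (e.g. 'float32'), and keras.backend.set_floatx() changes it globally. A minimal sketch of that behavior:

from keras import backend as K

print(K.floatx())         # 'float32' by default
K.set_floatx('float64')   # change the global default dtype
print(K.floatx())         # 'float64'
K.set_floatx('float32')   # restore the default assumed by the examples below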
Example 1: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import floatx [as alias]
def call(self, x, mask=None):
    # computes a probability distribution over the timesteps
    # uses the 'max trick' for numerical stability
    # reshape is done to avoid an issue with TensorFlow
    # and 1-dimensional weights
    logits = K.dot(x, self.W)  # dot_product-style projection onto the attention weights
    x_shape = K.shape(x)
    logits = K.reshape(logits, (x_shape[0], x_shape[1]))
    ai = K.exp(logits - K.max(logits, axis=-1, keepdims=True))
    # masked timesteps have zero weight
    if mask is not None:
        mask = K.cast(mask, K.floatx())
        ai = ai * mask
    att_weights = ai / (K.sum(ai, axis=1, keepdims=True) + K.epsilon())
    weighted_input = x * K.expand_dims(att_weights)
    result = K.sum(weighted_input, axis=1)
    if self.return_attention:
        return [result, att_weights]
    return result
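As an aside, the 'max trick' in the example above subtracts the row maximum from the logits before exponentiating; the softmax result is unchanged, but exp can no longer overflow. A minimal standalone NumPy sketch (not part of the original layer):

import numpy as np

logits = np.array([1000.0, 1001.0, 1002.0])  # np.exp(logits) alone would overflow
shifted = np.exp(logits - logits.max())      # all exponents are <= 0, so exp is safe
weights = shifted / shifted.sum()
print(weights)                               # [0.09003057 0.24472847 0.66524096]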
Example 2: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import floatx [as alias]
def call(self, x, mask=None):
    # theta holds the per-sample transformation parameters; _trans,
    # _transform_trans, _fusion and _transform_rot are helpers defined
    # elsewhere in transform_rnn.py (T is presumably theano.tensor,
    # i.e. this also requires: import theano.tensor as T)
    conv_input, theta = x
    s = theta.shape
    theta = T.reshape(theta, [-1, s[2]])
    m = K.not_equal(conv_input, 0.)
    # for translation
    trans = _trans(theta)
    output = _transform_trans(trans, conv_input)
    output = output * K.cast(m, K.floatx())
    # for rotation
    M = _fusion(theta)
    output = _transform_rot(M, output)
    return output
Author: microsoft, project: View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition, lines: 18, source: transform_rnn.py
Example 3: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import floatx [as alias]
def call(self, x, mask=None):
    # dot_product is a project helper (dot along the last axis)
    uit = dot_product(x, self.W)
    if self.bias:
        uit += self.b
    uit = K.tanh(uit)
    ait = dot_product(uit, self.u)
    a = K.exp(ait)
    # apply the mask after the exp; weights are re-normalized below
    if mask is not None:
        # cast the mask to floatx to avoid float64 upcasting in Theano
        a *= K.cast(mask, K.floatx())
    # especially early in training the sum can be almost zero, which
    # yields NaNs; adding a small epsilon to the denominator avoids this
    # a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx())
    a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
    a = K.expand_dims(a)
    weighted_input = x * a
    return K.sum(weighted_input, axis=1)
Example 4: create_transformer
# Required import: from keras import backend [as alias]
# Or: from keras.backend import floatx [as alias]
def create_transformer(embedding_dim: int = 768, embedding_dropout: float = 0.1,
                       vocab_size: int = 30000, max_len: int = 512,
                       trainable_pos_embedding: bool = True, num_heads: int = 12, num_layers: int = 12,
                       attention_dropout: float = 0.1, use_one_embedding_dropout: bool = False,
                       d_hid: int = 768 * 4, residual_dropout: float = 0.1,
                       use_attn_mask: bool = True) -> keras.Model:
    # TextEncoder, Embedding and EncoderLayer are project-specific classes
    vocab_size += TextEncoder.SPECIAL_COUNT
    tokens = Input(batch_shape=(None, max_len), name='token_input', dtype='int32')
    segment_ids = Input(batch_shape=(None, max_len), name='segment_input', dtype='int32')
    pos_ids = Input(batch_shape=(None, max_len), name='position_input', dtype='int32')
    attn_mask = Input(batch_shape=(None, 1, max_len, max_len), name='attention_mask_input',
                      dtype=K.floatx()) if use_attn_mask else None
    inputs = [tokens, segment_ids, pos_ids]
    embedding_layer = Embedding(embedding_dim, embedding_dropout, vocab_size, max_len,
                                trainable_pos_embedding, use_one_embedding_dropout)
    x = embedding_layer(inputs)
    for i in range(num_layers):
        x = EncoderLayer(embedding_dim, num_heads, d_hid, residual_dropout,
                         attention_dropout, use_attn_mask, i)(x, attn_mask)
    inputs = inputs + ([attn_mask] if use_attn_mask else [])
    return keras.Model(inputs=inputs, outputs=x, name='Transformer')
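A hypothetical smoke test for create_transformer (the reduced sizes and random inputs below are made up, and the exact id conventions for the position input depend on the project's Embedding class, so treat this only as a shape sketch):

import numpy as np

model = create_transformer(max_len=128, num_layers=2)  # smaller than the defaults for speed
batch, max_len = 4, 128
tokens = np.random.randint(0, 30000, size=(batch, max_len)).astype('int32')
segments = np.zeros((batch, max_len), dtype='int32')
positions = np.tile(np.arange(max_len, dtype='int32'), (batch, 1))
attn_mask = np.ones((batch, 1, max_len, max_len), dtype=K.floatx())
out = model.predict([tokens, segments, positions, attn_mask])
print(out.shape)  # expected (4, 128, 768) if the encoder preserves the sequence shape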
Example 5: next
# Required import: from keras import backend [as alias]
# Or: from keras.backend import floatx [as alias]
def next(self):
    # keep only the mechanism that advances the batch indexing under the lock
    with self.lock:
        index_array, current_index, current_batch_size = next(self.index_generator)
    # the transformation of images is not under the thread lock,
    # so it can be done in parallel
    batch_x = np.zeros(tuple([current_batch_size] + list(self.image_size)), dtype=K.floatx())
    for i, j in enumerate(index_array):
        # note: scipy.misc.imread/imresize were removed in recent SciPy
        # versions; imageio and PIL are the usual replacements
        x = scipy.misc.imread(self.x[j])
        x = scipy.misc.imresize(x, self.image_size)
        x = self.image_data_generator.random_transform(x.astype(K.floatx()))
        x = self.image_data_generator.standardize(x)
        batch_x[i] = x
    if self.save_to_dir:
        for i in range(current_batch_size):
            # image here is keras.preprocessing.image
            img = image.array_to_img(batch_x[i], self.data_format, scale=True)
            fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix,
                                                              index=current_index + i,
                                                              hash=np.random.randint(1e4),
                                                              format=self.save_format)
            img.save(os.path.join(self.save_to_dir, fname))
    batch_y = self.y[index_array]
    return batch_x, batch_y
Example 6: gen_cosine_amp
# Required import: from keras import backend [as alias]
# Or: from keras.backend import floatx [as alias]
def gen_cosine_amp(amp=100, period=1000, x0=0, xn=50000, step=1, k=0.0001):
    """Generates a cosine time series with exponentially
    decreasing amplitude.
    Arguments:
        amp: amplitude of the cosine function
        period: period of the cosine function
        x0: initial x of the time series
        xn: final x of the time series
        step: step of the time series discretization
        k: exponential decay rate
    """
    cos = np.zeros(((xn - x0) * step, 1, 1), dtype=K.floatx())
    for i in range(len(cos)):
        idx = x0 + i * step
        cos[i, 0, 0] = amp * np.cos(2 * np.pi * idx / period)
        cos[i, 0, 0] = cos[i, 0, 0] * np.exp(-k * idx)
    return cos
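A quick usage sketch for the generator above (the shapes follow directly from the code):

cos = gen_cosine_amp(amp=100, period=1000, xn=5000)
print(cos.shape)  # (5000, 1, 1): a (samples, timesteps, features) layout suited to Keras RNNs
print(cos.dtype)  # matches K.floatx(), e.g. float32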
Example 7: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import floatx [as alias]
def call(self, x, mask=None):
    if self.mode == 'maximum_likelihood':
        # draw the maximum-likelihood sample from a Bernoulli distribution:
        # x* = argmax_x p(x) = 1 if p(x=1) >= 0.5, else 0
        return K.round(x)
    elif self.mode == 'random':
        # draw a random sample from a Bernoulli distribution:
        # x* = x ~ p(x) = 1 if p(x=1) > uniform(0, 1), else 0
        # return self.srng.binomial(size=x.shape, n=1, p=x, dtype=K.floatx())
        return K.random_binomial(x.shape, p=x, dtype=K.floatx())
    elif self.mode == 'mean_field':
        # mean-field approximation of a Bernoulli sample:
        # x* = E[p(x)] = E[Bern(x; p)] = p
        return x
    elif self.mode == 'nrlu':
        return nrlu(x)  # nrlu is a project-specific activation
    else:
        raise NotImplementedError('Unknown sample mode!')
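For intuition, the first three modes can be mirrored in plain NumPy (a standalone sketch, not part of the original layer; the project-specific nrlu mode is omitted):

import numpy as np

p = np.array([0.1, 0.4, 0.6, 0.9])  # p(x=1) per unit
ml = np.round(p)                     # maximum likelihood: [0. 0. 1. 1.]
rnd = (np.random.uniform(size=p.shape) < p).astype(p.dtype)  # one random Bernoulli draw
mf = p                               # mean field: the probabilities themselves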
Example 8: conv_kernel_initializer
# Required import: from keras import backend [as alias]
# Or: from keras.backend import floatx [as alias]
def conv_kernel_initializer(shape, dtype=K.floatx()):
    """Initialization for convolutional kernels.
    The main difference from tf.variance_scaling_initializer is that
    tf.variance_scaling_initializer uses a truncated normal with an uncorrected
    standard deviation, whereas here we use a plain normal distribution.
    Similarly, tf.contrib.layers.variance_scaling_initializer uses a truncated
    normal with a corrected standard deviation.
    Args:
        shape: shape of the variable
        dtype: dtype of the variable
    Returns:
        an initialization for the variable
    """
    # also requires: import numpy as np and import tensorflow as tf (TF 1.x API)
    kernel_height, kernel_width, _, out_filters = shape
    fan_out = int(kernel_height * kernel_width * out_filters)
    return tf.random_normal(
        shape, mean=0.0, stddev=np.sqrt(2.0 / fan_out), dtype=dtype)
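A hypothetical usage sketch: Keras accepts a callable with this (shape, dtype) signature as an initializer, so the function can be passed directly to a layer (the layer and sizes below are made up for illustration):

from keras.layers import Conv2D

conv = Conv2D(filters=32, kernel_size=3, padding='same',
              kernel_initializer=conv_kernel_initializer)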
Example 9: add_boundary_energy
# Required import: from keras import backend [as alias]
# Or: from keras.backend import floatx [as alias]
def add_boundary_energy(x, b_start=None, b_end=None, mask=None):
    '''Given the observations x, adds the start boundary energy b_start
    (resp. the end boundary energy b_end) to the first (resp. last)
    elements and multiplies by the mask.'''
    if mask is None:
        if b_start is not None:
            x = K.concatenate([x[:, :1, :] + b_start, x[:, 1:, :]], axis=1)
        if b_end is not None:
            x = K.concatenate([x[:, :-1, :], x[:, -1:, :] + b_end], axis=1)
    else:
        mask = K.cast(mask, K.floatx())
        mask = K.expand_dims(mask, 2)
        x *= mask
        if b_start is not None:
            # start positions are where the mask switches from 0 to 1
            mask_r = K.concatenate([K.zeros_like(mask[:, :1]), mask[:, :-1]], axis=1)
            start_mask = K.cast(K.greater(mask, mask_r), K.floatx())
            x = x + start_mask * b_start
        if b_end is not None:
            # end positions are where the mask switches from 1 to 0
            mask_l = K.concatenate([mask[:, 1:], K.zeros_like(mask[:, -1:])], axis=1)
            end_mask = K.cast(K.greater(mask, mask_l), K.floatx())
            x = x + end_mask * b_end
    return x
Example 10: _forward
# Required import: from keras import backend [as alias]
# Or: from keras.backend import floatx [as alias]
def _forward(x, reduce_step, initial_states, U, mask=None):
    '''Forward recurrence of the linear-chain CRF.'''
    def _forward_step(energy_matrix_t, states):
        alpha_tm1 = states[-1]
        new_states = reduce_step(K.expand_dims(alpha_tm1, 2) + energy_matrix_t)
        return new_states[0], new_states

    U_shared = K.expand_dims(K.expand_dims(U, 0), 0)
    if mask is not None:
        mask = K.cast(mask, K.floatx())
        mask_U = K.expand_dims(K.expand_dims(mask[:, :-1] * mask[:, 1:], 2), 3)
        U_shared = U_shared * mask_U
    inputs = K.expand_dims(x[:, 1:, :], 2) + U_shared
    inputs = K.concatenate([inputs, K.zeros_like(inputs[:, -1:, :, :])], axis=1)
    last, values, _ = K.rnn(_forward_step, inputs, initial_states)
    return last, values
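For reference, when reduce_step is a log-sum-exp reduction this computes the standard forward recurrence of a linear-chain CRF; with a max reduction it becomes the Viterbi recursion. A sketch of the underlying math, using the unary energies x_t and transition matrix U from the code:

\alpha_1(j) = x_1(j), \qquad
\alpha_t(j) = \operatorname{logsumexp}_i \big( \alpha_{t-1}(i) + U_{ij} + x_t(j) \big)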
Example 11: __init__
# Required import: from keras import backend [as alias]
# Or: from keras.backend import floatx [as alias]
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,
             epsilon=None, decay=0., amsgrad=False, accum_iters=1, **kwargs):
    if accum_iters < 1:
        raise ValueError('accum_iters must be >= 1')
    super(AdamAccumulate, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
        self.iterations = K.variable(0, dtype='int64', name='iterations')
        self.lr = K.variable(lr, name='lr')
        self.beta_1 = K.variable(beta_1, name='beta_1')
        self.beta_2 = K.variable(beta_2, name='beta_2')
        self.decay = K.variable(decay, name='decay')
    if epsilon is None:
        epsilon = K.epsilon()
    self.epsilon = epsilon
    self.initial_decay = decay
    self.amsgrad = amsgrad
    self.accum_iters = K.variable(accum_iters, K.dtype(self.iterations))
    self.accum_iters_float = K.cast(self.accum_iters, K.floatx())
Example 12: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import floatx [as alias]
def call(self, x, mask=None):
    # dot_product is a project helper (dot along the last axis)
    eij = dot_product(x, self.W)
    if self.bias:
        eij += self.b
    eij = K.tanh(eij)
    a = K.exp(eij)
    if mask is not None:
        a *= K.cast(mask, K.floatx())
    a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
    weighted_input = x * K.expand_dims(a)
    result = K.sum(weighted_input, axis=1)
    if self.return_attention:
        return [result, a]
    return result
Example 13: get_updates
# Required import: from keras import backend [as alias]
# Or: from keras.backend import floatx [as alias]
def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]
    t = K.cast(self.iterations, K.floatx()) + 1
    # bias-corrected learning rate, as in Adam
    lr_t = self.learning_rate * (K.sqrt(1. - K.pow(self.beta_2, t)) /
                                 (1. - K.pow(self.beta_1, t)))
    ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    self.weights = [self.iterations] + ms + vs
    for p, g, m, v in zip(params, grads, ms, vs):
        m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
        v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
        p_t = lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
        self.updates.append(K.update(m, m_t))
        self.updates.append(K.update(v, v_t))
        self.updates.append(K.update_sub(p, p_t))
    return self.updates
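For reference, the loop above implements the standard Adam update; in math form:

m_t = \beta_1 m_{t-1} + (1-\beta_1) g_t, \qquad
v_t = \beta_2 v_{t-1} + (1-\beta_2) g_t^2

\theta_t = \theta_{t-1} - \eta \,\frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t} \cdot \frac{m_t}{\sqrt{v_t} + \epsilon}

Folding the bias corrections into the learning rate, as lr_t does above, matches the textbook bias-corrected form \hat m_t / (\sqrt{\hat v_t} + \epsilon) up to where \epsilon enters the denominator.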
Example 14: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import floatx [as alias]
def call(self, x, mask=None):
    # size of x: [batch_size, seq_len, attention_dim]
    # size of u: [batch_size, attention_dim]
    # uit = tanh(xW + b)
    uit = K.tanh(K.bias_add(K.dot(x, self.W), self.b))
    ait = K.dot(uit, self.u)
    ait = K.squeeze(ait, -1)
    ait = K.exp(ait)
    if mask is not None:
        # cast the mask to floatx to avoid float64 upcasting in Theano
        ait *= K.cast(mask, K.floatx())
    ait /= K.cast(K.sum(ait, axis=1, keepdims=True) + K.epsilon(), K.floatx())
    ait = K.expand_dims(ait)
    weighted_input = x * ait
    output = K.sum(weighted_input, axis=1)
    return output
Example 15: labelembed_loss
# Required import: from keras import backend [as alias]
# Or: from keras.backend import floatx [as alias]
def labelembed_loss(out1, out2, tar, targets, tau=2., alpha=0.9, beta=0.5, num_classes=100):
    # cross_entropy is a helper defined elsewhere in the project
    out2_prob = K.softmax(out2)
    tau2_prob = K.stop_gradient(K.softmax(out2 / tau))
    soft_tar = K.stop_gradient(K.softmax(tar))
    L_o1_y = K.sparse_categorical_crossentropy(output=K.softmax(out1), target=targets)
    pred = K.argmax(out2, axis=-1)
    mask = K.stop_gradient(K.cast(K.equal(pred, K.cast(targets, 'int64')), K.floatx()))
    L_o1_emb = -cross_entropy(out1, soft_tar)  # pylint: disable=invalid-unary-operand-type
    L_o2_y = K.sparse_categorical_crossentropy(output=out2_prob, target=targets)
    L_emb_o2 = (-cross_entropy(tar, tau2_prob) * mask  # pylint: disable=invalid-unary-operand-type
                * (K.cast(K.shape(mask)[0], K.floatx()) / (K.sum(mask) + 1e-8)))
    L_re = K.relu(K.sum(out2_prob * K.one_hot(K.cast(targets, 'int64'), num_classes), axis=-1) - alpha)
    return beta * L_o1_y + (1 - beta) * L_o1_emb + L_o2_y + L_emb_o2 + L_re
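The returned value combines five terms, directly mirroring the return statement above:

L = \beta\, L_{o_1,y} + (1-\beta)\, L_{o_1,emb} + L_{o_2,y} + L_{emb,o_2} + L_{re}

i.e. a hard-label loss and a soft embedding (distillation) loss on out1 balanced by \beta, the hard-label loss on out2, a masked distillation term from out2 back to the embedding tar, and a regularizer L_re that activates when the predicted probability of the true class exceeds \alpha.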