This page collects typical usage examples of the Python method keras.backend.greater. If you are unsure what backend.greater does or how to use it, the curated code samples below should help; you can also explore the other methods of the keras.backend module.
The following presents 15 code examples of backend.greater, ordered by popularity by default.
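Before the examples, here is a minimal sketch of what backend.greater does (TensorFlow backend assumed): it compares two tensors element-wise and returns a boolean tensor, which is typically cast to a float mask before being used in arithmetic.

from keras import backend as K

x = K.constant([0.2, 0.8, 0.5])
mask = K.cast(K.greater(x, 0.5), 'float32')  # element-wise x > 0.5
print(K.eval(mask))  # [0. 1. 0.]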
Example 1: set_reset_mem
# Required import: from keras import backend [as alias]
# Or: from keras.backend import greater [as alias]
def set_reset_mem(self, mem, spikes):
    """
    Reset membrane potential ``mem`` array where ``spikes`` array is
    nonzero.
    """

    spike_idxs = k.T.nonzero(spikes)
    if (hasattr(self, 'activation_str') and
            self.activation_str == 'softmax'):
        new = mem.copy()  # k.T.set_subtensor(mem[spike_idxs], 0.)
    elif self.config.get('cell', 'reset') == 'Reset by subtraction':
        if self.payloads:  # Experimental.
            new = k.T.set_subtensor(mem[spike_idxs], 0.)
        else:
            pos_spike_idxs = k.T.nonzero(k.greater(spikes, 0))
            neg_spike_idxs = k.T.nonzero(k.less(spikes, 0))
            new = k.T.inc_subtensor(mem[pos_spike_idxs], -self.v_thresh)
            new = k.T.inc_subtensor(new[neg_spike_idxs], self.v_thresh)
    elif self.config.get('cell', 'reset') == 'Reset by modulo':
        new = k.T.set_subtensor(mem[spike_idxs],
                                mem[spike_idxs] % self.v_thresh)
    else:  # self.config.get('cell', 'reset') == 'Reset to zero':
        new = k.T.set_subtensor(mem[spike_idxs], 0.)
    self.add_update([(self.mem, new)])
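Note that k.T in this example is Theano's tensor module exposed through the Keras backend alias, so operations such as set_subtensor and inc_subtensor require the Theano backend; k.greater and k.less are the ordinary backend comparison ops.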
Example 2: zero_one_rank_loss
# Required import: from keras import backend [as alias]
# Or: from keras.backend import greater [as alias]
def zero_one_rank_loss(y_true, y_pred):
    y_true, y_pred = tensorify(y_true), tensorify(y_pred)
    mask = K.greater(y_true[:, None] - y_true[:, :, None], 0)
    # Count the number of mistakes (here position difference less than 0)
    mask2 = K.less(y_pred[:, None] - y_pred[:, :, None], 0)
    mask3 = K.equal(y_pred[:, None] - y_pred[:, :, None], 0)
    # Calculate transpositions
    transpositions = tf.logical_and(mask, mask2)
    transpositions = K.sum(K.cast(transpositions, dtype="float32"), axis=[1, 2])
    n_objects = K.max(y_true) + 1
    transpositions += (
        K.sum(K.cast(mask3, dtype="float32"), axis=[1, 2]) - n_objects
    ) / 4.0
    denominator = K.cast((n_objects * (n_objects - 1.0)) / 2.0, dtype="float32")
    result = transpositions / denominator
    return K.mean(result)
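The pairwise trick above is worth spelling out: subtracting y[:, None] from y[:, :, None] yields, per batch row, an (n, n) matrix of all pairwise differences, so K.greater marks every ordered pair of objects whose ranks are inverted. A small NumPy sketch of the same broadcasting, with hypothetical data:

import numpy as np

y = np.array([[0., 2., 1.]])        # one sample ranking 3 objects
pairs = y[:, None] - y[:, :, None]  # shape (1, 3, 3): pairs[0, i, j] = y[0, j] - y[0, i]
print((pairs > 0).astype(int))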
Example 3: zero_one_rank_loss_for_scores_ties
# Required import: from keras import backend [as alias]
# Or: from keras.backend import greater [as alias]
def zero_one_rank_loss_for_scores_ties(y_true, s_pred):
    y_true, s_pred = tensorify(y_true), tensorify(s_pred)
    n_objects = K.cast(K.max(y_true) + 1, dtype="float32")
    mask = K.greater(y_true[:, None] - y_true[:, :, None], 0)
    mask2 = K.greater(s_pred[:, None] - s_pred[:, :, None], 0)
    mask3 = K.equal(s_pred[:, None] - s_pred[:, :, None], 0)
    # Calculate transpositions
    transpositions = tf.logical_and(mask, mask2)
    transpositions = K.sum(K.cast(transpositions, dtype="float32"), axis=[1, 2])
    transpositions += (
        K.sum(K.cast(mask3, dtype="float32"), axis=[1, 2]) - n_objects
    ) / 4.0
    denominator = n_objects * (n_objects - 1.0) / 2.0
    result = transpositions / denominator
    return K.mean(result)
Example 4: crossentropy_max_wrap
# Required import: from keras import backend [as alias]
# Or: from keras.backend import greater [as alias]
def crossentropy_max_wrap(_m):
    def crossentropy_max_core(y_true, y_pred):
        """
        This function is based on the one proposed in
        Il-Young Jeong and Hyungui Lim, "AUDIO TAGGING SYSTEM FOR DCASE 2018: FOCUSING ON LABEL NOISE,
        DATA AUGMENTATION AND ITS EFFICIENT LEARNING", Tech Report, DCASE 2018
        https://github.com/finejuly/dcase2018_task2_cochlearai

        :param y_true:
        :param y_pred:
        :return:
        """

        # hyper param
        print(_m)
        y_pred = K.clip(y_pred, K.epsilon(), 1)
        # compute loss for every data point
        _loss = -K.sum(y_true * K.log(y_pred), axis=-1)
        # threshold
        t_m = K.max(_loss) * _m
        _mask_m = 1 - (K.cast(K.greater(_loss, t_m), 'float32'))
        _loss = _loss * _mask_m
        return _loss
    return crossentropy_max_core
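A hypothetical usage of this wrapper, assuming an already-built model: _m sets the cutoff as a fraction of the per-batch maximum loss, so samples whose loss exceeds it (likely label noise) contribute nothing to the gradient.

loss_fn = crossentropy_max_wrap(0.8)           # 0.8 is an illustrative value
model.compile(optimizer='adam', loss=loss_fn)  # assumes an existing `model`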
Example 5: crossentropy_outlier_wrap
# Required import: from keras import backend [as alias]
# Or: from keras.backend import greater [as alias]
def crossentropy_outlier_wrap(_l):
    def crossentropy_outlier_core(y_true, y_pred):
        # hyper param
        print(_l)
        y_pred = K.clip(y_pred, K.epsilon(), 1)
        # compute loss for every data point
        _loss = -K.sum(y_true * K.log(y_pred), axis=-1)

        def _get_real_median(_v):
            """
            Given a tensor with shape (batch_size,), compute and return the median.
            :param _v:
            :return:
            """
            _val = tf.nn.top_k(_v, 33).values
            return 0.5 * (_val[-1] + _val[-2])

        _mean_loss, _var_loss = tf.nn.moments(_loss, axes=[0])
        _median_loss = _get_real_median(_loss)
        _std_loss = tf.sqrt(_var_loss)
        # threshold
        t_l = _median_loss + _l * _std_loss
        _mask_l = 1 - (K.cast(K.greater(_loss, t_l), 'float32'))
        _loss = _loss * _mask_l
        return _loss
    return crossentropy_outlier_core
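Note that tf.nn.top_k(_v, 33) hard-codes the batch size: averaging the 32nd- and 33rd-largest losses equals the median only when the batch contains 64 elements, so the constant 33 would need adjusting for other batch sizes.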
#########################################################################
# From here on we distinguish data points in the batch based on their
# origin, and apply robustness measures only to the data points coming
# from the noisy subset. Therefore, the following functions are used
# only when training with the entire train set.
#########################################################################
Example 6: crossentropy_max_origin_wrap
# Required import: from keras import backend [as alias]
# Or: from keras.backend import greater [as alias]
def crossentropy_max_origin_wrap(_m):
    def crossentropy_max_origin_core(y_true, y_pred):
        # hyper param
        print(_m)
        # 1) determine the origin of the patch, as a boolean vector y_true_flag
        # (True = patch from noisy subset)
        _y_true_flag = K.greater(K.sum(y_true, axis=-1), 90)
        # 2) convert the input y_true (with flags inside) into a valid y_true one-hot-vector format
        # attenuating factor for data points that need it (those that came with a one-hot of 100)
        _mask_reduce = K.cast(_y_true_flag, 'float32') * 0.01
        # identity factor for standard one-hot vectors
        _mask_keep = K.cast(K.equal(_y_true_flag, False), 'float32')
        # combine 2 masks
        _mask = _mask_reduce + _mask_keep
        _y_true_shape = K.shape(y_true)
        _mask = K.reshape(_mask, (_y_true_shape[0], 1))
        # applying mask to have a valid y_true that we can use as always
        y_true = y_true * _mask
        y_true = K.clip(y_true, K.epsilon(), 1)
        y_pred = K.clip(y_pred, K.epsilon(), 1)
        # compute loss for every data point
        _loss = -K.sum(y_true * K.log(y_pred), axis=-1)
        # threshold m
        t_m = K.max(_loss) * _m
        _mask_m = 1 - (K.cast(K.greater(_loss, t_m), 'float32') * K.cast(_y_true_flag, 'float32'))
        _loss = _loss * _mask_m
        return _loss
    return crossentropy_max_origin_core
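The flag check K.greater(K.sum(y_true, axis=-1), 90) assumes a specific target encoding, illustrated below with hypothetical vectors: clean samples carry an ordinary one-hot target (row sum 1), while noisy samples carry a one-hot scaled by 100 (row sum 100 > 90), which the 0.01 mask then rescales back to a valid one-hot vector.

import numpy as np

clean = np.array([0., 1., 0.])    # sum = 1   -> flag False, mask keeps it as-is
noisy = np.array([0., 100., 0.])  # sum = 100 -> flag True, mask scales it by 0.01
print(clean.sum() > 90, noisy.sum() > 90)  # False True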
Example 7: binary_sigmoid_activation
# Required import: from keras import backend [as alias]
# Or: from keras.backend import greater [as alias]
def binary_sigmoid_activation(self, mem):
    """Binary sigmoid activation."""

    return k.T.mul(k.greater(mem, 0), self.v_thresh)
Example 8: binary_tanh_activation
# Required import: from keras import backend [as alias]
# Or: from keras.backend import greater [as alias]
def binary_tanh_activation(self, mem):
    """Binary tanh activation."""

    output_spikes = k.T.mul(k.greater(mem, 0), self.v_thresh)
    output_spikes += k.T.mul(k.less(mem, 0), -self.v_thresh)
    return output_spikes
Example 9: get_new_mem
# Required import: from keras import backend [as alias]
# Or: from keras.backend import greater [as alias]
def get_new_mem(self):
    """Add input to membrane potential."""

    # Destroy impulse if in refractory period
    masked_impulse = self.impulse if self.tau_refrac == 0 else \
        k.T.set_subtensor(
            self.impulse[k.T.nonzero(self.refrac_until > self.time)], 0.)
    # Add impulse
    if clamp_var:
        # Experimental: Clamp the membrane potential to zero until the
        # presynaptic neurons fire at their steady-state rates. This helps
        # avoid a transient response.
        new_mem = theano.ifelse.ifelse(
            k.less(k.mean(self.var), 1e-4) +
            k.greater(self.time, self.duration / 2),
            self.mem + masked_impulse, self.mem)
    elif hasattr(self, 'clamp_idx'):
        # Set clamp-duration by a specific delay from layer to layer.
        new_mem = theano.ifelse.ifelse(k.less(self.time, self.clamp_idx),
                                       self.mem, self.mem + masked_impulse)
    elif v_clip:
        # Clip membrane potential to prevent too strong accumulation.
        new_mem = k.clip(self.mem + masked_impulse, -3, 3)
    else:
        new_mem = self.mem + masked_impulse
    if self.config.getboolean('cell', 'leak'):
        # Todo: Implement more flexible version of leak!
        new_mem = k.T.inc_subtensor(
            new_mem[k.T.nonzero(k.T.gt(new_mem, 0))], -0.1 * self.dt)
    return new_mem
Example 10: get_new_thresh
# Required import: from keras import backend [as alias]
# Or: from keras.backend import greater [as alias]
def get_new_thresh(self):
    """Get new threshold."""

    thr_min = self._v_thresh / 100
    thr_max = self._v_thresh
    r_lim = 1 / self.dt
    return thr_min + (thr_max - thr_min) * self.max_spikerate / r_lim

    # return theano.ifelse.ifelse(
    #     k.equal(self.time / self.dt % settings['timestep_fraction'], 0) *
    #     k.greater(self.max_spikerate, settings['diff_to_min_rate'] / 1000) *
    #     k.greater(1 / self.dt - self.max_spikerate,
    #               settings['diff_to_max_rate'] / 1000),
    #     self.max_spikerate, self.v_thresh)
Example 11: discriminator_loss
# Required import: from keras import backend [as alias]
# Or: from keras.backend import greater [as alias]
def discriminator_loss(y_true, y_pred):
    loss = mean_squared_error(y_true, y_pred)
    is_large = k.greater(loss, k.constant(_disc_train_thresh))  # threshold
    is_large = k.cast(is_large, k.floatx())
    # binary-threshold the loss to prevent overtraining the discriminator
    return loss * is_large
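A hypothetical way to wire this up, assuming _disc_train_thresh is a module-level constant and discriminator is an existing Keras model: batches whose MSE is already below the threshold contribute zero loss, so the discriminator stops improving on them.

_disc_train_thresh = 0.1  # illustrative value
discriminator.compile(optimizer='adam', loss=discriminator_loss)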
Example 12: get_split_averages
# Required import: from keras import backend [as alias]
# Or: from keras.backend import greater [as alias]
def get_split_averages(input_tensor, input_mask, indices):
    # Splits input tensor into three parts based on the indices and
    # returns average of values prior to index, values at the index and
    # average of values after the index.
    # input_tensor: (batch_size, input_length, input_dim)
    # input_mask: (batch_size, input_length)
    # indices: (batch_size, 1)
    length_range = K.expand_dims(K.arange(K.shape(input_tensor)[1]), dim=0)  # (1, input_length)
    batched_range = K.repeat_elements(length_range, K.shape(input_tensor)[0], 0)  # (batch_size, input_length)
    tiled_indices = K.repeat_elements(indices, K.shape(input_tensor)[1], 1)  # (batch_size, input_length)
    greater_mask = K.greater(batched_range, tiled_indices)  # (batch_size, input_length)
    lesser_mask = K.lesser(batched_range, tiled_indices)  # (batch_size, input_length)
    equal_mask = K.equal(batched_range, tiled_indices)  # (batch_size, input_length)
    # We also need to mask these masks using the input mask.
    if input_mask is not None:
        greater_mask = switch(input_mask, greater_mask, K.zeros_like(greater_mask))  # (batch_size, input_length)
        lesser_mask = switch(input_mask, lesser_mask, K.zeros_like(lesser_mask))  # (batch_size, input_length)
    post_sum = K.sum(switch(K.expand_dims(greater_mask), input_tensor,
                            K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)
    pre_sum = K.sum(switch(K.expand_dims(lesser_mask), input_tensor,
                           K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)
    values_at_indices = K.sum(switch(K.expand_dims(equal_mask), input_tensor,
                                     K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)
    post_normalizer = K.expand_dims(K.sum(greater_mask, axis=1) + K.epsilon(), dim=1)  # (batch_size, 1)
    pre_normalizer = K.expand_dims(K.sum(lesser_mask, axis=1) + K.epsilon(), dim=1)  # (batch_size, 1)
    return (K.cast(pre_sum / pre_normalizer, 'float32'), values_at_indices,
            K.cast(post_sum / post_normalizer, 'float32'))
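Note: this example appears to target the Keras 1.x backend API; K.lesser and the dim= keyword of K.expand_dims were renamed to K.less and axis= in Keras 2, and switch is presumably a helper imported elsewhere in the same project.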
Example 13: acc_class1
# Required import: from keras import backend [as alias]
# Or: from keras.backend import greater [as alias]
def acc_class1(y_true, y_pred):
    """Function to estimate accuracy over the class 1 prediction. This estimation
    is global (i.e. abstaining samples are not removed).

    Parameters
    ----------
    y_true : keras tensor
        True values to predict
    y_pred : keras tensor
        Prediction made by the model. It is assumed that this keras tensor
        includes extra columns to store the abstaining classes.
    """

    # Find samples in ground truth belonging to class 1
    ytrueint = K.argmax(y_true, axis=-1)
    # Compute total number of ground truth samples in class 1
    total_true1 = K.sum(ytrueint)
    # Find samples in prediction belonging to class 1
    ypredint = K.argmax(y_pred[:, :2], axis=-1)
    # Find correctly predicted class 1 samples
    true1_pred = K.sum(ytrueint * ypredint)
    # Compute accuracy in class 1
    acc = true1_pred / total_true1
    # Since there are so few samples in class 1, it is possible that the
    # ground truth does not have any sample in class 1, leading to a divide
    # by zero and an invalid accuracy. Therefore, for the accuracy to be
    # valid, total_true1 should be greater than zero; otherwise, return 0.
    condition = K.greater(total_true1, 0)
    return K.switch(condition, acc, K.zeros_like(acc, dtype=acc.dtype))
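A hypothetical usage sketch: since the function has the standard (y_true, y_pred) metric signature, it can be passed directly to compile, assuming an existing abstaining-classifier model.

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=[acc_class1])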
Example 14: abs_acc_class1
# Required import: from keras import backend [as alias]
# Or: from keras.backend import greater [as alias]
def abs_acc_class1(y_true, y_pred):
    """Function to estimate accuracy over the class 1 prediction after removing
    the samples where the model is abstaining.

    Parameters
    ----------
    y_true : keras tensor
        True values to predict
    y_pred : keras tensor
        Prediction made by the model. It is assumed that this keras tensor
        includes extra columns to store the abstaining classes.
    """

    # Find locations of true class 1 samples
    ytrueint = K.argmax(y_true, axis=-1)
    # Find locations that are predicted (not abstained)
    mask_pred = K.cast(K.not_equal(K.argmax(y_pred, axis=-1), nb_classes), 'int64')
    # Compute total number of ground truth samples in class 1, filtering out abstaining predictions
    total_true1 = K.sum(ytrueint * mask_pred)
    # Matches in the original class 1 after removing abstention
    true1_pred = K.sum(mask_pred * ytrueint *
                       K.cast(K.equal(K.argmax(y_true, axis=-1),
                                      K.argmax(y_pred, axis=-1)), 'int64'))
    # Compute accuracy in class 1
    acc = true1_pred / total_true1
    # Since there are so few samples in class 1, it is possible that the
    # ground truth does not have any sample in class 1, leading to a divide
    # by zero and an invalid accuracy. Therefore, for the accuracy to be
    # valid, total_true1 should be greater than zero; otherwise, return 0.
    condition = K.greater(total_true1, 0)
    return K.switch(condition, acc, K.zeros_like(acc, dtype=acc.dtype))
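Note that nb_classes is assumed to be a module-level constant equal to the index of the abstention column, so K.argmax(y_pred, axis=-1) == nb_classes identifies the abstained samples.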
Example 15: crossentropy_reed_origin_wrap
# Required import: from keras import backend [as alias]
# Or: from keras.backend import greater [as alias]
def crossentropy_reed_origin_wrap(_beta):
    def crossentropy_reed_origin_core(y_true, y_pred):
        # hyper param
        print(_beta)
        # 1) determine the origin of the patch, as a boolean vector y_true_flag
        # (True = patch from noisy subset)
        _y_true_flag = K.greater(K.sum(y_true, axis=-1), 90)
        # 2) convert the input y_true (with flags inside) into a valid one-hot-vector format
        # attenuating factor for data points that need it (those that came with a one-hot of 100)
        _mask_reduce = K.cast(_y_true_flag, 'float32') * 0.01
        # identity factor for standard one-hot vectors
        _mask_keep = K.cast(K.equal(_y_true_flag, False), 'float32')
        # combine the two masks
        _mask = _mask_reduce + _mask_keep
        _y_true_shape = K.shape(y_true)
        _mask = K.reshape(_mask, (_y_true_shape[0], 1))
        # apply the mask to obtain a valid y_true that we can use as always
        y_true = y_true * _mask
        y_true = K.clip(y_true, K.epsilon(), 1)
        y_pred = K.clip(y_pred, K.epsilon(), 1)
        # (1) dynamically update the targets based on the current state of the
        # model: bootstrapped target tensor; use predicted class probabilities
        # directly to generate regression targets
        y_true_bootstrapped = _beta * y_true + (1 - _beta) * y_pred
        # at this point we have 2 versions of y_true;
        # decide which target label to use for each data point
        _mask_noisy = K.cast(_y_true_flag, 'float32')  # only allows patches from the noisy set
        _mask_clean = K.cast(K.equal(_y_true_flag, False), 'float32')  # only allows patches from the clean set
        _mask_noisy = K.reshape(_mask_noisy, (_y_true_shape[0], 1))
        _mask_clean = K.reshape(_mask_clean, (_y_true_shape[0], 1))
        # points coming from the clean set use the standard true one-hot vector;
        # points coming from the noisy set use the Reed bootstrapped target tensor
        y_true_final = y_true * _mask_clean + y_true_bootstrapped * _mask_noisy
        # (2) compute loss as always
        _loss = -K.sum(y_true_final * K.log(y_pred), axis=-1)
        return _loss
    return crossentropy_reed_origin_core
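A hypothetical usage of this wrapper, assuming an existing model: _beta close to 1 keeps the (rescaled) given labels, while smaller values mix in more of the model's own predictions, in the spirit of Reed-style soft bootstrapping for noisy labels.

model.compile(optimizer='adam',
              loss=crossentropy_reed_origin_wrap(0.95))  # 0.95 is an illustrative value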