本文整理汇总了Python中keras.backend.constant方法的典型用法代码示例。如果您正苦于以下问题:Python backend.constant方法的具体用法?Python backend.constant怎么用?Python backend.constant使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类keras.backend
的用法示例。
在下文中一共展示了backend.constant方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _target_class_loss
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import constant [as 别名]
def _target_class_loss(
        self,
        target_class,
        box_scores,
        box_class_probs_logits):
    """ Evaluate target_class_loss w.r.t. the input.

    Returns the mean of the target-class logits over the "boxes of
    interest" (boxes whose score for ``target_class`` exceeds
    ``self._score``), or 0 when no box qualifies.
    """
    # Drop the leading batch axis (assumes batch size 1 -- TODO confirm
    # against the caller).
    box_scores = K.squeeze(box_scores, axis=0)
    box_class_probs_logits = K.squeeze(box_class_probs_logits, axis=0)
    import tensorflow as tf
    # Indices of boxes whose target-class score passes the threshold.
    boi_idx = tf.where(box_scores[:, target_class] > self._score)
    loss_box_class_conf = tf.reduce_mean(
        tf.gather(box_class_probs_logits[:, target_class], boi_idx))
    # Avoid the propagation of nan:
    # reduce_mean over an empty gather (no box above threshold) yields NaN,
    # so fall back to a constant 0 loss in that case.
    # NOTE(review): tf.is_nan is TF1 API; it moved to tf.math.is_nan in TF2.
    return tf.cond(
        tf.is_nan(loss_box_class_conf),
        lambda: tf.constant(0.),
        lambda: loss_box_class_conf)
示例2: _rpn_loss_regr
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import constant [as 别名]
def _rpn_loss_regr(y_true, y_pred):
    """Smooth-L1 regression loss for the RPN.

    y_true: [1][HxWx10][3] -- (class, regr)
    y_pred: [1][HxWx10][2] -- (regr)
    Only anchors whose class label equals 1 contribute to the loss.
    """
    sigma = 9.0
    labels = y_true[0, :, 0]
    targets = y_true[0, :, 1:3]
    # Indices of positive anchors (label == 1).
    positive_idx = tf.where(K.equal(labels, 1))[:, 0]
    gt = tf.gather(targets, positive_idx)
    pred = tf.gather(y_pred[0], positive_idx)
    abs_err = tf.abs(gt - pred)
    # Quadratic branch below 1/sigma, linear branch above (smooth L1).
    in_quad = tf.cast(tf.less(abs_err, 1.0 / sigma), 'float32')
    per_coord = (in_quad * 0.5 * abs_err ** 2 * sigma
                 + tf.abs(1 - in_quad) * (abs_err - 0.5 / sigma))
    per_anchor = K.sum(per_coord, axis=1)
    # Guard against an empty positive set.
    return K.switch(tf.size(per_anchor) > 0, K.mean(per_anchor), K.constant(0.0))
示例3: devise_ranking_loss
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import constant [as 别名]
def devise_ranking_loss(embedding, margin = 0.1):
    """ The ranking loss used by DeViSE.
    # Arguments:
    - embedding: 2-d numpy array whose rows are class embeddings.
    - margin: margin for the ranking loss.
    # Returns:
    a Keras loss function taking y_true and y_pred as inputs and returning a loss tensor.
    """
    def _loss(y_true, y_pred):
        class_embeddings = K.constant(embedding.T)
        sim_to_true = K.sum(y_true * y_pred, axis = -1)
        sim_to_all = K.dot(y_pred, class_embeddings)
        hinge = K.relu(margin - sim_to_true[:,None] + sim_to_all)
        # The true class contributes exactly `margin` to the sum, so
        # subtract it back out.
        return K.sum(hinge, axis = -1) - margin
    return _loss
示例4: fgsm
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import constant [as 别名]
def fgsm(model, inp, pad_idx, pad_len, e, step_size=0.001, target_class=0):
    """Iterative FGSM-style attack restricted to a padding region.

    Ascends the gradient of the model's ``target_class`` output, modifying
    only the embedding positions ``pad_idx:pad_idx+pad_len``, until the
    class score reaches 0.9 or the step budget is exhausted.

    Args:
        model: Keras model; layer 1 is assumed to be the embedding layer.
        inp: embedding-space input to perturb (copied, not mutated).
        pad_idx, pad_len: start/length of the writable padding region.
        e: identifier used only for progress printing (kept for interface
           compatibility).
        step_size: gradient-ascent step scale.
        target_class: output column to maximize (default 0, the original
           behavior).

    Returns:
        (adv, g, loss_value): the adversarial input, the accumulated
        gradient, and the last observed loss value.
    """
    adv = inp.copy()
    loss = K.mean(model.output[:, target_class])
    grads = K.gradients(loss, model.layers[1].output)[0]
    # Normalize the gradient to unit RMS for stable step sizes.
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-8)
    mask = np.zeros(model.layers[1].output.shape[1:])  # embedding layer output shape
    mask[pad_idx:pad_idx + pad_len] = 1
    grads *= K.constant(mask)
    iterate = K.function([model.layers[1].output], [loss, grads])
    g = 0.
    # Bug fix: loss_value must exist even if the loop body never runs
    # (step_size >= 1 makes int(1/step_size)*10 == 0) -- previously a
    # NameError at the return statement.
    loss_value = 0.
    step = int(1 / step_size) * 10
    for _ in range(step):
        loss_value, grads_value = iterate([adv])
        grads_value *= step_size
        g += grads_value
        adv += grads_value
        #print (e, loss_value, end='\r')
        if loss_value >= 0.9:
            break
    return adv, g, loss_value
示例5: fgsm
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import constant [as 别名]
def fgsm(model, inp, pad_idx, pad_len, e, step_size=0.001, target_class=1):
    """Iterative FGSM-style attack confined to a padding region.

    Maximizes the model's ``target_class`` output by gradient ascent on
    the embedding-layer output, touching only positions
    ``pad_idx:pad_idx+pad_len``; stops early once the score reaches 0.9.
    Returns (perturbed input, accumulated gradient, last loss value).
    """
    perturbed = inp.copy()
    objective = K.mean(model.output[:, target_class])
    gradient = K.gradients(objective, model.layers[1].output)[0]
    # Unit-RMS normalization keeps step magnitudes comparable.
    gradient /= (K.sqrt(K.mean(K.square(gradient))) + 1e-8)
    region = np.zeros(model.layers[1].output.shape[1:])  # embedding layer output shape
    region[pad_idx:pad_idx + pad_len] = 1
    gradient *= K.constant(region)
    compute = K.function([model.layers[1].output], [objective, gradient])
    total_grad = 0.
    max_steps = int(1 / step_size) * 10
    for _ in range(max_steps):
        loss_value, step_grad = compute([perturbed])
        step_grad *= step_size
        total_grad += step_grad
        perturbed += step_grad
        #print (e, loss_value, grads_value.mean(), end='\r')
        if loss_value >= 0.9:
            break
    return perturbed, total_grad, loss_value
示例6: emit_Pad
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import constant [as 别名]
def emit_Pad(self, IR_node, in_scope=False):
    """Emit Keras code for a padding node.

    Only constant-mode padding is supported; it is emitted as a
    ZeroPadding{N}D layer. Raises NotImplementedError otherwise.
    """
    mode = IR_node.get_attr('mode', 'constant').lower()
    if mode != "constant":
        raise NotImplementedError()
    func = "ZeroPadding"
    pads = IR_node.get_attr('pads')
    # Spatial rank: two pad entries per axis, minus batch and channel axes.
    dim = len(pads) // 2 - 2
    padding = self._convert_padding(pads)
    return "{:<15} = layers.{}{}D(name='{}', padding={})({})".format(
        IR_node.variable_name,
        func,
        dim,
        IR_node.name,
        padding,
        self.parent_variable_name(IR_node))
示例7: _emit_h_zero
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import constant [as 别名]
def _emit_h_zero(self, IR_node):
if not self.layers_codes.get(IR_node.pattern, None):
class_code = '''
class my_h_zero(keras.layers.Layer):
def __init__(self, **kwargs):
super(my_h_zero, self).__init__(**kwargs)
def call(self, dummy):
{:<15} = K.constant(np.full((1, {}), {}))
return {}
'''.format(IR_node.variable_name,
IR_node.get_attr('fill_size'),
IR_node.get_attr('fill_value'),
IR_node.variable_name)
self.layers_codes[IR_node.pattern] = class_code
code = "{:<15} = my_h_zero()({})".format(IR_node.variable_name, self.parent_variable_name(IR_node))
return code
示例8: _layer_Shape
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import constant [as 别名]
def _layer_Shape(self):
self.add_body(0, '''
def __shape(input):
return Lambda(lambda x: tf.shape(x))(input)
''')
# def _layer_Constant(self):
# self.add_body(0, '''
# class my_constant(keras.layers.Layer):
# def __init__(self, value, **kwargs):
# super(my_constant, self).__init__(**kwargs)
# self._value = value
# # the input is dummy, just for creating keras graph.
# def call(self, dummy):
# res = K.constant(self._value)
# self.output_shapes = K.int_shape(res)
# return res
# def compute_output_shape(self, input_shape):
# return self.output_shapes
# ''')
示例9: __init__
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import constant [as 别名]
def __init__(self, halt_epsilon=0.01, time_penalty=0.01, **kwargs):
    """
    :param halt_epsilon: a small constant that allows computation to halt
        after a single update (sigmoid never reaches exactly 1.0)
    :param time_penalty: parameter that weights the relative cost
        of computation versus error. The larger it is, the less
        computational steps the network will try to make and vice versa.
        The default value of 0.01 works well for Transformer.
    :param kwargs: Any standard parameters for a layer in Keras (like name)
    """
    self.halt_epsilon = halt_epsilon
    self.time_penalty = time_penalty
    # Tensors populated later (during build/call); None until then.
    for attr in ('ponder_cost', 'weighted_output', 'zeros_like_input',
                 'zeros_like_halting', 'ones_like_halting', 'halt_budget',
                 'remainder', 'active_steps'):
        setattr(self, attr, None)
    super().__init__(**kwargs)
示例10: mask_attention_if_needed
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import constant [as 别名]
def mask_attention_if_needed(self, dot_product):
    """
    Applies causal masking to decoder self-attention when enabled.

    Each position must not attend to subsequent positions, so every
    invalid connection receives a large negative value (-1e9) that the
    following softmax turns into (effectively) zero probability. This
    keeps the decoder's predictions dependent only on earlier positions.
    No-op when masking is disabled.
    :param dot_product: scaled dot-product of Q and K after reshaping them
        to 3D tensors (batch * num_heads, rows, cols)
    """
    if not self.use_masking:
        return dot_product
    rows_cols = K.int_shape(dot_product)[-2:]
    # Lower-triangular ones mark the allowed (past/current) connections;
    # the leading 1 axis makes the mask broadcast over batch*heads.
    allowed = np.tril(np.ones(rows_cols)).reshape((1,) + rows_cols)
    forbidden = 1 - allowed
    neg_inf = -1e9
    return (K.constant(allowed, dtype=K.floatx()) * dot_product +
            K.constant(neg_inf * forbidden))
示例11: positional_signal
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import constant [as 别名]
def positional_signal(hidden_size: int, length: int,
                      min_timescale: float = 1.0, max_timescale: float = 1e4):
    """
    Helper function, constructing basic sinusoidal positional encoding.

    Returns a tensor of shape (1, length, hidden_size): the first half of
    the last axis holds sines, the second half cosines, over a geometric
    progression of timescales from min_timescale to max_timescale.
    The code is partially based on implementation from Tensor2Tensor library
    https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/layers/common_attention.py
    """
    if hidden_size % 2 != 0:
        # Bug fix: the two implicitly-concatenated f-strings previously
        # rendered as "...divisible by 2.Currently..." (missing space).
        raise ValueError(
            f"The hidden dimension of the model must be divisible by 2. "
            f"Currently it is {hidden_size}")
    position = K.arange(0, length, dtype=K.floatx())
    num_timescales = hidden_size // 2
    # NOTE(review): num_timescales == 1 (hidden_size == 2) divides by zero
    # below -- presumably hidden sizes are always > 2; verify with callers.
    log_timescale_increment = K.constant(
        (np.log(float(max_timescale) / float(min_timescale)) /
         (num_timescales - 1)),
        dtype=K.floatx())
    inv_timescales = (
        min_timescale *
        K.exp(K.arange(num_timescales, dtype=K.floatx()) *
              -log_timescale_increment))
    scaled_time = K.expand_dims(position, 1) * K.expand_dims(inv_timescales, 0)
    signal = K.concatenate([K.sin(scaled_time), K.cos(scaled_time)], axis=1)
    return K.expand_dims(signal, axis=0)
示例12: offsets_loss
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import constant [as 别名]
def offsets_loss(gt_offsets, pred_offsets, dump=False):
    """Loss function for offset regression.

    Only positive samples (gt_fg > 0) are evaluated.
    Args:
        gt_offsets: ground-truth offsets
            [R, 4]
            The last axis holds the region-proposal/anchor offsets
            (center, width, height): (tx, ty, th, tw).
        pred_offsets: predictions
            [R, 4].
    Note:
        This function is called from rpn_offsets_loss and head_offsets_loss.
        When the RPN's RoI prediction misses, every BBox becomes negative and
        the ground truth passed to head_offsets_loss is all-negative, making
        its loss 0; rpn_offsets_loss should grow large in that case, so the
        combined loss (rpn_offsets_loss + head_offsets_loss) still evaluates
        to something sensible.
    """
    has_positives = tf.size(gt_offsets) > 0
    per_sample = K.switch(has_positives,
                          smooth_l1(gt_offsets, pred_offsets),
                          tf.constant(0.0))
    return K.mean(per_sample)
示例13: labels_loss
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import constant [as 别名]
def labels_loss(gt, pred):
    """Loss function for label classification.
    gt: ground truth
        [N, R]
        The second axis holds the label IDs.
    pred: predictions (already softmaxed)
        [N, R, labels].
    """
    # Cross-entropy error.
    # A global mean is fine here (no per-batch normalization needed).
    # The paper states:
    # In our current implementation (as in the released code),
    # the cls term in Eqn.(1) is normalized by the mini-batch size
    # (i.e., Ncls = 256) and the reg term is normalized by the number of
    # anchor locations (i.e., Nreg ~ 2,400).
    labels = K.cast(gt, 'int32')
    per_sample = K.switch(tf.size(labels) > 0,
                          sparse_categorical_crossentropy(labels, pred),
                          K.constant(0.0))
    return K.mean(per_sample)
示例14: build
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import constant [as 别名]
def build(self, input_shape):
    """Create this layer's weights: a fixed Hadamard projection plus a
    learnable scalar scale and (optionally) a bias vector.
    """
    # Hadamard matrices exist only for power-of-two orders: round the
    # larger of (input width, output width) up to the next power of two,
    # then crop the matrix down to (input_shape[1], output_dim).
    hadamard_size = 2 ** int(math.ceil(math.log(max(input_shape[1], self.output_dim), 2)))
    # int8 keeps the (+1/-1) matrix small; K.constant makes it non-trainable.
    self.hadamard = K.constant(
        value=hadamard(hadamard_size, dtype=np.int8)[:input_shape[1], :self.output_dim])
    # 1/sqrt(output_dim) normalization, as for an orthogonal-ish projection.
    init_scale = 1. / math.sqrt(self.output_dim)
    self.scale = self.add_weight(name='scale',
                                 shape=(1,),
                                 initializer=Constant(init_scale),
                                 trainable=True)
    if self.use_bias:
        self.bias = self.add_weight(name='bias',
                                    shape=(self.output_dim,),
                                    initializer=RandomUniform(-init_scale, init_scale),
                                    trainable=True)
    super(HadamardClassifier, self).build(input_shape)
示例15: yolo_head
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import constant [as 别名]
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters.

    Args:
        feats: raw conv features, (batch, H, W, num_anchors*(num_classes+5)).
        anchors: anchor boxes as (width, height) pairs.
        num_classes: number of object classes.
        input_shape: network input size, ordered (height, width) -- note the
            [::-1] below; TODO confirm against callers.
        calc_loss: when True, return the tensors needed by the loss instead
            of decoded confidences/probabilities.

    Returns:
        (grid, feats, box_xy, box_wh) when calc_loss is True, otherwise
        (box_xy, box_wh, box_confidence, box_class_probs).
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])
    grid_shape = K.shape(feats)[1:3]  # height, width
    # Build a (h, w, 1, 2) grid of cell coordinates, ordered (x, y).
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
                    [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
                    [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))
    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])
    # Adjust predictions to each spatial grid point and anchor size.
    # xy: sigmoid offset within the cell, normalized by grid size; wh:
    # exponential scaling of the anchor, normalized by the input size.
    # grid_shape/input_shape are (h, w) but xy/wh are (x, y), hence [::-1].
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])
    # Idiom fix: `if calc_loss == True` -> plain truth test.
    if calc_loss:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs