

Python backend.sparse_categorical_crossentropy Method Code Examples

This article collects typical usage examples of the keras.backend.sparse_categorical_crossentropy method in Python. If you are wondering how exactly backend.sparse_categorical_crossentropy is used, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the containing module, keras.backend.


The following presents 12 code examples of the backend.sparse_categorical_crossentropy method, sorted by popularity by default.
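Before the examples, here is a minimal standalone sketch (the tensors below are invented purely for illustration) of how keras.backend.sparse_categorical_crossentropy is typically called: with the default from_logits=False the predictions are expected to be softmax probabilities of shape (batch, num_classes), the targets are integer class IDs of shape (batch,), and the result is one loss value per sample.

import numpy as np
from keras import backend as K

# Integer class IDs, shape (batch,); softmax probabilities, shape (batch, num_classes).
y_true = K.constant(np.array([0, 2]), dtype='int32')
y_pred = K.constant(np.array([[0.90, 0.05, 0.05],
                              [0.10, 0.10, 0.80]]))

# Per-sample cross-entropy, shape (batch,); take the mean for a scalar loss.
loss = K.sparse_categorical_crossentropy(y_true, y_pred)
print(K.eval(K.mean(loss)))  # small value, since both samples are classified correctly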

Example 1: labels_loss

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import sparse_categorical_crossentropy [as alias]
def labels_loss(gt, pred):
    """ラベル分類の損失関数

    gt: 正解
        [N, R]
        2軸目はラベルを示すID
    pred: 予測値(softmax済み)
        [N, R, labels].
    """

    # Cross-entropy loss.
    # Taking the overall mean (rather than computing it per batch item) is fine.
    # The paper states the following:
    #    In our current implementation (as in the released code),
    #    the cls term in Eqn.(1) is normalized by the mini-batch size
    #    (i.e., Ncls = 256) and the reg term is normalized by the number of
    #    anchor locations (i.e., Nreg ∼ 2, 400).
    gt = K.cast(gt, 'int32')
    loss = K.switch(tf.size(gt) > 0,
                    sparse_categorical_crossentropy(gt, pred), K.constant(0.0))
    loss = K.mean(loss)
    return loss 
Developer: shtamura, Project: maskrcnn, Lines of code: 24, Source file: loss.py

Example 2: rpn_class_loss_graph

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import sparse_categorical_crossentropy [as alias]
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.
    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
    """
    # Squeeze last dim to simplify
    rpn_match = tf.squeeze(rpn_match, -1)
    # Get anchor classes. Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and Negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Cross entropy loss
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss 
Developer: dataiku, Project: dataiku-contrib, Lines of code: 24, Source file: model.py
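A hedged sketch of invoking rpn_class_loss_graph on dummy tensors (the anchor data below is invented purely for illustration); only the positive and negative anchors enter the mean, while the two neutral anchors are filtered out.

import numpy as np
import tensorflow as tf
from keras import backend as K

# One image, four anchors: one positive, one negative, two neutral.
rpn_match = tf.constant([[[1], [-1], [0], [0]]], dtype=tf.int32)
# Random FG/BG logits for the same four anchors.
rpn_class_logits = tf.constant(np.random.randn(1, 4, 2), dtype=tf.float32)

loss = rpn_class_loss_graph(rpn_match, rpn_class_logits)
print(K.eval(loss))  # scalar: mean cross-entropy over the two non-neutral anchors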

Example 3: rpn_class_loss_graph

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import sparse_categorical_crossentropy [as alias]
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.

    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
    """
    # Squeeze last dim to simplify
    rpn_match = tf.squeeze(rpn_match, -1)
    # Get anchor classes. Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and Negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Cross entropy loss
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss 
Developer: dmechea, Project: PanopticSegmentation, Lines of code: 25, Source file: model.py

Example 4: rpn_class_loss_graph

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import sparse_categorical_crossentropy [as alias]
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.

    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
    """
    # Squeeze last dim to simplify
    rpn_match = tf.squeeze(rpn_match, -1)
    # Get anchor classes. Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and Negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Crossentropy loss
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss 
Developer: SunskyF, Project: EasyPR-python, Lines of code: 25, Source file: model.py

Example 5: labelembed_loss

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import sparse_categorical_crossentropy [as alias]
def labelembed_loss(out1, out2, tar, targets, tau = 2., alpha = 0.9, beta = 0.5, num_classes = 100):
    
    out2_prob = K.softmax(out2)
    tau2_prob = K.stop_gradient(K.softmax(out2 / tau))
    soft_tar = K.stop_gradient(K.softmax(tar))
    
    L_o1_y = K.sparse_categorical_crossentropy(output = K.softmax(out1), target = targets)
    
    pred = K.argmax(out2, axis = -1)
    mask = K.stop_gradient(K.cast(K.equal(pred, K.cast(targets, 'int64')), K.floatx()))
    L_o1_emb = -cross_entropy(out1, soft_tar)  # pylint: disable=invalid-unary-operand-type
    
    L_o2_y = K.sparse_categorical_crossentropy(output = out2_prob, target = targets)
    L_emb_o2 = -cross_entropy(tar, tau2_prob) * mask * (K.cast(K.shape(mask)[0], K.floatx())/(K.sum(mask)+1e-8))  # pylint: disable=invalid-unary-operand-type
    L_re = K.relu(K.sum(out2_prob * K.one_hot(K.cast(targets, 'int64'), num_classes), axis = -1) - alpha)
    
    return beta * L_o1_y + (1-beta) * L_o1_emb + L_o2_y + L_emb_o2 + L_re 
Developer: cvjena, Project: semantic-embeddings, Lines of code: 19, Source file: learn_labelembedding.py

Example 6: masked_perplexity

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import sparse_categorical_crossentropy [as alias]
def masked_perplexity(y_true, y_pred):
    """
    Masked version of a popular metric for evaluating performance of
    language-modelling architectures. It assumes that y_true has shape
    (batch_size, sequence_length, 2), containing both
      - the original token ids
      - and the mask (0s and 1s, indicating places where
        a word has been replaced),
    stacked along the last dimension.
    Masked perplexity ignores all but the masked words.

    More info: http://cs224d.stanford.edu/lecture_notes/LectureNotes4.pdf
    """
    y_true_value = y_true[:, :, 0]
    mask = y_true[:, :, 1]
    cross_entropy = K.sparse_categorical_crossentropy(y_true_value, y_pred)
    batch_perplexities = K.exp(
        K.sum(mask * cross_entropy, axis=-1) / (K.sum(mask, axis=-1) + 1e-6))
    return K.mean(batch_perplexities) 
Developer: kpot, Project: keras-transformer, Lines of code: 21, Source file: bert.py
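A standalone sketch of evaluating this metric on dummy tensors (the numbers are invented for illustration): y_true of shape (batch, seq_len, 2) is built by stacking the token IDs and the 0/1 mask along the last axis, and y_pred stands in for the model's softmax output over the vocabulary.

import numpy as np
from keras import backend as K

vocab_size = 10
token_ids = np.array([[5, 7, 2, 0]])   # (batch=1, seq_len=4)
mask      = np.array([[0, 1, 1, 0]])   # only positions 1 and 2 are masked/scored
y_true = K.constant(np.stack([token_ids, mask], axis=-1))  # (1, 4, 2)

# Uniform softmax predictions over the vocabulary, shape (1, 4, vocab_size),
# so the perplexity on the masked positions should be close to vocab_size.
y_pred = K.constant(np.full((1, 4, vocab_size), 1.0 / vocab_size))

print(K.eval(masked_perplexity(y_true, y_pred)))  # ~10.0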

Example 7: rpn_class_loss_graph

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import sparse_categorical_crossentropy [as alias]
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    '''RPN anchor classifier loss.

    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
    '''
    # Squeeze last dim to simplify
    rpn_match = tf.squeeze(rpn_match, -1)
    # Get anchor classes. Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and Negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Cross entropy loss
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss 
Developer: nearthlab, Project: image-segmentation, Lines of code: 25, Source file: losses.py

Example 8: sparse_categorical_crossentropy

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import sparse_categorical_crossentropy [as alias]
def sparse_categorical_crossentropy(gt_ids, pred_one_hot_post_softmax):
    """
    With K.sparse_categorical_crossentropy the result becomes NaN...
    possibly because a division by zero occurs.
    https://qiita.com/4Ui_iUrz1/items/35a8089ab0ebc98061c1
    As a workaround, this function uses a tiny epsilon so that log(0) is never taken.
    """
    gt_ids = log.tfprint(gt_ids, "cross:gt_ids:")
    pred_one_hot_post_softmax = log.tfprint(pred_one_hot_post_softmax,
                                            "cross:pred_one_hot_post_softmax:")

    gt_one_hot = K.one_hot(gt_ids, K.shape(pred_one_hot_post_softmax)[-1])
    gt_one_hot = log.tfprint(gt_one_hot, "cross:gt_one_hot:")

    epsilon = K.epsilon()  # 1e-07
    loss = -K.sum(
        gt_one_hot * K.log(
            tf.clip_by_value(pred_one_hot_post_softmax, epsilon, 1 - epsilon)),
        axis=-1)
    loss = log.tfprint(loss, "cross:loss:")
    return loss 
Developer: shtamura, Project: maskrcnn, Lines of code: 23, Source file: loss.py
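A standalone numeric sketch of the clipping trick used above (the project-specific log.tfprint calls are omitted here): when the predicted probability of the true class is exactly 0, an unclipped log would yield an infinite loss and propagate NaN, while clipping to K.epsilon() keeps the loss finite.

import tensorflow as tf
from keras import backend as K

gt_ids = K.constant([1], dtype='int32')
# The true class (index 1) has probability exactly 0 -> log(0) without clipping.
probs = K.constant([[1.0, 0.0, 0.0]])

gt_one_hot = K.one_hot(gt_ids, K.shape(probs)[-1])
epsilon = K.epsilon()  # 1e-07
loss = -K.sum(gt_one_hot * K.log(tf.clip_by_value(probs, epsilon, 1 - epsilon)),
              axis=-1)
print(K.eval(loss))  # ~16.12 == -log(1e-07), finite instead of inf/NaN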

Example 9: rpn_class_loss_graph

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import sparse_categorical_crossentropy [as alias]
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.

    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.
    """
    # Squeeze last dim to simplify
    rpn_match = tf.squeeze(rpn_match, -1)
    # Get anchor classes. Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and Negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Cross entropy loss
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss 
Developer: ctu-geoforall-lab, Project: i.ann.maskrcnn, Lines of code: 25, Source file: model.py

Example 10: classification_loss

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import sparse_categorical_crossentropy [as alias]
def classification_loss(y_true, y_pred):
    return K.sparse_categorical_crossentropy(y_true, y_pred, from_logits=True) 
Developer: yyht, Project: BERT, Lines of code: 4, Source file: train.py

Example 11: _rpn_loss_cls

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import sparse_categorical_crossentropy [as alias]
def _rpn_loss_cls(y_true, y_pred):
    """
    softmax loss

    y_true [1][1][HXWX10] class
    y_pred [1][HXWX10][2] class
    """
    y_true = y_true[0][0]
    cls_keep = tf.where(tf.not_equal(y_true, -1))[:, 0]
    cls_true = tf.gather(y_true, cls_keep)
    cls_pred = tf.gather(y_pred[0], cls_keep)
    cls_true = tf.cast(cls_true, 'int64')
    # loss = K.sparse_categorical_crossentropy(cls_true,cls_pred,from_logits=True)
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=cls_true, logits=cls_pred)
    return K.switch(tf.size(loss) > 0, K.clip(K.mean(loss), 0, 10), K.constant(0.0)) 
Developer: GlassyWing, Project: text-detection-ocr, Lines of code: 17, Source file: core.py
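A hedged sketch of calling _rpn_loss_cls on dummy tensors shaped as in the docstring (here the H*W*10 anchor dimension is shrunk to 4 for readability; the -1 entry marks an anchor that is excluded from the loss).

import numpy as np
import tensorflow as tf
from keras import backend as K

# y_true: [1][1][H*W*10] anchor labels; y_pred: [1][H*W*10][2] class logits.
y_true = tf.constant([[[1., 0., -1., 1.]]])
y_pred = tf.constant(np.random.randn(1, 4, 2), dtype=tf.float32)

loss = _rpn_loss_cls(y_true, y_pred)
print(K.eval(loss))  # mean loss over the three kept anchors, clipped to [0, 10]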

Example 12: _get_loss

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import sparse_categorical_crossentropy [as alias]
def _get_loss(args, null_token_value):
    y_pred, y_true = args

    y_true_id = K.cast(y_true, "int32")

    mask = K.cast(K.equal(y_true_id, null_token_value), K.floatx())
    mask = 1.0 - mask
    loss = K.sparse_categorical_crossentropy(y_true, y_pred, from_logits=True) * mask

    # take average w.r.t. the number of unmasked entries
    return K.sum(loss) / K.sum(mask) 
Developer: zimmerrol, Project: attention-is-all-you-need-keras, Lines of code: 13, Source file: model.py
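A hedged sketch of evaluating _get_loss on dummy tensors (null_token_value=0 is an assumption made for this illustration, not taken from the original project): the padded second position contributes nothing, so the result is the cross-entropy averaged over the single non-null token.

import numpy as np
from keras import backend as K

null_token_value = 0                           # assumed padding ID for this sketch
y_true = K.constant([[3, 0]])                  # (batch=1, seq_len=2); second token is padding
y_pred = K.constant(np.random.randn(1, 2, 5))  # logits over a 5-token vocabulary

loss = _get_loss([y_pred, y_true], null_token_value)
print(K.eval(loss))  # scalar: loss averaged over the non-null positions only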


Note: The keras.backend.sparse_categorical_crossentropy examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please follow the corresponding project's license when distributing or using the code, and do not reproduce this article without permission.