

Python objectives.categorical_crossentropy Method Code Examples

This article collects typical code examples of the keras.objectives.categorical_crossentropy method in Python. If you are wondering what objectives.categorical_crossentropy does in practice, how to call it, or what real-world uses of it look like, the hand-picked samples below should help. You can also browse further usage examples from the keras.objectives module, where this method lives.


Twelve code examples of objectives.categorical_crossentropy are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
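Before the project-specific examples, here is a minimal sketch (assumed, not taken from any of the projects below) of how categorical_crossentropy is typically wrapped in a custom loss function and handed to model.compile:

# Minimal sketch of using keras.objectives.categorical_crossentropy as a custom loss.
# LOSS_WEIGHT and the commented-out compile call are illustrative assumptions only.
from keras import backend as K
from keras.objectives import categorical_crossentropy

LOSS_WEIGHT = 2.0  # hypothetical scaling factor

def weighted_crossentropy(y_true, y_pred):
    # average the per-sample cross-entropy over the batch, then scale it
    return LOSS_WEIGHT * K.mean(categorical_crossentropy(y_true, y_pred))

# model.compile(optimizer='adam', loss=weighted_crossentropy)  # 'model' assumed to exist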

Example 1: class_loss_cls

# Required import: from keras import objectives  [as alias]
# Or: from keras.objectives import categorical_crossentropy  [as alias]
def class_loss_cls(y_true, y_pred):
	return lambda_cls_class * K.mean(categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :])) 
Author: akshaylamba | Project: FasterRCNN_KERAS | Lines: 4 | Source: losses.py

Example 2: class_loss_cls

# Required import: from keras import objectives  [as alias]
# Or: from keras.objectives import categorical_crossentropy  [as alias]
def class_loss_cls(y_true, y_pred):
    return lambda_cls_class * K.mean(categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :])) 
Author: you359 | Project: Keras-FasterRCNN | Lines: 4 | Source: losses.py

Example 3: loss_function

# Required import: from keras import objectives  [as alias]
# Or: from keras.objectives import categorical_crossentropy  [as alias]
def loss_function(self, y_true, y_pred):
    # y_true packs the one-hot labels plus one extra "unlabeled" flag column
    y_true_item = y_true[:, :self.n_classes]
    unlabeled_flag = y_true[:, self.n_classes]
    entropies = categorical_crossentropy(y_true_item, y_pred)
    coefs = 1.0 - unlabeled_flag + self.alpha_t * unlabeled_flag  # 1 if labeled, else alpha_t
    return coefs * entropies
Author: koshian2 | Project: Pseudo-Label-Keras | Lines: 8 | Source: mobilenet_pseudo_cifar.py
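For context, one possible way to build the extended y_true array this loss expects is sketched below; the class count and sample values are assumptions for illustration, not code from the project:

# Hypothetical construction of the y_true used by loss_function above:
# one-hot labels plus one extra column that is 1 for unlabeled (pseudo-labeled) samples.
import numpy as np
from keras.utils import to_categorical

n_classes = 10
labels = np.array([3, 7])               # class indices (pseudo-labels for unlabeled samples)
unlabeled = np.array([[0.0], [1.0]])    # 0 = labeled sample, 1 = unlabeled sample
y_true = np.hstack([to_categorical(labels, n_classes), unlabeled])  # shape (2, n_classes + 1)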

Example 4: train

# Required import: from keras import objectives  [as alias]
# Or: from keras.objectives import categorical_crossentropy  [as alias]
def train(n_labeled_data):
    model = create_cnn()
    
    pseudo = PseudoCallback(model, n_labeled_data, min(512, n_labeled_data))

    # pretrain
    model.compile("adam", loss="categorical_crossentropy", metrics=["acc"])
    model.fit(pseudo.X_train_labeled/255.0, to_categorical(pseudo.y_train_labeled),
              batch_size=pseudo.batch_size, epochs=30,
              validation_data=(pseudo.X_test/255.0, to_categorical(pseudo.y_test)))
    # note: unlike the pretraining fit above, predict is called here on unnormalized images
    pseudo.y_train_unlabeled_prediction = np.argmax(
            model.predict(pseudo.X_train_unlabeled), axis=-1).reshape(-1, 1)

    # main training with the pseudo-label loss
    model.compile("adam", loss=pseudo.loss_function, metrics=[pseudo.accuracy])

    if not os.path.exists("result_pseudo"):
        os.mkdir("result_pseudo")

    hist = model.fit_generator(pseudo.train_generator(), steps_per_epoch=pseudo.train_steps_per_epoch,
                               validation_data=pseudo.test_generator(), callbacks=[pseudo],
                               validation_steps=pseudo.test_stepes_per_epoch, epochs=100).history
    hist["labeled_accuracy"] = pseudo.labeled_accuracy
    hist["unlabeled_accuracy"] = pseudo.unlabeled_accuracy

    with open(f"result_pseudo/history_{n_labeled_data:05}.dat", "wb") as fp:
        pickle.dump(hist, fp) 
Author: koshian2 | Project: Pseudo-Label-Keras | Lines: 29 | Source: pseudo_pretrain_cifar.py
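A typical invocation of this training routine (assumed, not shown in the snippet) would be:

# Hypothetical entry point: pre-train on 1,000 labeled CIFAR-10 images,
# then continue training with pseudo-labels for the remaining samples.
if __name__ == "__main__":
    train(1000)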

Example 5: compile_model

# Required import: from keras import objectives  [as alias]
# Or: from keras.objectives import categorical_crossentropy  [as alias]
def compile_model(self):
    self.model.compile(optimizer=args.optimizers,
                       loss=categorical_crossentropy,
                       metrics=args.metrics)
Author: yongzhuo | Project: nlp_xiaojiang | Lines: 6 | Source: keras_bert_classify_bi_lstm.py

Example 6: class_loss_cls

# Required import: from keras import objectives  [as alias]
# Or: from keras.objectives import categorical_crossentropy  [as alias]
def class_loss_cls(y_true, y_pred):
	return lambda_cls_class * categorical_crossentropy(y_true, y_pred) 
Author: small-yellow-duck | Project: keras-frcnn | Lines: 4 | Source: losses.py

Example 7: create_model

# Required import: from keras import objectives  [as alias]
# Or: from keras.objectives import categorical_crossentropy  [as alias]
def create_model(env, args):
    h = x = Input(shape=(None,) + env.observation_space.shape, name="x")

    # policy network
    for i in range(args.layers):
        h = TimeDistributed(Dense(args.hidden_size, activation=args.activation), name="h%d" % (i + 1))(h)
    p = TimeDistributed(Dense(env.action_space.n, activation='softmax'), name="p")(h)

    # baseline network
    h = TimeDistributed(Dense(args.hidden_size, activation=args.activation), name="hb")(h)
    b = TimeDistributed(Dense(1), name="b")(h)

    # advantage is additional input
    A = Input(shape=(None,))

    # policy gradient loss and entropy bonus
    def policy_gradient_loss(l_sampled, l_predicted):
        return K.mean(A * categorical_crossentropy(l_sampled, l_predicted), axis=1) \
            - args.beta * K.mean(categorical_crossentropy(l_predicted, l_predicted), axis=1)

    # inputs to the model are observation and advantage,
    # outputs are action probabilities and baseline
    model = Model(input=[x, A], output=[p, b])

    # baseline is optimized with MSE
    model.compile(optimizer=args.optimizer, loss=[policy_gradient_loss, 'mse'])
    model.optimizer.lr = args.optimizer_lr

    return model 
Author: tambetm | Project: gymexperiments | Lines: 31 | Source: a2c.py
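The second term in policy_gradient_loss is an entropy bonus: the cross-entropy of a distribution with itself is its entropy, so subtracting it (scaled by args.beta) encourages exploration. A quick numerical check of that identity, illustrative only and not part of the project:

# categorical_crossentropy(p, p) reduces to -sum(p * log(p)), i.e. the entropy H(p).
import numpy as np

p = np.array([0.7, 0.2, 0.1])   # an assumed action distribution
entropy = -np.sum(p * np.log(p))
print(entropy)  # ~0.802 nats; maximized when the policy is uniform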

Example 8: policy_gradient_loss

# Required import: from keras import objectives  [as alias]
# Or: from keras.objectives import categorical_crossentropy  [as alias]
def policy_gradient_loss(l_sampled, l_predicted):
    return A * categorical_crossentropy(l_sampled, l_predicted)[:, np.newaxis]

# inputs to the model are observation and advantage,
# outputs are action probabilities and baseline
Author: tambetm | Project: gymexperiments | Lines: 7 | Source: pg.py

Example 9: create_model

# Required import: from keras import objectives  [as alias]
# Or: from keras.objectives import categorical_crossentropy  [as alias]
def create_model(env, batch_size, num_steps):
    # network inputs are observations and advantages
    h = x = Input(batch_shape=(batch_size, num_steps) + env.observation_space.shape, name="x")
    A = Input(batch_shape=(batch_size, num_steps), name="A")

    # convolutional layers
    h = TimeDistributed(Convolution2D(32, 3, 3, subsample=(2, 2), border_mode="same", activation='elu', dim_ordering='tf'), name='c1')(h)
    h = TimeDistributed(Convolution2D(32, 3, 3, subsample=(2, 2), border_mode="same", activation='elu', dim_ordering='tf'), name='c2')(h)
    h = TimeDistributed(Convolution2D(32, 3, 3, subsample=(2, 2), border_mode="same", activation='elu', dim_ordering='tf'), name='c3')(h)
    h = TimeDistributed(Convolution2D(64, 3, 3, subsample=(2, 2), border_mode="same", activation='elu', dim_ordering='tf'), name='c4')(h)
    h = TimeDistributed(Flatten(), name="fl")(h)

    # recurrent layer
    h = LSTM(32, return_sequences=True, stateful=True, name="r1")(h)

    # policy network
    p = TimeDistributed(Dense(env.action_space.n, activation='softmax'), name="p")(h)

    # baseline network
    b = TimeDistributed(Dense(1), name="b")(h)

    # inputs to the model are observation and advantages,
    # outputs are action probabilities and baseline
    model = Model(input=[x, A], output=[p, b])

    # policy gradient loss and entropy bonus
    def policy_gradient_loss(l_sampled, l_predicted):
        return K.mean(A * categorical_crossentropy(l_sampled, l_predicted), axis=1) \
            - 0.01 * K.mean(categorical_crossentropy(l_predicted, l_predicted), axis=1)

    # baseline is optimized with MSE
    model.compile(optimizer='adam', loss=[policy_gradient_loss, 'mse'])

    return model 
Author: tambetm | Project: gymexperiments | Lines: 36 | Source: a2c_atari.py
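This snippet uses the Keras 1.x layer API (Convolution2D with subsample, border_mode and dim_ordering). For readers on Keras 2.x, the first convolution would look roughly like the sketch below; the input shape here is an assumed example, not taken from the project:

# Keras 2.x equivalent of the first convolutional layer above (sketch only; argument
# names changed: subsample -> strides, border_mode -> padding, dim_ordering -> data_format,
# and 'channels_last' is the default, matching dim_ordering='tf').
from keras.layers import Conv2D, Input, TimeDistributed

x = Input(batch_shape=(1, 4, 84, 84, 3), name="x")   # assumed batch size, steps, and frame shape
h = TimeDistributed(Conv2D(32, (3, 3), strides=(2, 2), padding='same', activation='elu'),
                    name='c1')(x)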

Example 10: vae_loss

# Required import: from keras import objectives  [as alias]
# Or: from keras.objectives import categorical_crossentropy  [as alias]
def vae_loss(y_true, y_pred):
    xent_loss = objectives.categorical_crossentropy(y_true, y_pred)
    kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var))
    loss = xent_loss + kl_loss
    return loss  

# create the vocabulary 
Author: PacktPublishing | Project: Hands-On-Deep-Learning-for-Games | Lines: 9 | Source: pitch-generator.py
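vae_loss closes over z_mean and z_log_var, which must already exist when the loss is defined. A minimal sketch of where they would typically come from in a Keras VAE encoder follows; the layer sizes and dimensions are assumptions, not taken from pitch-generator.py:

# Hypothetical encoder producing the z_mean / z_log_var tensors that vae_loss closes over.
from keras import backend as K
from keras.layers import Dense, Input, Lambda

original_dim = 128   # assumed input dimensionality
latent_dim = 2       # assumed latent size

inputs = Input(shape=(original_dim,))
h = Dense(256, activation='relu')(inputs)
z_mean = Dense(latent_dim)(h)
z_log_var = Dense(latent_dim)(h)

def sampling(args):
    # reparameterization trick: z = mu + sigma * epsilon
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=K.shape(z_mean))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon

z = Lambda(sampling)([z_mean, z_log_var])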

Example 11: vae_p_loss

# Required import: from keras import objectives  [as alias]
# Or: from keras.objectives import categorical_crossentropy  [as alias]
def vae_p_loss(y_true, y_pred):
    xent_loss = objectives.categorical_crossentropy(y_true, y_pred)
    kl_loss = - 0.5 * K.mean(1 + z_log_var_p - K.square(z_mean_p) - K.exp(z_log_var_p))
    loss = xent_loss + kl_loss
    return loss

# durations VAE loss 
Author: PacktPublishing | Project: Hands-On-Deep-Learning-for-Games | Lines: 9 | Source: note-generator.py

Example 12: vae_d_loss

# Required import: from keras import objectives  [as alias]
# Or: from keras.objectives import categorical_crossentropy  [as alias]
def vae_d_loss(y_true, y_pred):
    xent_loss = objectives.categorical_crossentropy(y_true, y_pred)
    kl_loss = - 0.5 * K.mean(1 + z_log_var_d - K.square(z_mean_d) - K.exp(z_log_var_d))
    loss = xent_loss + kl_loss
    return loss

# load Bach chorales 
Author: PacktPublishing | Project: Hands-On-Deep-Learning-for-Games | Lines: 9 | Source: note-generator.py


Note: The keras.objectives.categorical_crossentropy examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers, and the copyright of the source code belongs to the original authors. Please consult the license of the corresponding project before redistributing or using the code; do not republish without permission.