

Python objectives.binary_crossentropy Method Code Examples

This article collects typical usage examples of the Python method keras.objectives.binary_crossentropy. If you are wondering what objectives.binary_crossentropy does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse further usage examples from the keras.objectives module.


The following shows 10 code examples of the objectives.binary_crossentropy method, taken from open-source projects and ordered by popularity by default.
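
Before diving into the examples, here is a minimal, self-contained sketch of calling the method directly. It assumes an older Keras release where keras.objectives is still available (in newer releases the equivalent function lives in keras.losses):

import numpy as np
from keras import objectives
from keras import backend as K

y_true = K.variable(np.array([[1., 0., 1.]]))
y_pred = K.variable(np.array([[0.9, 0.1, 0.8]]))

# binary_crossentropy averages the element-wise cross-entropy over the last axis,
# returning one value per sample in the batch
loss = K.eval(objectives.binary_crossentropy(y_true, y_pred))
print(loss)  # roughly [0.14]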

Example 1: discriminator_dummy

# Required module: from keras import objectives [as alias]
# Or: from keras.objectives import binary_crossentropy [as alias]
def discriminator_dummy(img_size, n_filters, init_lr, name='d'):    # naive unet without GAN
    # set image specifics
    img_ch=3 # image channels
    out_ch=1 # output channel
    img_height, img_width = img_size[0], img_size[1]

    inputs = Input((img_height, img_width, img_ch + out_ch))

    d = Model(inputs, inputs, name=name)

    def d_loss(y_true, y_pred):
        L = objectives.binary_crossentropy(K.batch_flatten(y_true),
                                            K.batch_flatten(y_pred))
#         L = objectives.mean_squared_error(K.batch_flatten(y_true),
#                                            K.batch_flatten(y_pred))
        return L
    
    d.compile(optimizer=Adam(lr=init_lr, beta_1=0.5), loss=d_loss, metrics=['accuracy'])
    
    return d, d.layers[-1].output_shape[1:] 
Developer: jaeminSon | Project: V-GAN | Lines of code: 22 | Source file: model.py

Example 2: _buildEncoder

# Required module: from keras import objectives [as alias]
# Or: from keras.objectives import binary_crossentropy [as alias]
def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std = 0.01):
        h = Convolution1D(9, 9, activation = 'relu', name='conv_1')(x)
        h = Convolution1D(9, 9, activation = 'relu', name='conv_2')(h)
        h = Convolution1D(10, 11, activation = 'relu', name='conv_3')(h)
        h = Flatten(name='flatten_1')(h)
        h = Dense(435, activation = 'relu', name='dense_1')(h)

        def sampling(args):
            z_mean_, z_log_var_ = args
            batch_size = K.shape(z_mean_)[0]
            epsilon = K.random_normal(shape=(batch_size, latent_rep_size), mean=0., std = epsilon_std)
            return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

        z_mean = Dense(latent_rep_size, name='z_mean', activation = 'linear')(h)
        z_log_var = Dense(latent_rep_size, name='z_log_var', activation = 'linear')(h)

        def vae_loss(x, x_decoded_mean):
            x = K.flatten(x)
            x_decoded_mean = K.flatten(x_decoded_mean)
            xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
            kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis = -1)
            return xent_loss + kl_loss

        return (vae_loss, Lambda(sampling, output_shape=(latent_rep_size,), name='lambda')([z_mean, z_log_var])) 
Developer: maxhodak | Project: keras-molecules | Lines of code: 26 | Source file: model.py
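
In equation form, the vae_loss closure above is the standard VAE objective, with the reconstruction term scaled by the fixed sequence length (writing \mu for z_mean and \log\sigma^{2} for z_log_var):

    \mathcal{L}(x, \hat{x}) = \text{max\_length} \cdot \mathrm{BCE}(x, \hat{x}) \;-\; \tfrac{1}{2}\,\operatorname{mean}\left(1 + \log\sigma^{2} - \mu^{2} - \sigma^{2}\right)

Example 3 below is the same encoder builder as adapted in the deepchem project.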

Example 3: _buildEncoder

# Required module: from keras import objectives [as alias]
# Or: from keras.objectives import binary_crossentropy [as alias]
def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std=0.01):
    h = Convolution1D(9, 9, activation='relu', name='conv_1')(x)
    h = Convolution1D(9, 9, activation='relu', name='conv_2')(h)
    h = Convolution1D(10, 11, activation='relu', name='conv_3')(h)
    h = Flatten(name='flatten_1')(h)
    h = Dense(435, activation='relu', name='dense_1')(h)

    def sampling(args):
      z_mean_, z_log_var_ = args
      batch_size = K.shape(z_mean_)[0]
      epsilon = K.random_normal(
          shape=(batch_size, latent_rep_size), mean=0., std=epsilon_std)
      return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

    z_mean = Dense(latent_rep_size, name='z_mean', activation='linear')(h)
    z_log_var = Dense(latent_rep_size, name='z_log_var', activation='linear')(h)

    def vae_loss(x, x_decoded_mean):
      x = K.flatten(x)
      x_decoded_mean = K.flatten(x_decoded_mean)
      xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
      kl_loss = -0.5 * K.mean(
          1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
      return xent_loss + kl_loss

    return (vae_loss, Lambda(
        sampling, output_shape=(latent_rep_size,),
        name='lambda')([z_mean, z_log_var])) 
Developer: deepchem | Project: deepchem | Lines of code: 30 | Source file: model.py

Example 4: _vae_loss

# Required module: from keras import objectives [as alias]
# Or: from keras.objectives import binary_crossentropy [as alias]
def _vae_loss(self, input, output):
        '''
        loss function for variational autoencoder
        '''
        input_flat = K.flatten(input)
        output_flat = K.flatten(output)
        xent_loss = self.image_size[0] * self.image_size[1] \
                    * objectives.binary_crossentropy(input_flat,output_flat)
        kl_loss = - 0.5 * K.mean(1 + self.z_log_var - K.square(self.z_mean) 
                  - K.exp(self.z_log_var), axis=-1)
        return xent_loss + kl_loss 
Developer: iamshang1 | Project: Projects | Lines of code: 13 | Source file: conv_vae.py

Example 5: discriminator_pixel

# Required module: from keras import objectives [as alias]
# Or: from keras.objectives import binary_crossentropy [as alias]
def discriminator_pixel(img_size, n_filters, init_lr, name='d'):
    """
    discriminator network (pixel GAN)
    """
    
    # set image specifics
    k=3 # kernel size
    img_ch=3 # image channels
    out_ch=1 # output channel
    img_height, img_width = img_size[0], img_size[1]
    
    inputs = Input((img_height, img_width, img_ch + out_ch))

    conv1 = Conv2D(n_filters, kernel_size=(k, k), padding="same")(inputs) 
    conv1 = LeakyReLU(0.2)(conv1)
    
    conv2 = Conv2D(2*n_filters, kernel_size=(k, k), padding="same")(conv1) 
    conv2 = LeakyReLU(0.2)(conv2)
    
    conv3 = Conv2D(4*n_filters, kernel_size=(k, k), padding="same")(conv2) 
    conv3 = LeakyReLU(0.2)(conv3)

    conv4 =  Conv2D(out_ch, kernel_size=(1, 1), padding="same")(conv3)
    outputs = Activation('sigmoid')(conv4)

    d = Model(inputs, outputs, name=name)

    def d_loss(y_true, y_pred):
        L = objectives.binary_crossentropy(K.batch_flatten(y_true),
                                           K.batch_flatten(y_pred))
        return L

    d.compile(optimizer=Adam(lr=init_lr, beta_1=0.5), loss=d_loss, metrics=['accuracy'])
    
    return d, d.layers[-1].output_shape[1:] 
Developer: jaeminSon | Project: V-GAN | Lines of code: 37 | Source file: model.py
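
For context, a discriminator built this way is trained on concatenated (fundus image, vessel map) pairs against per-pixel real/fake targets. A hedged sketch of a single update step follows; the input size, batch size and random data are purely illustrative and not taken from the original project:

import numpy as np

d, d_out_shape = discriminator_pixel((64, 64), n_filters=32, init_lr=2e-4)

# a tiny batch of "real" pairs: 3 image channels + 1 vessel channel, channels last
real_pairs = np.random.rand(2, 64, 64, 4).astype('float32')
real_targets = np.ones((2,) + d_out_shape, dtype='float32')  # per-pixel "real" label

loss, acc = d.train_on_batch(real_pairs, real_targets)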

Example 6: GAN

# Required module: from keras import objectives [as alias]
# Or: from keras.objectives import binary_crossentropy [as alias]
def GAN(g,d,img_size,n_filters_g, n_filters_d, alpha_recip, init_lr, name='gan'):
    """
    GAN (that binds generator and discriminator)
    """
    img_h, img_w=img_size[0], img_size[1]

    img_ch=3
    seg_ch=1
    
    fundus = Input((img_h, img_w, img_ch))
    vessel = Input((img_h, img_w, seg_ch))
    
    fake_vessel=g(fundus)
    fake_pair=Concatenate(axis=3)([fundus, fake_vessel])
    
    gan=Model([fundus, vessel], d(fake_pair), name=name)

    def gan_loss(y_true, y_pred):
        y_true_flat = K.batch_flatten(y_true)
        y_pred_flat = K.batch_flatten(y_pred)

        L_adv = objectives.binary_crossentropy(y_true_flat, y_pred_flat)
#         L_adv = objectives.mean_squared_error(y_true_flat, y_pred_flat)

        vessel_flat = K.batch_flatten(vessel)
        fake_vessel_flat = K.batch_flatten(fake_vessel)
        L_seg = objectives.binary_crossentropy(vessel_flat, fake_vessel_flat)
#         L_seg = objectives.mean_absolute_error(vessel_flat, fake_vessel_flat)

        return alpha_recip*L_adv + L_seg
    
    
    gan.compile(optimizer=Adam(lr=init_lr, beta_1=0.5), loss=gan_loss, metrics=['accuracy'])
        
    return gan 
Developer: jaeminSon | Project: V-GAN | Lines of code: 37 | Source file: model.py
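
When training this combined model, the adversarial target passed as y_true is typically an all-ones ("real") map, while the true vessel mask enters gan_loss through the closed-over vessel input. Below is a hedged sketch of one generator update, assuming the gan model above and a d_out_shape returned by one of the discriminator builders are already in scope (sizes and data are illustrative):

import numpy as np

fundus_batch = np.random.rand(2, 64, 64, 3).astype('float32')
vessel_batch = (np.random.rand(2, 64, 64, 1) > 0.5).astype('float32')
all_real = np.ones((2,) + d_out_shape, dtype='float32')  # push g to fool d

gan.train_on_batch([fundus_batch, vessel_batch], all_real)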

Example 7: vae_loss

# Required module: from keras import objectives [as alias]
# Or: from keras.objectives import binary_crossentropy [as alias]
def vae_loss(x, x_decoded_mean):
    xent_loss = original_dim * objectives.binary_crossentropy(x, x_decoded_mean)
    kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent_loss + kl_loss 
Developer: aidiary | Project: keras-examples | Lines of code: 6 | Source file: mnist_vae.py
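
This loss expects original_dim, z_mean and z_log_var to exist in the enclosing script, as in the well-known Keras MNIST VAE example. A minimal sketch of that surrounding context is shown below; the layer sizes are illustrative, and it targets older Keras versions where a loss function may reference outer-scope tensors:

from keras.layers import Input, Dense, Lambda
from keras.models import Model
from keras import backend as K
from keras import objectives

original_dim, intermediate_dim, latent_dim = 784, 256, 2

x = Input(shape=(original_dim,))
h = Dense(intermediate_dim, activation='relu')(x)
z_mean = Dense(latent_dim)(h)
z_log_var = Dense(latent_dim)(h)

def sampling(args):
    mean, log_var = args
    epsilon = K.random_normal(shape=(K.shape(mean)[0], latent_dim))
    return mean + K.exp(log_var / 2) * epsilon

z = Lambda(sampling)([z_mean, z_log_var])
h_decoded = Dense(intermediate_dim, activation='relu')(z)
x_decoded_mean = Dense(original_dim, activation='sigmoid')(h_decoded)

vae = Model(x, x_decoded_mean)
vae.compile(optimizer='rmsprop', loss=vae_loss)  # vae_loss as defined above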

Example 8: vae_loss

# Required module: from keras import objectives [as alias]
# Or: from keras.objectives import binary_crossentropy [as alias]
def vae_loss(x, x_hat):
    kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    xent_loss = n * objectives.binary_crossentropy(x, x_hat)
    mse_loss = n * objectives.mse(x, x_hat) 
    if use_loss == 'xent':
        return xent_loss + kl_loss
    elif use_loss == 'mse':
        return mse_loss + kl_loss
    else:
        raise Exception('Unknown loss!') 
Developer: DingKe | Project: nn_playground | Lines of code: 12 | Source file: variational_autoencoder.py

Example 9: vae_loss

# Required module: from keras import objectives [as alias]
# Or: from keras.objectives import binary_crossentropy [as alias]
def vae_loss(x, x_decoded_mean):
        xent_loss = objectives.binary_crossentropy(x, x_decoded_mean)
        kl_loss = - 0.5 * K.mean(1 + z_log_std - K.square(z_mean) - K.exp(z_log_std), axis=-1)
        return xent_loss + kl_loss 
Developer: jcklie | Project: keras-autoencoder | Lines of code: 6 | Source file: variational_autoencoder.py

Example 10: discriminator_patch1

# Required module: from keras import objectives [as alias]
# Or: from keras.objectives import binary_crossentropy [as alias]
def discriminator_patch1(img_size, n_filters, init_lr, name='d'):
    """
    discriminator network (patch GAN)
    stride 2 conv X 1
    max pooling X 2
    """
    
    # set image specifics
    k=3 # kernel size
    s=2 # stride
    img_ch=3 # image channels
    out_ch=1 # output channel
    img_height, img_width = img_size[0], img_size[1]
    padding = 'same'  # alternatively 'valid'

    inputs = Input((img_height, img_width, img_ch + out_ch))

    conv1 = Conv2D(n_filters, kernel_size=(k, k), strides=(s,s), padding=padding)(inputs)
    conv1 = BatchNormalization(scale=False, axis=3)(conv1)
    conv1 = Activation('relu')(conv1)    
    conv1 = Conv2D(n_filters, kernel_size=(k, k), padding=padding)(conv1) 
    conv1 = BatchNormalization(scale=False, axis=3)(conv1)
    conv1 = Activation('relu')(conv1)    
    pool1 = MaxPooling2D(pool_size=(s, s))(conv1)
    
    conv2 = Conv2D(2*n_filters, kernel_size=(k, k), padding=padding)(pool1) 
    conv2 = BatchNormalization(scale=False, axis=3)(conv2)
    conv2 = Activation('relu')(conv2)    
    conv2 = Conv2D(2*n_filters, kernel_size=(k, k), padding=padding)(conv2) 
    conv2 = BatchNormalization(scale=False, axis=3)(conv2)
    conv2 = Activation('relu')(conv2)    
    pool2 = MaxPooling2D(pool_size=(s, s))(conv2)
    
    conv3 = Conv2D(4*n_filters, kernel_size=(k, k), padding=padding)(pool2) 
    conv3 = BatchNormalization(scale=False, axis=3)(conv3)
    conv3 = Activation('relu')(conv3)    
    conv3 = Conv2D(4*n_filters, kernel_size=(k, k), padding=padding)(conv3) 
    conv3 = BatchNormalization(scale=False, axis=3)(conv3)
    conv3 = Activation('relu')(conv3)
    
    outputs=Conv2D(out_ch, kernel_size=(1, 1), padding=padding, activation='sigmoid')(conv3)

    d = Model(inputs, outputs, name=name)

    def d_loss(y_true, y_pred):
        L = objectives.binary_crossentropy(K.batch_flatten(y_true),
                                            K.batch_flatten(y_pred))
#         L = objectives.mean_squared_error(K.batch_flatten(y_true),
#                                            K.batch_flatten(y_pred))
        return L

    d.compile(optimizer=Adam(lr=init_lr, beta_1=0.5), loss=d_loss, metrics=['accuracy'])
    
    return d, d.layers[-1].output_shape[1:] 
Developer: jaeminSon | Project: V-GAN | Lines of code: 56 | Source file: model.py
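
Unlike the pixel discriminator in Example 5, this patch variant downsamples three times (one stride-2 convolution plus two 2x2 max-pools), so the returned output shape shrinks by a factor of 8 per spatial dimension. A quick check with an illustrative input size:

d, d_out_shape = discriminator_patch1((64, 64), n_filters=32, init_lr=2e-4)
print(d_out_shape)  # expected (8, 8, 1): 64 -> 32 (stride-2 conv) -> 16 (pool) -> 8 (pool)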


Note: The keras.objectives.binary_crossentropy examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their original authors; copyright remains with those authors, and any distribution or use should follow the corresponding project's license. Please do not repost without permission.