

Python backend.relu Method Code Examples

This article collects typical usage examples of the keras.backend.relu method in Python. If you have been wondering what exactly backend.relu does and how to use it, the curated code examples below may help. You can also explore further examples of other methods in the keras.backend module.


The following presents 15 code examples of the backend.relu method, sorted by popularity by default.
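For reference, the Keras 2 backend signature is K.relu(x, alpha=0.0, max_value=None) (later 2.x releases also add a threshold argument): alpha gives negative inputs a leaky slope, and max_value clips the output from above. A minimal sketch of the three behaviours:

from keras import backend as K

x = K.constant([-2.0, -0.5, 0.5, 2.0])

print(K.eval(K.relu(x)))                 # [ 0.    0.    0.5  2. ]  plain ReLU
print(K.eval(K.relu(x, alpha=0.1)))      # [-0.2  -0.05  0.5  2. ]  leaky ReLU
print(K.eval(K.relu(x, max_value=1.0)))  # [ 0.    0.    0.5  1. ]  clipped ReLU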

Example 1: labelembed_loss

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import relu [as alias]
def labelembed_loss(out1, out2, tar, targets, tau=2., alpha=0.9, beta=0.5, num_classes=100):
    
    out2_prob = K.softmax(out2)
    tau2_prob = K.stop_gradient(K.softmax(out2 / tau))
    soft_tar = K.stop_gradient(K.softmax(tar))
    
    L_o1_y = K.sparse_categorical_crossentropy(output=K.softmax(out1), target=targets)
    
    pred = K.argmax(out2, axis=-1)
    mask = K.stop_gradient(K.cast(K.equal(pred, K.cast(targets, 'int64')), K.floatx()))
    L_o1_emb = -cross_entropy(out1, soft_tar)  # pylint: disable=invalid-unary-operand-type
    
    L_o2_y = K.sparse_categorical_crossentropy(output=out2_prob, target=targets)
    L_emb_o2 = -cross_entropy(tar, tau2_prob) * mask * (K.cast(K.shape(mask)[0], K.floatx()) / (K.sum(mask) + 1e-8))  # pylint: disable=invalid-unary-operand-type
    L_re = K.relu(K.sum(out2_prob * K.one_hot(K.cast(targets, 'int64'), num_classes), axis=-1) - alpha)
    
    return beta * L_o1_y + (1 - beta) * L_o1_emb + L_o2_y + L_emb_o2 + L_re 
Developer: cvjena, Project: semantic-embeddings, Lines of code: 19, Source file: learn_labelembedding.py
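Example 1 calls a cross_entropy helper that is defined elsewhere in learn_labelembedding.py and is not shown in this excerpt. A minimal sketch of a compatible definition, assuming (as the -cross_entropy(...) usage suggests) that it returns the expected log-likelihood of the soft targets under the logits' softmax, so its negation acts as a cross-entropy loss:

from keras import backend as K

# Hypothetical helper, not from the excerpt above; one plausible definition.
def cross_entropy(logits, probs, axis=-1):
    # log-softmax of the logits, computed stably via logsumexp
    log_probs = logits - K.logsumexp(logits, axis=axis, keepdims=True)
    # expected log-likelihood of the soft targets; negating it yields a loss
    return K.sum(probs * log_probs, axis=axis)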

Example 2: labelembed_model

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import relu [as alias]
def labelembed_model(base_model, num_classes, **kwargs):
    
    input_ = base_model.input
    embedding = base_model.output
    
    out = keras.layers.Activation('relu')(embedding)
    out = keras.layers.BatchNormalization(name='embedding_bn')(out)
    out1 = keras.layers.Dense(num_classes, name='prob')(out)
    out2 = keras.layers.Dense(num_classes, name='out2')(keras.layers.Lambda(lambda x: K.stop_gradient(x))(out))
    
    cls_input_ = keras.layers.Input((1,), name='labels')
    cls_embedding_layer = keras.layers.Embedding(num_classes, num_classes,
                                                 embeddings_initializer='identity',
                                                 name='labelembeddings')
    cls_embedding = keras.layers.Flatten()(cls_embedding_layer(cls_input_))
    
    loss = keras.layers.Lambda(lambda x: labelembed_loss(x[0], x[1], x[2], K.flatten(x[3]),
                                                         num_classes=num_classes, **kwargs)[:, None],
                               name='labelembed_loss')([out1, out2, cls_embedding, cls_input_])
    
    return keras.models.Model([input_, cls_input_], [embedding, out1, loss]) 
Developer: cvjena, Project: semantic-embeddings, Lines of code: 19, Source file: learn_labelembedding.py

Example 3: identity_block

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import relu [as alias]
def identity_block(input_tensor, kernel_size, filters, stage, block):
    filters1, filters2, filters3 = filters
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(filters1, (1, 1), name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters2, kernel_size,
               padding='same', name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    x = layers.add([x, input_tensor])
    x = Activation('relu')(x)
    return x 
Developer: EmmaW8, Project: pOSAL, Lines of code: 26, Source file: models.py

Example 4: call

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import relu [as alias]
def call(self, x, mask=None):
        # ensure that the right threshold is always to the right of the left one
        t_right_actual = self.t_left + K.abs(self.t_right)

        if K.backend() == 'theano':
            t_left = K.pattern_broadcast(self.t_left, self.param_broadcast)
            a_left = K.pattern_broadcast(self.a_left, self.param_broadcast)
            a_right = K.pattern_broadcast(self.a_right, self.param_broadcast)
            t_right_actual = K.pattern_broadcast(t_right_actual,
                                                 self.param_broadcast)
        else:
            t_left = self.t_left
            a_left = self.a_left
            a_right = self.a_right

        y_left_and_center = t_left + K.relu(x - t_left,
                                            a_left,
                                            t_right_actual - t_left)
        y_right = K.relu(x - t_right_actual) * a_right
        return y_left_and_center + y_right 
Developer: keras-team, Project: keras-contrib, Lines of code: 22, Source file: srelu.py
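The three-argument K.relu call above does double duty: alpha=a_left provides the leaky slope below t_left, and max_value=t_right - t_left caps the central identity segment so the a_right branch can take over. A numeric sketch with made-up parameter values:

from keras import backend as K

t_left, a_left, t_right, a_right = -1.0, 0.2, 1.0, 0.5
x = K.constant([-3.0, -1.0, 0.0, 1.0, 3.0])

# slope a_left below t_left, identity between the thresholds (capped at t_right)
left_and_center = t_left + K.relu(x - t_left, alpha=a_left, max_value=t_right - t_left)
# slope a_right above t_right
right = K.relu(x - t_right) * a_right
print(K.eval(left_and_center + right))  # [-1.4 -1.   0.   1.   2. ]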

Example 5: model

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import relu [as alias]
def model(hidden_dim=512, input_dim=28*28, sigma_regularization=1e-3, mu_regularization=1e-5, k=10,
          activation=lambda x: K.relu(x, 1.0 / 5.5)):
    """Create two layer MLP with softmax output"""
    _x = Input(shape=(input_dim,))
    layer = lambda output_dim, activation: BayesianDense(output_dim,
                                             activation=activation,
                                             W_sigma_regularizer=VariationalRegularizer(weight=sigma_regularization),
                                             b_sigma_regularizer=VariationalRegularizer(weight=sigma_regularization),
                                             W_regularizer=WeightRegularizer(l1=mu_regularization))

    h1 = layer(hidden_dim, activation)
    h2 = layer(hidden_dim, activation)
    y = layer(k, 'softmax')
    _y = y(h2(h1(_x)))

    m = Model(_x, _y)
    m.compile(Adam(1e-3), loss='categorical_crossentropy')
    return m 
Developer: bstriner, Project: bayesian_dense, Lines of code: 20, Source file: example.py

Example 6: _squeeze

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import relu [as alias]
def _squeeze(self, inputs):
        """Squeeze and Excitation.
        This function defines a squeeze structure.

        # Arguments
            inputs: Tensor, input tensor of conv layer.
        """
        input_channels = int(inputs.shape[-1])

        x = GlobalAveragePooling2D()(inputs)
        x = Dense(input_channels, activation='relu')(x)
        x = Dense(input_channels, activation='hard_sigmoid')(x)
        x = Reshape((1, 1, input_channels))(x)
        x = Multiply()([inputs, x])

        return x 
Developer: xiaochus, Project: MobileNetV3, Lines of code: 18, Source file: mobilenet_base.py

Example 7: triplet_loss

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import relu [as alias]
def triplet_loss(X):
            # https://arxiv.org/pdf/1804.07275v1.pdf
            # Eq (1)
            features = K.int_shape(X)[-1] // 3
            p1, p2, n1 = X[...,:features], X[...,features:2*features], X[...,2*features:]
            d_p1_p2 = K.sum(K.square(p1 - p2), axis=-1, keepdims=True)
            d_p1_n1 = K.sum(K.square(p1 - n1), axis=-1, keepdims=True)
            d_p2_n1 = K.sum(K.square(p2 - n1), axis=-1, keepdims=True)
            m = 2.

            loss = K.relu(m + d_p1_p2 - d_p1_n1) + K.relu(m + d_p1_p2 - d_p2_n1)
            
            # Eq (3,4) note: lambda trade-off param confirmed to be 1e-3 by the paper authors (by email)
            loss += 1e-3 * ( \
                K.sum(K.square(p1), axis=-1, keepdims=True) + \
                K.sum(K.square(p2), axis=-1, keepdims=True) + \
                K.sum(K.square(n1), axis=-1, keepdims=True))

            return loss 
Developer: antorsae, Project: landmark-recognition-challenge, Lines of code: 21, Source file: train.py
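Here K.relu implements the hinge of the triplet margin loss: each term is zero once the negative sample is at least margin m farther from the anchors than the positive pair. A toy check with made-up squared distances:

from keras import backend as K

m = 2.0
d_pos = K.constant(1.0)  # squared distance between the positive pair
print(K.eval(K.relu(m + d_pos - K.constant(4.0))))  # 0.0 -> constraint satisfied
print(K.eval(K.relu(m + d_pos - K.constant(2.0))))  # 1.0 -> constraint violated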

Example 8: call

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import relu [as alias]
def call(self, inputs):
        if self.data_format == 'channels_first':
            sq = K.mean(inputs, [2, 3])
        else:
            sq = K.mean(inputs, [1, 2])

        ex = K.dot(sq, self.kernel1)
        if self.use_bias:
            ex = K.bias_add(ex, self.bias1)
        ex = K.relu(ex)

        ex = K.dot(ex, self.kernel2)
        if self.use_bias:
            ex = K.bias_add(ex, self.bias2)
        ex = K.sigmoid(ex)

        if self.data_format == 'channels_first':
            ex = K.expand_dims(ex, -1)
            ex = K.expand_dims(ex, -1)
        else:
            ex = K.expand_dims(ex, 1)
            ex = K.expand_dims(ex, 1)

        return inputs * ex 
Developer: DingKe, Project: nn_playground, Lines of code: 26, Source file: layers.py

Example 9: model_generator

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import relu [as alias]
def model_generator():
    # note: mode=, border_mode=, and Convolution2D(n, 3, 3) below are Keras 1 API
    nch = 256
    g_input = Input(shape=[100])
    H = Dense(nch * 14 * 14)(g_input)
    H = BatchNormalization(mode=2)(H)
    H = Activation('relu')(H)
    H = dim_ordering_reshape(nch, 14)(H)
    H = UpSampling2D(size=(2, 2))(H)
    H = Convolution2D(int(nch / 2), 3, 3, border_mode='same')(H)
    H = BatchNormalization(mode=2, axis=1)(H)
    H = Activation('relu')(H)
    H = Convolution2D(int(nch / 4), 3, 3, border_mode='same')(H)
    H = BatchNormalization(mode=2, axis=1)(H)
    H = Activation('relu')(H)
    H = Convolution2D(1, 1, 1, border_mode='same')(H)
    g_V = Activation('sigmoid')(H)
    return Model(g_input, g_V) 
Developer: bstriner, Project: keras-adversarial, Lines of code: 19, Source file: example_gan_convolutional.py

Example 10: model_discriminator

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import relu [as alias]
def model_discriminator(input_shape=(1, 28, 28), dropout_rate=0.5):
    d_input = dim_ordering_input(input_shape, name="input_x")
    nch = 512
    # nch = 128
    # note: activation='relu' zeroes the negatives before LeakyReLU sees them,
    # so the following LeakyReLU layers act as identity (Keras 1 API throughout)
    H = Convolution2D(int(nch / 2), 5, 5, subsample=(2, 2), border_mode='same', activation='relu')(d_input)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    H = Convolution2D(nch, 5, 5, subsample=(2, 2), border_mode='same', activation='relu')(H)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    H = Flatten()(H)
    H = Dense(int(nch / 2))(H)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    d_V = Dense(1, activation='sigmoid')(H)
    return Model(d_input, d_V) 
Developer: bstriner, Project: keras-adversarial, Lines of code: 18, Source file: example_gan_convolutional.py

Example 11: compile

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import relu [as alias]
def compile(self, optimizer, **kwargs):
        qa_model = self.get_qa_model()

        good_similarity = qa_model([self.question, self.answer_good])
        bad_similarity = qa_model([self.question, self.answer_bad])

        # loss = merge([good_similarity, bad_similarity],
        #              mode=lambda x: K.relu(self.config['margin'] - x[0] + x[1]),
        #              output_shape=lambda x: x[0])

        loss = Lambda(lambda x: K.relu(self.config['margin'] - x[0] + x[1]),
                      output_shape=lambda x: x[0])([good_similarity, bad_similarity])

        self.prediction_model = Model(inputs=[self.question, self.answer_good], outputs=good_similarity,
                                      name='prediction_model')
        self.prediction_model.compile(loss=lambda y_true, y_pred: y_pred, optimizer=optimizer, **kwargs)

        self.training_model = Model(inputs=[self.question, self.answer_good, self.answer_bad], outputs=loss,
                                    name='training_model')
        self.training_model.compile(loss=lambda y_true, y_pred: y_pred, optimizer=optimizer, **kwargs) 
Developer: codekansas, Project: keras-language-modeling, Lines of code: 22, Source file: keras_models.py
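Because the training model's output is the margin loss itself and compile uses loss=lambda y_true, y_pred: y_pred, fit() still has to be given targets even though they are ignored. A usage sketch with hypothetical, pre-tokenized arrays:

import numpy as np

# questions, good_answers, bad_answers are hypothetical input arrays
dummy_targets = np.zeros((len(questions), 1))  # ignored by the lambda loss
training_model.fit([questions, good_answers, bad_answers], dummy_targets,
                   epochs=10, batch_size=64)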

Example 12: reactionrnn_model

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import relu [as alias]
def reactionrnn_model(weights_path, num_classes, maxlen=140):
    '''
    Builds the model architecture for reactionrnn and
    loads the pretrained weights for the model.
    '''

    input = Input(shape=(maxlen,), name='input')
    embedded = Embedding(num_classes, 100, input_length=maxlen,
                         name='embedding')(input)
    rnn = GRU(256, return_sequences=False, name='rnn')(embedded)
    output = Dense(5, name='output',
                   activation=lambda x: K.relu(x) / K.sum(K.relu(x),
                                                          axis=-1))(rnn)

    model = Model(inputs=[input], outputs=[output])
    model.load_weights(weights_path, by_name=True)
    model.compile(loss='mse', optimizer='nadam')
    return model 
Developer: minimaxir, Project: reactionrnn, Lines of code: 20, Source file: reactionrnn.py
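The custom output activation rescales the non-negative activations so they sum to one, a cheaper softmax-like normalization. A toy check with made-up logits; using keepdims=True in the denominator keeps the division broadcastable for any batch size, whereas the lambda above relies on the collapsed last axis broadcasting correctly:

from keras import backend as K

x = K.constant([[-1.0, 2.0, 6.0]])
y = K.relu(x) / K.sum(K.relu(x), axis=-1, keepdims=True)
print(K.eval(y))  # [[0.   0.25 0.75]]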

Example 13: relu6

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import relu [as alias]
def relu6(x):
    return K.relu(x, max_value=6) 
Developer: killthekitten, Project: kaggle-carvana-2017, Lines of code: 4, Source file: mobile_net_fixed.py
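relu6 (a ReLU clipped at 6) is the standard MobileNet activation; once defined, it can be passed anywhere Keras accepts an activation. A minimal usage sketch with hypothetical layer sizes:

from keras import backend as K
from keras.layers import Dense, Input
from keras.models import Model

def relu6(x):
    return K.relu(x, max_value=6)

inp = Input(shape=(16,))
out = Dense(8, activation=relu6)(inp)  # the function is passed directly
model = Model(inp, out)

When reloading a saved model that uses it, pass custom_objects={'relu6': relu6} to load_model.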

Example 14: relu6

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import relu [as alias]
def relu6(x):
    return K.relu(x, max_value=6) 
Developer: bubbliiiing, Project: Semantic-Segmentation, Lines of code: 4, Source file: mobilenet.py

Example 15: skewed_absolute_error

# Required module import: from keras import backend [as alias]
# Or: from keras.backend import relu [as alias]
def skewed_absolute_error(y_true, y_pred, tau):
    """
    The quantile (pinball) loss function for a given quantile tau:

    L(y_true, y_pred) = (tau - I(y_true < y_pred)) * (y_true - y_pred)

    where I is the indicator function: over-predictions are penalized with
    weight (1 - tau) and under-predictions with weight tau, matching the
    K.relu terms below.
    """
    dy = y_pred - y_true
    return K.mean((1.0 - tau) * K.relu(dy) + tau * K.relu(-dy), axis=-1) 
Developer: atmtools, Project: typhon, Lines of code: 12, Source file: qrnn.py
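A sketch (assumed usage, not from the typhon source) of fixing tau with a closure so that skewed_absolute_error plugs into compile() as an ordinary Keras loss:

def quantile_loss(tau):
    def loss(y_true, y_pred):
        return skewed_absolute_error(y_true, y_pred, tau)
    return loss

# given an existing Keras `model`, fit the 90th percentile:
model.compile(optimizer='adam', loss=quantile_loss(0.9))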


Note: The keras.backend.relu method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers; copyright in the source code remains with the original authors. For distribution and use, please follow the corresponding project's license. Do not reproduce without permission.