

Python backend.tanh Method Code Examples

This article collects typical usage examples of the Python method keras.backend.tanh. If you have been wondering what backend.tanh does and how to use it, the curated examples below should help. You can also browse further usage examples from the keras.backend module.


The following shows 15 code examples of the backend.tanh method, drawn from open-source projects and ordered by popularity.
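
Before the project snippets, here is a minimal self-contained sketch of the method itself (assuming Keras with the TensorFlow backend is installed): K.tanh applies the elementwise hyperbolic tangent, squashing values into (-1, 1).

import numpy as np
from keras import backend as K

x = K.constant(np.array([[-2.0, 0.0, 2.0]]))
y = K.tanh(x)          # elementwise hyperbolic tangent
print(K.eval(y))       # approximately [[-0.964  0.     0.964]]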

Example 1: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import tanh [as alias]
def call(self, x, mask=None):
        uit = dot_product(x, self.W)

        if self.bias:
            uit += self.b

        uit = K.tanh(uit)
        ait = dot_product(uit, self.u)

        a = K.exp(ait)

        # apply the mask after the exp; weights will be re-normalized below
        if mask is not None:
            # Cast the mask to floatX to avoid float64 upcasting in theano
            a *= K.cast(mask, K.floatx())

        # In some cases, especially early in training, the sum may be almost zero
        # and produce NaNs; adding a very small positive number ε to the sum avoids this.
        # a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx())
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())

        a = K.expand_dims(a)
        weighted_input = x * a
        return K.sum(weighted_input, axis=1) 
Author: Hsankesara | Project: DeepResearch | Lines: 26 | Source: attention_with_context.py
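
Examples 1 and 2 call a dot_product helper defined elsewhere in their source files. A common backend-agnostic definition (a sketch of the usual pattern, not copied verbatim from either repo) looks like this:

from keras import backend as K

def dot_product(x, kernel):
    # On TensorFlow, dot a 3D tensor with a kernel by expanding the kernel's
    # last axis and squeezing it back out; on Theano, plain K.dot suffices.
    if K.backend() == 'tensorflow':
        return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)
    return K.dot(x, kernel)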

Example 2: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import tanh [as alias]
def call(self, x, mask=None):
        eij = dot_product(x, self.W)

        if self.bias:
            eij += self.b

        eij = K.tanh(eij)

        a = K.exp(eij)

        if mask is not None:
            a *= K.cast(mask, K.floatx())

        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())

        weighted_input = x * K.expand_dims(a)

        result = K.sum(weighted_input, axis=1)

        if self.return_attention:
            return [result, a]
        return result 
Author: jiujiezz | Project: deephlapan | Lines: 24 | Source: attention.py

Example 3: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import tanh [as alias]
def call(self, x, mask=None):
        # shape of x: [batch_size, seq_len, attention_dim]
        # shape of u: [batch_size, attention_dim]
        # uit = tanh(xW + b)
        uit = K.tanh(K.bias_add(K.dot(x, self.W), self.b))
        ait = K.dot(uit, self.u)
        ait = K.squeeze(ait, -1)

        ait = K.exp(ait)

        if mask is not None:
            # Cast the mask to floatX to avoid float64 upcasting in theano
            ait *= K.cast(mask, K.floatx())
        ait /= K.cast(K.sum(ait, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        ait = K.expand_dims(ait)
        weighted_input = x * ait
        output = K.sum(weighted_input, axis=1)

        return output 
Author: shibing624 | Project: text-classifier | Lines: 21 | Source: attention_layer.py
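
Examples 1-3 share the same masked-softmax pattern: exponentiate the scores, zero out padded timesteps, then renormalize with a small epsilon. A NumPy sketch with made-up scores and a mask (1 = real token, 0 = padding):

import numpy as np

scores = np.array([[0.5, 1.2, -0.3, 0.0]])
mask = np.array([[1.0, 1.0, 1.0, 0.0]])
a = np.exp(scores) * mask                    # masked, exponentiated scores
a /= a.sum(axis=1, keepdims=True) + 1e-7     # epsilon guards all-padding rows
print(a)                                     # the padded position gets weight 0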

Example 4: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import tanh [as alias]
def call(self, input_tensor, mask=None):
        x = input_tensor[0]
        y = input_tensor[1]
        mask = mask[0]

        y = K.transpose(K.dot(self.W, K.transpose(y)))
        y = K.expand_dims(y, axis=-2)
        y = K.repeat_elements(y, self.steps, axis=1)
        eij = K.sum(x * y, axis=-1)

        if self.bias:
            b = K.repeat_elements(self.b, self.steps, axis=0)
            eij += b

        eij = K.tanh(eij)
        a = K.exp(eij)

        if mask is not None:
            a *= K.cast(mask, K.floatx())

        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        return a 
Author: madrugado | Project: Attention-Based-Aspect-Extraction | Lines: 24 | Source: my_layers.py

Example 5: _cosine_distance

# Required import: from keras import backend [as alias]
# Or: from keras.backend import tanh [as alias]
# Also needed here: import tensorflow as tf
def _cosine_distance(v1, v2, cosine_norm=True, eps=1e-6):
    """
    Only requires `tf.reduce_sum(v1 * v2, axis=-1)`.

    :param v1: [batch, time_steps(v1), 1, m, d]
    :param v2: [batch, 1, time_steps(v2), m, d]
    :param cosine_norm: True
    :param eps: 1e-6
    :return: [batch, time_steps(v1), time_steps(v2), m]
    """
    cosine_numerator = tf.reduce_sum(v1 * v2, axis=-1)
    if not cosine_norm:
        return K.tanh(cosine_numerator)
    v1_norm = K.sqrt(tf.maximum(tf.reduce_sum(tf.square(v1), axis=-1), eps))
    v2_norm = K.sqrt(tf.maximum(tf.reduce_sum(tf.square(v2), axis=-1), eps))
    return cosine_numerator / v1_norm / v2_norm 
Author: NTMC-Community | Project: MatchZoo | Lines: 18 | Source: multi_perspective_layer.py
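
A quick numeric check of what _cosine_distance computes, using plain NumPy and made-up 1-D vectors (the Keras version broadcasts the same formula over the extra axes):

import numpy as np

v1 = np.array([1.0, 2.0, 3.0])
v2 = np.array([4.0, 5.0, 6.0])
eps = 1e-6
num = (v1 * v2).sum()
cos = num / max(np.sqrt((v1 ** 2).sum()), eps) / max(np.sqrt((v2 ** 2).sum()), eps)
print(cos)   # ~0.9746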

Example 6: __init__

# Required import: from keras import backend [as alias]
# Or: from keras.backend import tanh [as alias]
def __init__(self, nb_filters_in, nb_filters_out, nb_filters_att, nb_rows, nb_cols,
                 init='normal', inner_init='orthogonal', attentive_init='zero',
                 activation='tanh', inner_activation='sigmoid',
                 W_regularizer=None, U_regularizer=None,
                 weights=None, go_backwards=False,
                 **kwargs):
        self.nb_filters_in = nb_filters_in
        self.nb_filters_out = nb_filters_out
        self.nb_filters_att = nb_filters_att
        self.nb_rows = nb_rows
        self.nb_cols = nb_cols
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.attentive_init = initializations.get(attentive_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.initial_weights = weights
        self.go_backwards = go_backwards

        self.W_regularizer = W_regularizer
        self.U_regularizer = U_regularizer
        self.input_spec = [InputSpec(ndim=5)]

        super(AttentiveConvLSTM, self).__init__(**kwargs) 
Author: marcellacornia | Project: sam | Lines: 26 | Source: attentive_convlstm.py

Example 7: step

# Required import: from keras import backend [as alias]
# Or: from keras.backend import tanh [as alias]
def step(self, x, states):
        x_shape = K.shape(x)
        h_tm1 = states[0]
        c_tm1 = states[1]

        e = self.V_a(K.tanh(self.W_a(h_tm1) + self.U_a(x)))
        a = K.reshape(K.softmax(K.batch_flatten(e)), (x_shape[0], 1, x_shape[2], x_shape[3]))
        x_tilde = x * K.repeat_elements(a, x_shape[1], 1)

        x_i = self.W_i(x_tilde)
        x_f = self.W_f(x_tilde)
        x_c = self.W_c(x_tilde)
        x_o = self.W_o(x_tilde)

        i = self.inner_activation(x_i + self.U_i(h_tm1))
        f = self.inner_activation(x_f + self.U_f(h_tm1))
        c = f * c_tm1 + i * self.activation(x_c + self.U_c(h_tm1))
        o = self.inner_activation(x_o + self.U_o(h_tm1))

        h = o * self.activation(c)
        return h, [h, c] 
Author: marcellacornia | Project: sam | Lines: 23 | Source: attentive_convlstm.py
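
The gate algebra in step() is the standard LSTM update applied to the attention-reweighted input x_tilde; in the real layer the W_*/U_* terms are convolutional sublayers. A scalar NumPy sketch of one step with toy weights:

import numpy as np

def sigmoid(v):
    return 1.0 / (1.0 + np.exp(-v))

x_tilde, h_prev, c_prev = 0.5, 0.1, -0.2   # toy scalar state
w, u = 0.8, 0.3                            # one shared toy weight per gate

i = sigmoid(w * x_tilde + u * h_prev)      # input gate
f = sigmoid(w * x_tilde + u * h_prev)      # forget gate
c = f * c_prev + i * np.tanh(w * x_tilde + u * h_prev)
o = sigmoid(w * x_tilde + u * h_prev)      # output gate
h = o * np.tanh(c)                         # new hidden state
print(h, c)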

Example 8: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import tanh [as alias]
def call(self, x, mask=None):
        features_dim = self.features_dim
        step_dim = self.step_dim

        eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)),
                        K.reshape(self.W, (features_dim, 1))), (-1, step_dim))

        if self.bias:
            eij += self.b

        eij = K.tanh(eij)

        a = K.exp(eij)

        if mask is not None:
            a *= K.cast(mask, K.floatx())

        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())

        a = K.expand_dims(a)
        weighted_input = x * a
        return K.sum(weighted_input, axis=1) 
Author: kermitt2 | Project: delft | Lines: 24 | Source: Attention.py

Example 9: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import tanh [as alias]
def call(self, input_tensor, mask=None):
        x = input_tensor[0]
        aspect = input_tensor[1]
        mask = mask[0]

        aspect = K.transpose(K.dot(self.W, K.transpose(aspect)))
        aspect = K.expand_dims(aspect, axis=-2)
        aspect = K.repeat_elements(aspect, self.steps, axis=1)
        eij = K.sum(x*aspect, axis=-1)

        if self.bias:
            b = K.repeat_elements(self.b, self.steps, axis=0)
            eij += b

        eij = K.tanh(eij)

        a = K.exp(eij)

        if mask is not None:
            a *= K.cast(mask, K.floatx())

        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())

        return a 
Author: ruidan | Project: Aspect-level-sentiment | Lines: 26 | Source: my_layers.py

Example 10: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import tanh [as alias]
def call(self, h, mask=None):
        h_shape = K.shape(h)
        d_w, T = h_shape[0], h_shape[1]
        
        logits = K.dot(h, self.w)  # w^T h
        logits = K.reshape(logits, (d_w, T))
        alpha = K.exp(logits - K.max(logits, axis=-1, keepdims=True))  # exp
        
        # masked timesteps have zero weight
        if mask is not None:
            mask = K.cast(mask, K.floatx())
            alpha = alpha * mask
        alpha = alpha / (K.sum(alpha, axis=1, keepdims=True) + K.epsilon())  # softmax; epsilon avoids NaN on all-masked rows
        r = K.sum(h * K.expand_dims(alpha), axis=1)  # r = h*alpha^T
        h_star = K.tanh(r)  # h^* = tanh(r)
        if self.return_attention:
            return [h_star, alpha]
        return h_star 
Author: tsterbak | Project: keras_attention | Lines: 20 | Source: models.py

Example 11: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import tanh [as alias]
def call(self, x, mask=None):
        uit = K.tanh(K.dot(x, self.Ws1))
        ait = K.dot(uit, self.Ws2)
        ait = K.permute_dimensions(ait, (0, 2, 1))
        A = K.softmax(ait, axis=1)
        M = K.batch_dot(A, x)
        if self.punish:
            A_T = K.permute_dimensions(A, (0, 2, 1))
            tile_eye = K.tile(K.eye(self.weight_ws2), [self.batch_size, 1])
            tile_eye = K.reshape(
                tile_eye, shape=[-1, self.weight_ws2, self.weight_ws2])
            AA_T = K.batch_dot(A, A_T) - tile_eye
            P = K.l2_normalize(AA_T, axis=(1, 2))
            return M, P
        else:
            return M 
Author: stevewyl | Project: nlp_toolkit | Lines: 18 | Source: multi_dim_attention.py
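
The punish branch is related to the orthogonality penalty from Lin et al.'s structured self-attention ("A Structured Self-Attentive Sentence Embedding"), which discourages the attention heads from all focusing on the same positions. A NumPy sketch of that penalty term for one hypothetical 2x4 attention matrix A:

import numpy as np

A = np.array([[0.7, 0.1, 0.1, 0.1],
              [0.1, 0.1, 0.1, 0.7]])    # two fairly distinct heads
P = A @ A.T - np.eye(A.shape[0])        # AA^T minus identity
print(np.linalg.norm(P))                # Frobenius norm; smaller = more diverse heads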

Example 12: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import tanh [as alias]
def call(self, x, mask=None):
        # MLP
        ut = K.dot(x, self.kernel)
        if self.use_bias:
            ut = K.bias_add(ut, self.bias)
        if self.activation:
            ut = K.tanh(ut)
        if self.context_kernel:
            ut = K.dot(ut, self.context_kernel)
        ut = K.squeeze(ut, axis=-1)
        # softmax
        at = K.exp(ut - K.max(ut, axis=-1, keepdims=True))
        if mask is not None:
            at *= K.cast(mask, K.floatx())
        att_weights = at / (K.sum(at, axis=1, keepdims=True) + K.epsilon())
        # output
        atx = x * K.expand_dims(att_weights, axis=-1)
        output = K.sum(atx, axis=1)
        if self.return_attention:
            return [output, att_weights]
        return output 
Author: stevewyl | Project: nlp_toolkit | Lines: 23 | Source: attention.py
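
Example 12 subtracts the row-wise maximum before exponentiating, which is the standard numerically stable softmax. A NumPy sketch showing why, with made-up large logits:

import numpy as np

logits = np.array([[1000.0, 1001.0, 1002.0]])
naive = np.exp(logits)                                    # overflows to inf
stable = np.exp(logits - logits.max(axis=-1, keepdims=True))
print(naive)                                              # [[inf inf inf]]
print(stable / stable.sum(axis=-1, keepdims=True))        # [[0.09  0.245 0.665]]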

Example 13: __init__

# Required import: from keras import backend [as alias]
# Or: from keras.backend import tanh [as alias]
def __init__(self, units, h, h_dim,
                 kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal',
                 #activation='tanh', inner_activation='hard_sigmoid',
                 #W_regularizer=None, U_regularizer=None, b_regularizer=None,
                 #dropout_W=0., dropout_U=0., 
                 **kwargs):
        self.units = units
        self.h = h[:,-1,:]
        self.h_dim = h_dim
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        #self.activation = activations.get(activation)
        #self.inner_activation = activations.get(inner_activation)
        #self.W_regularizer = regularizers.get(W_regularizer)
        #self.U_regularizer = regularizers.get(U_regularizer)
        #self.b_regularizer = regularizers.get(b_regularizer)
        #self.dropout_W = dropout_W
        #self.dropout_U = dropout_U

        #if self.dropout_W or self.dropout_U:
        #    self.uses_learning_phase = True
        super(Attention, self).__init__(**kwargs) 
Author: wentaozhu | Project: recurrent-attention-for-QA-SQUAD-based-on-keras | Lines: 24 | Source: rnnlayer.py

Example 14: step

# Required import: from keras import backend [as alias]
# Or: from keras.backend import tanh [as alias]
def step(self, inputs, states):
        h_tm1 = states[0]  # previous memory
        #B_U = states[1]  # dropout matrices for recurrent units
        #B_W = states[2]
        h_tm1a = K.dot(h_tm1, self.Wa)
        eij = K.dot(K.tanh(h_tm1a + K.dot(inputs[:, :self.h_dim], self.Ua)), self.Va)
        eijs = K.repeat_elements(eij, self.h_dim, axis=1)

        #alphaij = K.softmax(eijs) # batchsize * lenh       h batchsize * lenh * ndim
        #ci = K.permute_dimensions(K.permute_dimensions(self.h, [2,0,1]) * alphaij, [1,2,0])
        #cisum = K.sum(ci, axis=1)
        cisum = eijs*inputs[:, :self.h_dim]
        #print(K.shape(cisum), cisum.shape, ci.shape, self.h.shape, alphaij.shape, x.shape)

        zr = K.sigmoid(K.dot(inputs[:, self.h_dim:], self.Wzr) + K.dot(h_tm1, self.Uzr) + K.dot(cisum, self.Czr))
        zi = zr[:, :self.units]
        ri = zr[:, self.units: 2 * self.units]
        si_ = K.tanh(K.dot(inputs[:, self.h_dim:], self.W) + K.dot(ri*h_tm1, self.U) + K.dot(cisum, self.C))
        si = (1-zi) * h_tm1 + zi * si_
        return si, [si] #h_tm1, [h_tm1] 
Author: wentaozhu | Project: recurrent-attention-for-QA-SQUAD-based-on-keras | Lines: 22 | Source: rnnlayer.py

Example 15: _get_attention_weights

# Required import: from keras import backend [as alias]
# Or: from keras.backend import tanh [as alias]
def _get_attention_weights(self, X):
        """
        Computes the attention weights for each timestep in X
        :param X: 3d-tensor (batch_size, time_steps, input_dim)
        :return: 2d-tensor (batch_size, time_steps) of attention weights
        """
        # Compute a time-wise stimulus, i.e. a stimulus for each
        # time step. For this, first compute a hidden layer of
        # dimension self.context_vector_length and take the
        # similarity of this layer with self.u as the stimulus.
        u_tw = K.tanh(K.dot(X, self.W))
        tw_stimulus = K.dot(u_tw, self.u)

        # Remove the last axis and apply softmax to the stimulus to
        # get a probability distribution over the timesteps.
        tw_stimulus = K.reshape(tw_stimulus, (-1, tw_stimulus.shape[1]))
        att_weights = K.softmax(tw_stimulus)

        return att_weights 
Author: FlorisHoogenboom | Project: keras-han-for-docla | Lines: 21 | Source: layers.py


Note: the keras.backend.tanh examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not repost without permission.