

Python activations.tanh Method Code Examples

This article collects typical usage examples of the Python method keras.activations.tanh. If you are wondering what exactly activations.tanh does, how to use it, or what real-world examples of it look like, the curated code examples below may help. You can also explore further usage examples from its containing module, keras.activations.


The following shows 15 code examples of the activations.tanh method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
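Before the examples, here is a minimal sketch of what activations.tanh computes, assuming Keras 2 on the TensorFlow backend (the input values below are illustrative):

import numpy as np
from keras import activations
from keras import backend as K

x = K.constant([-2.0, 0.0, 2.0])
y = activations.tanh(x)  # element-wise hyperbolic tangent, squashing inputs into (-1, 1)
print(K.eval(y))                                          # approx. [-0.964  0.     0.964]
print(np.allclose(K.eval(y), np.tanh([-2.0, 0.0, 2.0])))  # True: identical to numpy's tanh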

Example 1: nn_model

# Required import: from keras import activations [as alias]
# Or: from keras.activations import tanh [as alias]
# imports needed to run this excerpt (Keras 2.x)
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.initializers import constant, one
from keras.activations import tanh, softmax
from keras.optimizers import SGD
from keras.losses import categorical_crossentropy
from keras.callbacks import RemoteMonitor
from keras.utils import np_utils


def nn_model():
    (x_train, y_train), _ = mnist.load_data()
    # normalize pixel values to [0, 1]
    x_train = x_train.reshape(x_train.shape[0], -1) / 255.
    # one-hot encode the labels
    y_train = np_utils.to_categorical(y=y_train, num_classes=10)
    # constant(value=1.) is a custom constant initializer; constant(value=1.) is equivalent to one()
    # build the model: 784 input neurons, 10 output neurons
    model = Sequential([
        Dense(units=200, input_dim=784, bias_initializer=constant(value=1.), activation=tanh),
        Dense(units=100, bias_initializer=one(), activation=tanh),
        Dense(units=10, bias_initializer=one(), activation=softmax),
    ])

    opt = SGD(lr=0.2, clipnorm=1.)  # optimizer
    model.compile(optimizer=opt, loss=categorical_crossentropy, metrics=['acc', 'mae'])  # compile
    model.fit(x_train, y_train, batch_size=64, epochs=20, callbacks=[RemoteMonitor()])
    model_save(model, './model.h5')  # model_save is a helper from the Jtyoui project
Author: jtyoui, Project: Jtyoui, Lines of code: 20, Source: HandWritingRecognition.py

Example 2: test_relu

# Required import: from keras import activations [as alias]
# Or: from keras.activations import tanh [as alias]
# imports needed to run this excerpt (Theano-era Keras tests)
import theano
import theano.tensor as T

# get_standard_values and list_assert_equal are helpers defined elsewhere in the test module


def test_relu():
    '''
    The relu implementation doesn't depend on the value being
    a theano variable. Testing ints, floats and theano tensors.
    '''
    from keras.activations import relu as r

    assert r(5) == 5
    assert r(-5) == 0
    assert r(-0.1) == 0
    assert r(0.1) == 0.1

    x = T.vector()
    exp = r(x)
    f = theano.function([x], exp)

    test_values = get_standard_values()
    result = f(test_values)

    list_assert_equal(result, test_values)  # no negatives among the test values
Author: lllcho, Project: CAPTCHA-breaking, Lines of code: 23, Source: test_activations.py

Example 3: test_tanh

# Required import: from keras import activations [as alias]
# Or: from keras.activations import tanh [as alias]
# imports needed to run this excerpt (Theano-era Keras tests)
import math
import theano
import theano.tensor as T


def test_tanh():
    from keras.activations import tanh as t
    test_values = get_standard_values()  # helper defined elsewhere in the test module

    x = T.vector()
    exp = t(x)
    f = theano.function([x], exp)

    result = f(test_values)
    expected = [math.tanh(v) for v in test_values]

    print(result)
    print(expected)

    list_assert_equal(result, expected)  # helper defined elsewhere in the test module
Author: lllcho, Project: CAPTCHA-breaking, Lines of code: 18, Source: test_activations.py

Example 4: step

# Required import: from keras import activations [as alias]
# Or: from keras.activations import tanh [as alias]
def step(self, x_input, states):
        # x_input: <TensorType(float32, matrix)>
        input_shape = self.input_spec[0].shape
        en_seq = states[-1]
        _, [h, c] = super(PointerLSTM, self).step(x_input, states[:-1])

        # attention energy: vt * tanh(W1*e + W2*d)
        dec_seq = K.repeat(h, input_shape[1])
        Eij = time_distributed_dense(en_seq, self.W1, output_dim=1)
        Dij = time_distributed_dense(dec_seq, self.W2, output_dim=1)
        U = self.vt * tanh(Eij + Dij)
        U = K.squeeze(U, 2)

        # turn the energies into a probability distribution over input positions
        pointer = softmax(U)
        return pointer, [h, c]
Author: zygmuntz, Project: pointer-networks-experiments, Lines of code: 20, Source: PointerLSTM.py
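In equation form, the attention computed in this step is the pointer-network energy (our reading of the code above, with e_j the encoder outputs in en_seq and d_t the decoder state h):

    u^t_j = v_t^\top \tanh(W_1 e_j + W_2 d_t), \qquad j = 1, \dots, n
    \mathrm{pointer}_t = \mathrm{softmax}(u^t)

so each step returns a probability distribution over the input positions rather than a context vector.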

Example 5: _compute_energy

# Required import: from keras import activations [as alias]
# Or: from keras.activations import tanh [as alias]
def _compute_energy(self, stm):
        # "concat" energy function
        # energy_i = g * V / |V| * tanh([stm, h_i] * W + b) + r
        _stm = K.dot(stm, self.W_a)

        V_a = self.V_a
        if self.normalize_energy:
            V_a = self.Energy_g * K.l2_normalize(self.V_a)

        et = K.dot(activations.tanh(K.expand_dims(_stm, axis=1) + self._uxpb),
                   K.expand_dims(V_a))

        if self.is_monotonic:
            et += self.Energy_r

        return et 
Author: asmekal, Project: keras-monotonic-attention, Lines of code: 18, Source: attention_decoder.py
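Written out, the "concat" energy this method computes is (matching the code comment; g is Energy_g, r is Energy_r, and self._uxpb appears to hold a precomputed U_a h_i + b term):

    e_{t,i} = g \, \frac{v_a^\top}{\lVert v_a \rVert} \tanh(W_a s_t + U_a h_i + b) + r

where the normalization by \lVert v_a \rVert is applied only when normalize_energy is set, and r is added only in the monotonic case.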

Example 6: modelGenerator

# Required import: from keras import activations [as alias]
# Or: from keras.activations import tanh [as alias]
def modelGenerator(self, name):
        inputImg = Input(shape=self.latent_dim)
        # Layers 1-3: three residual blocks
        x = self.resblk(inputImg, 256)
        x = self.resblk(x, 256)
        x = self.resblk(x, 256)
        # Layer 4:
        x = Conv2DTranspose(128, kernel_size=3, strides=2, padding='same')(x)
        x = LeakyReLU(alpha=0.01)(x)
        # Layer 5:
        x = Conv2DTranspose(64, kernel_size=3, strides=2, padding='same')(x)
        x = LeakyReLU(alpha=0.01)(x)
        # Layer 6
        x = Conv2DTranspose(self.channels, kernel_size=1, strides=1, padding='valid')(x)
        z = Activation("tanh")(x)

        return Model(inputs=inputImg, outputs=z, name=name) 
Author: simontomaskarlsson, Project: GAN-MRI, Lines of code: 21, Source: UNIT.py

Example 7: get_initial_state

# Required import: from keras import activations [as alias]
# Or: from keras.activations import tanh [as alias]
def get_initial_state(self, inputs):
        if isinstance(inputs, list):
            assert len(inputs) == 2  # inputs == [encoder_outputs, y_true]
            encoder_outputs = inputs[0]
        else:
            encoder_outputs = inputs

        memory_shape = K.shape(encoder_outputs)

        # apply the matrix on the first time step to get the initial s0.
        s0 = activations.tanh(K.dot(encoder_outputs[:, 0], self.W_s))

        y0 = K.zeros((memory_shape[0],), dtype='int64') + self.start_token
        t0 = K.zeros((memory_shape[0],), dtype='int64')

        initial_states = [y0, s0, t0]
        if self.is_monotonic:
            # initial attention has form: [1, 0, 0, ..., 0] for each sample in batch
            alpha0 = K.ones((memory_shape[0], 1))
            alpha0 = K.switch(K.greater(memory_shape[1], 1),
                              lambda: K.concatenate([alpha0, K.zeros((memory_shape[0], memory_shape[1] - 1))], axis=-1),
                              alpha0)
            # like energy, attention is stored in shape (samples, time, 1)
            alpha0 = K.expand_dims(alpha0, -1)
            initial_states.append(alpha0)

        return initial_states 
Author: asmekal, Project: keras-monotonic-attention, Lines of code: 29, Source: attention_decoder.py
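The K.switch construction above just guards against a memory of length 1; in plain numpy, the initial attention it builds looks like this (a hypothetical illustration, not code from the project):

import numpy as np

batch, timesteps = 4, 6
alpha0 = np.zeros((batch, timesteps, 1), dtype='float32')
alpha0[:, 0, 0] = 1.0       # attention starts fully on the first encoder position
print(alpha0[0, :, 0])      # [1. 0. 0. 0. 0. 0.]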

Example 8: test_serialization

# Required import: from keras import activations [as alias]
# Or: from keras.activations import tanh [as alias]
def test_serialization():
    all_activations = ['softmax', 'relu', 'elu', 'tanh',
                       'sigmoid', 'hard_sigmoid', 'linear',
                       'softplus', 'softsign', 'selu']
    for name in all_activations:
        fn = activations.get(name)
        ref_fn = getattr(activations, name)
        assert fn == ref_fn
        config = activations.serialize(fn)
        fn = activations.deserialize(config)
        assert fn == ref_fn 
Author: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines of code: 13, Source: activations_test.py

Example 9: test_tanh

# Required import: from keras import activations [as alias]
# Or: from keras.activations import tanh [as alias]
# imports needed to run this excerpt (backend-agnostic Keras 2 test)
import numpy as np
from numpy.testing import assert_allclose
from keras import activations
from keras import backend as K


def test_tanh():
    test_values = get_standard_values()  # helper defined elsewhere in the test module

    x = K.placeholder(ndim=2)
    exp = activations.tanh(x)
    f = K.function([x], [exp])

    result = f([test_values])[0]
    expected = np.tanh(test_values)
    assert_allclose(result, expected, rtol=1e-05)
Author: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines of code: 12, Source: activations_test.py

Example 10: get_initial_state

# Required import: from keras import activations [as alias]
# Or: from keras.activations import tanh [as alias]
def get_initial_state(self, inputs):
        print('inputs shape:', inputs.get_shape())

        # apply the matrix on the first time step to get the initial s0.
        s0 = activations.tanh(K.dot(inputs[:, 0], self.W_s))

        # trick from keras.layers.recurrent: build a zero vector of shape (batch_size, output_dim)
        y0 = K.zeros_like(inputs)  # (samples, timesteps, input_dims)
        y0 = K.sum(y0, axis=(1, 2))  # (samples, )
        y0 = K.expand_dims(y0)  # (samples, 1)
        y0 = K.tile(y0, [1, self.output_dim])

        return [y0, s0] 
Author: datalogue, Project: keras-attention, Lines of code: 16, Source: custom_recurrents.py

Example 11: step

# Required import: from keras import activations [as alias]
# Or: from keras.activations import tanh [as alias]
def step(self, x_input, states):
        input_shape = self.input_spec[0].shape
        en_seq = states[-1]
        _, [h, c] = super(PointerLSTM, self).step(x_input, states[:-1])

        # vt*tanh(W1*e+W2*d)
        dec_seq = K.repeat(h, input_shape[1])
        Eij = time_distributed_dense(en_seq, self.W1, output_dim=1)
        Dij = time_distributed_dense(dec_seq, self.W2, output_dim=1)
        U = self.vt * tanh(Eij + Dij)
        U = K.squeeze(U, 2)

        # make probability tensor
        pointer = softmax(U)
        return pointer, [h, c] 
Author: keon, Project: pointer-networks, Lines of code: 17, Source: PointerLSTM.py

Example 12: _split_and_apply_activations

# Required import: from keras import activations [as alias]
# Or: from keras.activations import tanh [as alias]
def _split_and_apply_activations(self, controller_output):
        """ Takes the controller output and splits it into ntm_output, read and write addressing data.
            Returns a triple of ntm_output, controller_instructions_read, controller_instructions_write.
            ntm_output is a tensor; controller_instructions_read and controller_instructions_write are lists containing
            the addressing instructions (k, beta, g, shift, gamma) and, for write heads, additionally the write
            instructions, consisting of an erase and an add vector.

            As is necessary for stable results,
            k and add_vector are activated via tanh, erase_vector via hard sigmoid (this is critical!),
            shift via softmax,
            gamma is sigmoided and rescaled to [1, 10] (probably not ideal),
            g is sigmoided,
            beta is hard-sigmoided and shifted by 0.5 (probably not ideal!) """
        
        # splitting
        ntm_output, controller_instructions_read, controller_instructions_write = tf.split(
                    controller_output,
                    np.asarray([self.output_dim,
                                self.read_heads * self.controller_read_head_emitting_dim,
                                self.write_heads * self.controller_write_head_emitting_dim]),
                    axis=1)

        controller_instructions_read = tf.split(controller_instructions_read, self.read_heads, axis=1)
        controller_instructions_write = tf.split(controller_instructions_write, self.write_heads, axis=1)

        controller_instructions_read = [
                tf.split(single_head_data, np.asarray([self.m_depth, 1, 1, 3, 1]), axis=1) for 
                single_head_data in controller_instructions_read]
        
        controller_instructions_write = [
                tf.split(single_head_data, np.asarray([self.m_depth, 1, 1, 3, 1, self.m_depth, self.m_depth]), axis=1) for 
                single_head_data in controller_instructions_write]
        
        # activation
        ntm_output = self.activation(ntm_output)
        controller_instructions_read = [(tanh(k), hard_sigmoid(beta)+0.5, sigmoid(g), softmax(shift), 1 + 9*sigmoid(gamma)) for
                (k, beta, g, shift, gamma) in controller_instructions_read]
        controller_instructions_write = [
                (tanh(k), hard_sigmoid(beta)+0.5, sigmoid(g), softmax(shift), 1 + 9*sigmoid(gamma), hard_sigmoid(erase_vector), tanh(add_vector))  for 
                (k, beta, g, shift, gamma, erase_vector, add_vector) in controller_instructions_write]
       
        return (ntm_output, controller_instructions_read, controller_instructions_write) 
Author: flomlo, Project: ntm_keras, Lines of code: 44, Source: ntm.py

Example 13: build

# Required import: from keras import activations [as alias]
# Or: from keras.activations import tanh [as alias]
def build(self):

    dd_q_input = Input((self.config.nb_supervised_doc, self.config.doc_topk_term, 1), name='dd_q_input')
    dd_d_input = Input((self.config.nb_supervised_doc, self.config.doc_topk_term,
                        self.config.hist_size), name='dd_d_input')

    dd_q_w = Dense(1, kernel_initializer=self.initializer_gate, use_bias=False, name='dd_q_gate')(dd_q_input)
    dd_q_w = Lambda(lambda x: softmax(x, axis=2), output_shape=(
                  self.config.nb_supervised_doc, self.config.doc_topk_term,), name='dd_q_softmax')(dd_q_w)

    z = dd_d_input
    for i in range(self.config.nb_layers):
      # give each hidden layer a unique name; Keras raises an error on duplicate layer names
      z = Dense(self.config.hidden_size[i], activation='tanh',
                kernel_initializer=self.initializer_fc, name='hidden_%d' % i)(z)
    z = Dense(self.config.out_size, kernel_initializer=self.initializer_fc, name='dd_d_gate')(z)
    z = Reshape((self.config.nb_supervised_doc, self.config.doc_topk_term,))(z)
    dd_q_w = Reshape((self.config.nb_supervised_doc, self.config.doc_topk_term,))(dd_q_w)
    # out = Dot(axes=[2, 2], name='dd_pseudo_out')([z, dd_q_w])

    out = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[2, 2]), name='dd_pseudo_out')([z, dd_q_w])
    dd_init_out = Lambda(lambda x: tf.matrix_diag_part(x), output_shape=(self.config.nb_supervised_doc,), name='dd_init_out')(out)
    '''
    dd_init_out = Lambda(lambda x: tf.reduce_sum(x, axis=2), output_shape=(self.config.nb_supervised_doc,))(z)
    '''
    #dd_out = Reshape((self.config.nb_supervised_doc,))(dd_out)

    # dd out gating
    dd_gate = Input((self.config.nb_supervised_doc, 1), name='baseline_doc_score')
    dd_w = Dense(1, kernel_initializer=self.initializer_gate, use_bias=False, name='dd_gate')(dd_gate)
    # dd_w = Lambda(lambda x: softmax(x, axis=1), output_shape=(self.config.nb_supervised_doc,), name='dd_softmax')(dd_w)

    # dd_out = Dot(axes=[1, 1], name='dd_out')([dd_init_out, dd_w])
    dd_w = Reshape((self.config.nb_supervised_doc,))(dd_w)
    dd_init_out = Reshape((self.config.nb_supervised_doc,))(dd_init_out)


    if self.config.method in [1, 3]: # no doc gating, with dense layer
      z = dd_init_out
    elif self.config.method == 2:
      logging.info("Apply doc gating")
      z = Multiply(name='dd_out')([dd_init_out, dd_w])
    else:
      raise ValueError("Method not initialized, please check config file")

    if self.config.method in [1, 2]:
      logging.info("Dense layer on top")
      z = Dense(self.config.merge_hidden, activation='tanh', name='merge_hidden')(z)
      out = Dense(self.config.merge_out, name='score')(z)
    else:
      logging.info("Apply doc gating, No dense layer on top, sum up scores")
      out = Dot(axes=[1, 1], name='score')([z, dd_w])

    model = Model(inputs=[dd_q_input, dd_d_input, dd_gate], outputs=[out])
    print(model.summary())

    return model 
Author: ucasir, Project: NPRF, Lines of code: 58, Source: nprf_drmm.py

Example 14: __init__

# Required import: from keras import activations [as alias]
# Or: from keras.activations import tanh [as alias]
def __init__(self, units, output_dim,
                 activation='tanh',
                 return_probabilities=False,
                 name='AttentionDecoder',
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        """
        Implements an AttentionDecoder that takes in a sequence encoded by an
        encoder and outputs the decoded states 
        :param units: dimension of the hidden state and the attention matrices
        :param output_dim: the number of labels in the output space

        references:
            Bahdanau, Dzmitry, Kyunghyun Cho, and Yoshua Bengio. 
            "Neural machine translation by jointly learning to align and translate." 
            arXiv preprint arXiv:1409.0473 (2014).
        """
        self.units = units
        self.output_dim = output_dim
        self.return_probabilities = return_probabilities
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        # note: the recurrent regularizer/constraint reuse the kernel arguments (no separate parameters are exposed)
        self.recurrent_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        super(AttentionDecoder, self).__init__(**kwargs)
        self.name = name
        self.return_sequences = True  # must return sequences 
Author: datalogue, Project: keras-attention, Lines of code: 46, Source: custom_recurrents.py

Example 15: step

# Required import: from keras import activations [as alias]
# Or: from keras.activations import tanh [as alias]
def step(self, x, states):

        ytm, stm = states

        # repeat the hidden state to the length of the sequence
        _stm = K.repeat(stm, self.timesteps)

        # now multiply the repeated hidden state by the weight matrix
        _Wxstm = K.dot(_stm, self.W_a)

        # calculate the attention probabilities
        # this relates how much other timesteps contributed to this one.
        et = K.dot(activations.tanh(_Wxstm + self._uxpb),
                   K.expand_dims(self.V_a))
        at = K.exp(et)
        at_sum = K.sum(at, axis=1)
        at_sum_repeated = K.repeat(at_sum, self.timesteps)
        at /= at_sum_repeated  # vector of size (batchsize, timesteps, 1)

        # calculate the context vector
        context = K.squeeze(K.batch_dot(at, self.x_seq, axes=1), axis=1)
        # ~~~> calculate new hidden state
        # first calculate the "r" gate:

        rt = activations.sigmoid(
            K.dot(ytm, self.W_r)
            + K.dot(stm, self.U_r)
            + K.dot(context, self.C_r)
            + self.b_r)

        # now calculate the "z" gate
        zt = activations.sigmoid(
            K.dot(ytm, self.W_z)
            + K.dot(stm, self.U_z)
            + K.dot(context, self.C_z)
            + self.b_z)

        # calculate the proposal hidden state:
        s_tp = activations.tanh(
            K.dot(ytm, self.W_p)
            + K.dot((rt * stm), self.U_p)
            + K.dot(context, self.C_p)
            + self.b_p)

        # new hidden state:
        st = (1-zt)*stm + zt * s_tp

        yt = activations.softmax(
            K.dot(ytm, self.W_o)
            + K.dot(stm, self.U_o)
            + K.dot(context, self.C_o)
            + self.b_o)

        if self.return_probabilities:
            return at, [yt, st]
        else:
            return yt, [yt, st] 
Author: datalogue, Project: keras-attention, Lines of code: 59, Source: custom_recurrents.py
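For reference, this step implements a Bahdanau-style attention decoder; in equation form (our notation, matching the code above):

    e_{ti} = v_a^\top \tanh(W_a s_{t-1} + U_a h_i + b), \qquad \alpha_{ti} = \frac{\exp(e_{ti})}{\sum_k \exp(e_{tk})}
    c_t = \sum_i \alpha_{ti} h_i
    r_t = \sigma(W_r y_{t-1} + U_r s_{t-1} + C_r c_t + b_r)
    z_t = \sigma(W_z y_{t-1} + U_z s_{t-1} + C_z c_t + b_z)
    \tilde{s}_t = \tanh(W_p y_{t-1} + U_p (r_t \odot s_{t-1}) + C_p c_t + b_p)
    s_t = (1 - z_t) \odot s_{t-1} + z_t \odot \tilde{s}_t
    y_t = \mathrm{softmax}(W_o y_{t-1} + U_o s_{t-1} + C_o c_t + b_o)

Note that y_t in the code is computed from the previous state s_{t-1} and previous output y_{t-1}, not from the updated state s_t.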


Note: The keras.activations.tanh method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many programmers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.