Python layers.Dot Method Code Examples

This article collects and summarizes typical usage examples of the keras.layers.Dot method in Python. If you are unsure what layers.Dot does or how to use it, the curated examples below may help. You can also browse further usage examples from keras.layers.


The following presents 9 code examples of the layers.Dot method, sorted by popularity by default.
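
Before the examples, here is a minimal self-contained sketch of what layers.Dot computes (the shapes and data are illustrative, not taken from any project below): the layer contracts the given axes of two input tensors, producing a sample-wise dot product.

import numpy as np
from keras.layers import Input, Dot
from keras.models import Model

a = Input(shape=(4,))
b = Input(shape=(4,))
# Dot(axes=1) contracts axis 1 of both inputs: output shape (batch, 1).
out = Dot(axes=1)([a, b])
model = Model([a, b], out)

x1 = np.random.random((2, 4))
x2 = np.random.random((2, 4))
# Row i of the output equals np.dot(x1[i], x2[i]).
print(model.predict([x1, x2]))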

Example 1: build

# Required imports (this build is a method of a MatchZoo model class;
# self._params and self._make_multi_layer_perceptron_layer come from that class):
from keras.layers import Input, Dot
from keras.models import Model
def build(self):
        """
        Build model structure.

        DSSM uses a Siamese architecture.
        """
        dim_triletter = self._params['input_shapes'][0][0]
        input_shape = (dim_triletter,)
        base_network = self._make_multi_layer_perceptron_layer()
        # Left input and right input.
        input_left = Input(name='text_left', shape=input_shape)
        input_right = Input(name='text_right', shape=input_shape)
        # Process left & right input.
        x = [base_network(input_left),
             base_network(input_right)]
        # Dot product with cosine similarity.
        x = Dot(axes=[1, 1], normalize=True)(x)
        x_out = self._make_output_layer()(x)
        self._backend = Model(
            inputs=[input_left, input_right],
            outputs=x_out) 
Developer: NTMC-Community | Project: MatchZoo | Lines: 23 | Source file: dssm.py
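
A quick sanity check on the Dot call above (illustrative, not part of MatchZoo): with normalize=True, Dot L2-normalizes both inputs along the contracted axis before multiplying, so the output is the cosine similarity of the two representations.

import numpy as np
from keras.layers import Input, Dot
from keras.models import Model

left = Input(shape=(3,))
right = Input(shape=(3,))
cosine = Dot(axes=1, normalize=True)([left, right])
model = Model([left, right], cosine)

x = np.array([[1.0, 2.0, 3.0]])
y = np.array([[2.0, 4.0, 6.0]])
print(model.predict([x, y]))  # ~[[1.0]]: parallel vectors, cosine similarity 1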

Example 2: seq2seq_attention

# Required imports (softmax is assumed to be keras.activations.softmax;
# one_step_attention is a helper defined elsewhere in the source file and is
# sketched after this example):
from keras import backend as K
from keras.activations import softmax
from keras.layers import (Input, LSTM, Dense, Reshape, RepeatVector,
                          Concatenate, Activation, Dot, Lambda)
from keras.models import Model
def seq2seq_attention(feature_len=1, after_day=1, input_shape=(20, 1), time_step=20):
    # Define the inputs of your model with a shape (Tx, feature)
    X = Input(shape=input_shape)

    # Initialize empty list of outputs
    all_outputs = []

    # Encoder: pre-attention LSTM
    encoder = LSTM(units=100, return_state=True, return_sequences=True, name='encoder')
    # Decoder: post-attention LSTM
    decoder = LSTM(units=100, return_state=True, name='decoder')
    # Output
    decoder_output = Dense(units=feature_len, activation='linear', name='output')
    model_output = Reshape((1, feature_len))

    # Attention
    repeator = RepeatVector(time_step)
    concatenator = Concatenate(axis=-1)
    densor = Dense(1, activation="relu")
    activator = Activation(softmax, name='attention_weights')
    dotor = Dot(axes=1)

    encoder_outputs, s, c = encoder(X)

    for t in range(after_day):
        context = one_step_attention(encoder_outputs, s, repeator, concatenator, densor, activator, dotor)

        a, s, c = decoder(context, initial_state=[s, c])

        outputs = decoder_output(a)
        outputs = model_output(outputs)
        all_outputs.append(outputs)

    all_outputs = Lambda(lambda x: K.concatenate(x, axis=1))(all_outputs)
    model = Model(inputs=X, outputs=all_outputs)

    return model 
Developer: kaka-lin | Project: stock-price-predict | Lines: 39 | Source file: seq2seq_attention_2.py
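
Both seq2seq examples rely on one_step_attention, which is defined elsewhere in the source files. Below is a plausible reconstruction from the layers the function receives (repeator, concatenator, densor, activator, dotor) and the standard attention recipe; this body is an assumption, not the author's exact code.

def one_step_attention(a, s_prev, repeator, concatenator, densor, activator, dotor):
    # a: encoder outputs, shape (batch, time_step, n_a)
    # s_prev: previous decoder hidden state, shape (batch, n_s)
    s_prev = repeator(s_prev)           # -> (batch, time_step, n_s)
    concat = concatenator([a, s_prev])  # -> (batch, time_step, n_a + n_s)
    e = densor(concat)                  # -> (batch, time_step, 1) energies
    alphas = activator(e)               # attention weights (softmax assumed over the time axis)
    context = dotor([alphas, a])        # Dot(axes=1) -> (batch, 1, n_a) context vector
    return context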

Example 3: seq2seq_attention

# Required imports (softmax is assumed to be keras.activations.softmax;
# one_step_attention is a helper defined elsewhere in the source file,
# see the sketch after Example 2):
from keras import backend as K
from keras.activations import softmax
from keras.layers import (Input, LSTM, Dense, Reshape, RepeatVector,
                          Concatenate, Activation, Dot, Lambda)
from keras.models import Model
def seq2seq_attention(feature_len=1, after_day=1, input_shape=(20, 1), time_step=20):
    # Define the inputs of your model with a shape (Tx, feature)
    X = Input(shape=input_shape)
    s0 = Input(shape=(100, ), name='s0')
    c0 = Input(shape=(100, ), name='c0')
    s = s0
    c = c0

    # Initialize empty list of outputs
    all_outputs = []

    # Encoder: pre-attention LSTM
    encoder = LSTM(units=100, return_state=False, return_sequences=True, name='encoder')
    # Decoder: post-attention LSTM
    decoder = LSTM(units=100, return_state=True, name='decoder')
    # Output
    decoder_output = Dense(units=feature_len, activation='linear', name='output')
    model_output = Reshape((1, feature_len))

    # Attention
    repeator = RepeatVector(time_step)
    concatenator = Concatenate(axis=-1)
    densor = Dense(1, activation="relu")
    activator = Activation(softmax, name='attention_weights')
    dotor = Dot(axes=1)

    encoder_outputs = encoder(X)

    for t in range(after_day):
        context = one_step_attention(encoder_outputs, s, repeator, concatenator, densor, activator, dotor)

        a, s, c = decoder(context, initial_state=[s, c])

        outputs = decoder_output(a)
        outputs = model_output(outputs)
        all_outputs.append(outputs)

    all_outputs = Lambda(lambda x: K.concatenate(x, axis=1))(all_outputs)
    model = Model(inputs=[X, s0, c0], outputs=all_outputs)

    return model 
Developer: kaka-lin | Project: stock-price-predict | Lines: 43 | Source file: seq2seq_attention.py
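
Unlike Example 2, this variant exposes the decoder's initial states as model inputs, so callers must feed them explicitly. A hedged usage sketch (batch size and data are illustrative; the state size 100 comes from the LSTM definition above):

import numpy as np

model = seq2seq_attention(feature_len=1, after_day=5, input_shape=(20, 1), time_step=20)

batch = 32
X_batch = np.random.random((batch, 20, 1))
s0 = np.zeros((batch, 100))  # initial decoder hidden state
c0 = np.zeros((batch, 100))  # initial decoder cell state
preds = model.predict([X_batch, s0, c0])  # -> shape (batch, 5, 1)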

Example 4: test_merge_dot

# Required imports:
import numpy as np
from numpy.testing import assert_allclose
from keras import layers, models
def test_merge_dot():
    i1 = layers.Input(shape=(4,))
    i2 = layers.Input(shape=(4,))
    o = layers.dot([i1, i2], axes=1)
    assert o._keras_shape == (None, 1)
    model = models.Model([i1, i2], o)

    dot_layer = layers.Dot(axes=1)
    o2 = dot_layer([i1, i2])
    assert dot_layer.output_shape == (None, 1)

    x1 = np.random.random((2, 4))
    x2 = np.random.random((2, 4))
    out = model.predict([x1, x2])
    assert out.shape == (2, 1)
    expected = np.zeros((2, 1))
    expected[0, 0] = np.dot(x1[0], x2[0])
    expected[1, 0] = np.dot(x1[1], x2[1])
    assert_allclose(out, expected, atol=1e-4)

    # Test with negative tuple of axes.
    o = layers.dot([i1, i2], axes=(-1, -1))
    assert o._keras_shape == (None, 1)
    model = models.Model([i1, i2], o)
    out = model.predict([x1, x2])
    assert out.shape == (2, 1)
    assert_allclose(out, expected, atol=1e-4) 
Developer: hello-sea | Project: DeepLearning_Wavelet-LSTM | Lines: 29 | Source file: merge_test.py

Example 5: build

# Required imports (this build is a method of a model class; self.config and
# self.initializer_gate are attributes of that class):
import logging
from keras.layers import Input, Dense, Reshape, Multiply, Dot
from keras.models import Model
def build(self):
    # qd_input = Input((self.config.kernel_size,), name="qd_input")
    dd_input = Input((self.config.nb_supervised_doc, self.config.kernel_size), name='dd_input')
    # z = Dense(self.config.hidden_size, activation='tanh', name="qd_hidden")(qd_input)
    # qd_out = Dense(self.config.out_size, name="qd_out")(z)

    z = Dense(self.config.hidden_size, activation='tanh', name="dd_hidden")(dd_input)
    dd_init_out = Dense(self.config.out_size, name='dd_init_out')(z)

    dd_gate = Input((self.config.nb_supervised_doc, 1), name='baseline_doc_score')
    dd_w = Dense(1, kernel_initializer=self.initializer_gate, use_bias=False, name='dd_gate')(dd_gate)
    # dd_w = Lambda(lambda x: softmax(x, axis=1), output_shape=(self.config.nb_supervised_doc,), name='dd_softmax')(dd_w)

    dd_w = Reshape((self.config.nb_supervised_doc,))(dd_w)
    dd_init_out = Reshape((self.config.nb_supervised_doc,))(dd_init_out)

    if self.config.method in [1, 3]: # no doc gating, with dense layer
      z = dd_init_out
    elif self.config.method == 2:
      logging.info("Apply doc gating")
      z = Multiply(name='dd_out')([dd_init_out, dd_w])
    else:
      raise ValueError("Method not initialized, please check config file")

    if self.config.method in [1, 2]:
      logging.info("Dense layer on top")
      z = Dense(self.config.merge_hidden, activation='tanh', name='merge_hidden')(z)
      out = Dense(self.config.merge_out, name='score')(z)
    else:
      logging.info("Apply doc gating, No dense layer on top, sum up scores")
      out = Dot(axes=[1, 1], name='score')([z, dd_w])

    model = Model(inputs=[dd_input, dd_gate], outputs=[out])
    print(model.summary())

    return model 
Developer: ucasir | Project: NPRF | Lines: 38 | Source file: nprf_knrm.py
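
In the method-3 branch above, Dot(axes=[1, 1]) applied to two (batch, nb_supervised_doc) tensors is simply a per-sample weighted sum of document scores. A NumPy equivalent for intuition (illustrative only):

import numpy as np

z = np.random.random((2, 10))  # document scores, (batch, nb_supervised_doc)
w = np.random.random((2, 10))  # gate weights, same shape

# Dot(axes=[1, 1])([z, w]) computes sum_j z[b, j] * w[b, j] for each sample b.
score = np.sum(z * w, axis=1, keepdims=True)  # -> (2, 1)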

Example 6: soft_attention_alignment

# Required imports (softmax is assumed to be keras.activations.softmax;
# unchanged_shape is a helper defined elsewhere in utils.py, sketched after
# this example):
from keras.activations import softmax
from keras.layers import Dot, Lambda, Permute
def soft_attention_alignment(input_1, input_2):
    "Align text representation with neural soft attention"
    attention = Dot(axes=-1)([input_1, input_2])
    w_att_1 = Lambda(lambda x: softmax(x, axis=1),
                     output_shape=unchanged_shape)(attention)
    w_att_2 = Permute((2, 1))(Lambda(lambda x: softmax(x, axis=2),
                             output_shape=unchanged_shape)(attention))
    in1_aligned = Dot(axes=1)([w_att_1, input_1])
    in2_aligned = Dot(axes=1)([w_att_2, input_2])
    return in1_aligned, in2_aligned 
Developer: zake7749 | Project: CIKM-AnalytiCup-2018 | Lines: 12 | Source file: utils.py
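
The Lambda layers above pass output_shape=unchanged_shape, a helper defined elsewhere in utils.py. In code bases using this pattern it is typically the identity on shapes, which matches its use here (softmax preserves dimensions); a sketch under that assumption:

def unchanged_shape(input_shape):
    # Assumed helper: the output shape equals the input shape.
    return input_shape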

Example 7: connect_models

# Required imports:
from keras.layers import Input, Add, Dot
from keras.models import Model
def connect_models(a, b):
    """Connect two Keras models affinely.

    The returned model computes decoupling' * u_pert + a(input)' * u + b(input),
    where u = u_nom + u_pert.

    Let s be the input size of both models and m be the number of inputs.

    Output: Keras model (R^m * R^s * R^m * R^m -> R).

    Inputs:
    Linear term, a: Keras model (R^s -> R^m)
    Scalar term, b: Keras model (R^s -> R)
    """

    s, m = a.input_shape[-1], a.output_shape[-1]

    input = Input((s,))
    u_nom = Input((m,))
    u_pert = Input((m,))

    decoupling = Input((m,))

    a = a(input)
    b = b(input)

    u = Add()([u_nom, u_pert])
    known = Dot(axes=[1, 1])([decoupling, u_pert])
    unknown = Add()([Dot(axes=[1, 1])([a, u]), b])

    V_dot_r = Add()([known, unknown])

    return Model([decoupling, input, u_nom, u_pert], V_dot_r)
Developer: vdorobantu | Project: lyapy | Lines: 35 | Source file: util.py
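
A hedged usage sketch of connect_models (the two sub-models are stand-ins; any Keras models with a shared input size s, output size m for a, and output size 1 for b would do):

from keras.layers import Dense, Input
from keras.models import Model

s, m = 4, 2
xa = Input((s,))
a = Model(xa, Dense(m)(xa))  # linear term: R^s -> R^m
xb = Input((s,))
b = Model(xb, Dense(1)(xb))  # scalar term: R^s -> R

V_dot_r_model = connect_models(a, b)
V_dot_r_model.summary()  # inputs: [decoupling, input, u_nom, u_pert]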

Example 8: glove_model

# Required imports (the *_EMBEDDINGS/*_BIASES name constants and custom_loss
# are defined elsewhere in the source file; a hedged sketch of custom_loss
# follows this example):
from keras.layers import Input, Embedding, Dot, Reshape, Add
from keras.models import Model
from keras.optimizers import Adam
def glove_model(vocab_size=10, vector_dim=3):
    """
    A Keras implementation of the GloVe architecture
    :param vocab_size: The number of distinct words
    :param vector_dim: The vector dimension of each word
    :return: a compiled Keras model
    """
    input_target = Input((1,), name='central_word_id')
    input_context = Input((1,), name='context_word_id')

    central_embedding = Embedding(vocab_size, vector_dim, input_length=1, name=CENTRAL_EMBEDDINGS)
    central_bias = Embedding(vocab_size, 1, input_length=1, name=CENTRAL_BIASES)

    context_embedding = Embedding(vocab_size, vector_dim, input_length=1, name=CONTEXT_EMBEDDINGS)
    context_bias = Embedding(vocab_size, 1, input_length=1, name=CONTEXT_BIASES)

    vector_target = central_embedding(input_target)
    vector_context = context_embedding(input_context)

    bias_target = central_bias(input_target)
    bias_context = context_bias(input_context)

    dot_product = Dot(axes=-1)([vector_target, vector_context])
    dot_product = Reshape((1, ))(dot_product)
    bias_target = Reshape((1,))(bias_target)
    bias_context = Reshape((1,))(bias_context)

    prediction = Add()([dot_product, bias_target, bias_context])

    model = Model(inputs=[input_target, input_context], outputs=prediction)
    model.compile(loss=custom_loss, optimizer=Adam())

    return model 
Developer: erwtokritos | Project: keras-glove | Lines: 35 | Source file: models.py
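
custom_loss is imported from elsewhere in the project. The standard GloVe objective is a weighted squared error, f(X_ij) * (w_i . w~_j + b_i + b~_j - log X_ij)^2. The sketch below assumes the model is trained with y_true set to the raw co-occurrence count X_ij, and uses the cutoff and exponent defaults from the GloVe paper, not necessarily this project's values:

from keras import backend as K

X_MAX = 100.0  # assumed weighting cutoff (GloVe paper default)
ALPHA = 0.75   # assumed weighting exponent (GloVe paper default)

def custom_loss(y_true, y_pred):
    # Weighting f(X) = min(1, (X / X_MAX) ** ALPHA) from the GloVe paper.
    weight = K.pow(K.clip(y_true / X_MAX, 0.0, 1.0), ALPHA)
    # y_pred is the model output w_i . w~_j + b_i + b~_j; the target is log X_ij.
    return K.sum(weight * K.square(y_pred - K.log(y_true)), axis=-1)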

Example 9: build

# Required imports (this build is a method of a model class; self.config and
# the initializers are attributes of that class):
import logging
import tensorflow as tf
from keras import backend as K
from keras.activations import softmax
from keras.layers import Input, Dense, Lambda, Reshape, Multiply, Dot
from keras.models import Model
def build(self):

    dd_q_input = Input((self.config.nb_supervised_doc, self.config.doc_topk_term, 1), name='dd_q_input')
    dd_d_input = Input((self.config.nb_supervised_doc, self.config.doc_topk_term,
                        self.config.hist_size), name='dd_d_input')

    dd_q_w = Dense(1, kernel_initializer=self.initializer_gate, use_bias=False, name='dd_q_gate')(dd_q_input)
    dd_q_w = Lambda(lambda x: softmax(x, axis=2), output_shape=(
                  self.config.nb_supervised_doc, self.config.doc_topk_term,), name='dd_q_softmax')(dd_q_w)

    z = dd_d_input
    for i in range(self.config.nb_layers):
      # Use a unique name per layer; a fixed name='hidden' makes Keras raise a
      # duplicate-layer-name error whenever nb_layers > 1.
      z = Dense(self.config.hidden_size[i], activation='tanh',
                kernel_initializer=self.initializer_fc, name='hidden_%d' % i)(z)
    z = Dense(self.config.out_size, kernel_initializer=self.initializer_fc, name='dd_d_gate')(z)
    z = Reshape((self.config.nb_supervised_doc, self.config.doc_topk_term,))(z)
    dd_q_w = Reshape((self.config.nb_supervised_doc, self.config.doc_topk_term,))(dd_q_w)
    # out = Dot(axes=[2, 2], name='dd_pseudo_out')([z, dd_q_w])

    out = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[2, 2]), name='dd_pseudo_out')([z, dd_q_w])
    dd_init_out = Lambda(lambda x: tf.matrix_diag_part(x), output_shape=(self.config.nb_supervised_doc,), name='dd_init_out')(out)
    '''
    dd_init_out = Lambda(lambda x: tf.reduce_sum(x, axis=2), output_shape=(self.config.nb_supervised_doc,))(z)
    '''
    #dd_out = Reshape((self.config.nb_supervised_doc,))(dd_out)

    # dd out gating
    dd_gate = Input((self.config.nb_supervised_doc, 1), name='baseline_doc_score')
    dd_w = Dense(1, kernel_initializer=self.initializer_gate, use_bias=False, name='dd_gate')(dd_gate)
    # dd_w = Lambda(lambda x: softmax(x, axis=1), output_shape=(self.config.nb_supervised_doc,), name='dd_softmax')(dd_w)

    # dd_out = Dot(axes=[1, 1], name='dd_out')([dd_init_out, dd_w])
    dd_w = Reshape((self.config.nb_supervised_doc,))(dd_w)
    dd_init_out = Reshape((self.config.nb_supervised_doc,))(dd_init_out)


    if self.config.method in [1, 3]: # no doc gating, with dense layer
      z = dd_init_out
    elif self.config.method == 2:
      logging.info("Apply doc gating")
      z = Multiply(name='dd_out')([dd_init_out, dd_w])
    else:
      raise ValueError("Method not initialized, please check config file")

    if self.config.method in [1, 2]:
      logging.info("Dense layer on top")
      z = Dense(self.config.merge_hidden, activation='tanh', name='merge_hidden')(z)
      out = Dense(self.config.merge_out, name='score')(z)
    else:
      logging.info("Apply doc gating, No dense layer on top, sum up scores")
      out = Dot(axes=[1, 1], name='score')([z, dd_w])

    model = Model(inputs=[dd_q_input, dd_d_input, dd_gate], outputs=[out])
    print(model.summary())

    return model 
Developer: ucasir | Project: NPRF | Lines: 58 | Source file: nprf_drmm.py
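
The Lambda pair above replaces the commented-out Dot(axes=[2, 2]) call: batch_dot with axes=[2, 2] yields a (batch, D, D) matrix of all pairwise score/weight products, and tf.matrix_diag_part keeps only the matching pairs, i.e. a per-document weighted sum over terms. A NumPy equivalence check for intuition (illustrative only):

import numpy as np

batch, D, T = 2, 10, 5
z = np.random.random((batch, D, T))  # per-document, per-term scores
w = np.random.random((batch, D, T))  # per-document, per-term gate weights

full = np.einsum('bik,bjk->bij', z, w)  # K.batch_dot(z, w, axes=[2, 2])
diag = np.einsum('bii->bi', full)       # tf.matrix_diag_part(full)
direct = np.sum(z * w, axis=2)          # same thing computed directly

assert np.allclose(diag, direct)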


Note: The keras.layers.Dot method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors, and copyright of the source code remains with the original authors. Consult each project's license before distributing or using the code; do not reproduce without permission.