

Python layers.dot method code examples

This article collects typical usage examples of the keras.layers.dot method in Python. If you are wondering how exactly to use layers.dot, or what it looks like in practice, the hand-picked code examples below may help. You can also explore further usage examples of the containing module, keras.layers.


The following presents 8 code examples of the layers.dot method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
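
Before diving into the examples, here is a minimal standalone sketch (written for this article, not taken from any of the projects below) of what keras.layers.dot computes: a batch-wise dot product of two tensors along the given axes, returned as a Keras tensor that can feed further layers.

import numpy as np
from keras.layers import Input, dot
from keras.models import Model

# Two batches of 4-dimensional vectors.
a = Input(shape=(4,))
b = Input(shape=(4,))

# Dot product along axis 1 (the feature axis); output shape is (batch_size, 1).
d = dot([a, b], axes=1)
model = Model(inputs=[a, b], outputs=d)

x1 = np.random.random((2, 4))
x2 = np.random.random((2, 4))
# Row i of the output equals np.dot(x1[i], x2[i]).
print(model.predict([x1, x2]))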

Example 1: self_attention

# Module to import: from keras import layers [as alias]
# Or: from keras.layers import dot [as alias]
def self_attention(x):
    
    ''' 
    .  stands for dot product 
    *  stands for element-wise multiplication
        
    m = x . transpose(x)
    n = softmax(m)
    o = n . x  
    a = o * x           
       
    return a
        
    '''

    m = dot([x, x], axes=[2,2])
    n = Activation('softmax')(m)
    o = dot([n, x], axes=[2,1])
    a = multiply([o, x])
        
    return a 
Author: soujanyaporia, Project: contextual-multimodal-fusion, Lines: 23, Source file: trimodal_attention_models.py
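
A minimal usage sketch for self_attention (written for this article, not part of the original repository): the block can be applied to any 3-D tensor of shape (batch, timesteps, features), such as the output of a Dense or recurrent layer. The sizes below are illustrative only.

from keras.layers import Input, Dense, Activation, dot, multiply
from keras.models import Model

# Hypothetical input: 20 timesteps with 50 features each.
seq_in = Input(shape=(20, 50))
h = Dense(50, activation='tanh')(seq_in)
attended = self_attention(h)  # uses the function defined above
model = Model(inputs=seq_in, outputs=attended)
model.summary()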

Example 2: test_tiny_cos_random

# Module to import: from keras import layers [as alias]
# Or: from keras.layers import dot [as alias]
def test_tiny_cos_random(self):
        np.random.seed(1988)
        input_dim = 10
        num_channels = 6

        # Define a model
        input_tensor = Input(shape=(input_dim,))
        x1 = Dense(num_channels)(input_tensor)
        x2 = Dense(num_channels)(x1)
        x3 = Dense(num_channels)(x1)
        x4 = dot([x2, x3], axes=-1, normalize=True)
        x5 = Dense(num_channels)(x4)

        model = Model(inputs=[input_tensor], outputs=[x5])

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Get the coreml model
        self._test_model(model) 
Author: apple, Project: coremltools, Lines: 22, Source file: test_keras2_numeric.py
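
The normalize=True argument in the test above turns the dot layer into a cosine-similarity layer: both inputs are L2-normalized along the dot axes before the product is taken. A small sketch (illustrative sizes, not part of the coremltools test suite) checks this against NumPy:

import numpy as np
from keras.layers import Input, dot
from keras.models import Model

a = Input(shape=(6,))
b = Input(shape=(6,))
cos = dot([a, b], axes=-1, normalize=True)  # cosine similarity per sample
model = Model(inputs=[a, b], outputs=cos)

x = np.random.random((3, 6))
y = np.random.random((3, 6))
keras_out = model.predict([x, y]).ravel()
numpy_out = np.sum(x * y, axis=1) / (np.linalg.norm(x, axis=1) * np.linalg.norm(y, axis=1))
print(np.allclose(keras_out, numpy_out, atol=1e-5))  # expected: True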

Example 3: create_model

# Module to import: from keras import layers [as alias]
# Or: from keras.layers import dot [as alias]
def create_model(self):
        user_id_input = Input(shape=[1], name='user')
        item_id_input = Input(shape=[1], name='item')

        user_embedding = Embedding(output_dim=EMBEDDING_SIZE, input_dim=self.max_user_id + 1,
                                   input_length=1, name='user_embedding')(user_id_input)
        item_embedding = Embedding(output_dim=EMBEDDING_SIZE, input_dim=self.max_item_id + 1,
                                   input_length=1, name='item_embedding')(item_id_input)

        # reshape from shape: (batch_size, input_length, embedding_size)
        # to shape: (batch_size, input_length * embedding_size) which is
        # equal to shape: (batch_size, embedding_size)
        user_vecs = Flatten()(user_embedding)
        item_vecs = Flatten()(item_embedding)

        # y = merge([user_vecs, item_vecs], mode='dot', output_shape=(1,))
        y = dot([user_vecs, item_vecs], axes=1)

        model = Model(inputs=[user_id_input, item_id_input], outputs=[y])
        model.compile(optimizer='adam', loss='mae')

        return model 
Author: chen0040, Project: keras-recommender, Lines: 24, Source file: cf.py
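
A hypothetical training sketch for the collaborative-filtering model above (the toy data, the instance name cf, and the epoch/batch settings are made up for illustration; EMBEDDING_SIZE, max_user_id and max_item_id come from the original class):

import numpy as np

# Toy interaction data: user ids, item ids, and observed ratings.
user_ids = np.array([0, 1, 2, 1])
item_ids = np.array([3, 2, 0, 1])
ratings = np.array([4.0, 3.5, 5.0, 2.0])

# Assuming cf is an instance of the class that defines create_model above.
model = cf.create_model()
model.fit([user_ids, item_ids], ratings, epochs=5, batch_size=2)

# Predicted rating for user 0 on item 1.
print(model.predict([np.array([0]), np.array([1])]))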

Example 4: test_merge_dot

# Module to import: from keras import layers [as alias]
# Or: from keras.layers import dot [as alias]
def test_merge_dot():
    i1 = layers.Input(shape=(4,))
    i2 = layers.Input(shape=(4,))
    o = layers.dot([i1, i2], axes=1)
    assert o._keras_shape == (None, 1)
    model = models.Model([i1, i2], o)

    dot_layer = layers.Dot(axes=1)
    o2 = dot_layer([i1, i2])
    assert dot_layer.output_shape == (None, 1)

    x1 = np.random.random((2, 4))
    x2 = np.random.random((2, 4))
    out = model.predict([x1, x2])
    assert out.shape == (2, 1)
    expected = np.zeros((2, 1))
    expected[0, 0] = np.dot(x1[0], x2[0])
    expected[1, 0] = np.dot(x1[1], x2[1])
    assert_allclose(out, expected, atol=1e-4)

    # Test with negative tuple of axes.
    o = layers.dot([i1, i2], axes=(-1, -1))
    assert o._keras_shape == (None, 1)
    model = models.Model([i1, i2], o)
    out = model.predict([x1, x2])
    assert out.shape == (2, 1)
    assert_allclose(out, expected, atol=1e-4) 
Author: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 29, Source file: merge_test.py

Example 5: create_model

# Module to import: from keras import layers [as alias]
# Or: from keras.layers import dot [as alias]
def create_model(self):
        
        dat_input = Input(shape=(self.tdatlen,))
        com_input = Input(shape=(self.comlen,))
        sml_input = Input(shape=(self.smllen,))
        
        ee = Embedding(output_dim=self.embdims, input_dim=self.tdatvocabsize, mask_zero=False)(dat_input)
        se = Embedding(output_dim=self.smldims, input_dim=self.smlvocabsize, mask_zero=False)(sml_input)

        se_enc = CuDNNGRU(self.recdims, return_state=True, return_sequences=True)
        seout, state_sml = se_enc(se)

        enc = CuDNNGRU(self.recdims, return_state=True, return_sequences=True)
        encout, state_h = enc(ee, initial_state=state_sml)
        
        de = Embedding(output_dim=self.embdims, input_dim=self.comvocabsize, mask_zero=False)(com_input)
        dec = CuDNNGRU(self.recdims, return_sequences=True)
        decout = dec(de, initial_state=state_h)

        attn = dot([decout, encout], axes=[2, 2])
        attn = Activation('softmax')(attn)
        context = dot([attn, encout], axes=[2, 1])

        ast_attn = dot([decout, seout], axes=[2, 2])
        ast_attn = Activation('softmax')(ast_attn)
        ast_context = dot([ast_attn, seout], axes=[2, 1])

        context = concatenate([context, decout, ast_context])

        out = TimeDistributed(Dense(self.recdims, activation="relu"))(context)

        out = Flatten()(out)
        out = Dense(self.comvocabsize, activation="softmax")(out)
        
        model = Model(inputs=[dat_input, com_input, sml_input], outputs=out)

        if self.config['multigpu']:
            model = keras.utils.multi_gpu_model(model, gpus=2)
        
        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        return self.config, model 
Author: mcmillco, Project: funcom, Lines: 43, Source file: ast_attendgru_xtra.py
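
The two dot calls in the model above implement a Luong-style attention: the first dot produces alignment scores between decoder and encoder states, the softmax normalizes them over the encoder steps, and the second dot forms the context vectors. A shape-only sketch under assumed sizes (10 decoder steps, 15 encoder steps, 64 recurrent units; not part of the funcom code):

from keras import backend as K
from keras.layers import Input, Activation, dot

decout = Input(shape=(10, 64))  # decoder hidden states
encout = Input(shape=(15, 64))  # encoder hidden states

attn = dot([decout, encout], axes=[2, 2])   # (None, 10, 15) alignment scores
attn = Activation('softmax')(attn)          # softmax over the encoder-step axis
context = dot([attn, encout], axes=[2, 1])  # (None, 10, 64) context vectors

print(K.int_shape(attn), K.int_shape(context))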

Example 6: bi_modal_attention

# Module to import: from keras import layers [as alias]
# Or: from keras.layers import dot [as alias]
def bi_modal_attention(x, y):
    
    ''' 
    .  stands for dot product 
    *  stands for element-wise multiplication
    {} stands for concatenation
        
    m1 = x . transpose(y) ||  m2 = y . transpose(x) 
    n1 = softmax(m1)      ||  n2 = softmax(m2)
    o1 = n1 . y           ||  o2 = n2 . x
    a1 = o1 * x           ||  a2 = o2 * y
       
    return {a1, a2}
        
    '''
     
    m1 = dot([x, y], axes=[2, 2])
    n1 = Activation('softmax')(m1)
    o1 = dot([n1, y], axes=[2, 1])
    a1 = multiply([o1, x])

    m2 = dot([y, x], axes=[2, 2])
    n2 = Activation('softmax')(m2)
    o2 = dot([n2, x], axes=[2, 1])
    a2 = multiply([o2, y])

    return concatenate([a1, a2]) 
Author: soujanyaporia, Project: contextual-multimodal-fusion, Lines: 29, Source file: trimodal_attention_models.py
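
As with Example 1, a minimal usage sketch for bi_modal_attention (written for this article, not part of the original repository); the two inputs stand for two modalities with the same number of timesteps and feature size, e.g. hypothetical text and audio utterance features:

from keras.layers import Input
from keras.models import Model

# Hypothetical modalities: 30 utterances with 100-dim features each.
text_in = Input(shape=(30, 100))
audio_in = Input(shape=(30, 100))

fused = bi_modal_attention(text_in, audio_in)  # shape (None, 30, 200)
model = Model(inputs=[text_in, audio_in], outputs=fused)
model.summary()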

Example 7: eltwise

# Module to import: from keras import layers [as alias]
# Or: from keras.layers import dot [as alias]
def eltwise(layer, layer_in, layerId):
    out = {}
    if (layer['params']['layer_type'] == 'Multiply'):
        # This input reverse is to handle visualization
        out[layerId] = multiply(layer_in[::-1])
    elif (layer['params']['layer_type'] == 'Sum'):
        out[layerId] = add(layer_in[::-1])
    elif (layer['params']['layer_type'] == 'Average'):
        out[layerId] = average(layer_in[::-1])
    elif (layer['params']['layer_type'] == 'Dot'):
        out[layerId] = dot(layer_in[::-1], -1)
    else:
        out[layerId] = maximum(layer_in[::-1])
    return out 
Author: Cloud-CV, Project: Fabrik, Lines: 16, Source file: layers_export.py
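
A hypothetical call to eltwise (the layer dictionary format belongs to the Fabrik export pipeline; the values below are illustrative only, and the function is assumed to be available together with multiply, add, average, dot and maximum from keras.layers, as in its source module):

from keras.layers import Input

t1 = Input(shape=(8,))
t2 = Input(shape=(8,))

# 'Dot' selects the dot(layer_in[::-1], -1) branch of eltwise above.
layer_cfg = {'params': {'layer_type': 'Dot'}}
out = eltwise(layer_cfg, [t1, t2], 'l1')
print(out['l1'])  # a Keras tensor of shape (None, 1)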

Example 8: skipgram_model

# Module to import: from keras import layers [as alias]
# Or: from keras.layers import dot [as alias]
def skipgram_model(vocab_size, embedding_dim=100, paradigm='Functional'):
    # Sequential paradigm
    if paradigm == 'Sequential':
        target = Sequential()
        target.add(Embedding(vocab_size, embedding_dim, input_length=1))
        context = Sequential()
        context.add(Embedding(vocab_size, embedding_dim, input_length=1))

        # merge the pivot and context models (Merge is the legacy Keras 1 API; Keras 2 uses the functional dot shown below)
        model = Sequential()
        model.add(Merge([target, context], mode='dot'))
        model.add(Reshape((1,), input_shape=(1,1)))
        model.add(Activation('sigmoid'))
        model.compile(optimizer='adam', loss='binary_crossentropy')
        return model

    # Functional paradigm
    elif paradigm == 'Functional':
        target = Input(shape=(1,), name='target')
        context = Input(shape=(1,), name='context')
        #print target.shape, context.shape
        shared_embedding = Embedding(vocab_size, embedding_dim, input_length=1, name='shared_embedding')
        embedding_target = shared_embedding(target)
        embedding_context = shared_embedding(context)
        #print embedding_target.shape, embedding_context.shape

        merged_vector = dot([embedding_target, embedding_context], axes=-1)
        reshaped_vector = Reshape((1,), input_shape=(1,1))(merged_vector)
        #print merged_vector.shape
        prediction = Dense(1, input_shape=(1,), activation='sigmoid')(reshaped_vector)
        #print prediction.shape

        model = Model(inputs=[target, context], outputs=prediction)
        model.compile(optimizer='adam', loss='binary_crossentropy')
        return model

    else:
        print('paradigm error')
        return None 
Author: lujiaying, Project: MovieTaster-Open, Lines: 41, Source file: keras_item2vec.py
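
A hypothetical end-to-end sketch that feeds skipgram_model with training pairs generated by keras.preprocessing.sequence.skipgrams (the corpus, vocabulary size and hyperparameters are made up for illustration):

import numpy as np
from keras.preprocessing.sequence import skipgrams

# Toy corpus already encoded as integer word ids (0 is reserved for padding).
vocab_size = 50
encoded_text = [1, 4, 7, 2, 9, 4, 1, 3]

# Generate (target, context) pairs with negative sampling.
couples, labels = skipgrams(encoded_text, vocabulary_size=vocab_size, window_size=2)
targets, contexts = zip(*couples)

model = skipgram_model(vocab_size, embedding_dim=32, paradigm='Functional')
model.fit([np.array(targets).reshape(-1, 1), np.array(contexts).reshape(-1, 1)],
          np.array(labels), epochs=3)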


Note: the keras.layers.dot examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.