

Python layers.Embedding Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.keras.layers.Embedding. If you are wondering what exactly layers.Embedding does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore the other usage examples for tensorflow.python.keras.layers, the module this method belongs to.


The following presents 7 code examples of the layers.Embedding method, drawn from open-source projects and ordered by popularity.
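Before the project-level examples, here is a minimal sketch of the basic Embedding contract, assuming a TensorFlow 2.x build where the tensorflow.python.keras path is still importable; the vocabulary size, output dimension, and indices are arbitrary:

import numpy as np
from tensorflow.python.keras.layers import Embedding

# Embedding maps integer indices in [0, input_dim) to dense float vectors.
emb = Embedding(input_dim=1000, output_dim=64)   # vocabulary of 1000, 64-d vectors
indices = np.array([[4, 20, 7]])                 # shape (batch=1, sequence_length=3)
vectors = emb(indices)                           # tensor of shape (1, 3, 64)
print(vectors.shape)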

Example 1: build

# Required module: from tensorflow.python.keras import layers [as alias]
# Or: from tensorflow.python.keras.layers import Embedding [as alias]
# (The snippet also assumes Lambda, Dense, BatchNormalization, Dropout, the keras backend K, and numpy as np.)
def build(self, input_layer):
        last_layer = input_layer
        input_shape = K.int_shape(input_layer)

        if self.with_embedding:
            if input_shape[-1] != 1:
                raise ValueError("Only one feature (the index) can be used with embeddings, "
                                 "i.e. the input shape should be (num_samples, length, 1). "
                                 "The actual shape was: " + str(input_shape))

            last_layer = Lambda(lambda x: K.squeeze(x, axis=-1),
                                output_shape=K.int_shape(last_layer)[:-1])(last_layer)  # Remove feature dimension.
            last_layer = Embedding(self.embedding_size, self.embedding_dimension,
                                   input_length=input_shape[-2])(last_layer)

        for _ in range(self.num_layers):
            last_layer = Dense(self.num_units, activation=self.activation)(last_layer)
            if self.with_bn:
                last_layer = BatchNormalization()(last_layer)
            if not np.isclose(self.p_dropout, 0):
                last_layer = Dropout(self.p_dropout)(last_layer)
        return last_layer 
Author: d909b, Project: cxplain, Lines: 24, Source: rnn.py
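The squeeze-then-embed pattern above is worth isolating: when each timestep carries a single integer index, the trailing feature dimension must be removed before the lookup. A standalone sketch (the shapes and sizes are illustrative, not from the project):

from tensorflow.python.keras import backend as K
from tensorflow.python.keras.layers import Embedding, Input, Lambda

seq = Input(shape=(10, 1))                          # (batch, length=10, one index feature)
idx = Lambda(lambda x: K.squeeze(x, axis=-1))(seq)  # -> (batch, 10)
vec = Embedding(input_dim=500, output_dim=32, input_length=10)(idx)  # -> (batch, 10, 32)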

Example 2: GCN

# Required module: from tensorflow.python.keras import layers [as alias]
# Or: from tensorflow.python.keras.layers import Embedding [as alias]
# (The snippet also assumes Input, Reshape, Model, the Identity initializer, and the project's GraphConvolution layer.)
def GCN(adj_dim, feature_dim, n_hidden, num_class, num_layers=2, activation=tf.nn.relu,
        dropout_rate=0.5, l2_reg=0, feature_less=True):
    Adj = Input(shape=(None,), sparse=True)
    if feature_less:
        # Feature-less mode: each node's "feature" is just its own index, looked
        # up in a frozen identity embedding (a one-hot feature matrix in disguise).
        X_in = Input(shape=(1,))
        emb = Embedding(adj_dim, feature_dim,
                        embeddings_initializer=Identity(1.0), trainable=False)
        X_emb = emb(X_in)
        h = Reshape([X_emb.shape[-1]])(X_emb)
    else:
        X_in = Input(shape=(feature_dim,))
        h = X_in

    for i in range(num_layers):
        if i == num_layers - 1:
            # Last layer: softmax over num_class outputs for node classification.
            activation = tf.nn.softmax
            n_hidden = num_class
        h = GraphConvolution(n_hidden, activation=activation,
                             dropout_rate=dropout_rate, l2_reg=l2_reg)([h, Adj])

    output = h
    model = Model(inputs=[X_in, Adj], outputs=output)

    return model
Author: shenweichen, Project: GraphNeuralNetwork, Lines: 26, Source: gcn.py
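A hypothetical invocation of this builder for a feature-less graph (the node count, class count, and layer width below are illustrative; GraphConvolution and the input preprocessing come from the project's own modules):

model = GCN(adj_dim=2708, feature_dim=2708, n_hidden=16, num_class=7,
            num_layers=2, dropout_rate=0.5, feature_less=True)
model.compile(optimizer='adam', loss='categorical_crossentropy')
# Inputs at fit time: each node's own index (for the identity embedding)
# plus the sparse normalized adjacency matrix.
# model.fit([node_indices, normalized_adj], labels, batch_size=num_nodes)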

Example 3: create_embedding_dict

# Required module: from tensorflow.python.keras import layers [as alias]
# Or: from tensorflow.python.keras.layers import Embedding [as alias]
# (The snippet also assumes the l2 regularizer from tensorflow.python.keras.regularizers.)
def create_embedding_dict(sparse_feature_columns, varlen_sparse_feature_columns, seed, l2_reg,
                          prefix='sparse_', seq_mask_zero=True):
    sparse_embedding = {}
    for feat in sparse_feature_columns:
        emb = Embedding(feat.vocabulary_size, feat.embedding_dim,
                        embeddings_initializer=feat.embeddings_initializer,
                        embeddings_regularizer=l2(l2_reg),
                        name=prefix + '_emb_' + feat.embedding_name)
        emb.trainable = feat.trainable
        sparse_embedding[feat.embedding_name] = emb

    if varlen_sparse_feature_columns:
        for feat in varlen_sparse_feature_columns:
            # mask_zero lets downstream sequence-pooling layers ignore padding.
            emb = Embedding(feat.vocabulary_size, feat.embedding_dim,
                            embeddings_initializer=feat.embeddings_initializer,
                            embeddings_regularizer=l2(l2_reg),
                            name=prefix + '_seq_emb_' + feat.name,
                            mask_zero=seq_mask_zero)
            emb.trainable = feat.trainable
            sparse_embedding[feat.embedding_name] = emb
    return sparse_embedding 
Author: shenweichen, Project: DeepCTR, Lines: 25, Source: inputs.py
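To call this helper outside DeepCTR you need objects exposing the attributes it reads; a hypothetical stand-in (the real project uses its own SparseFeat namedtuple, whose exact fields vary by version):

from collections import namedtuple

FakeSparseFeat = namedtuple('FakeSparseFeat', ['name', 'vocabulary_size', 'embedding_dim',
                                               'embeddings_initializer', 'embedding_name',
                                               'trainable'])
feat = FakeSparseFeat('user_id', 10000, 8, 'uniform', 'user_id', True)
table = create_embedding_dict([feat], [], seed=1024, l2_reg=1e-5)
# table['user_id'] is an Embedding layer; apply it to an integer tensor:
# user_vec = table['user_id'](user_id_input)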

Example 4: get_embedding

# Required module: from tensorflow.python.keras import layers [as alias]
# Or: from tensorflow.python.keras.layers import Embedding [as alias]
# (The snippet also assumes the TruncatedNormal initializer and the l2 regularizer.)
def get_embedding(region_num, region_feature_dim_dict, base_feature_dim_dict, bias_feature_dim_dict,
                  init_std, seed, l2_reg_linear):
    # One 1-d embedding (i.e. a linear weight table) per sparse field and per region.
    region_embeddings = [[Embedding(feat.dimension, 1,
                                    embeddings_initializer=TruncatedNormal(stddev=init_std, seed=seed + j),
                                    embeddings_regularizer=l2(l2_reg_linear),
                                    name='region_emb_' + str(j) + '_' + str(i))
                          for i, feat in enumerate(region_feature_dim_dict['sparse'])]
                         for j in range(region_num)]
    base_embeddings = [[Embedding(feat.dimension, 1,
                                  embeddings_initializer=TruncatedNormal(stddev=init_std, seed=seed + j),
                                  embeddings_regularizer=l2(l2_reg_linear),
                                  name='base_emb_' + str(j) + '_' + str(i))
                        for i, feat in enumerate(base_feature_dim_dict['sparse'])]
                       for j in range(region_num)]
    bias_embedding = [Embedding(feat.dimension, 1,
                                embeddings_initializer=TruncatedNormal(stddev=init_std, seed=seed),
                                embeddings_regularizer=l2(l2_reg_linear),
                                name='embed_bias' + '_' + str(i))
                      for i, feat in enumerate(bias_feature_dim_dict['sparse'])]

    return region_embeddings, base_embeddings, bias_embedding
Author: ShenDezhou, Project: icme2019, Lines: 16, Source: mlr.py
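Every embedding here has output dimension 1: in MLR the region and base components are linear scorers, so each lookup table holds one scalar weight per feature value. The nested lists are indexed first by region j, then by sparse field i, while the bias embeddings are shared across regions and therefore form a flat list.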

Example 5: create_model

# Required module: from tensorflow.python.keras import layers [as alias]
# Or: from tensorflow.python.keras.layers import Embedding [as alias]
# (The snippet also assumes Input, Lambda, Model, and tensorflow as tf.)
def create_model(numNodes, embedding_size, order='second'):

    v_i = Input(shape=(1,))
    v_j = Input(shape=(1,))

    first_emb = Embedding(numNodes, embedding_size, name='first_emb')
    second_emb = Embedding(numNodes, embedding_size, name='second_emb')
    context_emb = Embedding(numNodes, embedding_size, name='context_emb')

    v_i_emb = first_emb(v_i)
    v_j_emb = first_emb(v_j)

    v_i_emb_second = second_emb(v_i)
    v_j_context_emb = context_emb(v_j)

    # reduce_sum over the last axis yields the inner product of the two embedding
    # vectors; `keepdims` replaces the deprecated TF1 spelling `keep_dims`.
    first = Lambda(lambda x: tf.reduce_sum(
        x[0] * x[1], axis=-1, keepdims=False), name='first_order')([v_i_emb, v_j_emb])
    second = Lambda(lambda x: tf.reduce_sum(
        x[0] * x[1], axis=-1, keepdims=False), name='second_order')([v_i_emb_second, v_j_context_emb])

    if order == 'first':
        output_list = [first]
    elif order == 'second':
        output_list = [second]
    else:
        output_list = [first, second]

    model = Model(inputs=[v_i, v_j], outputs=output_list)

    return model, {'first': first_emb, 'second': second_emb} 
Author: shenweichen, Project: GraphEmbedding, Lines: 32, Source: line.py
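A hypothetical way to exercise this builder directly (the GraphEmbedding project normally wraps it in a LINE class that handles edge sampling and the proximity loss):

model, emb_tables = create_model(numNodes=1000, embedding_size=128, order='all')
model.summary()  # two index inputs (v_i, v_j), scalar-similarity outputs
# Training would pair each output with a negative-sampling-style loss over
# (source, target, sign) edge samples; see the project's line.py for details.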

Example 6: build

# Required module: from tensorflow.python.keras import layers [as alias]
# Or: from tensorflow.python.keras.layers import Embedding [as alias]
# (The snippet also assumes Input, Flatten, Dot, Model, the l2 regularizer, and tensorflow as tf.)
def build(self, lambda_u=0.0001, lambda_v=0.0001, optimizer='rmsprop',
              loss='mse', metrics='mse', initializer='uniform'):
        """
        Init session and create model architecture.
        :param lambda_u: lambda value of l2 norm for user embeddings.
        :param lambda_v: lambda value of l2 norm for item embeddings.
        :param optimizer: optimizer type.
        :param loss: loss type.
        :param metrics: evaluation metrics.
        :param initializer: initializer of embedding
        :return:
        """
        # init session on first time ref
        sess = self.session
        # user embedding
        user_input_layer = Input(shape=(1,), dtype='int32', name='user_input')
        user_embedding_layer = Embedding(
            input_dim=self.user_num,
            output_dim=self.embedding_dim,
            input_length=1,
            name='user_embedding',
            embeddings_regularizer=l2(lambda_u),
            embeddings_initializer=initializer)(user_input_layer)
        user_embedding_layer = Flatten(name='user_flatten')(user_embedding_layer)

        # item embedding
        item_input_layer = Input(shape=(1,), dtype='int32', name='item_input')
        item_embedding_layer = Embedding(
            input_dim=self.item_num,
            output_dim=self.embedding_dim,
            input_length=1,
            name='item_embedding',
            embeddings_regularizer=l2(lambda_v),
            embeddings_initializer=initializer)(item_input_layer)
        item_embedding_layer = Flatten(name='item_flatten')(item_embedding_layer)

        # rating prediction
        dot_layer = Dot(axes=-1,
                        name='dot_layer')([user_embedding_layer,
                                           item_embedding_layer])
        self._model = Model(
            inputs=[user_input_layer, item_input_layer], outputs=[dot_layer])

        # compile model
        optimizer_instance = getattr(
            tf.keras.optimizers, optimizer.optimizer)(**optimizer.kwargs)
        losses = getattr(tf.keras.losses, loss)
        self._model.compile(optimizer=optimizer_instance,
                            loss=losses, metrics=metrics)
        # pick user_embedding for aggregating
        self._trainable_weights = {v.name.split(
            "/")[0]: v for v in self._model.trainable_weights}
        self._aggregate_weights = {
            "user_embedding": self._trainable_weights["user_embedding"]} 
Author: FederatedAI, Project: FATE, Lines: 56, Source: backend.py
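Note that `optimizer` in the compile step is not a plain Keras string here: the call getattr(tf.keras.optimizers, optimizer.optimizer)(**optimizer.kwargs) implies a small config object carrying the optimizer class name and its keyword arguments, which is presumably how FATE passes training configuration into this backend.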

Example 7: _build

# Required module: from tensorflow.python.keras import layers [as alias]
# Or: from tensorflow.python.keras.layers import Embedding [as alias]
# (The snippet also assumes Input, Flatten, Dot, Add, Model, the l2 regularizer, the Zeros initializer, and the project's ConstantLayer.)
def _build(self, lamda_u=0.0001, lamda_v=0.0001, optimizer='rmsprop',
               loss='mse', metrics='mse', initializer='uniform'):
        # Touching the property initializes the session on first reference.
        sess = self.session

        # user embedding
        user_InputLayer = Input(shape=(1,), dtype='int32', name='user_input')
        user_EmbeddingLayer = Embedding(input_dim=self.user_num,
                                        output_dim=self.embedding_dim,
                                        input_length=1,
                                        name='user_embedding',
                                        embeddings_regularizer=l2(lamda_u),
                                        embeddings_initializer=initializer)(user_InputLayer)
        user_EmbeddingLayer = Flatten(name='user_flatten')(user_EmbeddingLayer)

        # user bias
        user_BiasLayer = Embedding(input_dim=self.user_num, output_dim=1, input_length=1,
                                   name='user_bias', embeddings_regularizer=l2(lamda_u),
                                   embeddings_initializer=Zeros())(user_InputLayer)
        user_BiasLayer = Flatten(name='user_bias_flatten')(user_BiasLayer)

        # item embedding
        item_InputLayer = Input(shape=(1,), dtype='int32', name='item_input')
        item_EmbeddingLayer = Embedding(input_dim=self.item_num,
                                        output_dim=self.embedding_dim,
                                        input_length=1,
                                        name='item_embedding',
                                        embeddings_regularizer=l2(lamda_v),
                                        embeddings_initializer=initializer)(item_InputLayer)
        item_EmbeddingLayer = Flatten(name='item_flatten')(item_EmbeddingLayer)

        # item bias
        item_BiasLayer = Embedding(input_dim=self.item_num, output_dim=1, input_length=1,
                                   name='item_bias', embeddings_regularizer=l2(lamda_v),
                                   embeddings_initializer=Zeros())(item_InputLayer)
        item_BiasLayer = Flatten(name='item_bias_flatten')(item_BiasLayer)

        # rating prediction
        dotLayer = Dot(axes=-1, name='dot_layer')([user_EmbeddingLayer, item_EmbeddingLayer])

        # add mu, user bias and item bias
        dotLayer = ConstantLayer(mu=self.mu)(dotLayer)
        dotLayer = Add()([dotLayer, user_BiasLayer])
        dotLayer = Add()([dotLayer, item_BiasLayer])

        # create model
        self._model = Model(inputs=[user_InputLayer, item_InputLayer], outputs=[dotLayer])

        # compile model
        optimizer_instance = getattr(tf.keras.optimizers, optimizer.optimizer)(**optimizer.kwargs)
        losses = getattr(tf.keras.losses, loss)
        self._model.compile(optimizer=optimizer_instance,
                            loss=losses, metrics=metrics)
        # pick user_embedding and user_bias for aggregating
        self._trainable_weights = {v.name.split("/")[0]: v for v in self._model.trainable_weights}
        LOGGER.debug(f"trainable weights {self._trainable_weights}")
        self._aggregate_weights = {"user_embedding": self._trainable_weights["user_embedding"],
                                   "user_bias": self._trainable_weights["user_bias"]} 
Author: FederatedAI, Project: FATE, Lines: 60, Source: backend.py
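The stacked Add layers implement the classic biased matrix-factorization predictor, r̂(u, i) = μ + b_u + b_i + p_u · q_i: the Dot layer supplies the p_u · q_i term, the two 1-dimensional bias embeddings supply b_u and b_i, and the project-specific ConstantLayer injects the global mean μ.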


Note: the tensorflow.python.keras.layers.Embedding examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not repost without permission.