

Python layers.LSTM Attribute Code Examples

This article collects and summarizes typical usage examples of the LSTM attribute of tensorflow.keras.layers in Python. If you have been wondering what Python layers.LSTM does, how to use it, or what real-world examples look like, the curated code examples below may help. You can also explore further usage examples for the containing module, tensorflow.keras.layers.


The following presents 15 code examples of the layers.LSTM attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
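Before the individual examples, here is a minimal, self-contained sketch of the basic layers.LSTM call (shapes and hyperparameters are illustrative only). The layer consumes a 3-D tensor of shape (batch, timesteps, features); by default it returns just the final hidden state, while return_sequences=True yields the full output sequence.

import numpy as np
from tensorflow.keras import layers

# Dummy batch: 32 sequences, 10 timesteps, 8 features each.
x = np.random.rand(32, 10, 8).astype("float32")

# Default: only the final hidden state is returned.
print(layers.LSTM(units=16)(x).shape)                         # (32, 16)

# return_sequences=True: one output vector per timestep.
print(layers.LSTM(units=16, return_sequences=True)(x).shape)  # (32, 10, 16)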

Example 1: __init__

# Module to import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LSTM [as alias]
def __init__(self,
                 in_feats,
                 out_feats,
                 aggregator_type,
                 feat_drop=0.,
                 bias=True,
                 norm=None,
                 activation=None):
        super(SAGEConv, self).__init__()

        self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
        self._out_feats = out_feats
        self._aggre_type = aggregator_type
        self.norm = norm
        self.feat_drop = layers.Dropout(feat_drop)
        self.activation = activation
        # aggregator type: mean/pool/lstm/gcn
        if aggregator_type == 'pool':
            self.fc_pool = layers.Dense(self._in_src_feats)
        if aggregator_type == 'lstm':
            self.lstm = layers.LSTM(units=self._in_src_feats)
        if aggregator_type != 'gcn':
            self.fc_self = layers.Dense(out_feats, use_bias=bias)
        self.fc_neigh = layers.Dense(out_feats, use_bias=bias) 
Author ID: dmlc, Project: dgl, Lines of code: 26, Source file: sageconv.py

Example 2: create_and_append_layer

# Module to import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LSTM [as alias]
def create_and_append_layer(self, layer, rnn_hidden_layers, activation, output_layer=False):
        layer_type_name = layer[0].lower()
        hidden_size = layer[1]
        if output_layer and self.return_final_seq_only: return_sequences = False
        else: return_sequences = True
        if layer_type_name == "lstm":
            rnn_hidden_layers.extend([LSTM(units=hidden_size, kernel_initializer=self.initialiser_function,
                                           return_sequences=return_sequences)])
        elif layer_type_name == "gru":
            rnn_hidden_layers.extend([GRU(units=hidden_size, kernel_initializer=self.initialiser_function,
                                          return_sequences=return_sequences)])
        elif layer_type_name == "linear":
            rnn_hidden_layers.extend(
                [Dense(units=hidden_size, activation=activation, kernel_initializer=self.initialiser_function)])
        else:
            raise ValueError("Unknown layer type: {}".format(layer_type_name))
        input_dim = hidden_size
        return input_dim 
Author ID: p-christ, Project: nn_builder, Lines of code: 20, Source file: RNN.py
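As the branches above show, each layer is specified as a (type name, hidden size) pair. A hedged usage sketch follows, calling create_and_append_layer as a plain function with a SimpleNamespace standing in for the host object; the initialiser string and attribute names are assumptions about the surrounding nn_builder class, not part of the snippet above.

from types import SimpleNamespace

# Hypothetical stand-in for the nn_builder host object, which normally
# supplies `initialiser_function` and `return_final_seq_only` itself.
host = SimpleNamespace(initialiser_function="glorot_uniform",
                       return_final_seq_only=True)

hidden_layers = []
specs = [["lstm", 64], ["gru", 32], ["linear", 10]]
for i, spec in enumerate(specs):
    create_and_append_layer(host, spec, hidden_layers, activation="relu",
                            output_layer=(i == len(specs) - 1))
print([type(layer).__name__ for layer in hidden_layers])  # ['LSTM', 'GRU', 'Dense']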

Example 3: __init__

# Module to import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LSTM [as alias]
def __init__(self,
                 latent_dim: int,
                 output_dim: int,
                 output_activation: str = None,
                 name: str = 'decoder_lstm') -> None:
        """
        LSTM decoder.

        Parameters
        ----------
        latent_dim
            Latent dimension.
        output_dim
            Decoder output dimension.
        output_activation
            Activation used in the Dense output layer.
        name
            Name of decoder.
        """
        super(DecoderLSTM, self).__init__(name=name)
        self.decoder_net = LSTM(latent_dim, return_state=True, return_sequences=True)
        self.dense = Dense(output_dim, activation=output_activation) 
Author ID: SeldonIO, Project: alibi-detect, Lines of code: 24, Source file: autoencoder.py
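Because decoder_net is built with return_state=True and return_sequences=True, calling it returns three tensors: the full output sequence plus the final hidden and cell states. A small sketch of that unpacking, with illustrative dimensions:

import numpy as np
from tensorflow.keras.layers import Dense, LSTM

latent_dim, output_dim = 16, 8
decoder_net = LSTM(latent_dim, return_state=True, return_sequences=True)
dense = Dense(output_dim)

z = np.random.rand(4, 10, latent_dim).astype("float32")  # (batch, steps, latent)
seq_out, state_h, state_c = decoder_net(z)
print(seq_out.shape, state_h.shape, state_c.shape)  # (4, 10, 16) (4, 16) (4, 16)
x_hat = dense(seq_out)  # (4, 10, 8)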

Example 4: create_keras_model

# Module to import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LSTM [as alias]
def create_keras_model(input_dim, learning_rate, window_size):
    """Creates Keras model for regression.

    Args:
      input_dim: How many features the input has
      learning_rate: Learning rate for training
      window_size: Size of the sliding input window

    Returns:
      The compiled Keras model (still needs to be trained)
    """

    model = keras.Sequential([
        layers.LSTM(4, dropout=0.2, input_shape=(input_dim, window_size)),
        layers.Dense(1)
    ])

    model.compile(loss='mean_squared_error', optimizer=tf.train.AdamOptimizer(
        learning_rate=learning_rate))  

    return model 
Author ID: kubeflow, Project: pipelines, Lines of code: 22, Source file: model.py
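A hedged sketch of calling this function on dummy data; note that input_shape=(input_dim, window_size) makes input_dim the time axis, so training arrays must be shaped accordingly, and tf.train.AdamOptimizer means the snippet targets a TF 1.x environment. The dimensions below are illustrative.

import numpy as np

input_dim, window_size = 1, 10  # illustrative values
model = create_keras_model(input_dim, learning_rate=0.001, window_size=window_size)

x = np.random.rand(100, input_dim, window_size)  # (samples, input_dim, window_size)
y = np.random.rand(100, 1)
model.fit(x, y, epochs=2, batch_size=16, verbose=0)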

Example 5: create_lstm_layer_2

# Module to import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LSTM [as alias]
def create_lstm_layer_2(self):
        ker_in = glorot_uniform(seed=self.seed)
        rec_in = Orthogonal(seed=self.seed)
        bioutp = Bidirectional(LSTM(self.aggregation_dim,
                                    input_shape=(self.max_sequence_length, 8 * self.perspective_num,),
                                    kernel_regularizer=None,
                                    recurrent_regularizer=None,
                                    bias_regularizer=None,
                                    activity_regularizer=None,
                                    recurrent_dropout=self.recdrop_val,
                                    dropout=self.inpdrop_val,
                                    kernel_initializer=ker_in,
                                    recurrent_initializer=rec_in,
                                    return_sequences=False),
                               merge_mode='concat',
                               name="sentence_embedding")
        return bioutp 
Author ID: deepmipt, Project: DeepPavlov, Lines of code: 19, Source file: mpm_siamese_network.py

Example 6: _lstm_reducer

# Module to import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LSTM [as alias]
def _lstm_reducer(self, nodes):
        """LSTM reducer
        NOTE(zihao): lstm reducer with default schedule (degree bucketing)
        is slow, we could accelerate this with degree padding in the future.
        """
        m = nodes.mailbox['m']  # (B, L, D)
        rst = self.lstm(m)
        return {'neigh': rst} 
Author ID: dmlc, Project: dgl, Lines of code: 10, Source file: sageconv.py
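The reducer treats each node's incoming messages as a sequence: the mailbox tensor is (B, L, D) for B destination nodes, each with L neighbor messages of feature dimension D, and the LSTM compresses it to one vector per node, mirroring self.lstm = layers.LSTM(units=self._in_src_feats) from example 1. Outside of DGL, the same reduction can be sketched directly:

import numpy as np
from tensorflow.keras import layers

B, L, D = 5, 4, 8  # nodes, neighbors per node, feature dim (illustrative)
mailbox = np.random.rand(B, L, D).astype("float32")

lstm = layers.LSTM(units=D)  # returns only the final hidden state, one vector per node
neigh = lstm(mailbox)
print(neigh.shape)  # (5, 8)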

Example 7: lstm_model

# Module to import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LSTM [as alias]
def lstm_model():
    model = Sequential()
    model.add(LSTM(16, dropout=0.2, recurrent_dropout=0.2, input_shape=(cfg.pose_vec_dim, cfg.window)))
    model.add(Dense(16, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(len(cfg.activity_dict), activation='softmax'))
    print(model.summary())
    return model 
Author ID: smellslikeml, Project: ActionAI, Lines of code: 10, Source file: model.py

Example 8: lstm_model

# Module to import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LSTM [as alias]
def lstm_model():
    model = Sequential()
    model.add(LSTM(32, dropout=0.2, recurrent_dropout=0.2, input_shape=(pose_vec_dim, window)))
    model.add(Dense(32, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(len(class_names), activation='softmax'))
    print(model.summary())
    return model 
Author ID: smellslikeml, Project: ActionAI, Lines of code: 10, Source file: train.py

Example 9: build_model

# Module to import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LSTM [as alias]
def build_model(self):
        model = Sequential()
        model.add(LSTM(32, input_shape=self.input_shape, return_sequences=False))
        adam = Adam(lr=0.1, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.01, amsgrad=False)
        model.add(Dense(1))
        model.compile(loss='mean_squared_error', optimizer=adam)
        return model 
Author ID: carlomazzaferro, Project: kryptoflow, Lines of code: 9, Source file: model.py

Example 10: load

# Module to import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LSTM [as alias]
def load(input_shape, output_shape, cfg):
    nb_lstm_states = int(cfg['nb_lstm_states'])


    inputs = KL.Input(shape=input_shape)
    x = KL.LSTM(units=nb_lstm_states, unit_forget_bias=True)(inputs)

    mu = KL.Dense(1)(x)
    std = KL.Dense(1)(x)
    std = KL.Activation(tf.exp, name="exponential_activation")(std)

    output = KL.Concatenate(axis=-1)([std, mu])
    model = KM.Model(inputs=[inputs], outputs=[output])

    return model 
Author ID: johnmartinsson, Project: blood-glucose-prediction, Lines of code: 17, Source file: basic_lstm_independent_keras.py
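Since the model concatenates [std, mu] along the last axis (with std made positive by the exponential activation), y_pred[:, 0] is the standard deviation and y_pred[:, 1] the mean. A hedged sketch of the Gaussian negative log-likelihood such a head is typically trained with; the loss itself is an assumption, not part of the snippet above:

import tensorflow as tf

def gaussian_nll(y_true, y_pred):
    # Assumes the [std, mu] output order produced by the model above,
    # and y_true shaped (batch, 1).
    std, mu = y_pred[:, 0], y_pred[:, 1]
    y = tf.squeeze(y_true, axis=-1)
    # Gaussian negative log-likelihood up to an additive constant.
    return tf.reduce_mean(tf.math.log(std) + 0.5 * tf.square((y - mu) / std))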

Example 11: _build_model

# Module to import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LSTM [as alias]
def _build_model(self):
        # Neural Net for Deep-Q learning Model
        # input:state; output:action value
        model = Sequential()
        model.add(Dense(256, input_dim=self.state_size, activation='relu'))
        model.add(Dense(128, activation='relu'))
        model.add(Dropout(0.3))
        # model.add(LSTM(128))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss='mse',
                      optimizer=Adam(lr=self.learning_rate))
        return model 
Author ID: multi-commander, Project: Multi-Commander, Lines of code: 14, Source file: dqn_agent.py

Example 12: create_lstm_layer_1

# Module to import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LSTM [as alias]
def create_lstm_layer_1(self):
        ker_in = glorot_uniform(seed=self.seed)
        rec_in = Orthogonal(seed=self.seed)
        bioutp = Bidirectional(LSTM(self.hidden_dim,
                                    input_shape=(self.max_sequence_length, self.embedding_dim,),
                                    kernel_regularizer=None,
                                    recurrent_regularizer=None,
                                    bias_regularizer=None,
                                    activity_regularizer=None,
                                    recurrent_dropout=self.recdrop_val,
                                    dropout=self.inpdrop_val,
                                    kernel_initializer=ker_in,
                                    recurrent_initializer=rec_in,
                                    return_sequences=True), merge_mode=None)
        return bioutp 
Author ID: deepmipt, Project: DeepPavlov, Lines of code: 17, Source file: mpm_siamese_network.py
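Unlike example 5, this layer passes merge_mode=None, so the Bidirectional wrapper returns the forward and backward output sequences as a list of two tensors instead of concatenating them. A minimal illustration with toy shapes:

import numpy as np
from tensorflow.keras.layers import LSTM, Bidirectional

x = np.random.rand(2, 7, 4).astype("float32")  # (batch, steps, features)

fw_bw = Bidirectional(LSTM(3, return_sequences=True), merge_mode=None)(x)
print([t.shape for t in fw_bw])  # [(2, 7, 3), (2, 7, 3)]

merged = Bidirectional(LSTM(3, return_sequences=True), merge_mode='concat')(x)
print(merged.shape)  # (2, 7, 6)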

Example 13: build

# Module to import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LSTM [as alias]
def build(self, hp, inputs=None):
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        shape = input_node.shape.as_list()
        if len(shape) != 3:
            raise ValueError(
                'Expect the input tensor to have '
                'exactly 3 dimensions for rnn models, '
                'but got {shape}'.format(shape=input_node.shape))

        feature_size = shape[-1]
        output_node = input_node

        bidirectional = self.bidirectional
        if bidirectional is None:
            bidirectional = hp.Boolean('bidirectional', default=True)
        layer_type = self.layer_type or hp.Choice('layer_type',
                                                  ['gru', 'lstm'],
                                                  default='lstm')
        num_layers = self.num_layers or hp.Choice('num_layers',
                                                  [1, 2, 3],
                                                  default=2)
        rnn_layers = {
            'gru': layers.GRU,
            'lstm': layers.LSTM
        }
        in_layer = rnn_layers[layer_type]
        for i in range(num_layers):
            return_sequences = True
            if i == num_layers - 1:
                return_sequences = self.return_sequences
            if bidirectional:
                output_node = layers.Bidirectional(
                    in_layer(feature_size,
                             return_sequences=return_sequences))(output_node)
            else:
                output_node = in_layer(
                    feature_size,
                    return_sequences=return_sequences)(output_node)
        return output_node 
Author ID: keras-team, Project: autokeras, Lines of code: 43, Source file: basic.py

Example 14: test_llr

# Module to import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LSTM [as alias]
def test_llr(llr_params):
    # LLR parameters
    threshold, threshold_perc, return_instance_score, return_feature_score, outlier_type = llr_params

    # define model and detector
    inputs = Input(shape=(shape[-1] - 1,), dtype=tf.int32)
    x = tf.one_hot(tf.cast(inputs, tf.int32), input_dim)
    x = LSTM(hidden_dim, return_sequences=True)(x)
    logits = Dense(input_dim, activation=None)(x)
    model = tf.keras.Model(inputs=inputs, outputs=logits)

    od = LLR(threshold=threshold, sequential=True, model=model, log_prob=likelihood_fn)

    assert od.threshold == threshold
    assert od.meta == {'name': 'LLR', 'detector_type': 'offline', 'data_type': None}

    od.fit(
        X_train,
        loss_fn=loss_fn,
        mutate_fn_kwargs={'rate': .5, 'feature_range': (0, input_dim)},
        epochs=1,
        verbose=False
    )

    od.infer_threshold(X_val, threshold_perc=threshold_perc)
    # iscore_test = od.score(X_test)[1]
    # iscore_train = od.score(X_train)[1]
    # assert (iscore_test > iscore_train).all()

    od_preds = od.predict(X_test,
                          return_instance_score=return_instance_score,
                          return_feature_score=return_feature_score,
                          outlier_type=outlier_type)

    assert od_preds['meta'] == od.meta
    if outlier_type == 'instance':
        assert od_preds['data']['is_outlier'].shape == (X_test.shape[0],)
        if return_instance_score:
            assert od_preds['data']['is_outlier'].sum() == (od_preds['data']['instance_score']
                                                            > od.threshold).astype(int).sum()
    elif outlier_type == 'feature':
        assert od_preds['data']['is_outlier'].shape == (X_test.shape[0], X_test.shape[1] - 1)
        if return_feature_score:
            assert od_preds['data']['is_outlier'].sum() == (od_preds['data']['feature_score']
                                                            > od.threshold).astype(int).sum()

    if return_feature_score:
        assert od_preds['data']['feature_score'].shape == (X_test.shape[0], X_test.shape[1] - 1)
    else:
        assert od_preds['data']['feature_score'] is None

    if return_instance_score:
        assert od_preds['data']['instance_score'].shape == (X_test.shape[0],)
    else:
        assert od_preds['data']['instance_score'] is None 
Author ID: SeldonIO, Project: alibi-detect, Lines of code: 57, Source file: test_llr.py

Example 15: main

# Module to import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import LSTM [as alias]
def main():
    np.random.seed(7)

    # data. definition of the problem.
    seq_length = 20
    x_train, y_train = task_add_two_numbers_after_delimiter(20_000, seq_length)
    x_val, y_val = task_add_two_numbers_after_delimiter(4_000, seq_length)

    # just arbitrary values, chosen for visual purposes: easier to inspect than random ones.
    test_index_1 = 4
    test_index_2 = 9
    x_test, _ = task_add_two_numbers_after_delimiter(10, seq_length, 0, test_index_1, test_index_2)
    # x_test_mask is just a mask that, if applied to x_test, would still contain the information to solve the problem.
    # we expect the attention map to look like this mask.
    x_test_mask = np.zeros_like(x_test[..., 0])
    x_test_mask[:, test_index_1:test_index_1 + 1] = 1
    x_test_mask[:, test_index_2:test_index_2 + 1] = 1

    # model
    i = Input(shape=(seq_length, 1))
    x = LSTM(100, return_sequences=True)(i)
    x = attention_3d_block(x)
    x = Dropout(0.2)(x)
    x = Dense(1, activation='linear')(x)

    model = Model(inputs=[i], outputs=[x])
    model.compile(loss='mse', optimizer='adam')
    print(model.summary())

    output_dir = 'task_add_two_numbers'
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    max_epoch = int(sys.argv[1]) if len(sys.argv) > 1 else 200

    class VisualiseAttentionMap(Callback):

        def on_epoch_end(self, epoch, logs=None):
            attention_map = get_activations(model, x_test, layer_name='attention_weight')['attention_weight']

            # top is attention map.
            # bottom is ground truth.
            plt.imshow(np.concatenate([attention_map, x_test_mask]), cmap='hot')

            iteration_no = str(epoch).zfill(3)
            plt.axis('off')
            plt.title(f'Iteration {iteration_no} / {max_epoch}')
            plt.savefig(f'{output_dir}/epoch_{iteration_no}.png')
            plt.close()
            plt.clf()

    model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=max_epoch,
              batch_size=64, callbacks=[VisualiseAttentionMap()]) 
Author ID: philipperemy, Project: keras-attention-mechanism, Lines of code: 55, Source file: example-attention.py


Note: The tensorflow.keras.layers.LSTM examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution and use are governed by each project's License. Do not reproduce without permission.