

Python layers.GRU Code Examples

This article collects typical usage examples of the Python class tensorflow.keras.layers.GRU. If you are wondering exactly what layers.GRU does, how to use it, or want to see it applied in real code, the curated examples below should help. You can also explore further usage examples from the containing module, tensorflow.keras.layers.


The following presents 10 code examples of layers.GRU, drawn from open-source projects and sorted by popularity by default.
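
Before diving into the examples, a minimal sketch of the layer itself may help; this assumes TensorFlow 2.x and is not taken from any of the projects below:

import tensorflow as tf
from tensorflow.keras.layers import GRU

x = tf.random.normal((4, 12, 8))           # (batch, timesteps, features)
seq = GRU(16, return_sequences=True)(x)    # (4, 12, 16): one output per timestep
last = GRU(16)(x)                          # (4, 16): final state only

Nearly every example below turns on choosing between these two output shapes.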

Example 1: create_and_append_layer

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import GRU [as alias]
def create_and_append_layer(self, layer, rnn_hidden_layers, activation, output_layer=False):
        layer_type_name = layer[0].lower()
        hidden_size = layer[1]
        # Only the output layer may emit just the last timestep.
        return_sequences = not (output_layer and self.return_final_seq_only)
        if layer_type_name == "lstm":
            rnn_hidden_layers.extend([LSTM(units=hidden_size, kernel_initializer=self.initialiser_function,
                                           return_sequences=return_sequences)])
        elif layer_type_name == "gru":
            rnn_hidden_layers.extend([GRU(units=hidden_size, kernel_initializer=self.initialiser_function,
                                          return_sequences=return_sequences)])
        elif layer_type_name == "linear":
            rnn_hidden_layers.extend(
                [Dense(units=hidden_size, activation=activation, kernel_initializer=self.initialiser_function)])
        else:
            raise ValueError("Unknown layer type: {}".format(layer_type_name))
        input_dim = hidden_size
        return input_dim 
Developer: p-christ, Project: nn_builder, Lines: 20, Source: RNN.py
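
The return_sequences logic above exists because stacked recurrent layers need 3-D sequence input; only the output layer may collapse to the last state. A standalone sketch of that pattern (illustrative, not from nn_builder):

from tensorflow.keras.layers import GRU
from tensorflow.keras.models import Sequential

model = Sequential([
    GRU(32, return_sequences=True, input_shape=(10, 4)),  # passes the sequence onward
    GRU(16),                                              # final layer: last state only
])
print(model.output_shape)  # (None, 16)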

Example 2: _create_encoder

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import GRU [as alias]
def _create_encoder(self, n_layers, dropout):
    """Create the encoder as a tf.keras.Model."""
    input = self._create_features()
    gather_indices = Input(shape=(2,), dtype=tf.int32)
    prev_layer = input
    for i in range(n_layers):
      if dropout > 0.0:
        prev_layer = Dropout(rate=dropout)(prev_layer)
      prev_layer = GRU(
          self._embedding_dimension, return_sequences=True)(prev_layer)
    prev_layer = Lambda(lambda x: tf.gather_nd(x[0], x[1]))(
        [prev_layer, gather_indices])
    return tf.keras.Model(inputs=[input, gather_indices], outputs=prev_layer) 
Developer: deepchem, Project: deepchem, Lines: 15, Source: seqtoseq.py
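
The closing Lambda uses tf.gather_nd to select one timestep of GRU output per (sample, position) pair named by gather_indices. A small sketch of that indexing:

import tensorflow as tf

seq = tf.reshape(tf.range(24, dtype=tf.float32), (2, 3, 4))  # (batch, time, dim)
idx = tf.constant([[0, 2], [1, 0]])   # (sample, timestep) pairs
picked = tf.gather_nd(seq, idx)       # rows seq[0, 2] and seq[1, 0]
print(picked.shape)                   # (2, 4)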

Example 3: _create_decoder

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import GRU [as alias]
def _create_decoder(self, n_layers, dropout):
    """Create the decoder as a tf.keras.Model."""
    input = Input(shape=(self._embedding_dimension,))
    prev_layer = layers.Stack()(self._max_output_length * [input])
    for i in range(n_layers):
      if dropout > 0.0:
        prev_layer = Dropout(dropout)(prev_layer)
      prev_layer = GRU(
          self._embedding_dimension, return_sequences=True)(prev_layer)
    output = Dense(
        len(self._output_tokens), activation=tf.nn.softmax)(prev_layer)
    return tf.keras.Model(inputs=input, outputs=output) 
Developer: deepchem, Project: deepchem, Lines: 14, Source: seqtoseq.py
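
Note that layers.Stack is DeepChem's own layer, not a Keras built-in: it tiles the latent vector so the decoder GRU receives one copy per output position. A rough plain-TensorFlow equivalent (a sketch, not DeepChem's implementation):

import tensorflow as tf

latent = tf.random.normal((2, 196))        # one embedding per sample
repeated = tf.stack(5 * [latent], axis=1)  # (2, 5, 196), with max_output_length = 5
print(repeated.shape)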

Example 4: __init__

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import GRU [as alias]
def __init__(self,
               num_tokens,
               max_output_length,
               embedding_dimension=196,
               filter_sizes=[9, 9, 10],
               kernel_sizes=[9, 9, 11],
               decoder_dimension=488,
               **kwargs):
    """
    Parameters
    ----------
    filter_sizes: list of int
      Number of filters for each 1D convolution in the encoder
    kernel_sizes: list of int
      Kernel size for each 1D convolution in the encoder
    decoder_dimension: int
      Number of channels for the GRU Decoder
    """
    if len(filter_sizes) != len(kernel_sizes):
      raise ValueError("Must have same number of layers and kernels")
    self._filter_sizes = filter_sizes
    self._kernel_sizes = kernel_sizes
    self._decoder_dimension = decoder_dimension
    super(AspuruGuzikAutoEncoder, self).__init__(
        input_tokens=num_tokens,
        output_tokens=num_tokens,
        max_output_length=max_output_length,
        embedding_dimension=embedding_dimension,
        variational=True,
        reverse_input=False,
        **kwargs) 
Developer: deepchem, Project: deepchem, Lines: 33, Source: seqtoseq.py
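
For orientation, a hypothetical construction call; the import path and argument values here are assumptions for illustration, not taken from the snippet:

from deepchem.models.seqtoseq import AspuruGuzikAutoEncoder  # path is an assumption

model = AspuruGuzikAutoEncoder(num_tokens=35, max_output_length=120)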

Example 5: _rnn

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import GRU [as alias]
def _rnn(dim=1000, classes=10, dropout=0.6):
    """recurrent model"""
    _model = Sequential()
    _model.add(Embedding(dim, 64))
    _model.add(GRU(64))
    _model.add(Dense(64, activation="relu"))
    _model.add(Dropout(dropout))
    _model.add(Dense(classes, activation="sigmoid"))
    return _model 
Developer: intel, Project: stacks-usecase, Lines: 11, Source: train.py
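
A hedged usage sketch for the helper above, assuming the imports its body needs (Sequential, Embedding, GRU, Dense, Dropout from tensorflow.keras) are in scope and TensorFlow 2.x eager execution:

import tensorflow as tf

model = _rnn(dim=1000, classes=10)
model.compile(optimizer="adam", loss="binary_crossentropy")
probs = model(tf.random.uniform((2, 20), maxval=1000, dtype=tf.int32))
print(probs.shape)  # (2, 10): one sigmoid score per class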

Example 6: build_majority

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import GRU [as alias]
def build_majority(feature_len, num_classes, gru_size=128,
                   classify_activation='softmax', time_steps=None,
                   allow_cudnn=True):
    """Build a mock model that simply sums counts.

    :param feature_len: int, number of features for each pileup column.
    :param num_classes: int, number of output class labels.
    :param gru_size: int, size of each GRU layer.
    :param classify_activation: str, activation to use in classification layer.
    :param time_steps: int, number of pileup columns in a sample.
    :param allow_cudnn: bool, opt-in to cudnn when using a GPU.

    :returns: `keras.models.Sequential` object.

    """
    import tensorflow as tf
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Lambda, Activation

    def sum_counts(f):
        """Sum forward and reverse counts."""
        # TODO write to handle multiple dtypes
        # acgtACGTdD
        # sum base counts
        b = f[:, :, 0:4] + f[:, :, 4:8]
        # sum deletion counts (indexing in this way retains correct shape)
        d = f[:, :, 8:9] + f[:, :, 9:10]
        return tf.concat([d, b], axis=-1)

    model = Sequential()
    model.add(Lambda(sum_counts, output_shape=(time_steps, num_classes)))
    model.add(Activation(classify_activation))
    return model 
Developer: nanoporetech, Project: medaka, Lines: 35, Source: models.py
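
The slice-style indexing in sum_counts (f[:, :, 8:9] rather than f[:, :, 8]) keeps the trailing axis, so the later concat lines up. A quick check of those shapes:

import tensorflow as tf

f = tf.random.uniform((1, 3, 10))    # 'acgtACGTdD' layout from the comment above
d = f[:, :, 8:9] + f[:, :, 9:10]     # slicing keeps a trailing axis of size 1
b = f[:, :, 0:4] + f[:, :, 4:8]
print(tf.concat([d, b], axis=-1).shape)  # (1, 3, 5)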

Example 7: create_model

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import GRU [as alias]
def create_model(self) -> Model:
        input = []
        if self.use_matrix:
            for i in range(self.num_context_turns + 1):
                input.append(Input(shape=(self.max_sequence_length,)))
            context = input[:self.num_context_turns]
            response = input[-1]
            emb_layer = self.embedding_layer()
            emb_c = [emb_layer(el) for el in context]
            emb_r = emb_layer(response)
        else:
            for i in range(self.num_context_turns + 1):
                input.append(Input(shape=(self.max_sequence_length, self.embedding_dim,)))
            context = input[:self.num_context_turns]
            response = input[-1]
            emb_c = context
            emb_r = response
        lstm_layer = self.lstm_layer()
        lstm_c = [lstm_layer(el) for el in emb_c]
        lstm_r = lstm_layer(emb_r)
        pooling_layer = GlobalMaxPooling1D(name="pooling")
        lstm_c = [pooling_layer(el) for el in lstm_c]
        lstm_r = pooling_layer(lstm_r)
        lstm_c = [Lambda(lambda x: K.expand_dims(x, 1))(el) for el in lstm_c]
        lstm_c = Lambda(lambda x: K.concatenate(x, 1))(lstm_c)
        gru_layer = GRU(2 * self.hidden_dim, name="gru")
        gru_c = gru_layer(lstm_c)

        if self.triplet_mode:
            dist = Lambda(self._pairwise_distances)([gru_c, lstm_r])
        else:
            dist = Lambda(self._diff_mult_dist)([gru_c, lstm_r])
            dist = Dense(1, activation='sigmoid', name="score_model")(dist)
        model = Model(context + [response], dist)
        return model 
Developer: deepmipt, Project: DeepPavlov, Lines: 37, Source: bilstm_gru_siamese_network.py
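
The expand_dims/concatenate pair is what turns a list of pooled per-turn vectors into the (batch, turns, features) tensor the GRU needs. A minimal sketch of that step:

import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import GRU

turns = [tf.random.normal((2, 8)) for _ in range(3)]  # one pooled vector per turn
stacked = K.concatenate([K.expand_dims(t, 1) for t in turns], axis=1)  # (2, 3, 8)
context = GRU(16)(stacked)  # (2, 16): the GRU summarizes the turn sequence
print(context.shape)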

Example 8: build

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import GRU [as alias]
def build(self, hp, inputs=None):
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        shape = input_node.shape.as_list()
        if len(shape) != 3:
            raise ValueError(
                'Expect the input tensor to have '
                'exactly 3 dimensions for rnn models, '
                'but got {shape}'.format(shape=input_node.shape))

        feature_size = shape[-1]
        output_node = input_node

        bidirectional = self.bidirectional
        if bidirectional is None:
            bidirectional = hp.Boolean('bidirectional', default=True)
        layer_type = self.layer_type or hp.Choice('layer_type',
                                                  ['gru', 'lstm'],
                                                  default='lstm')
        num_layers = self.num_layers or hp.Choice('num_layers',
                                                  [1, 2, 3],
                                                  default=2)
        rnn_layers = {
            'gru': layers.GRU,
            'lstm': layers.LSTM
        }
        in_layer = rnn_layers[layer_type]
        for i in range(num_layers):
            return_sequences = True
            if i == num_layers - 1:
                return_sequences = self.return_sequences
            if bidirectional:
                output_node = layers.Bidirectional(
                    in_layer(feature_size,
                             return_sequences=return_sequences))(output_node)
            else:
                output_node = in_layer(
                    feature_size,
                    return_sequences=return_sequences)(output_node)
        return output_node 
Developer: keras-team, Project: autokeras, Lines: 43, Source: basic.py
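
The rnn_layers dict is the whole dispatch trick: layers.GRU and layers.LSTM share a constructor signature, so the tuner can swap them freely. Stripped of the hyperparameter plumbing:

from tensorflow.keras import layers

rnn_layers = {'gru': layers.GRU, 'lstm': layers.LSTM}
layer_cls = rnn_layers['gru']               # chosen by hp.Choice above
rnn = layer_cls(32, return_sequences=True)  # inner layers keep the sequence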

Example 9: __init__

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import GRU [as alias]
def __init__(self,
               char_to_idx,
               n_tasks=10,
               max_seq_len=270,
               embedding_dim=50,
               n_classes=2,
               use_bidir=True,
               use_conv=True,
               filters=192,
               kernel_size=3,
               strides=1,
               rnn_sizes=[224, 384],
               rnn_types=["GRU", "GRU"],
               mode="regression",
               **kwargs):
    """
    Parameters
    ----------
    char_to_idx: dict,
        char_to_idx contains character to index mapping for SMILES characters
    embedding_dim: int, default 50
        Size of character embeddings used.
    use_bidir: bool, default True
        Whether to use BiDirectional RNN Cells
    use_conv: bool, default True
        Whether to use a conv-layer
    kernel_size: int, default 3
        Kernel size for convolutions
    filters: int, default 192
        Number of filters
    strides: int, default 1
        Strides used in convolution
    rnn_sizes: list[int], default [224, 384]
        Number of hidden units in the RNN cells
    mode: str, default regression
        Whether to use model for regression or classification
    """

    self.char_to_idx = char_to_idx
    self.n_classes = n_classes
    self.max_seq_len = max_seq_len
    self.embedding_dim = embedding_dim
    self.use_bidir = use_bidir
    self.use_conv = use_conv
    if use_conv:
      self.kernel_size = kernel_size
      self.filters = filters
      self.strides = strides
    self.rnn_types = rnn_types
    self.rnn_sizes = rnn_sizes
    assert len(rnn_sizes) == len(
        rnn_types), "rnn_sizes and rnn_types must have the same length"
    self.n_tasks = n_tasks
    self.mode = mode

    model, loss, output_types = self._build_graph()
    super(Smiles2Vec, self).__init__(
        model=model, loss=loss, output_types=output_types, **kwargs) 
Developer: deepchem, Project: deepchem, Lines: 60, Source: chemnet_models.py
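
A hypothetical construction call for context; the import path follows the source file named above (chemnet_models.py) but is an assumption, as is the toy alphabet:

from deepchem.models.chemnet_models import Smiles2Vec  # path is an assumption

char_to_idx = {c: i for i, c in enumerate("#()=1CNOc")}  # toy SMILES alphabet
model = Smiles2Vec(char_to_idx, rnn_sizes=[224, 384], rnn_types=["GRU", "GRU"])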

Example 10: build_model

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import GRU [as alias]
def build_model(feature_len, num_classes, gru_size=128,
                classify_activation='softmax', time_steps=None,
                allow_cudnn=True):
    """Build a bidirectional GRU model with CuDNNGRU support.

    The CuDNNGRU implementation is claimed to give a 7x speed-up on GPU.
    The function builds a model that can run with CuDNNGRU provided (a) a
    GPU is present and (b) the `allow_cudnn` argument permits it; otherwise
    a compatible (but not CuDNNGRU-accelerated) model is built.

    :param feature_len: int, number of features for each pileup column.
    :param num_classes: int, number of output class labels.
    :param gru_size: int, size of each GRU layer.
    :param classify_activation: str, activation to use in classification layer.
    :param time_steps: int, number of pileup columns in a sample.
    :param allow_cudnn: bool, opt-in to cudnn when using a GPU.

    :returns: `keras.models.Sequential` object.

    """
    import tensorflow as tf
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense, GRU, CuDNNGRU, Bidirectional

    # Determine whether to use CuDNNGRU or not
    cudnn = False
    if tf.test.is_gpu_available(cuda_only=True) and allow_cudnn:
        cudnn = True
    logger.info("Building model with cudnn optimization: {}".format(cudnn))

    model = Sequential()
    input_shape = (time_steps, feature_len)
    for i in [1, 2]:
        name = 'gru{}'.format(i)
        # Options here are to be mutually compatible: train with CuDNNGRU
        # but allow inference with GRU (on cpu).
        # https://gist.github.com/bzamecnik/bd3786a074f8cb891bc2a397343070f1
        if cudnn:
            gru = CuDNNGRU(gru_size, return_sequences=True, name=name)
        else:
            gru = GRU(
                gru_size, reset_after=True, recurrent_activation='sigmoid',
                return_sequences=True, name=name)
        model.add(Bidirectional(gru, input_shape=input_shape))

    # see keras #10417 for why we specify input shape
    model.add(Dense(
        num_classes, activation=classify_activation, name='classify',
        input_shape=(time_steps, 2 * gru_size)
    ))

    return model 
Developer: nanoporetech, Project: medaka, Lines: 55, Source: models.py
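
The reset_after=True / recurrent_activation='sigmoid' pair in the CPU branch is exactly what keeps the weights compatible with CuDNNGRU. In TF 2.x the split disappears: a plain GRU selects the fused cuDNN kernel automatically when its arguments keep the defaults below and a GPU is available (a sketch of my understanding of the TF 2.x behavior):

from tensorflow.keras.layers import GRU

# All of these are the defaults; changing any of them (or using recurrent
# dropout or a non-right-padded mask) falls back to the generic kernel.
gru = GRU(128,
          activation='tanh',
          recurrent_activation='sigmoid',
          reset_after=True,
          use_bias=True,
          unroll=False,
          return_sequences=True)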


Note: the tensorflow.keras.layers.GRU examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors; consult each project's license before redistributing or using the code, and do not repost without permission.