

Python nn.RNN Attribute Code Examples

This article collects typical usage examples of the torch.nn.RNN attribute in Python. If you are wondering how nn.RNN is used in practice, the curated examples below may help. You can also explore other usage examples from the torch.nn module.


Fifteen code examples of the nn.RNN attribute are shown below, sorted by popularity by default.

Example 1: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import RNN [as alias]
def __init__(self, input_size=50, hidden_size=256, dropout=0, bidirectional=False, num_layers=1, activation_function="tanh"):
        """
        Args:
            input_size: dimension of the input embedding
            hidden_size: hidden size
            dropout: dropout layer on the outputs of each RNN layer except the last layer
            bidirectional: if it is a bidirectional RNN
            num_layers: number of recurrent layers
            activation_function: the activation function of RNN, tanh/relu
        """
        super().__init__()
        if bidirectional:
            hidden_size //= 2  # integer division keeps hidden_size an int for nn.RNN
        self.rnn = nn.RNN(input_size, 
                          hidden_size, 
                          num_layers, 
                          nonlinearity=activation_function, 
                          dropout=dropout, 
                          bidirectional=bidirectional) 
Developer: thunlp, Project: OpenNRE, Lines: 21, Source: rnn.py
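
A minimal usage sketch (shapes assumed, not from the OpenNRE source) showing why hidden_size is halved: with bidirectional=True the two directions are concatenated, so the output feature size stays at the original hidden_size.

import torch
from torch import nn

rnn = nn.RNN(input_size=50, hidden_size=256 // 2, bidirectional=True)
x = torch.randn(30, 4, 50)    # (seq_len, batch, input_size); batch_first defaults to False
out, hn = rnn(x)
print(out.shape)              # torch.Size([30, 4, 256]) -- both directions concatenated
print(hn.shape)               # torch.Size([2, 4, 128])  -- (num_layers * num_directions, batch, hidden_size)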

Example 2: whatCellType

# Required import: from torch import nn [as alias]
# Or: from torch.nn import RNN [as alias]
def whatCellType(input_size, hidden_size, cell_type, dropout_rate):
    if cell_type == 'rnn':
        cell = nn.RNN(input_size, hidden_size, dropout=dropout_rate, batch_first=False)
        init_gru(cell)
        return cell
    elif cell_type == 'gru':
        cell = nn.GRU(input_size, hidden_size, dropout=dropout_rate, batch_first=False)
        init_gru(cell)
        return cell
    elif cell_type == 'lstm':
        cell = nn.LSTM(input_size, hidden_size, dropout=dropout_rate, batch_first=False)
        init_lstm(cell)
        return cell
    elif cell_type == 'bigru':
        cell = nn.GRU(input_size, hidden_size, bidirectional=True, dropout=dropout_rate, batch_first=False)
        init_gru(cell)
        return cell
    elif cell_type == 'bilstm':
        cell = nn.LSTM(input_size, hidden_size, bidirectional=True, dropout=dropout_rate, batch_first=False)
        init_lstm(cell)
        return cell 
Developer: ConvLab, Project: ConvLab, Lines: 23, Source: model.py
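
A small standalone sketch (shapes assumed, not part of ConvLab) of the practical difference between the 'gru' and 'bigru' branches above: the bidirectional cell doubles the feature dimension of its outputs.

import torch
from torch import nn

gru = nn.GRU(input_size=50, hidden_size=64, batch_first=False)
bigru = nn.GRU(input_size=50, hidden_size=64, bidirectional=True, batch_first=False)

x = torch.randn(10, 4, 50)    # (seq_len, batch, input_size)
out, h = gru(x)               # out: (10, 4, 64)
out_bi, h_bi = bigru(x)       # out_bi: (10, 4, 128), h_bi: (2, 4, 64)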

Example 3: forward

# Required import: from torch import nn [as alias]
# Or: from torch.nn import RNN [as alias]
def forward(self, inputs, hidden):
        def select_layer(h_state, i):  # To work on both LSTM / GRU, RNN
            if isinstance(h_state, tuple):
                return tuple([select_layer(s, i) for s in h_state])
            else:
                return h_state[i]

        next_hidden = []
        for i, layer in enumerate(self.layers):
            next_hidden_i = layer(inputs, select_layer(hidden, i))
            output = next_hidden_i[0] if isinstance(next_hidden_i, tuple) \
                else next_hidden_i
            if i + 1 < self.num_layers:
                output = self.dropout(output)
            if self.residual and inputs.size(-1) == output.size(-1):
                inputs = output + inputs
            else:
                inputs = output
            next_hidden.append(next_hidden_i)
        if isinstance(hidden, tuple):
            next_hidden = tuple([torch.stack(h) for h in zip(*next_hidden)])
        else:
            next_hidden = torch.stack(next_hidden)
        return inputs, next_hidden 
Developer: nadavbh12, Project: Character-Level-Language-Modeling-with-Deeper-Self-Attention-pytorch, Lines: 26, Source: recurrent.py
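
A stripped-down sketch (sizes assumed, two layers stacked by hand) of the residual wiring the loop above applies: when the input and output feature sizes match, the layer output is added to its input before it feeds the next layer.

import torch
from torch import nn

layer1 = nn.GRU(input_size=32, hidden_size=32)
layer2 = nn.GRU(input_size=32, hidden_size=32)

x = torch.randn(10, 4, 32)    # (seq_len, batch, features)
out1, h1 = layer1(x)
out1 = out1 + x               # residual connection: feature sizes match
out2, h2 = layer2(out1)       # the next layer consumes the residual sum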

Example 4: auto_rnn

# Required import: from torch import nn [as alias]
# Or: from torch.nn import RNN [as alias]
def auto_rnn(self, rnn: nn.RNN, seqs, lengths, batch_first=True, init_state=None, output_last_states=False):
        batch_size = seqs.size(0) if batch_first else seqs.size(1)
        state_shape = get_state_shape(rnn, batch_size, rnn.bidirectional)

        if not init_state:
            h0 = c0 = Variable(seqs.data.new(*state_shape).zero_())
        else:
            h0 = init_state['h0'].expand(state_shape)
            c0 = init_state['c0'].expand(state_shape)

        packed_pinputs, r_index = pack_for_rnn_seq(seqs, lengths, batch_first)
        if self.args.cell_type == 'gru':
            output, hn = rnn(packed_pinputs, h0)
        else:
            output, (hn, cn) = rnn(packed_pinputs, (h0, c0))
        output = unpack_from_rnn_seq(output, r_index, batch_first)

        if not output_last_states:
            return output
        else:
            return output, (hn, cn) 
Developer: ramakanth-pasunuru, Project: video_captioning_rl, Lines: 23, Source: esim.py
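
pack_for_rnn_seq and unpack_from_rnn_seq are project-specific helpers; a rough equivalent of the pack-run-unpack pattern using PyTorch's built-in utilities (shapes assumed) looks like this.

import torch
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

rnn = nn.LSTM(input_size=8, hidden_size=16, batch_first=True)
seqs = torch.randn(3, 5, 8)           # padded batch: (batch, max_len, input_size)
lengths = torch.tensor([5, 3, 2])     # true lengths, sorted in descending order

packed = pack_padded_sequence(seqs, lengths, batch_first=True)
packed_out, (hn, cn) = rnn(packed)
out, out_lengths = pad_packed_sequence(packed_out, batch_first=True)   # out: (3, 5, 16)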

Example 5: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import RNN [as alias]
def __init__(self, options, inp_dim):
        super(RNN_cudnn, self).__init__()

        # strtobool is imported from distutils.util in the original module.
        self.input_dim = inp_dim
        self.hidden_size = int(options['hidden_size'])
        self.num_layers = int(options['num_layers'])
        self.nonlinearity = options['nonlinearity']
        self.bias = bool(strtobool(options['bias']))
        self.batch_first = bool(strtobool(options['batch_first']))
        self.dropout = float(options['dropout'])
        self.bidirectional = bool(strtobool(options['bidirectional']))

        self.rnn = nn.ModuleList([nn.RNN(self.input_dim, self.hidden_size, self.num_layers,
                                         nonlinearity=self.nonlinearity, bias=self.bias,
                                         dropout=self.dropout, bidirectional=self.bidirectional)])

        # The output dimension doubles when the RNN is bidirectional.
        self.out_dim = self.hidden_size + self.bidirectional * self.hidden_size 
Developer: santi-pdp, Project: pase, Lines: 18, Source: neural_networks.py

Example 6: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import RNN [as alias]
def __init__(self, unit, input_dim, increase_rate, droprate, layer_drop = 0):
        super(BasicUnit, self).__init__()

        rnnunit_map = {'rnn': nn.RNN, 'lstm': nn.LSTM, 'gru': nn.GRU}

        self.unit = unit

        self.layer = rnnunit_map[unit](input_dim, increase_rate, 1)

        if 'lstm' == self.unit:
            utils.init_lstm(self.layer)

        self.layer_drop = layer_drop

        self.droprate = droprate

        self.input_dim = input_dim
        self.increase_rate = increase_rate
        self.output_dim = input_dim + increase_rate

        self.init_hidden() 
Developer: LiyuanLucasLiu, Project: RAdam, Lines: 23, Source: ldnet.py
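
The dictionary-dispatch pattern above recurs in several of these examples; a brief sketch (sizes assumed) of what rnnunit_map[unit](input_dim, increase_rate, 1) builds.

import torch
from torch import nn

rnnunit_map = {'rnn': nn.RNN, 'lstm': nn.LSTM, 'gru': nn.GRU}
layer = rnnunit_map['gru'](100, 50, 1)   # input_dim=100, increase_rate=50 (assumed values)

x = torch.randn(12, 2, 100)              # (seq_len, batch, input_dim)
out, h = layer(x)                        # out: (12, 2, 50)
# output_dim = input_dim + increase_rate suggests the block later concatenates x and out
# along the feature dimension (DenseNet-style growth); that step is not shown in __init__.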

Example 7: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import RNN [as alias]
def __init__(self, unit, input_dim, increase_rate, droprate):
        super(BasicUnit, self).__init__()

        rnnunit_map = {'rnn': nn.RNN, 'lstm': nn.LSTM, 'gru': nn.GRU}

        self.unit = unit

        self.layer = rnnunit_map[unit](input_dim, increase_rate, 1)

        if 'lstm' == self.unit:
            utils.init_lstm(self.layer)

        self.droprate = droprate

        self.input_dim = input_dim
        self.increase_rate = increase_rate
        self.output_dim = input_dim + increase_rate

        self.init_hidden() 
Developer: LiyuanLucasLiu, Project: RAdam, Lines: 21, Source: densenet.py

Example 8: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import RNN [as alias]
def __init__(self, unit, unit_number, emb_dim, hid_dim, droprate):
        super(BasicUnit, self).__init__()

        rnnunit_map = {'rnn': nn.RNN, 'lstm': nn.LSTM, 'gru': nn.GRU, 'bnlstm': BNLSTM}

        self.batch_norm = (unit == 'bnlstm')

        self.unit_number = unit_number
        # self.unit_weight = nn.Parameter(torch.FloatTensor([1] * unit_number))

        self.unit_list = nn.ModuleList()
        self.unit_list.append(rnnunit_map[unit](emb_dim, hid_dim, 1))
        if unit_number > 1:
            self.unit_list.extend([rnnunit_map[unit](hid_dim, hid_dim, 1) for ind in range(unit_number - 1)])

        self.droprate = droprate

        self.output_dim = emb_dim + hid_dim * unit_number

        self.init_hidden() 
Developer: LiyuanLucasLiu, Project: RAdam, Lines: 22, Source: ddnet.py

Example 9: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import RNN [as alias]
def __init__(self, c_num, c_dim, c_hidden, c_layer, w_num, w_dim, w_hidden, w_layer, y_num, droprate, unit='lstm'):
        super(Vanilla_SeqLabel, self).__init__()

        rnnunit_map = {'rnn': nn.RNN, 'lstm': nn.LSTM, 'gru': nn.GRU}

        self.char_embed = nn.Embedding(c_num, c_dim)
        self.word_embed = nn.Embedding(w_num, w_dim)

        self.char_seq = nn.Linear(c_hidden * 2, w_dim)

        self.c_hidden = c_hidden
        self.unit_type = unit

        tmp_rnn_dropout = droprate if c_layer > 1 else 0
        self.char_fw = rnnunit_map[unit](c_dim, c_hidden, c_layer, dropout = tmp_rnn_dropout)
        self.char_bw = rnnunit_map[unit](c_dim, c_hidden, c_layer, dropout = tmp_rnn_dropout)

        tmp_rnn_dropout = droprate if w_layer > 1 else 0
        self.word_rnn = rnnunit_map[unit](w_dim * 2, w_hidden // 2, w_layer, dropout = tmp_rnn_dropout, bidirectional = True)

        self.y_num = y_num
        self.crf = CRF(w_hidden, y_num)

        self.drop = nn.Dropout(p = droprate) 
Developer: LiyuanLucasLiu, Project: Vanilla_NER, Lines: 26, Source: seqlabel.py

Example 10: get_state_shape

# Required import: from torch import nn [as alias]
# Or: from torch.nn import RNN [as alias]
def get_state_shape(rnn: nn.RNN, batch_size, bidirectional=False):
    """
    Return the state shape of a given RNN. This is helpful when you want to create an initial state for the RNN.

    Example:
    c0 = h0 = Variable(src_seq_p.data.new(*get_state_shape([your rnn], 3, bidirectional)).zero_())
    
    :param rnn: nn.LSTM, nn.GRU or subclass of nn.RNN
    :param batch_size:  
    :param bidirectional:  
    :return: 
    """
    if bidirectional:
        return rnn.num_layers * 2, batch_size, rnn.hidden_size
    else:
        return rnn.num_layers, batch_size, rnn.hidden_size 
Developer: easonnie, Project: combine-FEVER-NSMN, Lines: 18, Source: torch_util.py
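
A short sketch (batch size and dimensions assumed) of how a shape returned by get_state_shape is typically used: build zero-filled h0/c0 tensors of shape (num_layers * num_directions, batch, hidden_size) and pass them as the initial state.

import torch
from torch import nn

lstm = nn.LSTM(input_size=3, hidden_size=7, num_layers=2, bidirectional=True, batch_first=True)
batch_size = 4
num_directions = 2 if lstm.bidirectional else 1
state_shape = (lstm.num_layers * num_directions, batch_size, lstm.hidden_size)

h0 = torch.zeros(*state_shape)
c0 = torch.zeros(*state_shape)
x = torch.randn(batch_size, 5, 3)     # (batch, seq_len, input_size)
out, (hn, cn) = lstm(x, (h0, c0))     # out: (4, 5, 14) -- two directions * hidden_size 7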

Example 11: auto_rnn

# Required import: from torch import nn [as alias]
# Or: from torch.nn import RNN [as alias]
def auto_rnn(rnn: nn.RNN, seqs, lengths, batch_first=True, init_state=None, output_last_states=False):
    batch_size = seqs.size(0) if batch_first else seqs.size(1)
    state_shape = get_state_shape(rnn, batch_size, rnn.bidirectional)

    # if init_state is None:
    #     h0 = c0 = Variable(seqs.data.new(*state_shape).zero_())
    # else:
    #     h0 = init_state[0] # rnn.num_layers, batch_size, rnn.hidden_size
    #     c0 = init_state[1]

    packed_pinputs, r_index, init_state = pack_for_rnn_seq(seqs, lengths, batch_first, init_state)

    if len(init_state) == 0:
        h0 = c0 = Variable(seqs.data.new(*state_shape).zero_())
        init_state = (h0, c0)

    output, last_state = rnn(packed_pinputs, init_state)
    output = unpack_from_rnn_seq(output, r_index, batch_first)

    if not output_last_states:
        return output
    else:
        last_state = reverse_indice_for_state(last_state, r_index)
        return output, last_state 
Developer: easonnie, Project: combine-FEVER-NSMN, Lines: 26, Source: torch_util.py

Example 12: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import RNN [as alias]
def __init__(self, parameters):
        super(TrajPreSimple, self).__init__()
        self.loc_size = parameters.loc_size
        self.loc_emb_size = parameters.loc_emb_size
        self.tim_size = parameters.tim_size
        self.tim_emb_size = parameters.tim_emb_size
        self.hidden_size = parameters.hidden_size
        self.use_cuda = parameters.use_cuda
        self.rnn_type = parameters.rnn_type

        self.emb_loc = nn.Embedding(self.loc_size, self.loc_emb_size)
        self.emb_tim = nn.Embedding(self.tim_size, self.tim_emb_size)

        input_size = self.loc_emb_size + self.tim_emb_size

        if self.rnn_type == 'GRU':
            self.rnn = nn.GRU(input_size, self.hidden_size, 1)
        elif self.rnn_type == 'LSTM':
            self.rnn = nn.LSTM(input_size, self.hidden_size, 1)
        elif self.rnn_type == 'RNN':
            self.rnn = nn.RNN(input_size, self.hidden_size, 1)
        self.init_weights()

        self.fc = nn.Linear(self.hidden_size, self.loc_size)
        self.dropout = nn.Dropout(p=parameters.dropout_p) 
Developer: vonfeng, Project: DeepMove, Lines: 27, Source: model.py
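
A hypothetical forward-pass sketch (all sizes invented, not taken from DeepMove) of how the pieces built above fit together: location and time indices are embedded, concatenated along the feature dimension, and fed through the RNN and the final linear layer.

import torch
from torch import nn

emb_loc = nn.Embedding(100, 32)       # loc_size=100, loc_emb_size=32 (assumed)
emb_tim = nn.Embedding(48, 8)         # tim_size=48, tim_emb_size=8 (assumed)
rnn = nn.RNN(32 + 8, 64, 1)           # input_size = loc_emb_size + tim_emb_size
fc = nn.Linear(64, 100)               # hidden_size -> loc_size

loc = torch.randint(0, 100, (20, 1))  # (seq_len, batch) location indices
tim = torch.randint(0, 48, (20, 1))   # (seq_len, batch) time-slot indices
x = torch.cat([emb_loc(loc), emb_tim(tim)], dim=2)   # (20, 1, 40)
out, hn = rnn(x)                      # out: (20, 1, 64)
scores = fc(out)                      # (20, 1, 100) logits over locations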

Example 13: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import RNN [as alias]
def __init__(self, n_input, n_hidden, n_layers, dropout=0, cell_type='GRU', batch_first=False):
        super(DRNN, self).__init__()

        self.dilations = [2 ** i for i in range(n_layers)]
        self.cell_type = cell_type
        self.batch_first = batch_first

        layers = []
        if self.cell_type == "GRU":
            cell = nn.GRU
        elif self.cell_type == "RNN":
            cell = nn.RNN
        elif self.cell_type == "LSTM":
            cell = nn.LSTM
        else:
            raise NotImplementedError

        for i in range(n_layers):
            if i == 0:
                c = cell(n_input, n_hidden, dropout=dropout)
            else:
                c = cell(n_hidden, n_hidden, dropout=dropout)
            layers.append(c)
        self.cells = nn.Sequential(*layers) 
Developer: zalandoresearch, Project: pytorch-dilated-rnn, Lines: 26, Source: drnn.py

Example 14: setUp

# Required import: from torch import nn [as alias]
# Or: from torch.nn import RNN [as alias]
def setUp(self):
        super(TestEncoderBase, self).setUp()
        self.lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
        self.rnn = RNN(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
        self.encoder_base = _EncoderBase(stateful=True)

        tensor = torch.rand([5, 7, 3])
        tensor[1, 6:, :] = 0
        tensor[3, 2:, :] = 0
        self.tensor = tensor
        mask = torch.ones(5, 7)
        mask[1, 6:] = 0
        mask[2, :] = 0  # <= completely masked
        mask[3, 2:] = 0
        mask[4, :] = 0  # <= completely masked
        self.mask = mask

        self.batch_size = 5
        self.num_valid = 3
        sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
        _, _, restoration_indices, sorting_indices = sort_batch_by_length(tensor, sequence_lengths)
        self.sorting_indices = sorting_indices
        self.restoration_indices = restoration_indices 
Developer: plasticityai, Project: magnitude, Lines: 25, Source: encoder_base_test.py
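
For reference, a plain-PyTorch sketch of the sequence lengths that get_lengths_from_binary_sequence_mask derives from the mask built above: the number of ones in each row.

import torch

mask = torch.ones(5, 7)
mask[1, 6:] = 0
mask[2, :] = 0   # completely masked
mask[3, 2:] = 0
mask[4, :] = 0   # completely masked

lengths = mask.long().sum(-1)
print(lengths)   # tensor([7, 6, 0, 2, 0])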

Example 15: test_non_contiguous_input_states_handled

# Required import: from torch import nn [as alias]
# Or: from torch.nn import RNN [as alias]
def test_non_contiguous_input_states_handled(self):
        # Check that the encoder is robust to non-contiguous input states.

        # A transposition will make the tensors non-contiguous, start them off at the wrong shape
        # and transpose them into the right shape.
        encoder_base = _EncoderBase(stateful=False)
        initial_states = (torch.randn(5, 6, 7).permute(1, 0, 2),
                          torch.randn(5, 6, 7).permute(1, 0, 2))
        assert not initial_states[0].is_contiguous() and not initial_states[1].is_contiguous()
        assert initial_states[0].size() == torch.Size([6, 5, 7])
        assert initial_states[1].size() == torch.Size([6, 5, 7])

        # We'll pass them through an LSTM encoder and a vanilla RNN encoder to make sure it works
        # whether the initial states are a tuple of tensors or just a single tensor.
        encoder_base.sort_and_run_forward(self.lstm, self.tensor, self.mask, initial_states)
        encoder_base.sort_and_run_forward(self.rnn, self.tensor, self.mask, initial_states[0]) 
Developer: plasticityai, Project: magnitude, Lines: 18, Source: encoder_base_test.py
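
A quick standalone sketch (not part of the magnitude test suite) of the contiguity behaviour this test exercises: permute only rearranges strides, so the result is non-contiguous until .contiguous() makes a copy.

import torch

t = torch.randn(5, 6, 7).permute(1, 0, 2)   # swap the first two dimensions
print(t.is_contiguous())                     # False: permute changes strides, not memory layout
print(t.size())                              # torch.Size([6, 5, 7])

t = t.contiguous()                           # copy into a contiguous memory layout
print(t.is_contiguous())                     # True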


Note: The torch.nn.RNN attribute examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.