

Python dynet.LSTMBuilder Method Code Examples

This article collects typical usage examples of the Python method dynet.LSTMBuilder. If you have been wondering what dynet.LSTMBuilder does and how to use it, the curated code examples below should help; you can also explore further usage examples from the dynet module.


The following presents four code examples of the dynet.LSTMBuilder method, ordered by popularity.
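
Before turning to the project code, a minimal sketch of the dy.LSTMBuilder API may help orient the reader. The layer count and dimensions below are illustrative values, not taken from any of the projects cited here.

# A minimal, self-contained sketch (illustrative dimensions)
import dynet as dy

model = dy.Model()
lstm = dy.LSTMBuilder(1, 50, 100, model)   # 1 layer, 50-dim input, 100-dim hidden

dy.renew_cg()                              # start a fresh computation graph
state = lstm.initial_state()               # empty state for a new sequence
for _ in range(3):                         # feed three dummy 50-dim inputs
    state = state.add_input(dy.inputVector([0.0] * 50))
print(state.output().dim())                # hidden output shape: ((100,), 1)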

Example 1: initParams

# Required import: import dynet as dy
# Alternatively: from dynet import LSTMBuilder
def initParams(self,model,Cemb,options):
        # initialize the model parameters  
        params = dict()
        params['embed'] = model.add_lookup_parameters(Cemb.shape)
        for row_num,vec in enumerate(Cemb):
            params['embed'].init_row(row_num, vec)
        params['lstm'] = dy.LSTMBuilder(1,options['word_dims'],options['nhiddens'],model)
        
        params['reset_gate_W'] = []
        params['reset_gate_b'] = []
        params['com_W'] = []
        params['com_b'] = []

        params['word_score_U'] = model.add_parameters(options['word_dims'])
        params['predict_W'] = model.add_parameters((options['word_dims'],options['nhiddens']))
        params['predict_b'] = model.add_parameters(options['word_dims'])
        for wlen in range(1, options['max_word_len']+1):  # Python 3 range (original used Python 2 xrange)
            params['reset_gate_W'].append(model.add_parameters((wlen*options['char_dims'],wlen*options['char_dims'])))
            params['reset_gate_b'].append(model.add_parameters(wlen*options['char_dims']))
            params['com_W'].append(model.add_parameters((options['word_dims'],wlen*options['char_dims'])))
            params['com_b'].append(model.add_parameters(options['word_dims']))
        params['<BoS>'] = model.add_parameters(options['word_dims'])
        return params 
Developer: jcyk | Project: greedyCWS | Lines: 25 | Source: model.py
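
The per-word-length parameter lists above suggest a gated character-to-word composition. The sketch below is one plausible reading of those shapes, not the actual greedyCWS scoring code; char_dims and word_dims are assumed values.

# Hypothetical composition for a word of length 3 (char_dims = word_dims = 50 assumed)
import dynet as dy

model = dy.Model()
char_dims, word_dims, wlen = 50, 50, 3
W_r = model.add_parameters((wlen * char_dims, wlen * char_dims))  # like reset_gate_W[wlen-1]
b_r = model.add_parameters(wlen * char_dims)
W_c = model.add_parameters((word_dims, wlen * char_dims))         # like com_W[wlen-1]
b_c = model.add_parameters(word_dims)

dy.renew_cg()
chars = dy.concatenate([dy.inputVector([0.1] * char_dims) for _ in range(wlen)])
reset = dy.logistic(dy.parameter(W_r) * chars + dy.parameter(b_r))  # gate the characters
word = dy.tanh(dy.parameter(W_c) * dy.cmult(reset, chars) + dy.parameter(b_c))
print(word.dim())   # ((50,), 1): a word vector ready to feed params['lstm']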

Example 2: __init__

# Required import: import dynet as dy
# Alternatively: from dynet import LSTMBuilder
def __init__(self, c2i, num_lstm_layers=DEFAULT_LSTM_LAYERS,\
                char_dim=DEFAULT_CHAR_DIM, hidden_dim=DEFAULT_HIDDEN_DIM,\
                word_embedding_dim=DEFAULT_WORD_DIM, file=None):
        self.c2i = c2i
        self.model = dy.Model()

        # Char LSTM Parameters
        self.char_lookup = self.model.add_lookup_parameters((len(c2i), char_dim), name="ce")
        self.char_fwd_lstm = dy.LSTMBuilder(num_lstm_layers, char_dim, hidden_dim, self.model)
        self.char_bwd_lstm = dy.LSTMBuilder(num_lstm_layers, char_dim, hidden_dim, self.model)

        # Post-LSTM Parameters
        self.lstm_to_rep_params = self.model.add_parameters((word_embedding_dim, hidden_dim * 2), name="H")
        self.lstm_to_rep_bias = self.model.add_parameters(word_embedding_dim, name="Hb")
        self.mlp_out = self.model.add_parameters((word_embedding_dim, word_embedding_dim), name="O")
        self.mlp_out_bias = self.model.add_parameters(word_embedding_dim, name="Ob")

        if file is not None:
            # read from saved file; see old_load() for dynet 1.0 format
            ### NOTE - dynet 2.0 only supports explicit loading into params, so
            ### dimensionalities all need to be specified in init
            self.model.populate(file) 
Developer: yuvalpinter | Project: Mimick | Lines: 24 | Source: model.py
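
A hedged sketch of the forward pass these parameters imply: run a word's characters through both LSTMs, concatenate the two final outputs, and project down to a word embedding. The dimensions are assumed, and the real Mimick code may order its operations differently.

# Illustrative forward pass (assumed dimensions)
import dynet as dy

model = dy.Model()
char_dim, hidden_dim, word_dim, nchars = 20, 50, 64, 30
char_lookup = model.add_lookup_parameters((nchars, char_dim))
fwd = dy.LSTMBuilder(1, char_dim, hidden_dim, model)
bwd = dy.LSTMBuilder(1, char_dim, hidden_dim, model)
H = model.add_parameters((word_dim, hidden_dim * 2))    # like lstm_to_rep_params
Hb = model.add_parameters(word_dim)

dy.renew_cg()
char_ids = [0, 1, 2]                                    # a three-character word
embs = [char_lookup[c] for c in char_ids]
f_last = fwd.initial_state().transduce(embs)[-1]        # final forward output
b_last = bwd.initial_state().transduce(embs[::-1])[-1]  # final backward output
rep = dy.parameter(H) * dy.concatenate([f_last, b_last]) + dy.parameter(Hb)
print(rep.dim())                                        # ((64,), 1)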

Example 3: init_params

# Required import: import dynet as dy
# Alternatively: from dynet import LSTMBuilder
def init_params(self):
        super().init_params()

        self.entity_encoder = self.pc.add_parameters((self.embedding_size, self.embedding_size * 3))  # e N e
        self.relation_encoder = self.pc.add_parameters((self.embedding_size, self.embedding_size * 3))  # N e N
        self.no_ent = self.pc.add_parameters(self.embedding_size)

        self.vocab.create_lookup(self.pc, self.embedding_size)
        self.counters.create_lookup(self.pc, self.counter_size)
        self.decoder = dy.LSTMBuilder(3, self.embedding_size + self.counter_size * 4, self.embedding_size, self.pc) 
Developer: AmitMY | Project: chimera | Lines: 12 | Source: neural_planner.py
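
The decoder's input size, embedding_size + counter_size * 4, implies that each step consumes a token embedding concatenated with four counter embeddings. Below is a shape check under assumed sizes; the real inputs in chimera are looked-up vectors, not zeros.

# Shape check for the decoder above (assumed sizes)
import dynet as dy

pc = dy.ParameterCollection()
emb, cnt = 100, 5                          # assumed embedding_size, counter_size
decoder = dy.LSTMBuilder(3, emb + cnt * 4, emb, pc)

dy.renew_cg()
step_in = dy.concatenate([dy.inputVector([0.0] * emb)] +
                         [dy.inputVector([0.0] * cnt) for _ in range(4)])
state = decoder.initial_state().add_input(step_in)
print(state.output().dim())                # ((100,), 1)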

Example 4: __init__

# Required import: import dynet as dy
# Alternatively: from dynet import LSTMBuilder
# (This example also uses: from collections import defaultdict)
def __init__(self, tagset_sizes, num_lstm_layers, hidden_dim, word_embeddings, no_we_update, use_char_rnn, charset_size, char_embedding_dim, att_props=None, vocab_size=None, word_embedding_dim=None):
        '''
        :param tagset_sizes: dictionary of attribute_name:number_of_possible_tags
        :param num_lstm_layers: number of desired LSTM layers
        :param hidden_dim: size of hidden dimension (same for all LSTM layers, including character-level)
        :param word_embeddings: pre-trained list of embeddings, assumes order by word ID (optional)
        :param no_we_update: if toggled, don't update embeddings
        :param use_char_rnn: use "char->tag" option, i.e. concatenate character-level LSTM outputs to word representations (and train underlying LSTM). Only 1-layer is supported.
        :param charset_size: number of characters expected in dataset (needed for character embedding initialization)
        :param char_embedding_dim: desired character embedding dimension
        :param att_props: proportion of loss to assign each attribute for back-propagation weighting (optional)
        :param vocab_size: number of words in model (ignored if pre-trained embeddings are given)
        :param word_embedding_dim: desired word embedding dimension (ignored if pre-trained embeddings are given)
        '''
        self.model = dy.Model()
        self.tagset_sizes = tagset_sizes
        self.attributes = list(tagset_sizes.keys())
        self.we_update = not no_we_update
        if att_props is not None:
            self.att_props = defaultdict(float, {att:(1.0-p) for att,p in att_props.items()})
        else:
            self.att_props = None

        if word_embeddings is not None: # Use pretrained embeddings
            vocab_size = word_embeddings.shape[0]
            word_embedding_dim = word_embeddings.shape[1]

        self.words_lookup = self.model.add_lookup_parameters((vocab_size, word_embedding_dim), name="we")

        if word_embeddings is not None:
            self.words_lookup.init_from_array(word_embeddings)

        # Char LSTM Parameters
        self.use_char_rnn = use_char_rnn
        self.char_hidden_dim = hidden_dim
        if use_char_rnn:
            self.char_lookup = self.model.add_lookup_parameters((charset_size, char_embedding_dim), name="ce")
            self.char_bi_lstm = dy.BiRNNBuilder(1, char_embedding_dim, hidden_dim, self.model, dy.LSTMBuilder)

        # Word LSTM parameters
        if use_char_rnn:
            input_dim = word_embedding_dim + hidden_dim
        else:
            input_dim = word_embedding_dim
        self.word_bi_lstm = dy.BiRNNBuilder(num_lstm_layers, input_dim, hidden_dim, self.model, dy.LSTMBuilder)

        # Matrix that maps from Bi-LSTM output to num tags
        self.lstm_to_tags_params = {}
        self.lstm_to_tags_bias = {}
        self.mlp_out = {}
        self.mlp_out_bias = {}
        for att, set_size in list(tagset_sizes.items()):
            self.lstm_to_tags_params[att] = self.model.add_parameters((set_size, hidden_dim), name=att+"H")
            self.lstm_to_tags_bias[att] = self.model.add_parameters(set_size, name=att+"Hb")
            self.mlp_out[att] = self.model.add_parameters((set_size, set_size), name=att+"O")
            self.mlp_out_bias[att] = self.model.add_parameters(set_size, name=att+"Ob") 
Developer: yuvalpinter | Project: Mimick | Lines: 58 | Source: model.py
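
A hedged sketch of the tagging forward pass these parameters set up: embed the words, transduce through the word Bi-LSTM, and score each position against a single attribute's tagset. All sizes are assumptions, and the real model adds the MLP layer and per-attribute loop omitted here.

# Illustrative tagging pass for one attribute (assumed sizes)
import dynet as dy

model = dy.Model()
vocab_size, word_dim, hidden_dim, set_size = 100, 64, 50, 17
words_lookup = model.add_lookup_parameters((vocab_size, word_dim))
word_bi_lstm = dy.BiRNNBuilder(2, word_dim, hidden_dim, model, dy.LSTMBuilder)
H = model.add_parameters((set_size, hidden_dim))   # like lstm_to_tags_params[att]
Hb = model.add_parameters(set_size)

dy.renew_cg()
word_ids = [3, 14, 15]
embs = [words_lookup[w] for w in word_ids]
outs = word_bi_lstm.transduce(embs)                # one hidden vector per word
scores = [dy.parameter(H) * o + dy.parameter(Hb) for o in outs]
print(scores[0].dim())                             # ((17,), 1): per-tag scores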


Note: The dynet.LSTMBuilder method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and any distribution or use must follow the corresponding project's license. Do not republish without permission.