

Python dynet.LSTMBuilder Method Code Examples

This article collects typical usage examples of Python's dynet.LSTMBuilder method. If you are wondering what dynet.LSTMBuilder does or how to call it, the curated examples below should help; they also serve as a starting point for exploring the rest of the dynet API.


Four code examples of the dynet.LSTMBuilder method are shown below, ordered by popularity.
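All four examples follow the same pattern: construct a dy.LSTMBuilder against a parameter collection, then unroll it inside a computation graph. As a quick orientation, here is a minimal standalone sketch of that API (the dimensions are illustrative, not taken from the examples):

import dynet as dy

model = dy.ParameterCollection()            # dy.Model() in older DyNet code
lstm = dy.LSTMBuilder(1, 10, 20, model)     # layers, input dim, hidden dim, params

dy.renew_cg()                               # start a fresh computation graph
state = lstm.initial_state()
for _ in range(3):                          # unroll over a 3-step dummy sequence
    state = state.add_input(dy.inputVector([0.0] * 10))
h = state.output()                          # final hidden state, a 20-dim expression
print(h.dim())                              # ((20,), 1)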

Example 1: initParams

# Required import: import dynet as dy
# Or: from dynet import LSTMBuilder
def initParams(self,model,Cemb,options):
        # initialize the model parameters  
        params = dict()
        params['embed'] = model.add_lookup_parameters(Cemb.shape)
        for row_num,vec in enumerate(Cemb):
            params['embed'].init_row(row_num, vec)
        params['lstm'] = dy.LSTMBuilder(1,options['word_dims'],options['nhiddens'],model)
        
        params['reset_gate_W'] = []
        params['reset_gate_b'] = []
        params['com_W'] = []
        params['com_b'] = []

        params['word_score_U'] = model.add_parameters(options['word_dims'])
        params['predict_W'] = model.add_parameters((options['word_dims'],options['nhiddens']))
        params['predict_b'] = model.add_parameters(options['word_dims'])
        for wlen in range(1,options['max_word_len']+1):  # one parameter set per candidate word length
            params['reset_gate_W'].append(model.add_parameters((wlen*options['char_dims'],wlen*options['char_dims'])))
            params['reset_gate_b'].append(model.add_parameters(wlen*options['char_dims']))
            params['com_W'].append(model.add_parameters((options['word_dims'],wlen*options['char_dims'])))
            params['com_b'].append(model.add_parameters(options['word_dims']))
        params['<BoS>'] = model.add_parameters(options['word_dims'])
        return params 
Author: jcyk, Project: greedyCWS, Lines of code: 25, Source file: model.py
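The interesting part of this example is the per-length parameter buckets: for each candidate word length wlen, a composition layer maps wlen stacked character embeddings down to a single word vector. Below is a standalone sketch of the wlen = 2 bucket; the sizes are illustrative, and the tanh is an assumed nonlinearity (the actual combiner is not shown in this snippet):

import dynet as dy

model = dy.ParameterCollection()
char_dims, word_dims, wlen = 50, 100, 2
com_W = model.add_parameters((word_dims, wlen * char_dims))
com_b = model.add_parameters(word_dims)

dy.renew_cg()
chars = dy.inputVector([0.0] * (wlen * char_dims))  # two stacked character embeddings
word_vec = dy.tanh(dy.parameter(com_W) * chars + dy.parameter(com_b))  # assumed combiner
print(word_vec.dim())                               # ((100,), 1)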

Example 2: __init__

# Required import: import dynet as dy
# Or: from dynet import LSTMBuilder
def __init__(self, c2i, num_lstm_layers=DEFAULT_LSTM_LAYERS,\
                char_dim=DEFAULT_CHAR_DIM, hidden_dim=DEFAULT_HIDDEN_DIM,\
                word_embedding_dim=DEFAULT_WORD_DIM, file=None):
        self.c2i = c2i
        self.model = dy.Model()

        # Char LSTM Parameters
        self.char_lookup = self.model.add_lookup_parameters((len(c2i), char_dim), name="ce")
        self.char_fwd_lstm = dy.LSTMBuilder(num_lstm_layers, char_dim, hidden_dim, self.model)
        self.char_bwd_lstm = dy.LSTMBuilder(num_lstm_layers, char_dim, hidden_dim, self.model)

        # Post-LSTM Parameters
        self.lstm_to_rep_params = self.model.add_parameters((word_embedding_dim, hidden_dim * 2), name="H")
        self.lstm_to_rep_bias = self.model.add_parameters(word_embedding_dim, name="Hb")
        self.mlp_out = self.model.add_parameters((word_embedding_dim, word_embedding_dim), name="O")
        self.mlp_out_bias = self.model.add_parameters(word_embedding_dim, name="Ob")

        if file is not None:
            # read from saved file; see old_load() for dynet 1.0 format
            ### NOTE - dynet 2.0 only supports explicit loading into params, so
            ### dimensionalities all need to be specified in init
            self.model.populate(file) 
Author: yuvalpinter, Project: Mimick, Lines of code: 24, Source file: model.py
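To make the data flow concrete, here is a standalone sketch of the forward/backward character pass this constructor sets up, reduced to bare parameters (names and sizes are illustrative; the real class also applies the two MLP layers afterwards):

import dynet as dy

model = dy.ParameterCollection()
char_dim, hidden_dim = 20, 50
c2i = {c: i for i, c in enumerate("abcdefghijklmnopqrstuvwxyz")}
char_lookup = model.add_lookup_parameters((len(c2i), char_dim))
fwd_lstm = dy.LSTMBuilder(1, char_dim, hidden_dim, model)
bwd_lstm = dy.LSTMBuilder(1, char_dim, hidden_dim, model)

dy.renew_cg()
embs = [char_lookup[c2i[c]] for c in "hello"]
fwd = fwd_lstm.initial_state().transduce(embs)[-1]                  # last forward state
bwd = bwd_lstm.initial_state().transduce(list(reversed(embs)))[-1]  # last backward state
rep = dy.concatenate([fwd, bwd])   # 2 * hidden_dim, matching the (word_dim, hidden_dim * 2) projection
print(rep.dim())                   # ((100,), 1)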

Example 3: init_params

# Required import: import dynet as dy
# Or: from dynet import LSTMBuilder
def init_params(self):
        super().init_params()

        self.entity_encoder = self.pc.add_parameters((self.embedding_size, self.embedding_size * 3))  # e N e
        self.relation_encoder = self.pc.add_parameters((self.embedding_size, self.embedding_size * 3))  # N e N
        self.no_ent = self.pc.add_parameters(self.embedding_size)

        self.vocab.create_lookup(self.pc, self.embedding_size)
        self.counters.create_lookup(self.pc, self.counter_size)
        self.decoder = dy.LSTMBuilder(3, self.embedding_size + self.counter_size * 4, self.embedding_size, self.pc) 
Author: AmitMY, Project: chimera, Lines of code: 12, Source file: neural_planner.py
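Note the decoder's input size: each step consumes a token embedding plus four counter embeddings. Here is a sketch of stepping it once; the embedding_size and counter_size values are illustrative, since the real ones come from the surrounding class:

import dynet as dy

pc = dy.ParameterCollection()
embedding_size, counter_size = 128, 8
decoder = dy.LSTMBuilder(3, embedding_size + counter_size * 4, embedding_size, pc)

dy.renew_cg()
s = decoder.initial_state()
step_in = dy.inputVector([0.0] * (embedding_size + counter_size * 4))
s = s.add_input(step_in)
print(s.output().dim())   # ((128,), 1)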

Example 4: __init__

# Required import: import dynet as dy
# Or: from dynet import LSTMBuilder
# Also requires: from collections import defaultdict
def __init__(self, tagset_sizes, num_lstm_layers, hidden_dim, word_embeddings, no_we_update, use_char_rnn, charset_size, char_embedding_dim, att_props=None, vocab_size=None, word_embedding_dim=None):
        '''
        :param tagset_sizes: dictionary of attribute_name:number_of_possible_tags
        :param num_lstm_layers: number of desired LSTM layers
        :param hidden_dim: size of hidden dimension (same for all LSTM layers, including character-level)
        :param word_embeddings: pre-trained list of embeddings, assumes order by word ID (optional)
        :param no_we_update: if toggled, don't update embeddings
        :param use_char_rnn: use "char->tag" option, i.e. concatenate character-level LSTM outputs to word representations (and train underlying LSTM). Only 1-layer is supported.
        :param charset_size: number of characters expected in dataset (needed for character embedding initialization)
        :param char_embedding_dim: desired character embedding dimension
        :param att_props: proportion of loss to assign each attribute for back-propagation weighting (optional)
        :param vocab_size: number of words in model (ignored if pre-trained embeddings are given)
        :param word_embedding_dim: desired word embedding dimension (ignored if pre-trained embeddings are given)
        '''
        self.model = dy.Model()
        self.tagset_sizes = tagset_sizes
        self.attributes = list(tagset_sizes.keys())
        self.we_update = not no_we_update
        if att_props is not None:
            self.att_props = defaultdict(float, {att:(1.0-p) for att,p in att_props.items()})
        else:
            self.att_props = None

        if word_embeddings is not None: # Use pretrained embeddings
            vocab_size = word_embeddings.shape[0]
            word_embedding_dim = word_embeddings.shape[1]

        self.words_lookup = self.model.add_lookup_parameters((vocab_size, word_embedding_dim), name="we")

        if word_embeddings is not None:
            self.words_lookup.init_from_array(word_embeddings)

        # Char LSTM Parameters
        self.use_char_rnn = use_char_rnn
        self.char_hidden_dim = hidden_dim
        if use_char_rnn:
            self.char_lookup = self.model.add_lookup_parameters((charset_size, char_embedding_dim), name="ce")
            self.char_bi_lstm = dy.BiRNNBuilder(1, char_embedding_dim, hidden_dim, self.model, dy.LSTMBuilder)

        # Word LSTM parameters
        if use_char_rnn:
            input_dim = word_embedding_dim + hidden_dim
        else:
            input_dim = word_embedding_dim
        self.word_bi_lstm = dy.BiRNNBuilder(num_lstm_layers, input_dim, hidden_dim, self.model, dy.LSTMBuilder)

        # Matrix that maps from Bi-LSTM output to num tags
        self.lstm_to_tags_params = {}
        self.lstm_to_tags_bias = {}
        self.mlp_out = {}
        self.mlp_out_bias = {}
        for att, set_size in list(tagset_sizes.items()):
            self.lstm_to_tags_params[att] = self.model.add_parameters((set_size, hidden_dim), name=att+"H")
            self.lstm_to_tags_bias[att] = self.model.add_parameters(set_size, name=att+"Hb")
            self.mlp_out[att] = self.model.add_parameters((set_size, set_size), name=att+"O")
            self.mlp_out_bias[att] = self.model.add_parameters(set_size, name=att+"Ob") 
Author: yuvalpinter, Project: Mimick, Lines of code: 58, Source file: model.py
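Here dy.LSTMBuilder appears only as the factory handed to dy.BiRNNBuilder, which builds matched forward/backward LSTMs and splits hidden_dim between them, so each transduced output is exactly hidden_dim wide. A standalone sketch of that word-level path (sizes are illustrative):

import dynet as dy

model = dy.ParameterCollection()
input_dim, hidden_dim = 64, 128          # hidden_dim must be even for BiRNNBuilder
bi_lstm = dy.BiRNNBuilder(2, input_dim, hidden_dim, model, dy.LSTMBuilder)

dy.renew_cg()
sentence = [dy.inputVector([0.0] * input_dim) for _ in range(5)]
outputs = bi_lstm.transduce(sentence)    # one hidden_dim vector per word
print(len(outputs), outputs[0].dim())    # 5 ((128,), 1)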


Note: the dynet.LSTMBuilder examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's License before distributing or reusing the code. Do not republish without permission.