

Python dynet.Model Method Code Examples

This article collects typical code examples of the dynet.Model method in Python. If you are asking yourself: what exactly does Python's dynet.Model do? How is dynet.Model used? What do real dynet.Model examples look like? Then the curated method examples below may help. You can also explore further usage examples from the dynet module that this method belongs to.


The following presents 8 code examples of the dynet.Model method, sorted by popularity by default.
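
Before the examples, here is a minimal, self-contained sketch of the typical dy.Model() life cycle in dynet 2.x (where dy.Model is an alias of dy.ParameterCollection): declare parameters, build expressions on a computation graph, train, and save/restore. The file name "model.dyn" is only a placeholder.

import dynet as dy
import numpy as np

model = dy.Model()                 # alias of dy.ParameterCollection in dynet 2.x
W = model.add_parameters((8, 4))   # matrix parameter
b = model.add_parameters(8)        # vector parameter
trainer = dy.SimpleSGDTrainer(model)

dy.renew_cg()                      # start a fresh computation graph
x = dy.inputVector(np.random.randn(4))
y = dy.parameter(W) * x + dy.parameter(b)
loss = dy.squared_norm(y)
loss.value()                       # run the forward pass
loss.backward()                    # accumulate gradients
trainer.update()                   # apply them

model.save("model.dyn")            # write all parameters to disk
model.populate("model.dyn")        # reload into an identically-shaped collection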

Example 1: test_multilinear_forward

# Required import: import dynet [as alias]
# Or: from dynet import Model [as alias]
import numpy as np
import dynet as dy
# MultilinearFactored lives in the marseille project; the module path below
# is assumed from the source file (test_dynet_utils.py)
from marseille.dynet_utils import MultilinearFactored

def test_multilinear_forward():
    model = dy.Model()

    a, b, c = np.random.RandomState(0).randn(3, 100)
    ml = MultilinearFactored(n_features=100, n_inputs=3, n_components=5,
                             model=model)
    dy_fwd = ml(dy.inputVector(a),
                dy.inputVector(b),
                dy.inputVector(c)).value()

    U = [dy.parameter(u).value() for u in ml.get_components()]
    expected = np.dot(U[0], a)
    expected *= np.dot(U[1], b)
    expected *= np.dot(U[2], c)
    expected = np.sum(expected)

    assert (expected - dy_fwd) ** 2 < 1e-4 
Developer ID: vene, Project: marseille, Lines of code: 19, Source file: test_dynet_utils.py
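
MultilinearFactored itself is defined in the marseille project and not shown here. What the test exercises is a factored multilinear score: project each input through its own component matrix, multiply the projections elementwise, and sum over components. The class below is a hypothetical re-implementation under that assumption, not the marseille code.

import dynet as dy

class MultilinearFactoredSketch:
    """Hypothetical stand-in: score(x_1, ..., x_n) = sum_k prod_i (U_i x_i)_k."""
    def __init__(self, n_features, n_inputs, n_components, model):
        self.U = [model.add_parameters((n_components, n_features))
                  for _ in range(n_inputs)]

    def get_components(self):
        return self.U

    def __call__(self, *inputs):
        # project each input into the component space, multiply elementwise
        prod = dy.parameter(self.U[0]) * inputs[0]
        for u, x in zip(self.U[1:], inputs[1:]):
            prod = dy.cmult(prod, dy.parameter(u) * x)
        return dy.sum_elems(prod)  # sum over the component dimension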

Example 2: __init__

# Required import: import dynet [as alias]
# Or: from dynet import Model [as alias]
def __init__(self, c2i, num_lstm_layers=DEFAULT_LSTM_LAYERS,\
                char_dim=DEFAULT_CHAR_DIM, hidden_dim=DEFAULT_HIDDEN_DIM,\
                word_embedding_dim=DEFAULT_WORD_DIM, file=None):
        self.c2i = c2i
        self.model = dy.Model()

        # Char LSTM Parameters
        self.char_lookup = self.model.add_lookup_parameters((len(c2i), char_dim), name="ce")
        self.char_fwd_lstm = dy.LSTMBuilder(num_lstm_layers, char_dim, hidden_dim, self.model)
        self.char_bwd_lstm = dy.LSTMBuilder(num_lstm_layers, char_dim, hidden_dim, self.model)

        # Post-LSTM Parameters
        self.lstm_to_rep_params = self.model.add_parameters((word_embedding_dim, hidden_dim * 2), name="H")
        self.lstm_to_rep_bias = self.model.add_parameters(word_embedding_dim, name="Hb")
        self.mlp_out = self.model.add_parameters((word_embedding_dim, word_embedding_dim), name="O")
        self.mlp_out_bias = self.model.add_parameters(word_embedding_dim, name="Ob")

        if file is not None:
            # read from saved file; see old_load() for dynet 1.0 format
            ### NOTE - dynet 2.0 only supports explicit loading into params, so
            ### dimensionalities all need to be specified in init
            self.model.populate(file) 
Developer ID: yuvalpinter, Project: Mimick, Lines of code: 24, Source file: model.py
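
The constructor above only declares parameters. As a usage illustration, a forward pass for such a model could look like the hypothetical method below (embed_word is not part of the Mimick source); it assumes the conventions visible in the constructor: char embeddings feed the two LSTMs, their final states are concatenated, and two affine layers with a tanh in between produce the word embedding.

import dynet as dy

def embed_word(self, word):
    # hypothetical forward pass for the model above
    dy.renew_cg()
    embs = [self.char_lookup[self.c2i[c]] for c in word]
    fwd = self.char_fwd_lstm.initial_state().transduce(embs)[-1]
    bwd = self.char_bwd_lstm.initial_state().transduce(list(reversed(embs)))[-1]
    rep = dy.concatenate([fwd, bwd])
    hidden = dy.tanh(dy.parameter(self.lstm_to_rep_params) * rep
                     + dy.parameter(self.lstm_to_rep_bias))
    return dy.parameter(self.mlp_out) * hidden + dy.parameter(self.mlp_out_bias)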

Example 3: __init__

# Required import: import dynet [as alias]
# Or: from dynet import Model [as alias]
def __init__(self, params, model=None):
        self.UPSAMPLE_PROJ = 200
        self.RNN_SIZE = 100
        self.RNN_LAYERS = 1
        self.OUTPUT_EMB_SIZE = 200
        self.params = params
        if model is None:
            self.model = dy.Model()
        else:
            self.model = model
        # self.trainer = dy.AdamTrainer(self.model, alpha=2e-3, beta_1=0.9, beta_2=0.9)
        self.trainer = dy.AdamTrainer(self.model)
        # MGCs are extracted at 12.5 ms

        upsample_count = int(12.5 * self.params.target_sample_rate / 1000)
        self.upsample_w_s = []
        self.upsample_w_t = []
        self.upsample_b_s = []
        self.upsample_b_t = []
        for _ in range(upsample_count):
            self.upsample_w_s.append(self.model.add_parameters((self.UPSAMPLE_PROJ, self.params.mgc_order)))
            self.upsample_w_t.append(self.model.add_parameters((self.UPSAMPLE_PROJ, self.params.mgc_order)))
            self.upsample_b_s.append(self.model.add_parameters((self.UPSAMPLE_PROJ)))
            self.upsample_b_t.append(self.model.add_parameters((self.UPSAMPLE_PROJ)))

        self.output_lookup = self.model.add_lookup_parameters((256, self.OUTPUT_EMB_SIZE))
        from models.utils import orthonormal_VanillaLSTMBuilder
        # self.rnn = orthonormal_VanillaLSTMBuilder(self.RNN_LAYERS, self.OUTPUT_EMB_SIZE + self.UPSAMPLE_PROJ, self.RNN_SIZE, self.model)
        self.rnn = dy.VanillaLSTMBuilder(self.RNN_LAYERS, self.OUTPUT_EMB_SIZE + self.UPSAMPLE_PROJ,
                                         self.RNN_SIZE, self.model)
        self.mlp_w = []
        self.mlp_b = []
        self.mlp_w.append(self.model.add_parameters((1024, self.RNN_SIZE)))
        self.mlp_b.append(self.model.add_parameters((1024)))

        self.softmax_w = self.model.add_parameters((256, 1024))
        self.softmax_b = self.model.add_parameters((256)) 
Developer ID: tiberiu44, Project: TTS-Cube, Lines of code: 39, Source file: vocoder_old.py
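
For concreteness: at a 16 kHz target sample rate, the 12.5 ms MGC frame shift gives upsample_count = int(12.5 * 16000 / 1000) = 200, i.e. one (w_s, w_t, b_s, b_t) projection set per audio sample within each frame.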

Example 4: __init__

# Required import: import dynet [as alias]
# Or: from dynet import Model [as alias]
def __init__(self, Cemb, character_idx_map, options):
        model = dy.Model()
        self.trainer = dy.MomentumSGDTrainer(model, options['lr'], options['momentum'], options['edecay'])  # we use Momentum SGD
        self.params = self.initParams(model,Cemb,options)
        self.options = options
        self.model = model
        self.character_idx_map = character_idx_map
        self.known_words = None 
Developer ID: jcyk, Project: greedyCWS, Lines of code: 10, Source file: model.py

Example 5: __init__

# Required import: import dynet [as alias]
# Or: from dynet import Model [as alias]
def __init__(self, rnn_model, use_char_rnn):
        self.use_char_rnn = use_char_rnn

        self.model = dy.Model()
        att_tuple = iter(self.model.load(rnn_model))  # dynet 1.x API: load() returns the saved objects in order
        self.attributes = open(rnn_model + "-atts", "r").read().split("\t")
        self.words_lookup = next(att_tuple)
        if self.use_char_rnn:
            self.char_lookup = next(att_tuple)
            self.char_bi_lstm = next(att_tuple)
        self.word_bi_lstm = next(att_tuple)
        self.lstm_to_tags_params = get_next_att_batch(self.attributes, att_tuple)
        self.lstm_to_tags_bias = get_next_att_batch(self.attributes, att_tuple)
        self.mlp_out = get_next_att_batch(self.attributes, att_tuple)
        self.mlp_out_bias = get_next_att_batch(self.attributes, att_tuple) 
Developer ID: yuvalpinter, Project: Mimick, Lines of code: 17, Source file: test_model.py
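
This loader targets the dynet 1.x API, where model.load() returned the saved objects in order. In dynet 2.x that API is gone: as the comment in Example 2 notes, every parameter must be re-declared with explicit dimensions and then filled with populate(). A hypothetical 2.x-style equivalent (names and shapes are illustrative only):

import dynet as dy

def load_tagger(path, vocab_size, word_dim, hidden_dim, num_layers=1):
    # hypothetical dynet 2.x loader: declare identically-shaped parameters,
    # then fill them from the saved file
    model = dy.Model()
    words_lookup = model.add_lookup_parameters((vocab_size, word_dim))
    word_bi_lstm = dy.BiRNNBuilder(num_layers, word_dim, hidden_dim,
                                   model, dy.LSTMBuilder)
    model.populate(path)  # parameter shapes must match what was saved
    return model, words_lookup, word_bi_lstm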

Example 6: __init__

# Required import: import dynet [as alias]
# Or: from dynet import Model [as alias]
def __init__(self, graphs, embeddings, mode=TRANSLATIONAL_EMBED_MODE, dropout=0.0, model_path=None):
        """
        :param graphs: dictionary of <relation:CSR-format graph>s, node-aligned
        :param embeddings: list of numpy array embeddings, indices aligned to nodes
        :param mode: mode of calculating association score, options: {}
        """.format(MODES_STR)
        # input validation
        graph_sizes = list(set([g.shape[0] for g in list(graphs.values())]))
        assert len(graph_sizes) == 1
        assert len(embeddings) == graph_sizes[0], '{} != {}'.format(len(embeddings), graph_sizes[0])
        
        # raw members
        self.graphs = {canonicalize_name(k):g for k,g in list(graphs.items())}
        self.mode = mode
        
        # documentation-y members
        self.relation_names = sorted(self.graphs.keys())
        if 'co_hypernym' in self.relation_names:
            self.relation_names.remove('co_hypernym')
        self.vocab_size = graph_sizes[0]
        self.R = len(self.relation_names)
        self.emb_dim = len(embeddings[0])
        self.dropout = dropout

        # model members
        self.model = dy.Model()
        # TODO consider using no_update param for embeddings
        self.embeddings = self.model.add_lookup_parameters((self.vocab_size, self.emb_dim))
        self.embeddings.init_from_array(embeddings)
        
        # init association parameter
        self.no_assoc = False # so it can be overridden in inheritors
        
        # first determine the association parameter dimensions by mode
        if self.mode == BILINEAR_MODE:              # full-rank bilinear matrix
            assoc_dim = (self.emb_dim, self.emb_dim)
        elif self.mode == DIAG_RANK1_MODE:          # diagonal bilinear matrix + rank 1 matrix
            # first row = diagonal
            # second row = 'source factor'
            # third row = 'target factor'
            assoc_dim = (3, self.emb_dim)
        elif self.mode == TRANSLATIONAL_EMBED_MODE: # additive relational vector
            assoc_dim = self.emb_dim
        elif self.mode == DISTMULT:                 # diagonal bilinear matrix
            assoc_dim = self.emb_dim
        else:
            raise ValueError('unsupported mode: {}. allowed are {}'\
                             .format(self.mode, ', '.join(MODES_STR)))
            
        # init actual parameter
        self.word_assoc_weights = {r:self.model.add_parameters(assoc_dim) for r in self.relation_names}
        if model_path is not None:
            self.model.populate(model_path + '.dyn')
        
        timeprint('finished initialization for association model.') 
Developer ID: yuvalpinter, Project: m3gm, Lines of code: 57, Source file: pretrain_assoc.py
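
As a usage illustration, scoring one (source, relation, target) triple under TRANSLATIONAL_EMBED_MODE is TransE-style: the relation vector translates the source embedding toward the target. The helper below is a hypothetical sketch (assoc_score does not appear in the source shown here):

import dynet as dy

def assoc_score(self, relation, src_idx, tgt_idx):
    # hypothetical TransE-style score: larger when e_src + v_rel is close to e_tgt
    v_rel = dy.parameter(self.word_assoc_weights[relation])
    e_src = self.embeddings[src_idx]
    e_tgt = self.embeddings[tgt_idx]
    return -dy.squared_norm(e_src + v_rel - e_tgt)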

Example 7: __init__

# Required import: import dynet [as alias]
# Or: from dynet import Model [as alias]
def __init__(self, encodings):
        self.losses = []
        self.model = dy.Model()
        self.trainer = dy.AdamTrainer(self.model, alpha=2e-3, beta_1=0.9, beta_2=0.9)
        self.encodings = encodings

        self.DECODER_SIZE = 100
        self.ENCODER_SIZE = 100
        self.CHAR_EMB_SIZE = 100
        self.HIDDEN_SIZE = 100
        self.lexicon = {}

        self.char_lookup = self.model.add_lookup_parameters((len(self.encodings.char2int), self.CHAR_EMB_SIZE))
        self.phoneme_lookup = self.model.add_lookup_parameters(
            (len(self.encodings.phoneme2int) + 1, self.CHAR_EMB_SIZE))  # +1 is for special START

        self.start_lookup = self.model.add_lookup_parameters(
            (1, self.CHAR_EMB_SIZE + self.ENCODER_SIZE * 2))  # START SYMBOL

        self.encoder_fw = []
        self.encoder_bw = []

        input_layer_size = self.CHAR_EMB_SIZE
        for ii in range(2):
            self.encoder_fw.append(dy.VanillaLSTMBuilder(1, input_layer_size, self.ENCODER_SIZE, self.model))
            self.encoder_bw.append(dy.VanillaLSTMBuilder(1, input_layer_size, self.ENCODER_SIZE, self.model))

            input_layer_size = self.ENCODER_SIZE * 2

        self.decoder = dy.VanillaLSTMBuilder(2, self.ENCODER_SIZE * 2 + self.CHAR_EMB_SIZE, self.DECODER_SIZE,
                                             self.model)

        self.att_w1 = self.model.add_parameters((100, self.ENCODER_SIZE * 2))
        self.att_w2 = self.model.add_parameters((100, self.DECODER_SIZE))
        self.att_v = self.model.add_parameters((1, 100))

        self.hidden_w = self.model.add_parameters((self.HIDDEN_SIZE, self.DECODER_SIZE))
        self.hidden_b = self.model.add_parameters((self.HIDDEN_SIZE))

        self.softmax_w = self.model.add_parameters(
            (len(self.encodings.phoneme2int) + 1, self.HIDDEN_SIZE))  # +1 is for EOS
        self.softmax_b = self.model.add_parameters((len(self.encodings.phoneme2int) + 1)) 
Developer ID: tiberiu44, Project: TTS-Cube, Lines of code: 44, Source file: g2p.py
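
The att_w1/att_w2/att_v parameters above have the classic additive (Bahdanau-style) attention shape: encoder states and the decoder state are projected into a shared 100-dimensional space, scored with att_v, normalized with a softmax, and used to weight the encoder states. A hypothetical decoding-time helper under that assumption:

import dynet as dy

def attend(self, encoder_states, decoder_state):
    # hypothetical additive attention over the parameters declared above
    w1 = dy.parameter(self.att_w1)
    w2 = dy.parameter(self.att_w2)
    v = dy.parameter(self.att_v)
    scores = [v * dy.tanh(w1 * h + w2 * decoder_state)
              for h in encoder_states]            # one 1-dim score per state
    alphas = dy.softmax(dy.concatenate(scores))   # attention weights
    return dy.concatenate_cols(encoder_states) * alphas  # context vector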

Example 8: __init__

# Required import: import dynet [as alias]
# Or: from dynet import Model [as alias]
def __init__(self, tagset_sizes, num_lstm_layers, hidden_dim, word_embeddings, no_we_update, use_char_rnn, charset_size, char_embedding_dim, att_props=None, vocab_size=None, word_embedding_dim=None):
        '''
        :param tagset_sizes: dictionary of attribute_name:number_of_possible_tags
        :param num_lstm_layers: number of desired LSTM layers
        :param hidden_dim: size of hidden dimension (same for all LSTM layers, including character-level)
        :param word_embeddings: pre-trained list of embeddings, assumes order by word ID (optional)
        :param no_we_update: if toggled, don't update embeddings
        :param use_char_rnn: use "char->tag" option, i.e. concatenate character-level LSTM outputs to word representations (and train underlying LSTM). Only 1-layer is supported.
        :param charset_size: number of characters expected in dataset (needed for character embedding initialization)
        :param char_embedding_dim: desired character embedding dimension
        :param att_props: proportion of loss to assign each attribute for back-propagation weighting (optional)
        :param vocab_size: number of words in model (ignored if pre-trained embeddings are given)
        :param word_embedding_dim: desired word embedding dimension (ignored if pre-trained embeddings are given)
        '''
        self.model = dy.Model()
        self.tagset_sizes = tagset_sizes
        self.attributes = list(tagset_sizes.keys())
        self.we_update = not no_we_update
        if att_props is not None:
            self.att_props = defaultdict(float, {att:(1.0-p) for att,p in att_props.items()})
        else:
            self.att_props = None

        if word_embeddings is not None: # Use pretrained embeddings
            vocab_size = word_embeddings.shape[0]
            word_embedding_dim = word_embeddings.shape[1]

        self.words_lookup = self.model.add_lookup_parameters((vocab_size, word_embedding_dim), name="we")

        if word_embeddings is not None:
            self.words_lookup.init_from_array(word_embeddings)

        # Char LSTM Parameters
        self.use_char_rnn = use_char_rnn
        self.char_hidden_dim = hidden_dim
        if use_char_rnn:
            self.char_lookup = self.model.add_lookup_parameters((charset_size, char_embedding_dim), name="ce")
            self.char_bi_lstm = dy.BiRNNBuilder(1, char_embedding_dim, hidden_dim, self.model, dy.LSTMBuilder)

        # Word LSTM parameters
        if use_char_rnn:
            input_dim = word_embedding_dim + hidden_dim
        else:
            input_dim = word_embedding_dim
        self.word_bi_lstm = dy.BiRNNBuilder(num_lstm_layers, input_dim, hidden_dim, self.model, dy.LSTMBuilder)

        # Matrix that maps from Bi-LSTM output to num tags
        self.lstm_to_tags_params = {}
        self.lstm_to_tags_bias = {}
        self.mlp_out = {}
        self.mlp_out_bias = {}
        for att, set_size in list(tagset_sizes.items()):
            self.lstm_to_tags_params[att] = self.model.add_parameters((set_size, hidden_dim), name=att+"H")
            self.lstm_to_tags_bias[att] = self.model.add_parameters(set_size, name=att+"Hb")
            self.mlp_out[att] = self.model.add_parameters((set_size, set_size), name=att+"O")
            self.mlp_out_bias[att] = self.model.add_parameters(set_size, name=att+"Ob") 
Developer ID: yuvalpinter, Project: Mimick, Lines of code: 58, Source file: model.py
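
As a follow-up usage note, scoring one word's Bi-LSTM output for a single attribute with the parameters declared above could look like this hypothetical helper (tag_scores is not part of the source shown here):

import dynet as dy

def tag_scores(self, att, word_rep):
    # hypothetical per-attribute scorer: affine -> tanh -> affine, matching
    # the H/Hb/O/Ob parameter shapes declared in the constructor
    H = dy.parameter(self.lstm_to_tags_params[att])
    Hb = dy.parameter(self.lstm_to_tags_bias[att])
    O = dy.parameter(self.mlp_out[att])
    Ob = dy.parameter(self.mlp_out_bias[att])
    return O * dy.tanh(H * word_rep + Hb) + Ob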


Note: The dynet.Model method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to each project's license. Do not reproduce without permission.