當前位置: 首頁>>代碼示例>>Python>>正文


Python dynet.AdamTrainer方法代碼示例

本文整理匯總了Python中dynet.AdamTrainer方法的典型用法代碼示例。如果您正苦於以下問題:Python dynet.AdamTrainer方法的具體用法?Python dynet.AdamTrainer怎麽用?Python dynet.AdamTrainer使用的例子?那麽,這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在dynet的用法示例。


在下文中一共展示了dynet.AdamTrainer方法的5個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Python代碼示例。

示例1: __init__

# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import AdamTrainer [as 別名]
def __init__(self, vocab, options):
    """Build the graph-based (MST-LSTM) parser model.

    Creates the DyNet parameter collection and Adam trainer, selects the
    activation function, builds the shared BiLSTM feature extractor and
    the arc-scoring (and optionally label-scoring) MLPs.

    Args:
        vocab: vocabulary object passed through to ``FeatureExtractor``.
        options: parsed command-line options; reads ``learning_rate``,
            ``activation``, ``costaugFlag``, ``labelsFlag``,
            ``lstm_output_size``, ``mlp_hidden_dims``, ``mlp_hidden2_dims``
            and ``proj``.
    """
    # `global dy` must precede the binding (`import dynet as dy`) in this
    # scope; Python 3 raises "name 'dy' is assigned to before global
    # declaration" when the declaration follows the assignment.
    global dy
    import dynet as dy
    from uuparser.feature_extractor import FeatureExtractor

    self.model = dy.ParameterCollection()
    self.trainer = dy.AdamTrainer(self.model, alpha=options.learning_rate)
    # tanh3 applies tanh to the element-wise cube of its input.
    self.activations = {
        'tanh': dy.tanh,
        'sigmoid': dy.logistic,
        'relu': dy.rectify,
        'tanh3': (lambda x:
                  dy.tanh(dy.cwise_multiply(dy.cwise_multiply(x, x), x))),
    }
    self.activation = self.activations[options.activation]
    self.costaugFlag = options.costaugFlag
    self.feature_extractor = FeatureExtractor(self.model, options, vocab)
    self.labelsFlag = options.labelsFlag
    # The BiLSTM output concatenates forward and backward states.
    mlp_in_dims = options.lstm_output_size * 2

    # Arc scorer: one score per candidate head/dependent pair.
    self.unlabeled_MLP = biMLP(self.model, mlp_in_dims, options.mlp_hidden_dims,
                               options.mlp_hidden2_dims, 1, self.activation)
    if self.labelsFlag:
        # Label scorer: one score per dependency relation.
        self.labeled_MLP = biMLP(self.model, mlp_in_dims, options.mlp_hidden_dims,
                                 options.mlp_hidden2_dims,
                                 len(self.feature_extractor.irels), self.activation)

    self.proj = options.proj
開發者ID:UppsalaNLP,項目名稱:uuparser,代碼行數:24,代碼來源:mstlstm.py

示例2: init_params

# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import AdamTrainer [as 別名]
def init_params(self):
    """Create the Adam optimiser over the model's parameter collection."""
    # DyNet default hyper-parameters are used for the trainer.
    adam_trainer = dy.AdamTrainer(self.model.pc)
    self.trainer = adam_trainer
開發者ID:AmitMY,項目名稱:chimera,代碼行數:4,代碼來源:dynet_model_executer.py

示例3: __init__

# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import AdamTrainer [as 別名]
def __init__(self, params, model=None):
    """Build the vocoder network.

    Sets up per-sample upsampling projections that expand each MGC frame
    to audio-rate conditioning vectors, a sample-level LSTM and a 256-way
    softmax output layer.

    Args:
        params: configuration object; must expose ``target_sample_rate``
            (Hz) and ``mgc_order`` (MGC feature dimension).
        model: optional existing DyNet model to attach parameters to;
            a fresh ``dy.Model()`` is created when None.
    """
    self.UPSAMPLE_PROJ = 200
    self.RNN_SIZE = 100
    self.RNN_LAYERS = 1
    self.OUTPUT_EMB_SIZE = 200
    self.params = params
    if model is None:
        self.model = dy.Model()
    else:
        self.model = model
    self.trainer = dy.AdamTrainer(self.model)

    # MGCs are extracted at 12.5 ms, so one MGC frame must be upsampled
    # to this many audio samples at the target sample rate.
    upsample_count = int(12.5 * self.params.target_sample_rate / 1000)
    self.upsample_w_s = []
    self.upsample_w_t = []
    self.upsample_b_s = []
    self.upsample_b_t = []
    # One scale (s) / translation (t) projection pair per sample position
    # inside an MGC frame.
    for _ in range(upsample_count):
        self.upsample_w_s.append(self.model.add_parameters((self.UPSAMPLE_PROJ, self.params.mgc_order)))
        self.upsample_w_t.append(self.model.add_parameters((self.UPSAMPLE_PROJ, self.params.mgc_order)))
        self.upsample_b_s.append(self.model.add_parameters((self.UPSAMPLE_PROJ)))
        self.upsample_b_t.append(self.model.add_parameters((self.UPSAMPLE_PROJ)))

    # Embedding of the previous output sample (256 classes — presumably
    # 8-bit quantized audio; confirm against the sampling code).
    self.output_lookup = self.model.add_lookup_parameters((256, self.OUTPUT_EMB_SIZE))
    # NOTE(review): removed the unused function-scope import of
    # `orthonormal_VanillaLSTMBuilder` and the commented-out experimental
    # code that was the only thing referencing it.
    self.rnn = dy.VanillaLSTMBuilder(self.RNN_LAYERS, self.OUTPUT_EMB_SIZE + self.UPSAMPLE_PROJ,
                                     self.RNN_SIZE, self.model)
    self.mlp_w = []
    self.mlp_b = []
    self.mlp_w.append(self.model.add_parameters((1024, self.RNN_SIZE)))
    self.mlp_b.append(self.model.add_parameters((1024)))

    # Output distribution over the 256 sample classes.
    self.softmax_w = self.model.add_parameters((256, 1024))
    self.softmax_b = self.model.add_parameters((256))
開發者ID:tiberiu44,項目名稱:TTS-Cube,代碼行數:39,代碼來源:vocoder_old.py

示例4: __init__

# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import AdamTrainer [as 別名]
def __init__(self, vocab, options):
    """Build the arc-hybrid transition-based parser model.

    Creates the DyNet parameter collection and Adam trainer, selects the
    activation function, builds the shared BiLSTM feature extractor and
    the transition-scoring / label-scoring MLPs.

    Args:
        vocab: vocabulary object passed through to ``FeatureExtractor``.
        options: parsed command-line options; reads ``learning_rate``,
            ``activation``, ``oracle``, ``headFlag``, ``rlMostFlag``,
            ``rlFlag``, ``k``, ``no_bilstms``, ``lstm_output_size``,
            ``mlp_hidden_dims`` and ``mlp_hidden2_dims``.
    """
    # `global dy` must precede the binding (`import dynet as dy`) in this
    # scope; Python 3 raises "name 'dy' is assigned to before global
    # declaration" when the declaration follows the assignment.
    global dy

    # import here so we don't load Dynet if just running parser.py --help for example
    from uuparser.multilayer_perceptron import MLP
    from uuparser.feature_extractor import FeatureExtractor
    import dynet as dy

    # Transition-system action ids shared across the module.
    global LEFT_ARC, RIGHT_ARC, SHIFT, SWAP
    LEFT_ARC, RIGHT_ARC, SHIFT, SWAP = 0, 1, 2, 3

    self.model = dy.ParameterCollection()
    self.trainer = dy.AdamTrainer(self.model, alpha=options.learning_rate)

    # tanh3 applies tanh to the element-wise cube of its input.
    self.activations = {
        'tanh': dy.tanh,
        'sigmoid': dy.logistic,
        'relu': dy.rectify,
        'tanh3': (lambda x:
                  dy.tanh(dy.cwise_multiply(dy.cwise_multiply(x, x), x))),
    }
    self.activation = self.activations[options.activation]

    self.oracle = options.oracle

    self.headFlag = options.headFlag
    self.rlMostFlag = options.rlMostFlag
    self.rlFlag = options.rlFlag
    self.k = options.k

    # dimensions depending on extended features
    self.nnvecs = (1 if self.headFlag else 0) + (2 if self.rlFlag or self.rlMostFlag else 0)
    self.feature_extractor = FeatureExtractor(self.model, options, vocab, self.nnvecs)
    self.irels = self.feature_extractor.irels

    # MLP input covers k stack items plus the buffer front, each
    # represented by nnvecs vectors.
    if options.no_bilstms > 0:
        mlp_in_dims = options.lstm_output_size * 2 * self.nnvecs * (self.k + 1)
    else:
        mlp_in_dims = self.feature_extractor.lstm_input_size * self.nnvecs * (self.k + 1)

    # Transition scorer: one score per action (4 actions).
    self.unlabeled_MLP = MLP(self.model, 'unlabeled', mlp_in_dims, options.mlp_hidden_dims,
                             options.mlp_hidden2_dims, 4, self.activation)
    # Label scorer: left/right arc per relation plus shift and swap.
    self.labeled_MLP = MLP(self.model, 'labeled', mlp_in_dims, options.mlp_hidden_dims,
                           options.mlp_hidden2_dims, 2 * len(self.irels) + 2, self.activation)
開發者ID:UppsalaNLP,項目名稱:uuparser,代碼行數:42,代碼來源:arc_hybrid.py

示例5: __init__

# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import AdamTrainer [as 別名]
def __init__(self, encodings):
    """Build the attention-based encoder-decoder G2P network.

    Args:
        encodings: holds the ``char2int`` and ``phoneme2int`` vocabularies
            used to size the lookup tables and the output layer.
    """
    self.losses = []
    self.model = dy.Model()
    self.trainer = dy.AdamTrainer(self.model, alpha=2e-3, beta_1=0.9, beta_2=0.9)
    self.encodings = encodings

    self.DECODER_SIZE = 100
    self.ENCODER_SIZE = 100
    self.CHAR_EMB_SIZE = 100
    self.HIDDEN_SIZE = 100
    self.lexicon = {}

    num_chars = len(self.encodings.char2int)
    num_phonemes = len(self.encodings.phoneme2int)

    # Input character embeddings.
    self.char_lookup = self.model.add_lookup_parameters((num_chars, self.CHAR_EMB_SIZE))
    # Output phoneme embeddings; the extra row is the special START symbol.
    self.phoneme_lookup = self.model.add_lookup_parameters((num_phonemes + 1, self.CHAR_EMB_SIZE))

    # Learned initial decoder input (START symbol), sized to match a
    # phoneme embedding concatenated with a bidirectional encoder state.
    self.start_lookup = self.model.add_lookup_parameters((1, self.CHAR_EMB_SIZE + self.ENCODER_SIZE * 2))

    # Two stacked bidirectional LSTM encoder layers.
    self.encoder_fw = []
    self.encoder_bw = []
    enc_in_dim = self.CHAR_EMB_SIZE
    for _ in range(2):
        self.encoder_fw.append(dy.VanillaLSTMBuilder(1, enc_in_dim, self.ENCODER_SIZE, self.model))
        self.encoder_bw.append(dy.VanillaLSTMBuilder(1, enc_in_dim, self.ENCODER_SIZE, self.model))
        # Layers after the first consume the concatenated fw/bw states.
        enc_in_dim = self.ENCODER_SIZE * 2

    # Two-layer decoder over [encoder context; previous phoneme embedding].
    self.decoder = dy.VanillaLSTMBuilder(2, self.ENCODER_SIZE * 2 + self.CHAR_EMB_SIZE,
                                         self.DECODER_SIZE, self.model)

    # Attention parameters: encoder projection, decoder projection and
    # scoring vector (shape suggests additive attention — confirm usage).
    self.att_w1 = self.model.add_parameters((100, self.ENCODER_SIZE * 2))
    self.att_w2 = self.model.add_parameters((100, self.DECODER_SIZE))
    self.att_v = self.model.add_parameters((1, 100))

    # Hidden projection applied to the decoder state before the softmax.
    self.hidden_w = self.model.add_parameters((self.HIDDEN_SIZE, self.DECODER_SIZE))
    self.hidden_b = self.model.add_parameters((self.HIDDEN_SIZE))

    # Output distribution over phonemes; the extra class is EOS.
    self.softmax_w = self.model.add_parameters((num_phonemes + 1, self.HIDDEN_SIZE))
    self.softmax_b = self.model.add_parameters((num_phonemes + 1))
開發者ID:tiberiu44,項目名稱:TTS-Cube,代碼行數:44,代碼來源:g2p.py


注:本文中的dynet.AdamTrainer方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。