

Python dynet.esum Method Code Examples

This article collects typical usage examples of the dynet.esum method in Python. If you are wondering how to use dynet.esum, how it is called in practice, or what real-world examples look like, the curated code samples below may help. You can also explore further usage examples for the dynet module that this method belongs to.


The sections below present 15 code examples of the dynet.esum method, sorted by popularity by default.
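Before looking at the project code, a minimal, self-contained sketch of how dy.esum behaves may be useful (the tensors and values below are illustrative only, not taken from any of the projects quoted later). dy.esum takes a Python list of DyNet expressions of identical shape and returns a single expression holding their element-wise sum, which is why it is routinely used to aggregate per-token or per-example losses before calling backward().

import dynet as dy

dy.renew_cg()  # start a fresh computation graph

# Three toy vectors of identical shape.
exprs = [dy.inputVector([1.0, 2.0, 3.0]),
         dy.inputVector([0.5, 0.5, 0.5]),
         dy.inputVector([-1.0, 0.0, 1.0])]

# dy.esum returns one expression with the element-wise sum: [0.5, 2.5, 4.5]
total = dy.esum(exprs)
print(total.npvalue())

# Typical training pattern: sum per-example losses, then normalize by batch size.
losses = [dy.scalarInput(0.2), dy.scalarInput(0.7), dy.scalarInput(0.1)]
mean_loss = dy.esum(losses) / len(losses)
print(mean_loss.value())  # about 0.333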

Example 1: _attend

# Required module: import dynet [as alias]
# Or: from dynet import esum [as alias]
def _attend(self, input_vectors, decoder):
        w1 = self.att_w1.expr(update=True)
        w2 = self.att_w2.expr(update=True)
        v = self.att_v.expr(update=True)
        attention_weights = []

        w2dt = w2 * decoder.s()[-1]
        for input_vector in input_vectors:
            attention_weight = v * dy.tanh(w1 * input_vector + w2dt)
            attention_weights.append(attention_weight)

        attention_weights = dy.softmax(dy.concatenate(attention_weights))

        output_vectors = dy.esum(
            [vector * attention_weight for vector, attention_weight in zip(input_vectors, attention_weights)])

        return output_vectors, attention_weights 
Developer: tiberiu44, Project: TTS-Cube, Lines of code: 19, Source file: g2p.py

Example 2: _make_input

# Required module: import dynet [as alias]
# Or: from dynet import esum [as alias]
def _make_input(self, seq):
        x_list = [self.phone_lookup[self.encodings.char2int['START']]]
        for pi in seq:
            if pi.char not in self.encodings.char2int:
                print("Unknown input: '" + pi.char + "'")
            else:
                char_emb = self.phone_lookup[self.encodings.char2int[pi.char]]
                context = []
                for feature in pi.context:
                    if feature in self.encodings.context2int:
                        context.append(self.feature_lookup[self.encodings.context2int[feature]])
                if len(context) == 0:
                    x_list.append(char_emb)
                else:
                    x_list.append(char_emb + dy.esum(context) * dy.scalarInput(1.0 / len(context)))
        x_list.append(self.phone_lookup[self.encodings.char2int['STOP']])
        return x_list 
Developer: tiberiu44, Project: TTS-Cube, Lines of code: 19, Source file: encoder.py

Example 3: __getitem__

# Required module: import dynet [as alias]
# Or: from dynet import esum [as alias]
def __getitem__(self, i):
            """
            Return the weighted layers for the current word.
            :param i: Word at index i in the sentence.
            :return: Embedding for the word
            """
            layers = self._get_sentence_layers(i)

            normalized_weights = dy.softmax(self.elmo.weights)
            y_hat = [
                dy.inputTensor(layer) * weight
                for layer, weight in zip(layers, normalized_weights)
            ]

            # Sum the layer contents together
            return dy.esum(y_hat) * self.elmo.gamma 
Developer: UppsalaNLP, Project: uuparser, Lines of code: 18, Source file: elmo.py

Example 4: _doc_loss

# Required module: import dynet [as alias]
# Or: from dynet import esum [as alias]
def _doc_loss(self, doc, y):
        y_node = self.prop_encoder_.transform(y.nodes)
        y_link = self.link_encoder_.transform(y.links)

        props, links, _, _, _, _ = self.build_cg(doc)

        obj_prop = [dy.hinge(prop, y_) for prop, y_ in zip(props, y_node)]
        obj_link = [dy.hinge(link, y_) for link, y_ in zip(links, y_link)]

        obj = dy.esum(obj_prop) + dy.esum(obj_link)

        correct = sum(1 for val in obj_prop + obj_link
                      if val.scalar_value() == 0)

        max_acc = len(obj_prop + obj_link)
        return obj, max_acc - correct, max_acc, 'n/a' 
Developer: vene, Project: marseille, Lines of code: 18, Source file: argrnn.py

Example 5: loss

# Required module: import dynet [as alias]
# Or: from dynet import esum [as alias]
def loss(self, sentence, word_chars, tags_set):
        '''
        For use in training phase.
        Tag sentence (all attributes) and compute loss based on probability of expected tags.
        '''
        observations_set = self.build_tagging_graph(sentence, word_chars)
        errors = {}
        for att, tags in tags_set.items():
            err = []
            for obs, tag in zip(observations_set[att], tags):
                err_t = dy.pickneglogsoftmax(obs, tag)
                err.append(err_t)
            errors[att] = dy.esum(err)
        if self.att_props is not None:
            for att, err in errors.items():
                prop_vec = dy.inputVector([self.att_props[att]] * err.dim()[0])
                # Store the scaled loss back into the dict; reassigning the loop
                # variable alone would leave `errors` unchanged.
                errors[att] = dy.cmult(err, prop_vec)
        return errors 
Developer: yuvalpinter, Project: Mimick, Lines of code: 20, Source file: model.py

Example 6: evaluate

# Required module: import dynet [as alias]
# Or: from dynet import esum [as alias]
def evaluate(self, inputs, outputs):
        params = M.get_collection()
        optimizer = dy.SimpleSGDTrainer(params, self.learning_rate)
        num_batches = int(len(self.train_dataset) / self.batch_size)
        for epoch in range(self.max_num_training_epochs):
            random.shuffle(self.train_dataset)
            i = 0
            total_loss = 0
            while (i < len(self.train_dataset)):
                dy.renew_cg()
                mbsize = min(self.batch_size, len(self.train_dataset) - i)
                minibatch = self.train_dataset[i:i + mbsize]
                losses = []
                for (label, img) in minibatch:
                    x = dy.inputVector(img)
                    co.forward({inputs['in']: x})
                    logits = outputs['out'].val
                    loss = dy.pickneglogsoftmax(logits, label)
                    losses.append(loss)
                mbloss = dy.esum(losses) / mbsize
                mbloss.backward()
                optimizer.update()
                total_loss += mbloss.scalar_value()
                i += mbsize

            val_acc = self.compute_accuracy(inputs, outputs)
            if self.log_output_to_terminal and epoch % self.display_step == 0:
                print("epoch:", '%d' % (epoch + 1), "loss:",
                      "{:.9f}".format(total_loss / num_batches),
                      "validation_accuracy:", "%.5f" % val_acc)

        val_acc = self.compute_accuracy(inputs, outputs)
        return {'val_acc': val_acc} 
Developer: negrinho, Project: deep_architect, Lines of code: 35, Source file: mnist_dynet.py

Example 7: train_epoch

# Required module: import dynet [as alias]
# Or: from dynet import esum [as alias]
def train_epoch(self, batch_size):
        batches = list(self.batch(self.train_data, batch_size))
        for batch in tqdm(batches, unit="batch-" + str(batch_size)):
            dy.renew_cg()
            error = dy.esum([dy.average(list(self.model.forward(_in, _out))) for _in, _out in batch])

            self.losses.append(float(error.value()) / len(batch))

            error.backward()
            self.trainer.update()

        time.sleep(0.01)

        return sum(self.losses[-1 * len(batches):]) / len(batches) 
Developer: AmitMY, Project: chimera, Lines of code: 16, Source file: dynet_model_executer.py

Example 8: train_step

# Required module: import dynet [as alias]
# Or: from dynet import esum [as alias]
def train_step(model, batch, lr_coeff, dropout):
    dy.renew_cg()
    losses = []
    assert not model.prev_decoder_snippet_rep

    num_tokens = 0
    for item in batch.items:
        loss = model.prepare_and_predict(item,
                                         use_gold=True,
                                         training=True,
                                         dropout=dropout)
        num_tokens += len(item.gold_query())
        losses.append(loss)

    final_loss = dy.esum(losses) / num_tokens
    final_loss.forward()
    final_loss.backward()
    model.trainer.learning_rate = lr_coeff
    model.trainer.update()

    return final_loss.npvalue()[0]


# eval_step
# Runs an evaluation on the example.
#
# Inputs:
#    example: an Utterance.
#    use_gold: whether or not to pass gold tokens into the decoder.
#
# Outputs:
#    information provided by prepare and predict 
Developer: lil-lab, Project: atis, Lines of code: 34, Source file: utterance_model.py

Example 9: encode_ws

# Required module: import dynet [as alias]
# Or: from dynet import esum [as alias]
def encode_ws(self, X, train=False):
        dy.renew_cg()

        # Remove dy.parameters(...) for DyNet v.2.1
        #w_ws = dy.parameter(self.w_ws)
        #b_ws = dy.parameter(self.b_ws)
        w_ws = self.w_ws
        b_ws = self.b_ws

        ipts = []
        length = len(X[0])
        for i in range(length):
            uni   = X[0][i]
            bi    = X[1][i]
            ctype = X[2][i]
            start = X[3][i]
            end   = X[4][i]

            vec_uni   = dy.concatenate([self.UNI[uid] for uid in uni])
            vec_bi    = dy.concatenate([self.BI[bid] for bid in bi])
            vec_start = dy.esum([self.WORD[sid] for sid in start])
            vec_end   = dy.esum([self.WORD[eid] for eid in end])
            vec_ctype = dy.concatenate([self.CTYPE[cid] for cid in ctype])
            vec_at_i  = dy.concatenate([vec_uni, vec_bi, vec_ctype, vec_start, vec_end])

            if train is True:
                vec_at_i = dy.dropout(vec_at_i, self.dropout_rate)
            ipts.append(vec_at_i)

        bilstm_outputs = self.ws_model.transduce(ipts)
        observations   = [w_ws*h+b_ws for h in bilstm_outputs]
        return observations 
Developer: taishi-i, Project: nagisa, Lines of code: 34, Source file: model.py

Example 10: encode_pt

# Required module: import dynet [as alias]
# Or: from dynet import esum [as alias]
def encode_pt(self, X, train=False):
        dy.renew_cg()

        # Remove dy.parameters(...) for DyNet v.2.1
        #w_pos = dy.parameter(self.w_pos)
        #b_pos = dy.parameter(self.b_pos)
        w_pos = self.w_pos
        b_pos = self.b_pos

        ipts  = []
        length = len(X[0])
        for i in range(length):
            cids = X[0][i]
            wid  = X[1][i]
            tids = X[2][i]
            vec_char = self.char_seq_model.transduce([self.UNI[cid] for cid in cids])[-1]

            vec_tags = []
            for tid in tids:
                if tid == 0:
                    zero = dy.inputVector(np.zeros(self.dim_tag_emb))
                    vec_tags.append(zero)
                else:
                    vec_tags.append(self.POS[tid])
            vec_tag = dy.esum(vec_tags)

            if wid == 0:
                vec_word = dy.inputVector(np.zeros(self.dim_word))
            else:
                vec_word = self.WORD[wid]

            vec_at_i = dy.concatenate([vec_word, vec_char, vec_tag])
            if train is True:
                vec_at_i = dy.dropout(vec_at_i, self.dropout_rate)
            ipts.append(vec_at_i)
        hiddens = self.pos_model.transduce(ipts)
        probs = [dy.softmax(w_pos*h+b_pos) for h in hiddens]
        return probs 
Developer: taishi-i, Project: nagisa, Lines of code: 40, Source file: model.py

Example 11: get_POStagging_loss

# Required module: import dynet [as alias]
# Or: from dynet import esum [as alias]
def get_POStagging_loss(self, X, Y):
        losses = []
        probs = self.encode_pt(X, train=True)
        for prob, y in zip(probs, Y):
            losses.append(-dy.log(dy.pick(prob, y)))
        loss = dy.esum(losses)
        return loss 
Developer: taishi-i, Project: nagisa, Lines of code: 9, Source file: model.py

Example 12: end_batch

# Required module: import dynet [as alias]
# Or: from dynet import esum [as alias]
def end_batch(self):
        total_loss = 0
        if len(self.losses) != 0:
            loss = dy.esum(self.losses)
            self.losses = []
            total_loss = loss.value()
            loss.backward()
            self.trainer.update()
        return total_loss 
Developer: tiberiu44, Project: TTS-Cube, Lines of code: 11, Source file: g2p.py

Example 13: learn

# Required module: import dynet [as alias]
# Or: from dynet import esum [as alias]
def learn(self, characters, target_mgc, guided_att=True):
        num_mgc = target_mgc.shape[0]
        # print num_mgc
        dy.renew_cg()

        for pi in characters:
            if pi.char not in self.encodings.char2int:
                print("Unknown input: '" + pi.char + "' - skipping file")
                return 0

        style_probs = self.compute_gold_style_probs(target_mgc)

        output_mgc, output_stop, output_attention = self._predict(characters, target_mgc, style_probs=style_probs)
        losses = []
        index = 0
        for mgc, real_mgc in zip(output_mgc, target_mgc):
            t_mgc = dy.inputVector(real_mgc)
            # losses.append(self._compute_binary_divergence(mgc, t_mgc) )
            losses.append(dy.l1_distance(mgc, t_mgc))

            if index % 3 == 0:
                # attention loss
                if guided_att:
                    att = output_attention[index // 3]
                    losses.append(self._compute_guided_attention(att, index // 3, len(characters) + 2, num_mgc // 3))
                # EOS loss
                stop = output_stop[index // 3]
                if index >= num_mgc - 6:
                    losses.append(dy.l1_distance(stop, dy.scalarInput(-0.8)))
                else:
                    losses.append(dy.l1_distance(stop, dy.scalarInput(0.8)))
            index += 1
        loss = dy.esum(losses)
        loss_val = loss.value() / num_mgc
        loss.backward()
        self.trainer.update()
        return loss_val 
Developer: tiberiu44, Project: TTS-Cube, Lines of code: 39, Source file: encoder.py

Example 14: _attend

# Required module: import dynet [as alias]
# Or: from dynet import esum [as alias]
def _attend(self, input_list, decoder_state, last_pos=None):
        w1 = self.att_w1.expr(update=True)
        w2 = self.att_w2.expr(update=True)
        v = self.att_v.expr(update=True)
        attention_weights = []

        w2dt = w2 * dy.concatenate([decoder_state.s()[-1]])
        for input_vector in input_list:
            attention_weight = v * dy.tanh(w1 * input_vector + w2dt)
            attention_weights.append(attention_weight)

        attention_weights = dy.softmax(dy.concatenate(attention_weights))
        # force incremental attention if this is runtime
        if last_pos is not None:
            current_pos = np.argmax(attention_weights.value())
            if current_pos < last_pos or current_pos >= last_pos + 2:
                current_pos = last_pos + 1
                if current_pos >= len(input_list):
                    current_pos = len(input_list) - 1
                output_vectors = input_list[current_pos]
                simulated_att = np.zeros((len(input_list)))
                simulated_att[current_pos] = 1.0
                new_att_vec = dy.inputVector(simulated_att)
                return output_vectors, new_att_vec

        output_vectors = dy.esum(
            [vector * attention_weight for vector, attention_weight in zip(input_list, attention_weights)])

        return output_vectors, attention_weights 
Developer: tiberiu44, Project: TTS-Cube, Lines of code: 31, Source file: encoder.py

Example 15: train_step

# Required module: import dynet [as alias]
# Or: from dynet import esum [as alias]
def train_step(self, batch):
        """Training step for a batch of examples.

        Input:
            batch (list of examples): Batch of examples used to update.
        """
        dy.renew_cg(autobatching=True)

        losses = []
        total_gold_tokens = 0

        batch.start()
        while not batch.done():
            example = batch.next()

            # First, encode the input sequences.
            input_sequences = example.histories(
                self.params.maximum_utterances - 1) + [example.input_sequence()]
            final_state, utterance_hidden_states = self._encode_input_sequences(
                input_sequences)

            # Add positional embeddings if appropriate
            if self.params.state_positional_embeddings:
                utterance_hidden_states = self._add_positional_embeddings(
                    utterance_hidden_states, input_sequences)

            # Encode the snippets
            snippets = []
            if self.params.use_snippets:
                snippets = self._encode_snippets(example.previous_query(), snippets)

            # Decode
            flat_seq = []
            for sequence in input_sequences:
                flat_seq.extend(sequence)
            decoder_results = self.decoder(
                final_state,
                utterance_hidden_states,
                self.params.train_maximum_sql_length,
                snippets=snippets,
                gold_sequence=example.gold_query(),
                dropout_amount=self.dropout,
                input_sequence=flat_seq)
            all_scores = [
                step.scores for step in decoder_results.predictions]
            all_alignments = [
                step.aligned_tokens for step in decoder_results.predictions]
            loss = du.compute_loss(example.gold_query(),
                                   all_scores,
                                   all_alignments,
                                   get_token_indices)
            losses.append(loss)
            total_gold_tokens += len(example.gold_query())

        average_loss = dy.esum(losses) / total_gold_tokens
        average_loss.forward()
        average_loss.backward()
        self.trainer.update()
        loss_scalar = average_loss.value()

        return loss_scalar 
Developer: lil-lab, Project: atis, Lines of code: 63, Source file: model.py


Note: The dynet.esum method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please follow each project's license when distributing or using the code; do not reproduce this compilation without permission.