

Python chainer.no_backprop_mode Method Code Examples

This article collects typical usage examples of the chainer.no_backprop_mode method in Python. If you are wondering how chainer.no_backprop_mode is used in practice, the selected code examples below may help. You can also explore further usage examples from the chainer package.


The following presents 15 code examples of the chainer.no_backprop_mode method, ordered by popularity by default.
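
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the toy Linear layer and variable names are purely illustrative) of what chainer.no_backprop_mode does: inside the with block, Chainer does not record the computation graph, which saves memory and means no gradients can be propagated back through the results. In the examples that follow it is typically combined with chainer.using_config('train', False) for inference.

import chainer
import chainer.links as L
import numpy as np

layer = L.Linear(3, 2)  # toy layer, for illustration only
x = np.ones((1, 3), dtype=np.float32)

with chainer.no_backprop_mode():
    y = layer(x)  # forward pass; no computation graph is recorded

print(y.creator)  # None: y is detached, so backward() cannot reach the layer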

Example 1: batch_act

# Required module: import chainer [as alias]
# Or: from chainer import no_backprop_mode [as alias]
def batch_act(self, batch_obs):
        xp = self.xp
        b_state = self.batch_states(batch_obs, xp, self.phi)

        if self.obs_normalizer:
            b_state = self.obs_normalizer(b_state, update=False)

        with chainer.using_config('train', False), chainer.no_backprop_mode():
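            # Forward pass for evaluation only; no computation graph is built inside this block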
            if self.recurrent:
                (action_distrib, _), self.test_recurrent_states = self.model(
                    b_state, self.test_recurrent_states)
            else:
                action_distrib, _ = self.model(b_state)
            if self.act_deterministically:
                action = chainer.cuda.to_cpu(
                    action_distrib.most_probable.array)
            else:
                action = chainer.cuda.to_cpu(action_distrib.sample().array)

        return action 
Author: chainer, Project: chainerrl, Lines: 22, Source: ppo.py

Example 2: batch_act

# Required module: import chainer [as alias]
# Or: from chainer import no_backprop_mode [as alias]
def batch_act(self, batch_obs):
        """Select a batch of actions for evaluation.

        Args:
            batch_obs (Sequence of ~object): Observations.

        Returns:
            Sequence of ~object: Actions.
        """

        with chainer.using_config('train', False), chainer.no_backprop_mode():
            batch_xs = self.batch_states(batch_obs, self.xp, self.phi)
            batch_action = self.policy(batch_xs).sample()
            # Q is not needed here, but log it just for information
            q = self.q_function(batch_xs, batch_action)

        # Update stats
        self.average_q *= self.average_q_decay
        self.average_q += (1 - self.average_q_decay) * float(
            q.array.mean(axis=0))
        self.logger.debug('t:%s a:%s q:%s',
                          self.t, batch_action.array[0], q.array)
        return [cuda.to_cpu(action.array) for action in batch_action] 
Author: chainer, Project: chainerrl, Lines: 25, Source: ddpg.py

Example 3: compute_policy_gradient_full_correction

# Required module: import chainer [as alias]
# Or: from chainer import no_backprop_mode [as alias]
def compute_policy_gradient_full_correction(
        action_distrib, action_distrib_mu, action_value, v,
        truncation_threshold):
    """Compute off-policy bias correction term wrt all actions."""
    assert truncation_threshold is not None
    assert np.isscalar(v)
    with chainer.no_backprop_mode():
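        # Importance ratios and advantages computed here act as constants: no gradient flows through them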
        rho_all_inv = compute_full_importance(action_distrib_mu,
                                              action_distrib)
        correction_weight = (
            np.maximum(1 - truncation_threshold * rho_all_inv,
                       np.zeros_like(rho_all_inv)) *
            action_distrib.all_prob.array[0])
        correction_advantage = action_value.q_values.array[0] - v
    return -F.sum(correction_weight *
                  action_distrib.all_log_prob *
                  correction_advantage, axis=1) 
Author: chainer, Project: chainerrl, Lines: 19, Source: acer.py

Example 4: update_on_policy

# Required module: import chainer [as alias]
# Or: from chainer import no_backprop_mode [as alias]
def update_on_policy(self, statevar):
        assert self.t_start < self.t

        if not self.disable_online_update:
            if statevar is None:
                R = 0
            else:
                with chainer.no_backprop_mode():
                    with state_kept(self.model):
                        action_distrib, action_value, v = self.model(statevar)
                R = float(v.array)
            self.update(
                t_start=self.t_start, t_stop=self.t, R=R,
                states=self.past_states,
                actions=self.past_actions,
                rewards=self.past_rewards,
                values=self.past_values,
                action_values=self.past_action_values,
                action_distribs=self.past_action_distrib,
                action_distribs_mu=None,
                avg_action_distribs=self.past_avg_action_distrib)

        self.init_history_data_for_online_update() 
Author: chainer, Project: chainerrl, Lines: 25, Source: acer.py

Example 5: _compute_y_and_t

# Required module: import chainer [as alias]
# Or: from chainer import no_backprop_mode [as alias]
def _compute_y_and_t(self, exp_batch):
        batch_size = exp_batch['reward'].shape[0]

        # Compute Q-values for current states
        batch_state = exp_batch['state']

        if self.recurrent:
            qout, _ = self.model.n_step_forward(
                batch_state,
                exp_batch['recurrent_state'],
                output_mode='concat',
            )
        else:
            qout = self.model(batch_state)

        batch_actions = exp_batch['action']
        batch_q = F.reshape(qout.evaluate_actions(
            batch_actions), (batch_size, 1))

        with chainer.no_backprop_mode():
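            # Target Q-values are computed without building a graph, so they act as constants in the loss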
            batch_q_target = F.reshape(
                self._compute_target_values(exp_batch),
                (batch_size, 1))

        return batch_q, batch_q_target 
Author: chainer, Project: chainerrl, Lines: 27, Source: dqn.py

Example 6: batch_act_and_train

# Required module: import chainer [as alias]
# Or: from chainer import no_backprop_mode [as alias]
def batch_act_and_train(self, batch_obs):
        with chainer.using_config('train', False), chainer.no_backprop_mode():
            batch_av = self._evaluate_model_and_update_recurrent_states(
                batch_obs, test=False)
            batch_maxq = batch_av.max.array
            batch_argmax = cuda.to_cpu(batch_av.greedy_actions.array)
        batch_action = [
            self.explorer.select_action(
                self.t, lambda: batch_argmax[i],
                action_value=batch_av[i:i + 1],
            )
            for i in range(len(batch_obs))]
        self.batch_last_obs = list(batch_obs)
        self.batch_last_action = list(batch_action)

        # Update stats
        self.average_q *= self.average_q_decay
        self.average_q += (1 - self.average_q_decay) * float(batch_maxq.mean())

        return batch_action 
Author: chainer, Project: chainerrl, Lines: 22, Source: dqn.py

Example 7: _compute_loss

# Required module: import chainer [as alias]
# Or: from chainer import no_backprop_mode [as alias]
def _compute_loss(self, exp_batch, errors_out=None):
        """Compute a loss.

        Returns:
            chainer.Variable: Scalar loss.
        """
        y, taus = self._compute_y_and_taus(exp_batch)
        with chainer.no_backprop_mode():
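            # Target values are treated as constants in the quantile loss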
            t = self._compute_target_values(exp_batch)

        eltwise_loss = compute_eltwise_huber_quantile_loss(y, t, taus)
        if errors_out is not None:
            del errors_out[:]
            delta = F.mean(eltwise_loss, axis=(1, 2))
            errors_out.extend(cuda.to_cpu(delta.array))

        if 'weights' in exp_batch:
            return compute_weighted_value_loss(
                eltwise_loss, exp_batch['weights'],
                batch_accumulator=self.batch_accumulator)
        else:
            return compute_value_loss(
                eltwise_loss, batch_accumulator=self.batch_accumulator) 
Author: chainer, Project: chainerrl, Lines: 26, Source: iqn.py

Example 8: act

# Required module: import chainer [as alias]
# Or: from chainer import no_backprop_mode [as alias]
def act(self, obs):
        xp = self.xp
        b_state = self.batch_states([obs], xp, self.phi)

        if self.obs_normalizer:
            b_state = self.obs_normalizer(b_state, update=False)

        with chainer.using_config('train', False), chainer.no_backprop_mode():
            if self.recurrent:
                action_distrib, self.test_recurrent_states =\
                    self.policy(b_state, self.test_recurrent_states)
            else:
                action_distrib = self.policy(b_state)
            if self.act_deterministically:
                action = chainer.cuda.to_cpu(
                    action_distrib.most_probable.array)[0]
            else:
                action = chainer.cuda.to_cpu(
                    action_distrib.sample().array)[0]

        return action 
Author: chainer, Project: chainerrl, Lines: 23, Source: trpo.py

Example 9: main

# Required module: import chainer [as alias]
# Or: from chainer import no_backprop_mode [as alias]
def main():
    setup_dir = 'result/nn_guesser/args.json'
    model, vocab, answers, args = setup_model(setup_dir)

    questions = QuestionDatabase().all_questions().values()
    questions = [q for q in questions if q.fold == GUESSER_DEV_FOLD]
    percentages = [0.1, 0.25, 0.5, 0.75, 1.0]
    results = [[] for _ in percentages]
    
    for q in tqdm(questions):
        text = nlp(q.flatten_text())
        for i, per in enumerate(percentages):
            t = text[:int(len(text) * per)]
            t = [w.lower_ for w in t if w.is_alpha or w.is_digit]
            xs = nlp_utils.transform_to_array([t], vocab, with_label=False)
            xs = nlp_utils.convert_seq(xs, device=args.gpu, with_label=False)
            with chainer.using_config('train', False), chainer.no_backprop_mode():
                prob = model.predict(xs, softmax=True)[0]
            guess = answers[int(model.xp.argmax(prob))]
            results[i].append(guess == q.page)
    for i, rs in enumerate(results):
        print(percentages[i], sum(rs) / len(rs)) 
Author: Pinafore, Project: qb, Lines: 24, Source: test.py

Example 10: compute_test_loss

# Required module: import chainer [as alias]
# Or: from chainer import no_backprop_mode [as alias]
def compute_test_loss(self, test_data, mb_size=64, nb_mb_for_sorting=20):
        def mb_provider():
            required_data = nb_mb_for_sorting * mb_size
            cursor = 0
            while cursor < len(test_data):
                larger_batch = test_data[cursor:cursor+required_data]
                cursor += required_data
                for minibatch in batch_sort_and_split(larger_batch, size_parts=mb_size):
                    yield six.moves.zip(*minibatch)
        
        with chainer.using_config("train", False), chainer.no_backprop_mode():
            total_loss = 0
            total_nb_predictions = 0.0     
            for src_batch, tgt_batch in mb_provider():
                loss = self.compute_loss(src_batch, tgt_batch, reduce="no")
                nb_tgt_words = sum(len(seq) + 1 for seq in tgt_batch) # +1 for eos
                total_loss += self.xp.sum(loss.data)
                total_nb_predictions += nb_tgt_words
            return total_loss / total_nb_predictions 
Author: fabiencro, Project: knmt, Lines: 21, Source: encoder_decoder.py

Example 11: compute_loss_all

# Required module: import chainer [as alias]
# Or: from chainer import no_backprop_mode [as alias]
def compute_loss_all(encdec, test_data, eos_idx, mb_size, gpu=None, reverse_src=False, reverse_tgt=False,
                        use_chainerx=False):
    with chainer.using_config("train", False), chainer.no_backprop_mode():
        if encdec.encdec_type() == "ff":
            assert not reverse_src and not reverse_tgt
            return encdec.compute_test_loss(test_data, mb_size=mb_size, nb_mb_for_sorting=20)
        
        mb_provider_test = minibatch_provider(test_data, eos_idx, mb_size, nb_mb_for_sorting=-1, loop=False,
                                              gpu=gpu,
                                              reverse_src=reverse_src, reverse_tgt=reverse_tgt, use_chainerx=use_chainerx)
        test_loss = 0
        test_nb_predictions = 0
        for src_batch, tgt_batch, src_mask in mb_provider_test:
            loss, attn = encdec(src_batch, tgt_batch, src_mask, raw_loss_info=True)
            test_loss += loss[0].data
            test_nb_predictions += loss[1]
        test_loss /= test_nb_predictions
        return test_loss 
Author: fabiencro, Project: knmt, Lines: 20, Source: evaluation.py

Example 12: predict

# Required module: import chainer [as alias]
# Or: from chainer import no_backprop_mode [as alias]
def predict(model, sentence):
    model, vocab, setup = model
    sentence = sentence.strip()
    text = nlp_utils.normalize_text(sentence)
    # words = nlp_utils.split_text(text, char_based=setup['char_based'])
    if setup['char_based']:
        words = list(text)
    else:
        words = word_tokenize_txt(text)
    xs = nlp_utils.transform_to_array([words], vocab, with_label=False)
    xs = nlp_utils.convert_seq(xs, device=-1, with_label=False)  # todo use GPU
    with chainer.using_config('train', False), chainer.no_backprop_mode():
        prob = model.predict(xs, softmax=True)[0]
    answer = int(model.xp.argmax(prob))
    score = float(prob[answer])
    return answer, score 
Author: vecto-ai, Project: vecto, Lines: 18, Source: text_classification.py

Example 13: get_vectors

# Required module: import chainer [as alias]
# Or: from chainer import no_backprop_mode [as alias]
def get_vectors(model, sentences):
    model, vocab, setup = model
    vectors = []
    for sentence in sentences:
        sentence = sentence.strip()
        text = nlp_utils.normalize_text(sentence)
        if setup['char_based']:
            words = list(text)
        else:
            words = word_tokenize_txt(text)
        xs = nlp_utils.transform_to_array([words], vocab, with_label=False)
        xs = nlp_utils.convert_seq(xs, device=-1, with_label=False)  # todo use GPU
        with chainer.using_config('train', False), chainer.no_backprop_mode():
            vector = model.encoder(xs)
            vectors.append(vector.data[0])
    vectors = numpy.asarray(vectors)
    return vectors 
Author: vecto-ai, Project: vecto, Lines: 19, Source: text_classification.py

Example 14: __call__

# Required module: import chainer [as alias]
# Or: from chainer import no_backprop_mode [as alias]
def __call__(self, trainer):
        device = self.device

        with chainer.no_backprop_mode():
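            # Evaluation pass: translations are generated without recording a computation graph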
            references = []
            hypotheses = []
            for i in range(0, len(self.test_data), self.batch):
                sources, targets = zip(*self.test_data[i:i + self.batch])
                references.extend([[t.tolist()] for t in targets])

                sources = [device.send(x) for x in sources]
                ys = [y.tolist()
                      for y in self.model.translate(sources, self.max_length)]
                hypotheses.extend(ys)

        bleu = bleu_score.corpus_bleu(
            references, hypotheses,
            smoothing_function=bleu_score.SmoothingFunction().method1)
        chainer.report({self.key: bleu}) 
Author: chainer, Project: chainer, Lines: 21, Source: seq2seq.py

Example 15: __call__

# Required module: import chainer [as alias]
# Or: from chainer import no_backprop_mode [as alias]
def __call__(self, trainer):
        with chainer.no_backprop_mode():
            references = []
            hypotheses = []
            for i in range(0, len(self.test_data), self.batch):
                sources, targets = zip(*self.test_data[i:i + self.batch])
                references.extend([[t.tolist()] for t in targets])

                sources = [
                    chainer.dataset.to_device(self.device, x) for x in sources]
                ys = [y.tolist()
                      for y in self.model.translate(sources, self.max_length)]
                hypotheses.extend(ys)

        bleu = bleu_score.corpus_bleu(
            references, hypotheses,
            smoothing_function=bleu_score.SmoothingFunction().method1)
        reporter.report({self.key: bleu}) 
Author: chainer, Project: chainer, Lines: 20, Source: seq2seq.py


Note: The chainer.no_backprop_mode method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by various developers; the copyright of the source code belongs to the original authors. Please follow the corresponding project's license when distributing or using the code, and do not reproduce without permission.