

Python utils.move_to_cuda Method Code Examples

This article collects typical usage examples of the Python method fairseq.utils.move_to_cuda. If you are wondering what utils.move_to_cuda does, how to call it, or what real-world code using it looks like, the curated examples below should help. You can also explore further usage examples from the fairseq.utils module where the method lives.


The following presents 15 code examples of the utils.move_to_cuda method, ordered by popularity by default.
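
Before the examples, a minimal sketch of what utils.move_to_cuda does may be helpful: it recursively walks a nested sample (dicts and lists of tensors) and moves every tensor onto the GPU. The sample layout below is illustrative only, not a real fairseq batch.

import torch
from fairseq import utils

# Toy nested sample; real fairseq batches have a similar dict-of-tensors layout.
sample = {
    'id': torch.tensor([0, 1]),
    'net_input': {
        'src_tokens': torch.randint(0, 100, (2, 7)),
        'src_lengths': torch.tensor([7, 5]),
    },
}

if torch.cuda.is_available():
    sample = utils.move_to_cuda(sample)
    # every tensor in the nested structure now lives on the default CUDA device
    print(sample['net_input']['src_tokens'].device)  # e.g. cuda:0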

Example 1: _prepare_sample

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import move_to_cuda [as alias]
def _prepare_sample(self, sample):
        if sample == "DUMMY":
            raise Exception(
                "Trying to use an uninitialized 'dummy' batch. This usually indicates "
                "that the total number of batches is smaller than the number of "
                "participating GPUs. Try reducing the batch size or using fewer GPUs."
            )

        if sample is None or len(sample) == 0:
            return None

        if self.cuda:
            sample = utils.move_to_cuda(sample)

        def apply_half(t):
            if t.dtype is torch.float32:
                return t.half()
            return t

        if self.args.fp16:
            sample = utils.apply_to_sample(apply_half, sample)

        return sample 
Author: elbayadm, Project: attn2d, Lines: 25, Source: trainer.py
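
The half-precision cast in Example 1 relies on fairseq.utils.apply_to_sample, which applies a function to every tensor in a nested sample. A minimal sketch of the same pattern outside the trainer (the sample layout here is an assumption):

import torch
from fairseq import utils

def apply_half(t):
    # cast only float32 tensors; integer tensors (e.g. token ids) are left untouched
    if t.dtype is torch.float32:
        return t.half()
    return t

sample = {'features': torch.randn(2, 4), 'ids': torch.tensor([3, 7])}
sample = utils.apply_to_sample(apply_half, sample)
print(sample['features'].dtype, sample['ids'].dtype)  # torch.float16 torch.int64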

Example 2: _prepare_sample

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import move_to_cuda [as alias]
def _prepare_sample(self, sample):
        if sample is None or len(sample) == 0:
            return None
        return utils.move_to_cuda(sample) 
Author: nusnlp, Project: crosentgec, Lines: 6, Source: trainer.py

Example 3: generate_batched_itr

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import move_to_cuda [as alias]
def generate_batched_itr(
        self, data_itr, beam_size=None, maxlen_a=0.0, maxlen_b=None,
        cuda=False, timer=None, prefix_size=0,
    ):
        """Iterate over a batched dataset and yield individual translations.
        Args:
            maxlen_a/b: generate sequences of maximum length ax + b,
                where x is the source sentence length.
            cuda: use GPU for generation
            timer: StopwatchMeter for timing generations.
        """
        if maxlen_b is None:
            maxlen_b = self.maxlen

        for sample in data_itr:
            s = utils.move_to_cuda(sample) if cuda else sample
            if 'net_input' not in s:
                continue
            input = s['net_input']
            srclen = input['src_tokens'].size(1)
            if timer is not None:
                timer.start()
            with torch.no_grad():
                hypos = self.generate(
                    input['src_tokens'],
                    input['src_lengths'],
                    beam_size=beam_size,
                    maxlen=int(maxlen_a*srclen + maxlen_b),
                    prefix_tokens=s['target'][:, :prefix_size] if prefix_size > 0 else None,
                )
            if timer is not None:
                timer.stop(sum(len(h[0]['tokens']) for h in hypos))
            for i, id in enumerate(s['id'].data):
                # remove padding
                src = utils.strip_pad(input['src_tokens'].data[i, :], self.pad)
                ref = utils.strip_pad(s['target'].data[i, :], self.pad) if s['target'] is not None else None
                yield id, src, ref, hypos[i] 
Author: nusnlp, Project: crosentgec, Lines: 39, Source: sequence_generator.py
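
An iterator like the one above is typically consumed in a loop. A hypothetical sketch follows; the translator object, the data iterator itr, and tgt_dict are assumed to be constructed elsewhere and are not part of the original example.

# Hypothetical consumption of generate_batched_itr; `translator`, `itr`,
# and `tgt_dict` are assumed to exist already.
for sample_id, src_tokens, ref_tokens, hypos in translator.generate_batched_itr(itr, cuda=True):
    best = hypos[0]  # hypotheses for each sentence are sorted best-first
    print(sample_id.item(), tgt_dict.string(best['tokens']))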

Example 4: score_batched_itr

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import move_to_cuda [as alias]
def score_batched_itr(self, data_itr, cuda=False, timer=None):
        """Iterate over a batched dataset and yield scored translations."""
        for sample in data_itr:
            s = utils.move_to_cuda(sample) if cuda else sample
            if timer is not None:
                timer.start()
            pos_scores, attn = self.score(s)
            for i, id in enumerate(s['id'].data):
                # remove padding from ref
                src = utils.strip_pad(s['net_input']['src_tokens'].data[i, :], self.pad)
                ref = utils.strip_pad(s['target'].data[i, :], self.pad) if s['target'] is not None else None
                tgt_len = ref.numel()
                pos_scores_i = pos_scores[i][:tgt_len]
                score_i = pos_scores_i.sum() / tgt_len
                if attn is not None:
                    attn_i = attn[i]
                    _, alignment = attn_i.max(dim=0)
                else:
                    attn_i = alignment = None
                hypos = [{
                    'tokens': ref,
                    'score': score_i,
                    'attention': attn_i,
                    'alignment': alignment,
                    'positional_scores': pos_scores_i,
                }]
                if timer is not None:
                    timer.stop(s['ntokens'])
                # return results in the same format as SequenceGenerator
                yield id, src, ref, hypos 
Author: nusnlp, Project: crosentgec, Lines: 32, Source: sequence_scorer.py

Example 5: disambiguate_pronoun

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import move_to_cuda [as alias]
def disambiguate_pronoun(self, model, sentence, use_cuda=False):
        sample_json = wsc_utils.convert_sentence_to_json(sentence)
        dataset = self.build_dataset_for_inference(sample_json)
        sample = dataset.collater([dataset[0]])
        if use_cuda:
            sample = utils.move_to_cuda(sample)

        def get_masked_input(tokens, mask):
            masked_tokens = tokens.clone()
            masked_tokens[mask.bool()] = self.mask
            return masked_tokens

        def get_lprobs(tokens, mask):
            logits, _ = model(src_tokens=get_masked_input(tokens, mask))
            lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float)
            scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1)
            mask = mask.type_as(scores)
            scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1)
            return scores

        cand_lprobs = get_lprobs(
            sample['candidate_tokens'][0],
            sample['candidate_masks'][0],
        )
        if sample['query_tokens'][0] is not None:
            query_lprobs = get_lprobs(
                sample['query_tokens'][0].unsqueeze(0),
                sample['query_masks'][0].unsqueeze(0),
            )
            return (query_lprobs >= cand_lprobs).all().item() == 1
        else:
            best_idx = cand_lprobs.argmax().item()
            full_cand = sample['candidate_tokens'][0][best_idx]
            mask = sample['candidate_masks'][0][best_idx]
            toks = full_cand[mask.bool()]
            return self.bpe.decode(self.source_dictionary.string(toks)).strip() 
Author: pytorch, Project: fairseq, Lines: 38, Source: wsc_task.py

Example 6: _prepare_sample

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import move_to_cuda [as alias]
def _prepare_sample(self, sample):
        if sample == "DUMMY":
            raise Exception(
                "Trying to use an uninitialized 'dummy' batch. This usually indicates "
                "that the total number of batches is smaller than the number of "
                "participating GPUs. Try reducing the batch size or using fewer GPUs."
            )

        if sample is None or len(sample) == 0:
            return None

        if self.cuda:
            sample = utils.move_to_cuda(sample)

        def apply_half(t):
            if t.dtype is torch.float32:
                return t.half()
            return t

        def apply_bfloat16(t):
            if t.dtype is torch.float32:
                return t.to(dtype=torch.bfloat16)
            return t

        if self.args.fp16:
            sample = utils.apply_to_sample(apply_half, sample)

        if self.args.bf16:
            sample = utils.apply_to_sample(apply_bfloat16, sample)

        return sample 
Author: pytorch, Project: fairseq, Lines: 33, Source: trainer.py

Example 7: backtranslate_samples

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import move_to_cuda [as alias]
def backtranslate_samples(samples, collate_fn, generate_fn, cuda=True):
    """Backtranslate a list of samples.

    Given an input (*samples*) of the form:

        [{'id': 1, 'source': 'hallo welt'}]

    this will return:

        [{'id': 1, 'source': 'hello world', 'target': 'hallo welt'}]

    Args:
        samples (List[dict]): samples to backtranslate. Individual samples are
            expected to have a 'source' key, which will become the 'target'
            after backtranslation.
        collate_fn (callable): function to collate samples into a mini-batch
        generate_fn (callable): function to generate backtranslations
        cuda (bool): use GPU for generation (default: ``True``)

    Returns:
        List[dict]: an updated list of samples with a backtranslated source
    """
    collated_samples = collate_fn(samples)
    s = utils.move_to_cuda(collated_samples) if cuda else collated_samples
    generated_sources = generate_fn(s)

    id_to_src = {
        sample['id']: sample['source'] for sample in samples
    }

    # Go through each tgt sentence in batch and its corresponding best
    # generated hypothesis and create a backtranslation data pair
    # {id: id, source: generated backtranslation, target: original tgt}
    return [
        {'id': id.item(), 'target': id_to_src[id.item()], 'source': hypos[0]['tokens'].cpu()}
        for id, hypos in zip(collated_samples['id'], generated_sources)
    ] 
Author: pytorch, Project: fairseq, Lines: 39, Source: backtranslation_dataset.py
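
backtranslate_samples is generic over collate_fn and generate_fn. A toy sketch of the calling contract follows; both stand-in functions are assumptions for illustration, not fairseq APIs.

import torch

samples = [{'id': 1, 'source': torch.tensor([10, 11, 2])}]

def collate_fn(batch):
    # stand-in collater; real code would use a dataset's collater
    return {'id': torch.tensor([s['id'] for s in batch]),
            'net_input': {'src_tokens': torch.stack([s['source'] for s in batch])}}

def generate_fn(batch):
    # stand-in generator: one list of hypotheses per sample, best hypothesis first
    return [[{'tokens': torch.tensor([20, 21, 2])}]]

pairs = backtranslate_samples(samples, collate_fn, generate_fn, cuda=False)
# -> [{'id': 1, 'target': tensor([10, 11, 2]), 'source': tensor([20, 21, 2])}]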

Example 8: generate_batched_itr

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import move_to_cuda [as alias]
def generate_batched_itr(self, data_itr, beam_size=None, cuda=False, timer=None):
        """Iterate over a batched dataset and yield individual translations.
        Args:
            cuda (bool, optional): use GPU for generation
            timer (StopwatchMeter, optional): time generations
        """
        for sample in data_itr:
            s = utils.move_to_cuda(sample) if cuda else sample
            if "net_input" not in s:
                continue
            input = s["net_input"]
            # model.forward normally channels prev_output_tokens into the decoder
            # separately, but SequenceGenerator directly calls model.encoder
            encoder_input = {
                k: v for k, v in input.items() if k != "prev_output_tokens"
            }
            if timer is not None:
                timer.start()
            with torch.no_grad():
                hypos = self.generate(encoder_input)
            if timer is not None:
                timer.stop(sum(len(h[0]["tokens"]) for h in hypos))
            for i, id in enumerate(s["id"].data):
                # remove padding
                src = utils.strip_pad(input["src_tokens"].data[i, :], self.pad)
                ref = (
                    utils.strip_pad(s["target"].data[i, :], self.pad)
                    if s["target"] is not None
                    else None
                )
                yield id, src, ref, hypos[i] 
Author: pytorch, Project: fairseq, Lines: 33, Source: sequence_generator.py

Example 9: _generate_translation

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import move_to_cuda [as alias]
def _generate_translation(self, model, tgt_dict, sample, beam_size, **kwargs):
        translator_class = beam_decode.SequenceGenerator
        translator = translator_class(models=[model], tgt_dict=tgt_dict, **kwargs)
        translator.cuda()
        s = utils.move_to_cuda(sample)

        # TODO: nbest
        input = s["net_input"]
        srclen = input["src_tokens"].size(1)
        if self.task.use_char_source:
            encoder_input = {
                k: v
                for k, v in input.items()
                if k in ["src_tokens", "src_lengths", "char_inds", "word_lengths"]
            }
        else:
            encoder_input = {
                k: v for k, v in input.items() if k in ["src_tokens", "src_lengths"]
            }
        with torch.no_grad():
            hypos = translator.generate(
                encoder_input=encoder_input,
                beam_size=beam_size,
                maxlen=int(self.args.max_len_a * srclen + self.args.max_len_b),
            )
            for i, id in enumerate(s["id"]):
                # remove padding
                src = utils.strip_pad(input["src_tokens"][i, :], tgt_dict.pad())
                yield id, src, hypos[i] 
Author: pytorch, Project: translate, Lines: 31, Source: dual_learning_criterion.py

Example 10: _prepare_sample

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import move_to_cuda [as alias]
def _prepare_sample(self, sample):
        if sample is None or len(sample) == 0:
            return None
        if self.cuda:
            sample = utils.move_to_cuda(sample)
        return sample 
Author: kakaobrain, Project: helo_word, Lines: 8, Source: trainer.py

Example 11: backtranslate_samples

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import move_to_cuda [as alias]
def backtranslate_samples(samples, collate_fn, generate_fn, cuda=True):
    """Backtranslate a list of samples.

    Given an input (*samples*) of the form:

        [{'id': 1, 'source': 'hallo welt'}]

    this will return:

        [{'id': 1, 'source': 'hello world', 'target': 'hallo welt'}]

    Args:
        samples (List[dict]): samples to backtranslate. Individual samples are
            expected to have a 'source' key, which will become the 'target'
            after backtranslation.
        collate_fn (callable): function to collate samples into a mini-batch
        generate_fn (callable): function to generate backtranslations
        cuda (bool): use GPU for generation (default: ``True``)

    Returns:
        List[dict]: an updated list of samples with a backtranslated source
    """
    collated_samples = collate_fn(samples)
    s = utils.move_to_cuda(collated_samples) if cuda else collated_samples
    generated_sources = generate_fn(s['net_input'])

    def update_sample(sample, generated_source):
        sample['target'] = sample['source']  # the original source becomes the target
        sample['source'] = generated_source
        return sample

    # Go through each tgt sentence in batch and its corresponding best
    # generated hypothesis and create a backtranslation data pair
    # {id: id, source: generated backtranslation, target: original tgt}
    return [
        update_sample(
            sample=input_sample,
            generated_source=hypos[0]['tokens'].cpu(),  # highest scoring hypo is first
        )
        for input_sample, hypos in zip(samples, generated_sources)
    ] 
Author: kakaobrain, Project: helo_word, Lines: 43, Source: backtranslation_dataset.py

Example 12: generate_batched_itr

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import move_to_cuda [as alias]
def generate_batched_itr(
        self,
        data_itr,
        beam_size=None,
        maxlen_a=0.0,
        maxlen_b=None,
        cuda=False,
        timer=None,
        prefix_size=0,
    ):
        """Iterate over a batched dataset and yield individual translations.

        Args:
            maxlen_a/b: generate sequences of maximum length ax + b,
                where x is the source sentence length.
            cuda: use GPU for generation
            timer: StopwatchMeter for timing generations.
        """
        if maxlen_b is None:
            maxlen_b = self.maxlen

        for sample in data_itr:
            s = utils.move_to_cuda(sample) if cuda else sample
            input = s["net_input"]
            # Take the max source length to compute the max target length
            srclen = input["src_tokens"].size(1)
            # FIXME: handle characters properly
            if self.use_char_source:
                raise ValueError(
                    "Character level encoder is not supported yet for "
                    "multisource sentences."
                )
            encoder_inputs = (input["src_tokens"], input["src_lengths"])
            if timer is not None:
                timer.start()
            with torch.no_grad():
                hypos = self.generate(
                    encoder_inputs,
                    srcs_ids=input["src_ids"],
                    beam_size=beam_size,
                    maxlen=int(maxlen_a * srclen + maxlen_b),
                    prefix_tokens=s["target"][:, :prefix_size]
                    if prefix_size > 0
                    else None,
                )
            if timer is not None:
                timer.stop(s["ntokens"])
            for i, id in enumerate(s["id"]):
                src = input["src_tokens"].index_select(
                    0, input["src_ids"][self.align_to]
                )
                # remove padding from ref
                ref = utils.strip_pad(s["target"][i, :], self.pad)
                yield id, src, ref, hypos[i] 
Author: pytorch, Project: translate, Lines: 57, Source: multisource_decode.py

Example 13: test_collect_top_k_probs

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import move_to_cuda [as alias]
def test_collect_top_k_probs(self):
        test_args = test_utils.ModelParamsDict(arch="hybrid_transformer_rnn")
        _, src_dict, tgt_dict = test_utils.prepare_inputs(test_args)
        self.task = tasks.DictionaryHolderTask(src_dict, tgt_dict)
        model = self.task.build_model(test_args)

        use_cuda = torch.cuda.is_available()
        if use_cuda:
            model.cuda()
        model.eval()

        binarized_source = test_utils.create_dummy_binarized_dataset()
        binarized_target = test_utils.create_dummy_binarized_dataset(append_eos=True)
        dataset = language_pair_dataset.LanguagePairDataset(
            src=binarized_source,
            src_sizes=binarized_source.sizes,
            src_dict=self.task.src_dict,
            tgt=binarized_target,
            tgt_sizes=binarized_target.sizes,
            tgt_dict=self.task.dst_dict,
            left_pad_source=False,
        )

        top_k_scores, top_k_indices = collect_top_k_probs.compute_top_k(
            task=self.task,
            models=[model],
            dataset=dataset,
            k=3,
            use_cuda=use_cuda,
            max_tokens=None,
            max_sentences=None,
            progress_bar_args=None,
        )

        batch = language_pair_dataset.collate(
            [dataset[0]],
            pad_idx=self.task.src_dict.pad(),
            eos_idx=self.task.src_dict.eos(),
            left_pad_source=False,
        )

        sample = batch["net_input"]
        if use_cuda:
            sample = utils.move_to_cuda(sample)

        with torch.no_grad():
            net_output = model(**sample)
            probs = model.get_normalized_probs(net_output, log_probs=False)

        top_probs, top_indices = torch.topk(probs[0, 0], k=3)
        if use_cuda:
            top_probs = top_probs.cpu()
            top_indices = top_indices.cpu()

        np.testing.assert_array_equal(top_k_indices[0], top_indices.numpy())
        normalized_probs = (top_probs / top_probs.sum()).numpy()
        np.testing.assert_almost_equal(top_k_scores[0], normalized_probs) 
Author: pytorch, Project: translate, Lines: 59, Source: test_knowledge_distillation.py

Example 14: generate_batched_itr

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import move_to_cuda [as alias]
def generate_batched_itr(
        self,
        data_itr,
        beam_size=None,
        maxlen_a=0.0,
        maxlen_b=None,
        cuda=False,
        timer=None,
        prefix_size=0,
    ):
        """Iterate over a batched dataset and yield individual translations.

        Args:
            maxlen_a/b: generate sequences of maximum length ax + b,
                where x is the source sentence length.
            cuda: use GPU for generation
            timer: StopwatchMeter for timing generations.
        """
        if maxlen_b is None:
            maxlen_b = self.maxlen

        for sample in data_itr:
            if "net_input" not in sample:
                continue

            if cuda:
                s = utils.move_to_cuda(sample)
            else:
                s = sample
            input = s["net_input"]
            srclen = input["src_tokens"].size(1)
            if self.use_char_source:
                encoder_input = {
                    k: v
                    for k, v in input.items()
                    if k in ["src_tokens", "src_lengths", "char_inds", "word_lengths"]
                }
            else:
                encoder_input = {
                    k: v for k, v in input.items() if k in ["src_tokens", "src_lengths"]
                }
            if timer is not None:
                timer.start()
            with torch.no_grad():
                hypos = self.generate(
                    encoder_input=encoder_input,
                    beam_size=beam_size,
                    maxlen=int(maxlen_a * srclen + maxlen_b),
                    prefix_tokens=s["target"][:, :prefix_size]
                    if prefix_size > 0
                    else None,
                )
            if timer is not None:
                timer.stop(s["ntokens"])
            for i, id in enumerate(s["id"]):
                # remove padding
                src = utils.strip_pad(input["src_tokens"][i, :], self.pad)
                ref = utils.strip_pad(s["target"][i, :], self.pad)
                yield id, src, ref, hypos[i] 
Author: pytorch, Project: translate, Lines: 61, Source: beam_decode.py

Example 15: score

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import move_to_cuda [as alias]
def score(self, lines):
        batch = self.make_batches(lines)
        sample_score_dict = {}

        for sample in batch:
            sample_id_lst = sample['id']
            sample = utils.move_to_cuda(sample) if self.use_cuda else sample
            if 'net_input' not in sample:
                continue

            hypos = self.scorer.generate(self.models, sample)

            for sample_id, hypos_i in zip(sample_id_lst, hypos):
                hypo = hypos_i[0]
                pos_scores = hypo['positional_scores']

                inf_scores = pos_scores.eq(float('inf')) | pos_scores.eq(float('-inf'))
                if inf_scores.any():
                    print('| Skipping tokens with inf scores:',
                          self.task.target_dictionary.string(hypo['tokens'][inf_scores.nonzero()]))
                    pos_scores = pos_scores[(~inf_scores).nonzero()]
                sample_score = pos_scores.sum().cpu()
                count = pos_scores.numel()

                w_lst = []
                word_prob = []
                for i in range(len(hypo['tokens'])):
                    w_ind = hypo['tokens'][i].item()
                    w = self.task.dictionary[w_ind]
                    word_prob.append((w, pos_scores[i].item()))
                    w_lst.append(w)

                sample_score = -sample_score / count

                if not self.args.quiet:
                    if self.args.output_sent:
                        print('H-{}\t{}\t{}'.format(sample_id, sample_score, ' '.join(w_lst)))
                    else:
                        print('H-{}\t{}'.format(sample_id, sample_score))
                sample_score_dict[sample_id.item()] = sample_score.item()

        return sample_score_dict
Author: kakaobrain, Project: helo_word, Lines: 55, Source: lm_scorer.py


Note: The fairseq.utils.move_to_cuda examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult each project's License before distributing or using the code, and do not reproduce this article without permission.