

Python bleu_score.sentence_bleu Method Code Examples

This article collects typical usage examples of the Python method nltk.translate.bleu_score.sentence_bleu. If you are wondering what exactly bleu_score.sentence_bleu does, how to call it, or what real-world usages look like, the curated examples below should help. You can also browse further usage examples of the containing module, nltk.translate.bleu_score.


The following presents 15 code examples of the bleu_score.sentence_bleu method, sorted by popularity by default.
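Before the project-specific examples, here is a minimal, self-contained sketch of calling sentence_bleu directly; the example sentences are invented purely for illustration.

from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction

# One or more tokenized references, and one tokenized hypothesis.
references = [["the", "cat", "sat", "on", "the", "mat"]]
hypothesis = ["the", "cat", "is", "on", "the", "mat"]

# Uniform weights over 1- to 4-grams; smoothing avoids zero scores when
# higher-order n-grams have no matches.
score = sentence_bleu(
    references,
    hypothesis,
    weights=(0.25, 0.25, 0.25, 0.25),
    smoothing_function=SmoothingFunction().method1,
)
print(f"BLEU: {score:.4f}")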

Example 1: compute

# Required import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import sentence_bleu [as alias]
def compute(guess: str, answers: List[str], k: int = 4) -> Optional['BleuMetric']:
        """
        Compute approximate BLEU score between guess and a set of answers.
        """
        if nltkbleu is None:
            # bleu library not installed, just return a default value
            return None
        # Warning: BLEU calculation *should* include proper tokenization and
        # punctuation etc. We're using the normalize_answer for everything though,
        # so we're over-estimating our BLEU scores.  Also note that NLTK's bleu is
        # going to be slower than fairseq's (which is written in C), but fairseq's
        # requires that everything be in arrays of ints (i.e. as tensors). NLTK's
        # works with strings, which is better suited for this module.
        weights = [1 / k for _ in range(k)]
        score = nltkbleu.sentence_bleu(
            [normalize_answer(a).split(" ") for a in answers],
            normalize_answer(guess).split(" "),
            smoothing_function=nltkbleu.SmoothingFunction(epsilon=1e-12).method1,
            weights=weights,
        )
        return BleuMetric(score) 
Developer: facebookresearch, Project: ParlAI, Lines: 23, Source: metrics.py
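The snippet above depends on names defined elsewhere in ParlAI. A rough sketch of the assumed surroundings is shown below; normalize_answer here is only a stand-in (ParlAI's real version also strips punctuation and articles), and the try/except mirrors the nltkbleu-is-None guard inside compute().

from typing import List, Optional

try:
    from nltk.translate import bleu_score as nltkbleu
except ImportError:
    nltkbleu = None  # compute() then falls back to returning None


def normalize_answer(s: str) -> str:
    """Stand-in normalizer: lower-case and collapse whitespace."""
    return " ".join(s.lower().split())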

Example 2: bleu

# Required import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import sentence_bleu [as alias]
def bleu(answer_file, standard_answer_file):
    rf_answer = open(answer_file, 'r', encoding="utf-8")
    rf_standard_answer = open(standard_answer_file, 'r', encoding="utf-8")
    answer_lines = rf_answer.readlines()
    standard_answer_lines = rf_standard_answer.readlines()
    # compute score
    scores = []
    for i in range(len(answer_lines)):
        candidate = list(answer_lines[i].strip())
        each_score = 0
        for j in range(10):
            references = []
            standard_answer_line = standard_answer_lines[i * 11 + j].strip().split('\t')
            references.append(list(standard_answer_line[0].strip()))
            standard_score = standard_answer_line[1]
            bleu_score = sentence_bleu(references, candidate, weights=(0.35, 0.45, 0.1, 0.1),
                                       smoothing_function=SmoothingFunction().method1)
            each_score = bleu_score * float(standard_score) + each_score
        scores.append(each_score / 10)
    rf_answer.close()
    rf_standard_answer.close()
    score_final = sum(scores) / float(len(answer_lines))
    precision_score = round(score_final, 6)
    return precision_score 
Developer: shibing624, Project: dialogbot, Lines: 26, Source: bleu.py
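For context, a hypothetical driver for the bleu() helper above, based on the file layout implied by its indexing (11 standard-answer lines per candidate, of which the first 10 are tab-separated reference/score pairs); the file contents are invented for illustration.

import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False, encoding="utf-8") as fa, \
     tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False, encoding="utf-8") as fs:
    fa.write("今天天气很好\n")           # one candidate answer per line
    for _ in range(10):
        fs.write("今天天气不错\t1.0\n")  # reference<TAB>score, 10 per candidate
    fs.write("\n")                       # 11th line per candidate is skipped

print(bleu(fa.name, fs.name))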

Example 3: _bleu

# Required import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import sentence_bleu [as alias]
def _bleu(guess, answers):
    """Compute approximate BLEU score between guess and a set of answers."""
    if nltkbleu is None:
        # bleu library not installed, just return a default value
        return None
    # Warning: BLEU calculation *should* include proper tokenization and
    # punctuation etc. We're using the normalize_answer for everything though,
    # so we're over-estimating our BLEU scores.  Also note that NLTK's bleu is
    # going to be slower than fairseq's (which is written in C), but fairseq's
    # requires that everything be in arrays of ints (i.e. as tensors). NLTK's
    # works with strings, which is better suited for this module.
    return nltkbleu.sentence_bleu(
        [normalize_answer(a).split(" ") for a in answers],
        normalize_answer(guess).split(" "),
        smoothing_function=nltkbleu.SmoothingFunction(epsilon=1e-12).method1,
    ) 
Developer: natashamjaques, Project: neural_chat, Lines: 18, Source: metrics.py

Example 4: update_metrics

# Required import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import sentence_bleu [as alias]
def update_metrics(self, resp, gt, source):
    '''
    Params:
      :resp: Response word list.
      :gt: Ground truth word list.
      :source: Source word list.
    '''
    try:
      self.metrics['bleu-1'].append(
        bleu_score.sentence_bleu([gt], resp, weights=(1, 0, 0, 0),
                                 smoothing_function=self.smoothing))
      self.metrics['bleu-2'].append(
        bleu_score.sentence_bleu([gt], resp, weights=(0.5, 0.5, 0, 0),
                                 smoothing_function=self.smoothing))
      self.metrics['bleu-3'].append(
        bleu_score.sentence_bleu([gt], resp, weights=(0.33, 0.33, 0.33, 0),
                                 smoothing_function=self.smoothing))
      self.metrics['bleu-4'].append(
        bleu_score.sentence_bleu([gt], resp, weights=(0.25, 0.25, 0.25, 0.25),
                                 smoothing_function=self.smoothing))
    except (KeyError, ZeroDivisionError):
      self.metrics['bleu-1'].append(0)
      self.metrics['bleu-2'].append(0)
      self.metrics['bleu-3'].append(0)
      self.metrics['bleu-4'].append(0) 
Developer: ricsinaruto, Project: dialog-eval, Lines: 27, Source: bleu_metrics.py
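update_metrics() relies on self.metrics and self.smoothing being created elsewhere in dialog-eval. A minimal sketch of that setup, with the attribute names taken from the call sites above and everything else assumed:

from nltk.translate import bleu_score

class BleuMetrics:
    """Minimal container providing the attributes update_metrics() expects."""

    def __init__(self):
        # One running list of per-example scores for each BLEU order.
        self.metrics = {'bleu-1': [], 'bleu-2': [], 'bleu-3': [], 'bleu-4': []}
        # Chen & Cherry smoothing method 1, as used in the other examples here.
        self.smoothing = bleu_score.SmoothingFunction().method1

    # update_metrics() from the example above would be defined as a method here.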

Example 5: cal_BLEU_nltk

# Required import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import sentence_bleu [as alias]
def cal_BLEU_nltk(refer, candidate, ngram=1):
    '''
    The SmoothingFunction choice follows https://github.com/PaddlePaddle/models/blob/a72760dff8574fe2cb8b803e01b44624db3f3eff/PaddleNLP/Research/IJCAI2019-MMPMS/mmpms/utils/metrics.py
    '''
    smoothie = SmoothingFunction().method7
    if ngram == 1:
        weight = (1, 0, 0, 0)
    elif ngram == 2:
        weight = (0.5, 0.5, 0, 0)
    elif ngram == 3:
        weight = (0.33, 0.33, 0.33, 0)
    elif ngram == 4:
        weight = (0.25, 0.25, 0.25, 0.25)
    return sentence_bleu(refer, candidate, 
                         weights=weight, 
                         smoothing_function=smoothie)

# BLEU of nlg-eval 
Developer: gmftbyGMFTBY, Project: MultiTurnDialogZoo, Lines: 20, Source: metric.py
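A hypothetical call to cal_BLEU_nltk, assuming the function above is in scope together with `from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction`; note that refer must be a list of tokenized references, matching sentence_bleu's expected argument shapes.

refer = [['the', 'weather', 'is', 'nice', 'today']]
candidate = ['the', 'weather', 'is', 'good', 'today']

print(cal_BLEU_nltk(refer, candidate, ngram=2))  # BLEU-2 with method7 smoothing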

Example 6: bleu

# Required import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import sentence_bleu [as alias]
def bleu(reference, predict):
    """Compute sentence-level bleu score.

    Args:
        reference (list[str])
        predict (list[str])
    """
    from nltk.translate import bleu_score

    if len(predict) == 0:
        if len(reference) == 0:
            return 1.0
        else:
            return 0.0

    # TODO(kelvin): is this quite right?
    # use a maximum of 4-grams. If 4-grams aren't present, use only lower n-grams.
    n = min(4, len(reference), len(predict))
    weights = tuple([1. / n] * n)  # uniform weight on n-gram precisions
    return bleu_score.sentence_bleu([reference], predict, weights) 
Developer: kelvinguu, Project: lang2program, Lines: 22, Source: utils.py
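A hypothetical call to the helper above; because it adapts the maximum n-gram order to the shorter of the two token lists, it also behaves sensibly on very short sequences. The token lists are made up.

reference = ['SELECT', 'name', 'FROM', 'users']
predict = ['SELECT', 'name', 'FROM', 'users', 'LIMIT', '1']
print(bleu(reference, predict))    # n = 4, uniform weights (0.25, 0.25, 0.25, 0.25)

print(bleu(['hello'], ['hello']))  # n = 1, degenerate but well-defined case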

Example 7: blue_score_text

# Required import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import sentence_bleu [as alias]
def blue_score_text(self,y_actual,y_predicated):
        #check lengths are equal
        assert len(y_actual) == len(y_predicated)
        #list of headlines .. each headline is a list of words
        no_of_news = len(y_actual)
        blue_score = 0.0
        for i in range(no_of_news):
            reference = y_actual[i]
            hypothesis = y_predicated[i]
            
            #Avoid ZeroDivisionError in BLEU score
            #default weights
            weights=(0.25, 0.25, 0.25, 0.25)
            min_len_present = min(len(reference),len(hypothesis))
            if min_len_present==0:
                continue
            if min_len_present<4:
                weights=[1.0/min_len_present,]*min_len_present
            
            blue_score = blue_score + sentence_bleu([reference],hypothesis,weights=weights)
        
        return blue_score/float(no_of_news) 
Developer: kabrapratik28, Project: DeepNews, Lines: 24, Source: model.py

Example 8: get_bleu

# Required import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import sentence_bleu [as alias]
def get_bleu(self, dataloader, input, reference_key, gen_key):
		refs = []
		gens = []
		for gen_sen, resp_sen in zip(input[gen_key], input[reference_key]):
			gen_sen_processed = dataloader.trim_in_ids(gen_sen)
			resp_sen_processed = dataloader.trim_in_ids(resp_sen[1:])
			refs.append(resp_sen_processed)
			gens.append(gen_sen_processed)
		gens = replace_unk(gens)
		bleu_irl_bw, bleu_irl_fw = [], []
		for i in range(len(gens)):
			bleu_irl_fw.append(sentence_bleu(refs, gens[i], smoothing_function=SmoothingFunction().method1))
		for i in range(len(refs)):
			bleu_irl_bw.append(sentence_bleu(gens, refs[i], smoothing_function=SmoothingFunction().method1))

		fw_bleu = (1.0 * sum(bleu_irl_fw) / len(bleu_irl_fw))
		bw_bleu = (1.0 * sum(bleu_irl_bw) / len(bleu_irl_bw))
		return 2.0 * bw_bleu * fw_bleu / (fw_bleu + bw_bleu) 
Developer: thu-coai, Project: cotk, Lines: 20, Source: test_bleu.py

Example 9: _score

# Required import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import sentence_bleu [as alias]
def _score(self, gen: List[int], reference: List[int]) -> float:
		'''Return a BLEU score in [0, 1], used for calculating BLEU n-gram precision and recall.

		Arguments:
			gen (list): list of generated word ids.
			reference (list): list of word ids of a reference.

		Here is an Example:

			>>> gen = [4,5]
			>>> reference = [5,6]
			>>> self._score(gen, reference)
			0.150 # assume self.weights = [0.25,0.25,0.25,0.25]
		'''
		gen = self._replace_unk(gen)
		return sentence_bleu([reference], gen, self.weights, SmoothingFunction().method1) 
Developer: thu-coai, Project: cotk, Lines: 18, Source: precision_recall.py

Example 10: print_batch

# Required import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import sentence_bleu [as alias]
def print_batch(learner: Learner, modeldata: ModelData, input_field, output_field, num_batches=1, num_sentences=-1,
                is_test=False, num_beams=1, weights=None, smoothing_function=None):
    predictions, targets, inputs = learner.predict_with_targs_and_inputs(is_test=is_test, num_beams=num_beams)
    weights = (1 / 3., 1 / 3., 1 / 3.) if weights is None else weights
    smoothing_function = SmoothingFunction().method1 if smoothing_function is None else smoothing_function
    blue_scores = []
    for batch_num, (input, target, prediction) in enumerate(zip(inputs, targets, predictions)):
        inputs_str: BatchBeamTokens = modeldata.itos(input, input_field)
        predictions_str: BatchBeamTokens = modeldata.itos(prediction, output_field)
        targets_str: BatchBeamTokens = modeldata.itos(target, output_field)
        for index, (inp, targ, pred) in enumerate(zip(inputs_str, targets_str, predictions_str)):
            blue_score = sentence_bleu([targ], pred, smoothing_function=smoothing_function, weights=weights)
            print(
                f'batch: {batch_num} sample : {index}\ninput: {" ".join(inp)}\ntarget: { " ".join(targ)}\nprediction: {" ".join(pred)}\nbleu: {blue_score}\n\n')
            blue_scores.append(blue_score)
            if 0 < num_sentences <= index - 1:
                break
        if 0 < num_batches <= batch_num - 1:
            break
    print(f'mean bleu score: {np.mean(blue_scores)}') 
Developer: outcastofmusic, Project: quick-nlp, Lines: 22, Source: utils.py

Example 11: sim_bleu

# Required import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import sentence_bleu [as alias]
def sim_bleu(self, hyps, ref):
        """
        :param ref - a list of tokens of the reference
        :param hyps - a list of tokens of the hypothesis
    
        :return maxbleu - recall bleu
        :return avgbleu - precision bleu
        """
        scores = []
        for hyp in hyps:
            try:
               # scores.append(sentence_bleu([ref], hyp, smoothing_function=SmoothingFunction().method7,
               #                         weights=[1./4, 1./4, 1./4, 1./4]))
                scores.append(smoothed_bleu(list(bleu_stats(hyp, ref))))
            except:
                scores.append(0.0)
        return np.max(scores), np.mean(scores) 
Developer: guxd, Project: deepAPI, Lines: 19, Source: metrics.py

Example 12: reward_function

# Required import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import sentence_bleu [as alias]
def reward_function(self, summary, reference):
    """Calculate the reward between the reference and summary.

    Args:
      reference: A list of ids representing the ground-truth data
      summary: A list of ids representing the model generated data

    Returns:
      A single value representing the evaluation value for reference and summary
    """
    if 'rouge' in self._hps.reward_function:
      return rouge([summary],[reference])[self._hps.reward_function]
    else:
      return sentence_bleu([reference.split()],summary.split(),weights=(0.25,0.25,0.25,0.25)) 
Developer: yaserkl, Project: TransferRL, Lines: 16, Source: model.py

Example 13: bleu_score

# Required import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import sentence_bleu [as alias]
def bleu_score(candidate, reference):
    score = sentence_bleu(
        [list(reference)], list(candidate),
        weights=(0.25, 0.25, 0.25, 0.25),
        smoothing_function=SmoothingFunction().method1)
    return score 
Developer: shibing624, Project: dialogbot, Lines: 8, Source: bleu.py
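Because this helper wraps both arguments in list(), passing plain strings yields character-level BLEU, which suits Chinese text; a hypothetical call (requires sentence_bleu and SmoothingFunction imported as in the snippet above):

print(bleu_score('今天天气很好', '今天天气不错'))  # character-level BLEU between two strings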

Example 14: reward_function

# Required import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import sentence_bleu [as alias]
def reward_function(self, reference, summary, measure='rouge_l/f_score'):
    """Calculate the reward between the reference and summary.

    Args:
      reference: A list of ids representing the ground-truth data
      summary: A list of ids representing the model generated data

    Returns:
      A single value representing the evaluation value for reference and summary
    """
    if 'rouge' in measure:
      return rouge([summary],[reference])[measure]
    else:
      return sentence_bleu([reference.split()],summary.split(),weights=(0.25,0.25,0.25,0.25)) 
Developer: yaserkl, Project: RLSeq2Seq, Lines: 16, Source: model.py

Example 15: bleuMatch

# Required import: from nltk.translate import bleu_score [as alias]
# Or: from nltk.translate.bleu_score import sentence_bleu [as alias]
def bleuMatch(ref, ex, ignoreStopwords, ignoreCase):
        sRef = ref.bow()
        sEx = ex.bow()
        bleu = sentence_bleu(references = [sRef.split(' ')], hypothesis = sEx.split(' '))
        return bleu > Matcher.BLEU_THRESHOLD 
Developer: gabrielStanovsky, Project: oie-benchmark, Lines: 7, Source: matcher.py


Note: The nltk.translate.bleu_score.sentence_bleu examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.