

Python sacremoses.MosesTokenizer Code Examples

This article collects typical usage examples of the sacremoses.MosesTokenizer class in Python. If you are wondering how sacremoses.MosesTokenizer is used in practice, what a real call looks like, or what it is good for, the hand-picked code examples below may help. You can also browse further usage examples from the sacremoses package itself.


The following presents 8 code examples of sacremoses.MosesTokenizer, drawn from open-source projects and sorted by popularity by default.
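
Before the project-specific examples, here is a minimal standalone sketch of the sacremoses calls that all of them build on (assuming sacremoses is installed, e.g. via pip install sacremoses):

from sacremoses import MosesTokenizer, MosesDetokenizer

tok = MosesTokenizer(lang='en')      # language-aware tokenization rules
detok = MosesDetokenizer(lang='en')

# tokenize() returns a list of tokens by default; pass return_str=True for a
# single space-joined string, and escape=False to keep characters such as
# & and ' literal instead of XML-escaping them.
tokens = tok.tokenize("Hello World, this is a test.", escape=False)

# detokenize() joins a token list back into a surface string.
sentence = detok.detokenize(tokens)
print(tokens)
print(sentence)

The examples below differ mainly in which of these flags they set and in how they cache or wrap the tokenizer and detokenizer objects.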

Example 1: __init__

# Required imports: import sacremoses [as alias]
# Or: from sacremoses import MosesTokenizer [as alias]
# This snippet also uses: from functools import partial
def __init__(self, *args, **kwargs):
        if 'tokenize' in kwargs:
            raise TypeError('``MosesEncoder`` does not take keyword argument ``tokenize``.')

        if 'detokenize' in kwargs:
            raise TypeError('``MosesEncoder`` does not take keyword argument ``detokenize``.')

        try:
            from sacremoses import MosesTokenizer
            from sacremoses import MosesDetokenizer
        except ImportError:
            print("Please install SacreMoses. "
                  "See the docs at https://github.com/alvations/sacremoses for more information.")
            raise

        super().__init__(
            *args,
            tokenize=MosesTokenizer().tokenize,
            detokenize=partial(MosesDetokenizer().detokenize, return_str=True),
            **kwargs) 
Author: PetrochukM | Project: PyTorch-NLP | Lines: 22 | Source file: moses_encoder.py
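
As a minimal standalone sketch (not the PyTorch-NLP encoder itself), these are the two callables that the constructor above hands to its parent class:

from functools import partial
from sacremoses import MosesTokenizer, MosesDetokenizer

tokenize = MosesTokenizer().tokenize
detokenize = partial(MosesDetokenizer().detokenize, return_str=True)

tokens = tokenize("Round-tripping text through Moses.")
restored = detokenize(tokens)   # back to a single string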

Example 2: normalize

# Required imports: import sacremoses [as alias]
# Or: from sacremoses import MosesTokenizer [as alias]
# This snippet also uses: import sacrebleu
def normalize(sentence, lowercase: bool = True, tokenizer: str = '13a', return_str: bool = True):
    if lowercase:
        sentence = sentence.lower()

    if tokenizer == "13a":
        normalized_sent = sacrebleu.tokenize_13a(sentence)
    elif tokenizer == "intl":
        normalized_sent = sacrebleu.tokenize_v14_international(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent 
Author: feralvam | Project: easse | Lines: 21 | Source file: preprocessing.py
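
The "moses" and "penn" branches above map to two different sacremoses entry points; here is a small sketch of just those two calls (tokenize_13a and tokenize_v14_international are helpers from older sacrebleu releases and are not repeated here):

import sacremoses

mt = sacremoses.MosesTokenizer()

# "moses": standard Moses tokenization, unescaped, returned as a single string
moses_str = mt.tokenize("It's just a test, isn't it?", return_str=True, escape=False)

# "penn": Penn Treebank-style tokenization
penn_str = mt.penn_tokenize("It's just a test, isn't it?", return_str=True)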

Example 3: __init__

# Required imports: import sacremoses [as alias]
# Or: from sacremoses import MosesTokenizer [as alias]
def __init__(self, args):
        self.args = args

        if getattr(args, 'moses_source_lang', None) is None:
            args.moses_source_lang = getattr(args, 'source_lang', 'en')
        if getattr(args, 'moses_target_lang', None) is None:
            args.moses_target_lang = getattr(args, 'target_lang', 'en')

        try:
            from sacremoses import MosesTokenizer, MosesDetokenizer
            self.tok = MosesTokenizer(args.moses_source_lang)
            self.detok = MosesDetokenizer(args.moses_target_lang)
        except ImportError:
            raise ImportError('Please install Moses tokenizer with: pip install sacremoses') 
Author: pytorch | Project: fairseq | Lines: 16 | Source file: moses_tokenizer.py
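
Outside of fairseq, the same source/target split looks roughly like the sketch below; the 'de'/'en' language pair is only an illustrative assumption:

from sacremoses import MosesTokenizer, MosesDetokenizer

src_tok = MosesTokenizer('de')       # tokenize inputs with source-language rules
tgt_detok = MosesDetokenizer('en')   # detokenize outputs with target-language rules

src_tokens = src_tok.tokenize("Das ist nur ein Beispiel.")
# ... a translation model would turn src_tokens into target-side tokens ...
hypothesis = tgt_detok.detokenize(["This", "is", "only", "an", "example", "."])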

Example 4: __init__

# Required imports: import sacremoses [as alias]
# Or: from sacremoses import MosesTokenizer [as alias]
# This snippet also uses: MosesPunctNormalizer (sacremoses), Optional (typing),
# and the optional jieba / MeCab imports
def __init__(self,
                 lang: str = 'en',
                 lower_case: bool = True,
                 romanize: Optional[bool] = None,
                 descape: bool = False):
        assert lower_case, 'lower case is needed by all the models'

        if lang in ('cmn', 'wuu', 'yue'):
            lang = 'zh'
        if lang == 'jpn':
            lang = 'ja'

        if lang == 'zh' and jieba is None:
            raise ModuleNotFoundError(
                '''No module named 'jieba'. Install laserembeddings with 'zh' extra to fix that: "pip install laserembeddings[zh]"'''
            )
        if lang == 'ja' and MeCab is None:
            raise ModuleNotFoundError(
                '''No module named 'MeCab'. Install laserembeddings with 'ja' extra to fix that: "pip install laserembeddings[ja]"'''
            )

        self.lang = lang
        self.lower_case = lower_case
        self.romanize = romanize if romanize is not None else lang == 'el'
        self.descape = descape

        self.normalizer = MosesPunctNormalizer(lang=lang)
        self.tokenizer = MosesTokenizer(lang=lang)
        self.mecab_tokenizer = MeCab.Tagger(
            "-O wakati -b 50000") if lang == 'ja' else None 
Author: yannvgn | Project: laserembeddings | Lines: 32 | Source file: preprocessing.py
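
Stripped of the jieba/MeCab and romanization branches, the normalize-then-tokenize pipeline this class applies to a non-CJK language looks roughly like the sketch below; the exact flags used by the original class may differ:

from sacremoses import MosesPunctNormalizer, MosesTokenizer

lang = 'en'
normalizer = MosesPunctNormalizer(lang=lang)
tokenizer = MosesTokenizer(lang=lang)

text = "A sentence with “curly” quotes and   odd   spacing."
text = normalizer.normalize(text)   # normalize punctuation and whitespace
text = text.lower()                 # lower casing is required by the models
tokens = tokenizer.tokenize(text, return_str=True, escape=False)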

Example 5: tokenize_captions

# Required imports: import sacremoses [as alias]
# Or: from sacremoses import MosesTokenizer [as alias]
def tokenize_captions(captions, lang='en'):
    """Tokenizes captions list with Moses tokenizer.
    """

    tokenizer = MosesTokenizer(lang=lang)
    return [tokenizer.tokenize(caption, return_str=True) for caption in captions] 
Author: krasserm | Project: fairseq-image-captioning | Lines: 8 | Source file: tokenize_captions.py

Example 6: moses_tokenize

# Required imports: import sacremoses [as alias]
# Or: from sacremoses import MosesTokenizer [as alias]
# (in this snippet sacremoses is aliased as sm)
def moses_tokenize(self, text, lang):
        if lang not in self.cache_moses_tokenizer:
            moses_tokenizer = sm.MosesTokenizer(lang=lang)
            self.cache_moses_tokenizer[lang] = moses_tokenizer
        else:
            moses_tokenizer = self.cache_moses_tokenizer[lang]
        return moses_tokenizer.tokenize(text, return_str=False, escape=False) 
Author: bhoov | Project: exbert | Lines: 9 | Source file: tokenization_xlm.py
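
The dictionary above simply caches one tokenizer per language so it is not rebuilt on every call; the same idea as a standalone helper:

import sacremoses as sm

_tokenizer_cache = {}

def get_moses_tokenizer(lang):
    # Build one MosesTokenizer per language and reuse it on later calls.
    if lang not in _tokenizer_cache:
        _tokenizer_cache[lang] = sm.MosesTokenizer(lang=lang)
    return _tokenizer_cache[lang]

tokens_en = get_moses_tokenizer('en').tokenize("Cached tokenizers are reused.", escape=False)
tokens_fr = get_moses_tokenizer('fr').tokenize("Les segmenteurs sont réutilisés.", escape=False)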

Example 7: __init__

# Required imports: import sacremoses [as alias]
# Or: from sacremoses import MosesTokenizer [as alias]
# This snippet also uses: MosesDetokenizer (sacremoses)
def __init__(self, escape: bool = False, *args, **kwargs):
        self.escape = escape
        self.tokenizer = MosesTokenizer()
        self.detokenizer = MosesDetokenizer() 
Author: deepmipt | Project: DeepPavlov | Lines: 6 | Source file: nltk_moses_tokenizer.py

Example 8: enable_moses

# Required imports: import sacremoses [as alias]
# Or: from sacremoses import MosesTokenizer [as alias]
# This snippet also uses: MosesDetokenizer (sacremoses)
def enable_moses(self, lang='en', tokenize=True, detokenize=True):
        if tokenize:
            self._moses_tok = MosesTokenizer(lang=lang)
        else:
            self._moses_tok = None

        if detokenize:
            self._moses_detok = MosesDetokenizer(lang=lang)
        else:
            self._moses_detok = None 
Author: eladhoffer | Project: seq2seq.pytorch | Lines: 12 | Source file: tokenizer.py


Note: the sacremoses.MosesTokenizer examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and any redistribution or use should follow the corresponding project's license. Please do not reproduce this article without permission.