

Python tokenizers.Tokenizer Method Code Examples

This article collects typical usage examples of the Python method allennlp.data.tokenizers.Tokenizer. If you are unsure what tokenizers.Tokenizer does, how to call it, or what idiomatic usage looks like, the curated examples below should help; you can also explore further usage of the containing module, allennlp.data.tokenizers.


The following presents 15 code examples of the tokenizers.Tokenizer method, sorted by popularity by default.
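Before the examples, here is a minimal sketch of the Tokenizer interface they all build on (an illustration, assuming a recent allennlp 1.x/2.x install; the sample sentence is made up): a Tokenizer turns a string into a list of Token objects via its tokenize method.

from allennlp.data.tokenizers import SpacyTokenizer

# SpacyTokenizer is one concrete Tokenizer implementation, backed by spaCy.
tokenizer = SpacyTokenizer()
tokens = tokenizer.tokenize("AllenNLP makes NLP research easier.")
print([t.text for t in tokens])  # ['AllenNLP', 'makes', 'NLP', 'research', 'easier', '.']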

Example 1: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(
        self,
        token_indexers: Dict[str, TokenIndexer] = None,
        tokenizer: Tokenizer = None,
        segment_sentences: bool = False,
        max_sequence_length: int = None,
        skip_label_indexing: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self._tokenizer = tokenizer or SpacyTokenizer()
        self._segment_sentences = segment_sentences
        self._max_sequence_length = max_sequence_length
        self._skip_label_indexing = skip_label_indexing
        self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
        if self._segment_sentences:
            self._sentence_segmenter = SpacySentenceSplitter() 
Developer: allenai | Project: allennlp | Lines: 19 | Source: text_classification_json.py
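A quick usage sketch (not part of the original excerpt): the constructor above is allennlp's TextClassificationJsonReader, so a hypothetical instantiation with a custom tokenizer could look like this; the data path is made up.

from allennlp.data.dataset_readers import TextClassificationJsonReader
from allennlp.data.tokenizers import SpacyTokenizer

reader = TextClassificationJsonReader(tokenizer=SpacyTokenizer(), segment_sentences=False)
# Hypothetical path; each JSON line is expected to contain "text" and "label" fields.
# instances = reader.read("data/train.jsonl")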

Example 2: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(
        self,
        target_namespace: str,
        source_tokenizer: Tokenizer = None,
        target_tokenizer: Tokenizer = None,
        source_token_indexers: Dict[str, TokenIndexer] = None,
        target_token_indexers: Dict[str, TokenIndexer] = None,
        lazy: bool = False,
    ) -> None:
        super().__init__(lazy)
        self._target_namespace = target_namespace
        self._source_tokenizer = source_tokenizer or SpacyTokenizer()
        self._target_tokenizer = target_tokenizer or self._source_tokenizer
        self._source_token_indexers = source_token_indexers or {
            "tokens": SingleIdTokenIndexer()
        }
        self._target_token_indexers = (
            target_token_indexers or self._source_token_indexers
        )
        warnings.warn(
            "The 'copynet' dataset reader has been deprecated in favor of the "
            "'copynet_seq2seq' dataset reader (now part of the AllenNLP library).",
            DeprecationWarning,
        ) 
Developer: epwalsh | Project: nlp-models | Lines: 26 | Source: copynet.py

Example 3: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(
        self,
        target_namespace: str,
        source_tokenizer: Tokenizer = None,
        target_tokenizer: Tokenizer = None,
        source_token_indexers: Dict[str, TokenIndexer] = None,
        target_token_indexers: Dict[str, TokenIndexer] = None,
        lazy: bool = False,
    ) -> None:
        source_tokenizer = source_tokenizer or NL2BashWordSplitter()
        target_tokenizer = target_tokenizer or source_tokenizer
        super().__init__(
            target_namespace,
            source_tokenizer=source_tokenizer,
            target_tokenizer=target_tokenizer,
            source_token_indexers=source_token_indexers,
            target_token_indexers=target_token_indexers,
            lazy=lazy,
        ) 
Developer: epwalsh | Project: nlp-models | Lines: 21 | Source: nl2bash.py

Example 4: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(self,
                 debug: bool = False,
                 tokenizer: Tokenizer = None,
                 include_more_numbers: bool = False,
                 skip_when_all_empty: List[str] = None,
                 max_number_of_answer: int = 8,
                 max_number_count: int = 10,
                 logger = None) -> None:
        super().__init__()
        self.debug = debug
        self._tokenizer = tokenizer or WordTokenizer()
        self.include_more_numbers = include_more_numbers
        self.max_number_of_answer = max_number_of_answer
        self.max_number_count = max_number_count
        self.skip_when_all_empty = skip_when_all_empty if skip_when_all_empty is not None else []
        for item in self.skip_when_all_empty:
            assert item in ["passage_span", "question_span", "addition_subtraction", "counting", "negation"], \
                f"Unsupported skip type: {item}"
        self.logger = logger 
Developer: huminghao16 | Project: MTMSN | Lines: 21 | Source: drop_utils.py

Example 5: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(self, utterances: List[str], tokenizer: Tokenizer = None) -> None:
        if AtisWorld.sql_table_context is None:
            AtisWorld.sql_table_context = AtisSqlTableContext(
                atis_tables.ALL_TABLES, atis_tables.TABLES_WITH_STRINGS, AtisWorld.database_file
            )
        self.utterances: List[str] = utterances
        self.tokenizer = tokenizer if tokenizer else SpacyTokenizer()
        self.tokenized_utterances = [
            self.tokenizer.tokenize(utterance) for utterance in self.utterances
        ]
        self.dates = self._get_dates()
        self.linked_entities = self._get_linked_entities()

        entities, linking_scores = self._flatten_entities()
        # This has shape (num_entities, num_utterance_tokens).
        self.linking_scores: numpy.ndarray = linking_scores
        self.entities: List[str] = entities
        self.grammar: Grammar = self._update_grammar()
        self.valid_actions = initialize_valid_actions(self.grammar, KEYWORDS) 
Developer: allenai | Project: allennlp-semparse | Lines: 21 | Source: atis_world.py

Example 6: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(
        self,
        lazy: bool = False,
        tokenizer: Tokenizer = None,
        sentence_token_indexers: Dict[str, TokenIndexer] = None,
        nonterminal_indexers: Dict[str, TokenIndexer] = None,
        terminal_indexers: Dict[str, TokenIndexer] = None,
        output_agendas: bool = True,
    ) -> None:
        super().__init__(lazy)
        self._tokenizer = tokenizer or SpacyTokenizer()
        self._sentence_token_indexers = sentence_token_indexers or {
            "tokens": SingleIdTokenIndexer()
        }
        self._nonterminal_indexers = nonterminal_indexers or {
            "tokens": SingleIdTokenIndexer("rule_labels")
        }
        self._terminal_indexers = terminal_indexers or {
            "tokens": SingleIdTokenIndexer("rule_labels")
        }
        self._output_agendas = output_agendas 
Developer: allenai | Project: allennlp-semparse | Lines: 23 | Source: nlvr.py

Example 7: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(self,
                 lazy: bool = False,
                 tokenizer: Tokenizer = None,
                 token_indexers: Dict[str, TokenIndexer] = None,
                 words_per_instance: int = 35
                ) -> None:
        super().__init__(lazy)
        self._tokenizer = tokenizer or WordTokenizer(
            start_tokens=[START_SYMBOL],
            end_tokens=[END_SYMBOL]
        )
        self._token_indexers = token_indexers or {
            "tokens": SingleIdTokenIndexer(namespace="tokens", lowercase_tokens=True)
        }

        self._words_per_instance = words_per_instance 
Developer: dangitstam | Project: topic-rnn | Lines: 18 | Source: imdb_review_reader.py
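If the start/end token arguments above are unfamiliar, this small sketch (assuming a pre-1.0 allennlp, where WordTokenizer still exists) shows their effect: every tokenized sequence gets wrapped in sentence-boundary markers.

from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.data.tokenizers import WordTokenizer

tokenizer = WordTokenizer(start_tokens=[START_SYMBOL], end_tokens=[END_SYMBOL])
tokens = tokenizer.tokenize("the movie was great")
print([t.text for t in tokens])  # ['@start@', 'the', 'movie', 'was', 'great', '@end@']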

Example 8: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(self,
                 lazy: bool = False,
                 tokenizer: Tokenizer = None,
                 token_indexers: Dict[str, TokenIndexer] = None,
                 clean_citation: bool = True,
                 with_elmo: bool = False
                 ) -> None:
        super().__init__(lazy)
        self._clean_citation = clean_citation
        self._tokenizer = tokenizer or WordTokenizer()
        # Any token_indexers argument is effectively ignored: the indexers are determined entirely by with_elmo.
        if with_elmo:
            self._token_indexers = {"elmo": ELMoTokenCharactersIndexer(),
                                    "tokens": SingleIdTokenIndexer()}
        else:
            self._token_indexers = {"tokens": SingleIdTokenIndexer()} 
Developer: allenai | Project: scicite | Lines: 18 | Source: citation_data_reader_scicite_aux.py

Example 9: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(self,
                 tokenizer: Tokenizer = None,
                 token_indexers: Dict[str, TokenIndexer] = None,
                 source_add_start_token: bool = True,
                 max_doc_length: int = -1,
                 max_query_length: int = -1,
                 min_doc_length: int = -1,
                 min_query_length: int = -1,
                 lazy: bool = False) -> None:
        super().__init__(lazy)
        # Tip: WordTokenizer(word_splitter=SimpleWordSplitter()) is a little faster, useful for multicore processing.
        self._tokenizer = tokenizer or WordTokenizer()
        self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer(lowercase_tokens=True)}
        self._source_add_start_token = source_add_start_token
        self.max_doc_length = max_doc_length
        self.max_query_length = max_query_length
        self.min_doc_length = min_doc_length
        self.min_query_length = min_query_length

        self.padding_value = Token(text="@@PADDING@@", text_id=0) 
Developer: sebastian-hofstaetter | Project: transformer-kernel-ranking | Lines: 21 | Source: ir_labeled_tuple_loader.py
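One detail worth noting in the excerpt above: Token accepts an explicit text_id, which pins the token to a fixed vocabulary id (here, the padding id 0) rather than leaving it to vocabulary lookup. A tiny sketch, assuming allennlp's Token class:

from allennlp.data.tokenizers import Token

pad = Token(text="@@PADDING@@", text_id=0)
print(pad.text, pad.text_id)  # @@PADDING@@ 0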

Example 10: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(self,
                 tokenizer: Tokenizer = None,
                 token_indexers: Dict[str, TokenIndexer] = None,
                 source_add_start_token: bool = True,
                 max_doc_length: int = -1,
                 max_query_length: int = -1,
                 min_doc_length: int = -1,
                 min_query_length: int = -1,
                 lazy: bool = False) -> None:
        super().__init__(lazy)
        self._tokenizer = tokenizer
        self._token_indexers = token_indexers
        self._source_add_start_token = source_add_start_token
        self.max_doc_length = max_doc_length
        self.max_query_length = max_query_length
        self.min_doc_length = min_doc_length
        self.min_query_length = min_query_length

        self.padding_value = Token(text="[PAD]", text_id=0)
        self.sep_value = Token(text="[SEP]") 
Developer: sebastian-hofstaetter | Project: transformer-kernel-ranking | Lines: 22 | Source: bert_labeled_tuple_loader.py

Example 11: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(self,
                 tokenizer: Tokenizer,
                 language: str,
                 source_token_indexers: Dict[str, TokenIndexer] = None,
                 max_sentences_count: int = 100,
                 sentence_max_tokens: int = 100,
                 lowercase: bool = True,
                 lazy: bool = True) -> None:
        super().__init__(lazy=lazy)

        self._tokenizer = tokenizer
        self._lowercase = lowercase
        self._language = language
        self._max_sentences_count = max_sentences_count
        self._sentence_max_tokens = sentence_max_tokens
        self._source_token_indexers = source_token_indexers or {"tokens": SingleIdTokenIndexer()} 
Developer: IlyaGusev | Project: summarus | Lines: 18 | Source: summarization_sentence_tagger_reader.py

Example 12: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(self,
                 token_indexers: Dict[str, TokenIndexer] = None,
                 tokenizer: Tokenizer = None,
                 max_sequence_length: int = None,
                 ignore_labels: bool = False,
                 sample: int = None,
                 skip_label_indexing: bool = False,
                 lazy: bool = False) -> None:
        super().__init__(lazy=lazy,
                         token_indexers=token_indexers,
                         tokenizer=tokenizer,
                         max_sequence_length=max_sequence_length,
                         skip_label_indexing=skip_label_indexing)
        self._tokenizer = tokenizer or WordTokenizer()
        self._sample = sample
        self._max_sequence_length = max_sequence_length
        self._ignore_labels = ignore_labels
        self._skip_label_indexing = skip_label_indexing
        self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
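        # self._segment_sentences is inherited from the parent TextClassificationJsonReader.__init__ (defaults to False).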
        if self._segment_sentences:
            self._sentence_segmenter = SpacySentenceSplitter() 
Developer: allenai | Project: vampire | Lines: 23 | Source: semisupervised_text_classification_json.py

Example 13: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(self,
                 lazy: bool = False,
                 tokenizer: Tokenizer = None,
                 token_indexers: Dict[str, TokenIndexer] = None,
                 ) -> None:
        super().__init__(lazy)
        self._tokenizer = tokenizer or WordTokenizer()
        self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()} 
Developer: allenai | Project: scibert | Lines: 10 | Source: classification_dataset_reader.py

Example 14: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(self,
                 tokenizer: Tokenizer = None,
                 token_indexers: Dict[str, TokenIndexer] = None,
                 source_add_start_token: bool = True,
                 max_doc_length: int = -1,
                 max_query_length: int = -1,
                 lazy: bool = False) -> None:
        super().__init__(lazy)
        # Tip: WordTokenizer(word_splitter=SimpleWordSplitter()) is a little faster, useful for multicore processing.
        self._tokenizer = tokenizer or WordTokenizer()
        self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer(lowercase_tokens=True)}
        self._source_add_start_token = source_add_start_token
        self.max_doc_length = max_doc_length
        self.max_query_length = max_query_length 
Developer: sebastian-hofstaetter | Project: teaching | Lines: 15 | Source: data_loading.py

Example 15: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(self,
                 lazy: bool = False,
                 tokenizer: Tokenizer = None,
                 token_indexers: Dict[str, TokenIndexer] = None,
                 max_token_len: int = 512):
        super().__init__(lazy)

        self._tokenizer = tokenizer or CharacterTokenizer()
        self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
        self._max_token_len = max_token_len 
Developer: mhagiwara | Project: nanigonet | Lines: 12 | Source: dataset_reader.py
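As a closing sketch (assuming allennlp is installed): the default CharacterTokenizer above emits one Token per character, which suits character-level models such as this language identifier.

from allennlp.data.tokenizers import CharacterTokenizer

tokenizer = CharacterTokenizer()
tokens = tokenizer.tokenize("héllo")
print([t.text for t in tokens])  # ['h', 'é', 'l', 'l', 'o']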


Note: The allennlp.data.tokenizers.Tokenizer examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Consult each project's License before distributing or reusing the code; do not repost without permission.