

Python tokenizers.Tokenizer Code Examples

This article collects typical usage examples of allennlp.data.tokenizers.Tokenizer in Python. If you are wondering what Tokenizer is for, how to use it, or what real-world examples look like, the curated code samples below should help. You can also explore further usage examples from the module it lives in, allennlp.data.tokenizers.


The following presents 15 code examples of tokenizers.Tokenizer, sorted by popularity by default.
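The Tokenizer annotation that appears in every signature below is AllenNLP's abstract tokenizer base class; callers pass in a concrete implementation such as SpacyTokenizer, WhitespaceTokenizer, or CharacterTokenizer, and each constructor falls back to a sensible default when none is given. A minimal usage sketch (assuming allennlp >= 1.0, where SpacyTokenizer is the usual replacement for the older WordTokenizer seen in some snippets below):

# Minimal sketch: tokenizing a sentence with a concrete Tokenizer implementation.
# Assumes allennlp >= 1.0; SpacyTokenizer loads (and may download) a spaCy model.
from allennlp.data.tokenizers import SpacyTokenizer, Tokenizer

tokenizer: Tokenizer = SpacyTokenizer()
tokens = tokenizer.tokenize("AllenNLP dataset readers accept a configurable Tokenizer.")
print([token.text for token in tokens])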

Example 1: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(
        self,
        token_indexers: Dict[str, TokenIndexer] = None,
        tokenizer: Tokenizer = None,
        segment_sentences: bool = False,
        max_sequence_length: int = None,
        skip_label_indexing: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self._tokenizer = tokenizer or SpacyTokenizer()
        self._segment_sentences = segment_sentences
        self._max_sequence_length = max_sequence_length
        self._skip_label_indexing = skip_label_indexing
        self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
        if self._segment_sentences:
            self._sentence_segmenter = SpacySentenceSplitter() 
Developer: allenai, Project: allennlp, Lines: 19, Source: text_classification_json.py
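A hedged usage sketch for the constructor above: the source file text_classification_json.py suggests this is AllenNLP's TextClassificationJsonReader, so the class name and import path below are inferred rather than shown in the snippet. Calling it with no arguments already yields a SpacyTokenizer and a single-id token indexer.

# Hedged sketch (class name and import path assumed from the source file name).
from allennlp.data.dataset_readers import TextClassificationJsonReader
from allennlp.data.tokenizers import SpacyTokenizer

reader = TextClassificationJsonReader(
    tokenizer=SpacyTokenizer(),   # identical to the default fallback in __init__
    segment_sentences=False,
    max_sequence_length=512,      # long texts are truncated to this many tokens
)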

Example 2: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(
        self,
        target_namespace: str,
        source_tokenizer: Tokenizer = None,
        target_tokenizer: Tokenizer = None,
        source_token_indexers: Dict[str, TokenIndexer] = None,
        target_token_indexers: Dict[str, TokenIndexer] = None,
        lazy: bool = False,
    ) -> None:
        super().__init__(lazy)
        self._target_namespace = target_namespace
        self._source_tokenizer = source_tokenizer or SpacyTokenizer()
        self._target_tokenizer = target_tokenizer or self._source_tokenizer
        self._source_token_indexers = source_token_indexers or {
            "tokens": SingleIdTokenIndexer()
        }
        self._target_token_indexers = (
            target_token_indexers or self._source_token_indexers
        )
        warnings.warn(
            "The 'copynet' dataset reader has been deprecated in favor of the "
            "'copynet_seq2seq' dataset reader (now part of the AllenNLP library).",
            DeprecationWarning,
        ) 
Developer: epwalsh, Project: nlp-models, Lines: 26, Source: copynet.py

Example 3: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(
        self,
        target_namespace: str,
        source_tokenizer: Tokenizer = None,
        target_tokenizer: Tokenizer = None,
        source_token_indexers: Dict[str, TokenIndexer] = None,
        target_token_indexers: Dict[str, TokenIndexer] = None,
        lazy: bool = False,
    ) -> None:
        source_tokenizer = source_tokenizer or NL2BashWordSplitter()
        target_tokenizer = target_tokenizer or source_tokenizer
        super().__init__(
            target_namespace,
            source_tokenizer=source_tokenizer,
            target_tokenizer=target_tokenizer,
            source_token_indexers=source_token_indexers,
            target_token_indexers=target_token_indexers,
            lazy=lazy,
        ) 
Developer: epwalsh, Project: nlp-models, Lines: 21, Source: nl2bash.py
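The NL2Bash reader above maps natural-language requests to bash commands; its NL2BashWordSplitter is project-specific, but the source/target tokenizer split it inherits from the CopyNet reader can be sketched with stock AllenNLP tokenizers (WhitespaceTokenizer here is an illustrative stand-in, not what the project actually uses):

# Sketch of the source/target two-tokenizer pattern with stock tokenizers.
from allennlp.data.tokenizers import SpacyTokenizer, WhitespaceTokenizer

source_tokenizer = SpacyTokenizer()        # natural-language source side
target_tokenizer = WhitespaceTokenizer()   # simpler, whitespace-delimited target side
print([t.text for t in source_tokenizer.tokenize("List the files in /tmp, please.")])
print([t.text for t in target_tokenizer.tokenize("ls -la /tmp")])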

Example 4: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(self,
                 debug: bool = False,
                 tokenizer: Tokenizer = None,
                 include_more_numbers: bool = False,
                 skip_when_all_empty: List[str] = None,
                 max_number_of_answer: int = 8,
                 max_number_count: int = 10,
                 logger = None) -> None:
        super().__init__()
        self.debug = debug
        self._tokenizer = tokenizer or WordTokenizer()
        self.include_more_numbers = include_more_numbers
        self.max_number_of_answer = max_number_of_answer
        self.max_number_count = max_number_count
        self.skip_when_all_empty = skip_when_all_empty if skip_when_all_empty is not None else []
        for item in self.skip_when_all_empty:
            assert item in ["passage_span", "question_span", "addition_subtraction", "counting", "negation"], \
                f"Unsupported skip type: {item}"
        self.logger = logger 
Developer: huminghao16, Project: MTMSN, Lines: 21, Source: drop_utils.py

Example 5: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(self, utterances: List[str], tokenizer: Tokenizer = None) -> None:
        if AtisWorld.sql_table_context is None:
            AtisWorld.sql_table_context = AtisSqlTableContext(
                atis_tables.ALL_TABLES, atis_tables.TABLES_WITH_STRINGS, AtisWorld.database_file
            )
        self.utterances: List[str] = utterances
        self.tokenizer = tokenizer if tokenizer else SpacyTokenizer()
        self.tokenized_utterances = [
            self.tokenizer.tokenize(utterance) for utterance in self.utterances
        ]
        self.dates = self._get_dates()
        self.linked_entities = self._get_linked_entities()

        entities, linking_scores = self._flatten_entities()
        # This has shape (num_entities, num_utterance_tokens).
        self.linking_scores: numpy.ndarray = linking_scores
        self.entities: List[str] = entities
        self.grammar: Grammar = self._update_grammar()
        self.valid_actions = initialize_valid_actions(self.grammar, KEYWORDS) 
Developer: allenai, Project: allennlp-semparse, Lines: 21, Source: atis_world.py

Example 6: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(
        self,
        lazy: bool = False,
        tokenizer: Tokenizer = None,
        sentence_token_indexers: Dict[str, TokenIndexer] = None,
        nonterminal_indexers: Dict[str, TokenIndexer] = None,
        terminal_indexers: Dict[str, TokenIndexer] = None,
        output_agendas: bool = True,
    ) -> None:
        super().__init__(lazy)
        self._tokenizer = tokenizer or SpacyTokenizer()
        self._sentence_token_indexers = sentence_token_indexers or {
            "tokens": SingleIdTokenIndexer()
        }
        self._nonterminal_indexers = nonterminal_indexers or {
            "tokens": SingleIdTokenIndexer("rule_labels")
        }
        self._terminal_indexers = terminal_indexers or {
            "tokens": SingleIdTokenIndexer("rule_labels")
        }
        self._output_agendas = output_agendas 
Developer: allenai, Project: allennlp-semparse, Lines: 23, Source: nlvr.py

Example 7: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(self,
                 lazy: bool = False,
                 tokenizer: Tokenizer = None,
                 token_indexers: Dict[str, TokenIndexer] = None,
                 words_per_instance: int = 35
                ) -> None:
        super().__init__(lazy)
        self._tokenizer = tokenizer or WordTokenizer(
            start_tokens=[START_SYMBOL],
            end_tokens=[END_SYMBOL]
        )
        self._token_indexers = token_indexers or {
            "tokens": SingleIdTokenIndexer(namespace="tokens", lowercase_tokens=True)
        }

        self._words_per_instance = words_per_instance 
Developer: dangitstam, Project: topic-rnn, Lines: 18, Source: imdb_review_reader.py

Example 8: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(self,
                 lazy: bool = False,
                 tokenizer: Tokenizer = None,
                 token_indexers: Dict[str, TokenIndexer] = None,
                 clean_citation: bool = True,
                 with_elmo: bool = False
                 ) -> None:
        super().__init__(lazy)
        self._clean_citation = clean_citation
        self._tokenizer = tokenizer or WordTokenizer()
        self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
        if with_elmo:
            self._token_indexers = {"elmo": ELMoTokenCharactersIndexer(),
                                    "tokens": SingleIdTokenIndexer()}
        else:
            self._token_indexers = {"tokens": SingleIdTokenIndexer()} 
Developer: allenai, Project: scicite, Lines: 18, Source: citation_data_reader_scicite_aux.py

Example 9: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(self,
                 tokenizer: Tokenizer = None,
                 token_indexers: Dict[str, TokenIndexer] = None,
                 source_add_start_token: bool = True,
                 max_doc_length:int = -1,
                 max_query_length:int = -1,
                 min_doc_length:int = -1,
                 min_query_length:int = -1,
                 lazy: bool = False) -> None:
        super().__init__(lazy)
        self._tokenizer = tokenizer or WordTokenizer()  # a bit faster and useful for multi-core processing: word_splitter=SimpleWordSplitter()
        self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer(lowercase_tokens=True)}
        self._source_add_start_token = source_add_start_token
        self.max_doc_length = max_doc_length
        self.max_query_length = max_query_length
        self.min_doc_length = min_doc_length
        self.min_query_length = min_query_length

        self.padding_value = Token(text = "@@PADDING@@",text_id=0) 
Developer: sebastian-hofstaetter, Project: transformer-kernel-ranking, Lines: 21, Source: ir_labeled_tuple_loader.py
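The reader above also pre-builds a padding Token with text_id=0, presumably so downstream batching can pad documents and queries with a known token. Token itself is a small AllenNLP data class; a sketch:

# Sketch: constructing an explicit padding token with AllenNLP's Token class.
from allennlp.data.tokenizers import Token

padding = Token(text="@@PADDING@@", text_id=0)
print(padding.text, padding.text_id)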

Example 10: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(self,
                 tokenizer: Tokenizer = None,
                 token_indexers: Dict[str, TokenIndexer] = None,
                 source_add_start_token: bool = True,
                 max_doc_length:int = -1,
                 max_query_length:int = -1,
                 min_doc_length:int = -1,
                 min_query_length:int = -1,
                 lazy: bool = False) -> None:
        super().__init__(lazy)
        self._tokenizer = tokenizer
        self._token_indexers = token_indexers
        self._source_add_start_token = source_add_start_token
        self.max_doc_length = max_doc_length
        self.max_query_length = max_query_length
        self.min_doc_length = min_doc_length
        self.min_query_length = min_query_length

        self.padding_value = Token(text = "[PAD]",text_id=0)
        self.sep_value = Token(text = "[SEP]") 
Developer: sebastian-hofstaetter, Project: transformer-kernel-ranking, Lines: 22, Source: bert_labeled_tuple_loader.py

Example 11: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(self,
                 tokenizer: Tokenizer,
                 language: str,
                 source_token_indexers: Dict[str, TokenIndexer] = None,
                 max_sentences_count: int = 100,
                 sentence_max_tokens: int = 100,
                 lowercase: bool = True,
                 lazy: bool = True) -> None:
        super().__init__(lazy=lazy)

        self._tokenizer = tokenizer
        self._lowercase = lowercase
        self._language = language
        self._max_sentences_count = max_sentences_count
        self._sentence_max_tokens = sentence_max_tokens
        self._source_token_indexers = source_token_indexers or {"tokens": SingleIdTokenIndexer()} 
Developer: IlyaGusev, Project: summarus, Lines: 18, Source: summarization_sentence_tagger_reader.py

Example 12: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(self,
                 token_indexers: Dict[str, TokenIndexer] = None,
                 tokenizer: Tokenizer = None,
                 max_sequence_length: int = None,
                 ignore_labels: bool = False,
                 sample: int = None,
                 skip_label_indexing: bool = False,
                 lazy: bool = False) -> None:
        super().__init__(lazy=lazy,
                         token_indexers=token_indexers,
                         tokenizer=tokenizer,
                         max_sequence_length=max_sequence_length,
                         skip_label_indexing=skip_label_indexing)
        self._tokenizer = tokenizer or WordTokenizer()
        self._sample = sample
        self._max_sequence_length = max_sequence_length
        self._ignore_labels = ignore_labels
        self._skip_label_indexing = skip_label_indexing
        self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
        if self._segment_sentences:
            self._sentence_segmenter = SpacySentenceSplitter() 
Developer: allenai, Project: vampire, Lines: 23, Source: semisupervised_text_classification_json.py

Example 13: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(self,
                 lazy: bool = False,
                 tokenizer: Tokenizer = None,
                 token_indexers: Dict[str, TokenIndexer] = None,
                 ) -> None:
        super().__init__(lazy)
        self._tokenizer = tokenizer or WordTokenizer()
        self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()} 
Developer: allenai, Project: scibert, Lines: 10, Source: classification_dataset_reader.py

Example 14: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(self,
                 tokenizer: Tokenizer = None,
                 token_indexers: Dict[str, TokenIndexer] = None,
                 source_add_start_token: bool = True,
                 max_doc_length:int = -1,
                 max_query_length:int = -1,
                 lazy: bool = False) -> None:
        super().__init__(lazy)
        self._tokenizer = tokenizer or WordTokenizer()  # a bit faster and useful for multi-core processing: word_splitter=SimpleWordSplitter()
        self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer(lowercase_tokens=True)}
        self._source_add_start_token = source_add_start_token
        self.max_doc_length = max_doc_length
        self.max_query_length = max_query_length 
Developer: sebastian-hofstaetter, Project: teaching, Lines: 15, Source: data_loading.py

Example 15: __init__

# Required import: from allennlp.data import tokenizers [as alias]
# Or: from allennlp.data.tokenizers import Tokenizer [as alias]
def __init__(self,
                 lazy: bool = False,
                 tokenizer: Tokenizer = None,
                 token_indexers: Dict[str, TokenIndexer] = None,
                 max_token_len: int = 512):
        super().__init__(lazy)

        self._tokenizer = tokenizer or CharacterTokenizer()
        self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
        self._max_token_len = max_token_len 
Developer: mhagiwara, Project: nanigonet, Lines: 12, Source: dataset_reader.py
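The last example falls back to CharacterTokenizer, which splits text into one Token per character instead of per word. A minimal sketch of that default (standard AllenNLP API):

# Minimal sketch of the CharacterTokenizer default used above.
from allennlp.data.tokenizers import CharacterTokenizer

tokenizer = CharacterTokenizer()
print([t.text for t in tokenizer.tokenize("niño")])   # ['n', 'i', 'ñ', 'o']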


Note: the allennlp.data.tokenizers.Tokenizer examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors; copyright in the source code remains with the original authors, and distribution and use are governed by each project's license. Please do not republish without permission.