

Python util.namespace_match Method Code Examples

This article collects typical usage examples of the Python method allennlp.common.util.namespace_match. If you are wondering what util.namespace_match does, how to call it, or how it is used in practice, the hand-picked examples below should help. You can also explore further usage examples from its containing module, allennlp.common.util.


Nine code examples of util.namespace_match are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
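Before diving into the examples, it helps to know what namespace_match does: it checks whether a vocabulary namespace name matches a pattern, where a leading `*` acts as a suffix wildcard (so `*tags` matches `passage_tags`) and any other pattern must match the namespace exactly. The snippet below is a minimal re-implementation written purely to illustrate that behavior; it mirrors the assertions in Example 3 and is not the AllenNLP source itself.

def namespace_match(pattern: str, namespace: str) -> bool:
    # A pattern starting with '*' matches any namespace that ends with the
    # rest of the pattern; any other pattern must match exactly.
    if pattern.startswith("*") and namespace.endswith(pattern[1:]):
        return True
    return pattern == namespace

# Behavior consistent with the test case in Example 3 below:
assert namespace_match("*tags", "passage_tags")
assert namespace_match("tokens", "tokens")
assert not namespace_match("tokens", "stemmed_tokens")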

Example 1: initialize_dictionary

# Required import: from allennlp.common import util [as alias]
# Alternatively: from allennlp.common.util import namespace_match [as alias]
def initialize_dictionary(self, namespace: str, unk_num: int, mode: MappingMode):
        if mode == MappingMode.token2index:
            # Non-padded namespaces start from an empty token -> index mapping.
            if any(namespace_match(pattern, namespace) for pattern in self._non_padded_namespaces):
                dict.__setitem__(self, namespace, {})
            else:
                # Padded namespaces reserve index 0 for the padding token and add `unk_num` OOV buckets.
                init_namespace_dictionary = RandomHashDict(unk_num=unk_num, oov_token=self.oov_token)
                init_namespace_dictionary.update({self.padding_token: 0})
                init_namespace_dictionary.add_unk_tokens()

                dict.__setitem__(self, namespace, init_namespace_dictionary)

        elif mode == MappingMode.index2token:
            if any(namespace_match(pattern, namespace) for pattern in self._non_padded_namespaces):
                dict.__setitem__(self, namespace, {})
            else:
                # Index 0 maps back to the padding token; the next `unk_num` indices map to OOV placeholders.
                init_namespace_dictionary = {0: self.padding_token}
                for i in range(unk_num):
                    init_namespace_dictionary[len(init_namespace_dictionary)] = f"@@{self.oov_token}#{str(i)}@@"

                dict.__setitem__(self, namespace, init_namespace_dictionary) 
Author: easonnie | Project: combine-FEVER-NSMN | Lines: 22 | Source file: exvocab.py

Example 2: __missing__

# Required import: from allennlp.common import util [as alias]
# Alternatively: from allennlp.common.util import namespace_match [as alias]
def __missing__(self, key: str):
        if any(namespace_match(pattern, key) for pattern in self._non_padded_namespaces):
            value = self._non_padded_function()
        else:
            value = self._padded_function()
        dict.__setitem__(self, key, value)
        return value 
Author: allenai | Project: allennlp | Lines: 9 | Source file: vocabulary.py
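This __missing__ hook turns a plain dict into a namespace-aware default dictionary: the first time a namespace key is looked up, the value is created by one factory if the key matches a non-padded pattern and by another factory otherwise. Below is a minimal self-contained sketch of the same pattern; the class name and factory functions are illustrative, not AllenNLP's actual internals.

from typing import Any, Callable, List


def namespace_match(pattern: str, namespace: str) -> bool:
    # Same behavior as the sketch near the top of this article.
    return (pattern.startswith("*") and namespace.endswith(pattern[1:])) or pattern == namespace


class NamespaceDefaultDict(dict):
    """A dict that lazily builds a per-namespace value on first access."""

    def __init__(self, non_padded_namespaces: List[str],
                 padded_function: Callable[[], Any],
                 non_padded_function: Callable[[], Any]) -> None:
        super().__init__()
        self._non_padded_namespaces = non_padded_namespaces
        self._padded_function = padded_function
        self._non_padded_function = non_padded_function

    def __missing__(self, key: str) -> Any:
        if any(namespace_match(pattern, key) for pattern in self._non_padded_namespaces):
            value = self._non_padded_function()
        else:
            value = self._padded_function()
        dict.__setitem__(self, key, value)
        return value


# Padded namespaces start with padding/OOV entries; non-padded ones start empty.
mapping = NamespaceDefaultDict(
    ["*tags", "*labels"],
    padded_function=lambda: {"@@PADDING@@": 0, "@@UNKNOWN@@": 1},
    non_padded_function=dict,
)
assert mapping["tokens"] == {"@@PADDING@@": 0, "@@UNKNOWN@@": 1}
assert mapping["pos_tags"] == {}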

Example 3: test_namespace_match

# Required import: from allennlp.common import util [as alias]
# Alternatively: from allennlp.common.util import namespace_match [as alias]
def test_namespace_match(self):
        assert util.namespace_match("*tags", "tags")
        assert util.namespace_match("*tags", "passage_tags")
        assert util.namespace_match("*tags", "question_tags")
        assert util.namespace_match("tokens", "tokens")
        assert not util.namespace_match("tokens", "stemmed_tokens") 
Author: allenai | Project: allennlp | Lines: 8 | Source file: util_test.py

Example 4: __missing__

# Required import: from allennlp.common import util [as alias]
# Alternatively: from allennlp.common.util import namespace_match [as alias]
def __missing__(self, key):
        if any(namespace_match(pattern, key) for pattern in self._non_padded_namespaces):
            value = self._non_padded_function()
        else:
            value = self._padded_function()
        dict.__setitem__(self, key, value)
        return value 
Author: plasticityai | Project: magnitude | Lines: 9 | Source file: vocabulary.py

Example 5: from_files

# Required import: from allennlp.common import util [as alias]
# Alternatively: from allennlp.common.util import namespace_match [as alias]
def from_files(cls, directory):
        u"""
        Loads a ``Vocabulary`` that was serialized using ``save_to_files``.

        Parameters
        ----------
        directory : ``str``
            The directory containing the serialized vocabulary.
        """
        logger.info(u"Loading token dictionary from %s.", directory)
        with codecs.open(os.path.join(directory, NAMESPACE_PADDING_FILE), u'r', u'utf-8') as namespace_file:
            non_padded_namespaces = [namespace_str.strip() for namespace_str in namespace_file]

        vocab = Vocabulary(non_padded_namespaces=non_padded_namespaces)

        # Check every file in the directory.
        for namespace_filename in os.listdir(directory):
            if namespace_filename == NAMESPACE_PADDING_FILE:
                continue
            namespace = namespace_filename.replace(u'.txt', u'')
            if any(namespace_match(pattern, namespace) for pattern in non_padded_namespaces):
                is_padded = False
            else:
                is_padded = True
            filename = os.path.join(directory, namespace_filename)
            vocab.set_from_file(filename, is_padded, namespace=namespace)

        return vocab 
Author: plasticityai | Project: magnitude | Lines: 30 | Source file: vocabulary.py

Example 6: test_namespace_match

# Required import: from allennlp.common import util [as alias]
# Alternatively: from allennlp.common.util import namespace_match [as alias]
def test_namespace_match(self):
        assert util.namespace_match(u"*tags", u"tags")
        assert util.namespace_match(u"*tags", u"passage_tags")
        assert util.namespace_match(u"*tags", u"question_tags")
        assert util.namespace_match(u"tokens", u"tokens")
        assert not util.namespace_match(u"tokens", u"stemmed_tokens") 
Author: plasticityai | Project: magnitude | Lines: 8 | Source file: test_util.py

Example 7: from_files

# Required import: from allennlp.common import util [as alias]
# Alternatively: from allennlp.common.util import namespace_match [as alias]
def from_files(cls, directory: str) -> 'Vocabulary':
        """
        Loads a ``Vocabulary`` that was serialized using ``save_to_files``.

        Parameters
        ----------
        directory : ``str``
            The directory containing the serialized vocabulary.
        """
        logger.info("Loading token dictionary from %s.", directory)
        with codecs.open(os.path.join(directory, NAMESPACE_PADDING_FILE), 'r', 'utf-8') as namespace_file:
            non_padded_namespaces = [namespace_str.strip() for namespace_str in namespace_file]

        vocab = VocabularyMultitask(non_padded_namespaces=non_padded_namespaces)

        # Check every file in the directory.
        for namespace_filename in os.listdir(directory):
            if namespace_filename == NAMESPACE_PADDING_FILE:
                continue
            namespace = namespace_filename.replace('.txt', '')
            if any(namespace_match(pattern, namespace) for pattern in non_padded_namespaces):
                is_padded = False
            else:
                is_padded = True
            filename = os.path.join(directory, namespace_filename)
            vocab.set_from_file(filename, is_padded, namespace=namespace)

        return vocab 
Author: allenai | Project: scicite | Lines: 30 | Source file: vocabulary_multitask.py

Example 8: from_files

# Required import: from allennlp.common import util [as alias]
# Alternatively: from allennlp.common.util import namespace_match [as alias]
def from_files(cls, directory: str) -> 'Vocabulary':
        """
        Loads a ``Vocabulary`` that was serialized using ``save_to_files``.
        Parameters
        ----------
        directory : ``str``
            The directory containing the serialized vocabulary.
        """

        logger.info("Loading token dictionary from %s.", directory)
        with codecs.open(os.path.join(directory, NAMESPACE_PADDING_FILE), 'r', 'utf-8') as namespace_file:
            non_padded_namespaces = [namespace_str.strip() for namespace_str in namespace_file]

        vocab = cls(non_padded_namespaces=non_padded_namespaces)
        vocab.serialization_dir = directory  # pylint: disable=W0201
        # Check every file in the directory.
        for namespace_filename in os.listdir(directory):
            if namespace_filename == NAMESPACE_PADDING_FILE:
                continue
            if namespace_filename.startswith("."):
                continue
            namespace = namespace_filename.replace('.txt', '')
            if any(namespace_match(pattern, namespace) for pattern in non_padded_namespaces):
                is_padded = False
            else:
                is_padded = True
            filename = os.path.join(directory, namespace_filename)
            vocab.set_from_file(filename, is_padded, namespace=namespace)

        return vocab 
Author: allenai | Project: vampire | Lines: 32 | Source file: allennlp_bridge.py

Example 9: from_files

# Required import: from allennlp.common import util [as alias]
# Alternatively: from allennlp.common.util import namespace_match [as alias]
def from_files(
        cls,
        directory: str,
        padding_token: Optional[str] = DEFAULT_PADDING_TOKEN,
        oov_token: Optional[str] = DEFAULT_OOV_TOKEN,
    ) -> "Vocabulary":
        """
        Loads a `Vocabulary` that was serialized either using `save_to_files` or inside
        a model archive file.

        # Parameters

        directory : `str`
            The directory or archive file containing the serialized vocabulary.
        """
        logger.info("Loading token dictionary from %s.", directory)
        padding_token = padding_token if padding_token is not None else DEFAULT_PADDING_TOKEN
        oov_token = oov_token if oov_token is not None else DEFAULT_OOV_TOKEN

        if not os.path.isdir(directory):
            base_directory = cached_path(directory, extract_archive=True)
            # For convenience we'll check for a 'vocabulary' subdirectory of the archive.
            # That way you can use model archives directly.
            vocab_subdir = os.path.join(base_directory, "vocabulary")
            if os.path.isdir(vocab_subdir):
                directory = vocab_subdir
            elif os.path.isdir(base_directory):
                directory = base_directory
            else:
                raise ConfigurationError(f"{directory} is neither a directory nor an archive")

        # We use a lock file to avoid race conditions where multiple processes
        # might be reading/writing from/to the same vocab files at once.
        with FileLock(os.path.join(directory, ".lock")):
            with codecs.open(
                os.path.join(directory, NAMESPACE_PADDING_FILE), "r", "utf-8"
            ) as namespace_file:
                non_padded_namespaces = [namespace_str.strip() for namespace_str in namespace_file]

            vocab = cls(
                non_padded_namespaces=non_padded_namespaces,
                padding_token=padding_token,
                oov_token=oov_token,
            )

            # Check every file in the directory.
            for namespace_filename in os.listdir(directory):
                if namespace_filename == NAMESPACE_PADDING_FILE:
                    continue
                if namespace_filename.startswith("."):
                    continue
                namespace = namespace_filename.replace(".txt", "")
                if any(namespace_match(pattern, namespace) for pattern in non_padded_namespaces):
                    is_padded = False
                else:
                    is_padded = True
                filename = os.path.join(directory, namespace_filename)
                vocab.set_from_file(filename, is_padded, namespace=namespace, oov_token=oov_token)

        return vocab 
Author: allenai | Project: allennlp | Lines: 62 | Source file: vocabulary.py
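All of the from_files variants above follow the same pattern: read the list of non-padded namespaces from the padding file, then load each <namespace>.txt file, using namespace_match to decide whether that namespace needs padding and OOV entries. A typical call, assuming a vocabulary previously written with save_to_files, looks like this (the directory path is illustrative):

from allennlp.data.vocabulary import Vocabulary

# Load a vocabulary that was serialized earlier with vocab.save_to_files("path/to/vocabulary").
vocab = Vocabulary.from_files("path/to/vocabulary")
print(vocab.get_vocab_size("tokens"))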


Note: The allennlp.common.util.namespace_match examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and any distribution or use should follow the license of the corresponding project. Please do not reproduce this article without permission.