

Python Tqdm.tqdm Method Code Examples

This article collects typical usage examples of the Python method allennlp.common.tqdm.Tqdm.tqdm. If you have been struggling with questions such as: How exactly is Tqdm.tqdm used? What does a Tqdm.tqdm call look like in practice? — then the curated code examples here may help. You can also explore further usage examples of allennlp.common.tqdm.Tqdm, the class this method belongs to.


The following presents 15 code examples of the Tqdm.tqdm method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
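
Before the examples, a minimal sketch of the wrapper itself may help. Tqdm.tqdm is a thin class-level wrapper around the standard tqdm progress bar (with a few project-wide defaults) and accepts the same arguments as tqdm itself; the loop below is a made-up toy:

# A minimal sketch: wrap any iterable to get a progress bar, just as with plain tqdm.
from allennlp.common.tqdm import Tqdm

total = 0
for i in Tqdm.tqdm(range(1000)):
    total += i
print(total)  # 499500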

Example 1: _read_pretrained_tokens

# Required module import: from allennlp.common.tqdm import Tqdm [as alias]
# Or: from allennlp.common.tqdm.Tqdm import tqdm [as alias]
def _read_pretrained_tokens(embeddings_file_uri: str) -> List[str]:
    # Moving this import to the top breaks everything (circular import, I guess)
    from allennlp.modules.token_embedders.embedding import EmbeddingsTextFile

    logger.info("Reading pretrained tokens from: %s", embeddings_file_uri)
    tokens: List[str] = []
    with EmbeddingsTextFile(embeddings_file_uri) as embeddings_file:
        for line_number, line in enumerate(Tqdm.tqdm(embeddings_file), start=1):
            token_end = line.find(" ")
            if token_end >= 0:
                token = line[:token_end]
                tokens.append(token)
            else:
                line_begin = line[:20] + "..." if len(line) > 20 else line
                logger.warning("Skipping line number %d: %s", line_number, line_begin)
    return tokens 
Author: allenai, Project: allennlp, Lines: 18, Source: vocabulary.py
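
A hedged usage sketch for this helper; the embeddings path is a placeholder for any GloVe-style text file with one "token vector…" entry per line:

# Hypothetical call: collect the token column of a GloVe-style embeddings file.
# The path below is a placeholder, not a file shipped with AllenNLP.
tokens = _read_pretrained_tokens("/path/to/glove.6B.100d.txt")
print(len(tokens), tokens[:5])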

Example 2: description_from_metrics

# Required module import: from allennlp.common.tqdm import Tqdm [as alias]
# Or: from allennlp.common.tqdm.Tqdm import tqdm [as alias]
def description_from_metrics(metrics: Dict[str, float]) -> str:
    if not HasBeenWarned.tqdm_ignores_underscores and any(
        metric_name.startswith("_") for metric_name in metrics
    ):
        logger.warning(
            'Metrics with names beginning with "_" will not be logged to the tqdm progress bar.'
        )
        HasBeenWarned.tqdm_ignores_underscores = True
    return (
        ", ".join(
            [
                "%s: %.4f" % (name, value)
                for name, value in metrics.items()
                if not name.startswith("_")
            ]
        )
        + " ||"
    ) 
Author: allenai, Project: allennlp, Lines: 20, Source: util.py
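
To make the underscore filtering concrete, a small sketch using the function above with made-up metric names:

# Hypothetical metrics dict: "_aux_loss" starts with "_", so it is dropped from
# the description (and triggers the one-time warning shown above).
metrics = {"accuracy": 0.9231, "loss": 0.4567, "_aux_loss": 1.2345}
print(description_from_metrics(metrics))
# accuracy: 0.9231, loss: 0.4567 ||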

Example 3: _read_pretrained_tokens

# Required module import: from allennlp.common.tqdm import Tqdm [as alias]
# Or: from allennlp.common.tqdm.Tqdm import tqdm [as alias]
def _read_pretrained_tokens(embeddings_file_uri):
    # Moving this import to the top breaks everything (circular import, I guess)
    from allennlp.modules.token_embedders.embedding import EmbeddingsTextFile

    logger.info(u'Reading pretrained tokens from: %s', embeddings_file_uri)
    tokens = set()
    with EmbeddingsTextFile(embeddings_file_uri) as embeddings_file:
        for line_number, line in enumerate(Tqdm.tqdm(embeddings_file), start=1):
            token_end = line.find(u' ')
            if token_end >= 0:
                token = line[:token_end]
                tokens.add(token)
            else:
                line_begin = line[:20] + u'...' if len(line) > 20 else line
                logger.warning('Skipping line number %d: %s', line_number, line_begin)
    return tokens 
Author: plasticityai, Project: magnitude, Lines: 18, Source: vocabulary.py

Example 4: extend_from_instances

# Required module import: from allennlp.common.tqdm import Tqdm [as alias]
# Or: from allennlp.common.tqdm.Tqdm import tqdm [as alias]
def extend_from_instances(self,
                          params,
                          instances=()):
        u"""
        Extends an already generated vocabulary using a collection of instances.
        """
        min_count = params.pop(u"min_count", None)
        max_vocab_size = pop_max_vocab_size(params)
        non_padded_namespaces = params.pop(u"non_padded_namespaces", DEFAULT_NON_PADDED_NAMESPACES)
        pretrained_files = params.pop(u"pretrained_files", {})
        only_include_pretrained_words = params.pop_bool(u"only_include_pretrained_words", False)
        tokens_to_add = params.pop(u"tokens_to_add", None)
        params.assert_empty(u"Vocabulary - from dataset")

        logger.info(u"Fitting token dictionary from dataset.")
        namespace_token_counts = defaultdict(lambda: defaultdict(int))
        for instance in Tqdm.tqdm(instances):
            instance.count_vocab_items(namespace_token_counts)
        self._extend(counter=namespace_token_counts,
                     min_count=min_count,
                     max_vocab_size=max_vocab_size,
                     non_padded_namespaces=non_padded_namespaces,
                     pretrained_files=pretrained_files,
                     only_include_pretrained_words=only_include_pretrained_words,
                     tokens_to_add=tokens_to_add) 
Author: plasticityai, Project: magnitude, Lines: 27, Source: vocabulary.py

Example 5: _read_pretrained_tokens

# Required module import: from allennlp.common.tqdm import Tqdm [as alias]
# Or: from allennlp.common.tqdm.Tqdm import tqdm [as alias]
def _read_pretrained_tokens(embeddings_file_uri: str) -> List[str]:
    # Moving this import to the top breaks everything (circular import, I guess)
    from allennlp.modules.token_embedders.embedding import EmbeddingsTextFile

    logger.info('Reading pretrained tokens from: %s', embeddings_file_uri)
    tokens: List[str] = []
    with EmbeddingsTextFile(embeddings_file_uri) as embeddings_file:
        for line_number, line in enumerate(Tqdm.tqdm(embeddings_file), start=1):
            token_end = line.find(' ')
            if token_end >= 0:
                token = line[:token_end]
                tokens.append(token)
            else:
                line_begin = line[:20] + '...' if len(line) > 20 else line
                logger.warning('Skipping line number %d: %s', line_number, line_begin)
    return tokens 
Author: allenai, Project: scicite, Lines: 18, Source: vocabulary_multitask.py

Example 6: from_instances

# Required module import: from allennlp.common.tqdm import Tqdm [as alias]
# Or: from allennlp.common.tqdm.Tqdm import tqdm [as alias]
def from_instances(
        cls,
        instances: Iterable["adi.Instance"],
        min_count: Dict[str, int] = None,
        max_vocab_size: Union[int, Dict[str, int]] = None,
        non_padded_namespaces: Iterable[str] = DEFAULT_NON_PADDED_NAMESPACES,
        pretrained_files: Optional[Dict[str, str]] = None,
        only_include_pretrained_words: bool = False,
        tokens_to_add: Dict[str, List[str]] = None,
        min_pretrained_embeddings: Dict[str, int] = None,
        padding_token: Optional[str] = DEFAULT_PADDING_TOKEN,
        oov_token: Optional[str] = DEFAULT_OOV_TOKEN,
    ) -> "Vocabulary":
        """
        Constructs a vocabulary given a collection of `Instances` and some parameters.
        We count all of the vocabulary items in the instances, then pass those counts
        and the other parameters, to :func:`__init__`.  See that method for a description
        of what the other parameters do.

        The `instances` parameter does not get an entry in a typical AllenNLP configuration file,
        but the other parameters do (if you want non-default parameters).
        """
        logger.info("Fitting token dictionary from dataset.")
        padding_token = padding_token if padding_token is not None else DEFAULT_PADDING_TOKEN
        oov_token = oov_token if oov_token is not None else DEFAULT_OOV_TOKEN
        namespace_token_counts: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))
        for instance in Tqdm.tqdm(instances):
            instance.count_vocab_items(namespace_token_counts)

        return cls(
            counter=namespace_token_counts,
            min_count=min_count,
            max_vocab_size=max_vocab_size,
            non_padded_namespaces=non_padded_namespaces,
            pretrained_files=pretrained_files,
            only_include_pretrained_words=only_include_pretrained_words,
            tokens_to_add=tokens_to_add,
            min_pretrained_embeddings=min_pretrained_embeddings,
            padding_token=padding_token,
            oov_token=oov_token,
        ) 
Author: allenai, Project: allennlp, Lines: 43, Source: vocabulary.py
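
A hedged end-to-end sketch of calling from_instances, assuming an AllenNLP 1.x-style data API; the two sentences are made up:

# Build a small Vocabulary from two toy instances (assumes AllenNLP 1.x).
from allennlp.data import Instance
from allennlp.data.fields import TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.tokenizers import WhitespaceTokenizer
from allennlp.data.vocabulary import Vocabulary

tokenizer = WhitespaceTokenizer()
indexers = {"tokens": SingleIdTokenIndexer()}
instances = [
    Instance({"text": TextField(tokenizer.tokenize(s), indexers)})
    for s in ["a b c", "b c d"]
]
vocab = Vocabulary.from_instances(instances)
print(vocab.get_vocab_size("tokens"))  # 4 distinct tokens plus padding/OOV entries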

Example 7: from_files_and_instances

# Required module import: from allennlp.common.tqdm import Tqdm [as alias]
# Or: from allennlp.common.tqdm.Tqdm import tqdm [as alias]
def from_files_and_instances(
        cls,
        instances: Iterable["adi.Instance"],
        directory: str,
        padding_token: Optional[str] = DEFAULT_PADDING_TOKEN,
        oov_token: Optional[str] = DEFAULT_OOV_TOKEN,
        min_count: Dict[str, int] = None,
        max_vocab_size: Union[int, Dict[str, int]] = None,
        non_padded_namespaces: Iterable[str] = DEFAULT_NON_PADDED_NAMESPACES,
        pretrained_files: Optional[Dict[str, str]] = None,
        only_include_pretrained_words: bool = False,
        tokens_to_add: Dict[str, List[str]] = None,
        min_pretrained_embeddings: Dict[str, int] = None,
    ) -> "Vocabulary":
        """
        Extends an already generated vocabulary using a collection of instances.

        The `instances` parameter does not get an entry in a typical AllenNLP configuration file,
        but the other parameters do (if you want non-default parameters).  See `__init__` for a
        description of what the other parameters mean.
        """
        vocab = cls.from_files(directory, padding_token, oov_token)
        logger.info("Fitting token dictionary from dataset.")
        namespace_token_counts: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))
        for instance in Tqdm.tqdm(instances):
            instance.count_vocab_items(namespace_token_counts)
        vocab._extend(
            counter=namespace_token_counts,
            min_count=min_count,
            max_vocab_size=max_vocab_size,
            non_padded_namespaces=non_padded_namespaces,
            pretrained_files=pretrained_files,
            only_include_pretrained_words=only_include_pretrained_words,
            tokens_to_add=tokens_to_add,
            min_pretrained_embeddings=min_pretrained_embeddings,
        )
        return vocab 
Author: allenai, Project: allennlp, Lines: 39, Source: vocabulary.py
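
A hedged one-call sketch; the directory is a placeholder for a previously serialized vocabulary, and instances is assumed to exist as in the Example 6 sketch:

# Load a serialized vocabulary from disk, then extend it with counts from
# new instances; both arguments here are placeholders.
vocab = Vocabulary.from_files_and_instances(instances, "/path/to/serialized_vocabulary")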

Example 8: extend_from_instances

# Required module import: from allennlp.common.tqdm import Tqdm [as alias]
# Or: from allennlp.common.tqdm.Tqdm import tqdm [as alias]
def extend_from_instances(self, instances: Iterable["adi.Instance"]) -> None:
        logger.info("Fitting token dictionary from dataset.")
        namespace_token_counts: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))
        for instance in Tqdm.tqdm(instances):
            instance.count_vocab_items(namespace_token_counts)
        self._extend(counter=namespace_token_counts) 
Author: allenai, Project: allennlp, Lines: 8, Source: vocabulary.py
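
This newer AllenNLP variant drops the Params argument entirely. A one-line hedged sketch, assuming vocab and instances already exist:

# Count vocab items in the new instances and fold them into the existing vocabulary.
vocab.extend_from_instances(instances)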

Example 9: evaluate

# Required module import: from allennlp.common.tqdm import Tqdm [as alias]
# Or: from allennlp.common.tqdm.Tqdm import tqdm [as alias]
def evaluate(model,
             instances,
             data_iterator,
             cuda_device):
    _warned_tqdm_ignores_underscores = False
    check_for_gpu(cuda_device)
    with torch.no_grad():
        model.eval()

        iterator = data_iterator(instances,
                                 num_epochs=1,
                                 shuffle=False,
                                 cuda_device=cuda_device)
        logger.info(u"Iterating over dataset")
        generator_tqdm = Tqdm.tqdm(iterator, total=data_iterator.get_num_batches(instances))
        for batch in generator_tqdm:
            model(**batch)
            metrics = model.get_metrics()
            if (not _warned_tqdm_ignores_underscores and
                        any(metric_name.startswith(u"_") for metric_name in metrics)):
                logger.warning(u"Metrics with names beginning with \"_\" will "
                               u"not be logged to the tqdm progress bar.")
                _warned_tqdm_ignores_underscores = True
            description = u', '.join([u"%s: %.2f" % (name, value) for name, value
                                     in list(metrics.items()) if not name.startswith(u"_")]) + u" ||"
            generator_tqdm.set_description(description, refresh=False)

        return model.get_metrics(reset=True) 
Author: plasticityai, Project: magnitude, Lines: 30, Source: evaluate.py

Example 10: from_instances

# Required module import: from allennlp.common.tqdm import Tqdm [as alias]
# Or: from allennlp.common.tqdm.Tqdm import tqdm [as alias]
def from_instances(cls,
                       instances,
                       min_count=None,
                       max_vocab_size=None,
                       non_padded_namespaces=DEFAULT_NON_PADDED_NAMESPACES,
                       pretrained_files=None,
                       only_include_pretrained_words=False,
                       tokens_to_add=None):
        u"""
        Constructs a vocabulary given a collection of `Instances` and some parameters.
        We count all of the vocabulary items in the instances, then pass those counts
        and the other parameters, to :func:`__init__`.  See that method for a description
        of what the other parameters do.
        """
        logger.info(u"Fitting token dictionary from dataset.")
        namespace_token_counts = defaultdict(lambda: defaultdict(int))
        for instance in Tqdm.tqdm(instances):
            instance.count_vocab_items(namespace_token_counts)

        return Vocabulary(counter=namespace_token_counts,
                          min_count=min_count,
                          max_vocab_size=max_vocab_size,
                          non_padded_namespaces=non_padded_namespaces,
                          pretrained_files=pretrained_files,
                          only_include_pretrained_words=only_include_pretrained_words,
                          tokens_to_add=tokens_to_add)

    # There's enough logic here to require a custom from_params. 
Author: plasticityai, Project: magnitude, Lines: 30, Source: vocabulary.py

Example 11: _validation_loss

# Required module import: from allennlp.common.tqdm import Tqdm [as alias]
# Or: from allennlp.common.tqdm.Tqdm import tqdm [as alias]
def _validation_loss(self):
        u"""
        Computes the validation loss. Returns it and the number of batches.
        """
        logger.info(u"Validating")

        self._model.eval()

        if self._validation_iterator is not None:
            val_iterator = self._validation_iterator
        else:
            val_iterator = self._iterator

        val_generator = val_iterator(self._validation_data,
                                     num_epochs=1,
                                     shuffle=False,
                                     cuda_device=self._iterator_device)
        num_validation_batches = val_iterator.get_num_batches(self._validation_data)
        val_generator_tqdm = Tqdm.tqdm(val_generator,
                                       total=num_validation_batches)
        batches_this_epoch = 0
        val_loss = 0
        for batch in val_generator_tqdm:

            loss = self._batch_loss(batch, for_training=False)
            if loss is not None:
                # You shouldn't necessarily have to compute a loss for validation, so we allow for
                # `loss` to be None.  We need to be careful, though - `batches_this_epoch` is
                # currently only used as the divisor for the loss function, so we can safely only
                # count those batches for which we actually have a loss.  If this variable ever
                # gets used for something else, we might need to change things around a bit.
                batches_this_epoch += 1
                val_loss += loss.detach().cpu().numpy()

            # Update the description with the latest metrics
            val_metrics = self._get_metrics(val_loss, batches_this_epoch)
            description = self._description_from_metrics(val_metrics)
            val_generator_tqdm.set_description(description, refresh=False)

        return val_loss, batches_this_epoch 
Author: plasticityai, Project: magnitude, Lines: 42, Source: trainer.py

Example 12: _description_from_metrics

# Required module import: from allennlp.common.tqdm import Tqdm [as alias]
# Or: from allennlp.common.tqdm.Tqdm import tqdm [as alias]
def _description_from_metrics(self, metrics):
        if (not self._warned_tqdm_ignores_underscores and
                    any(metric_name.startswith(u"_") for metric_name in metrics)):
            logger.warning(u"Metrics with names beginning with \"_\" will "
                           u"not be logged to the tqdm progress bar.")
            self._warned_tqdm_ignores_underscores = True
        return u', '.join([u"%s: %.4f" % (name, value) for name, value in
                          list(metrics.items()) if not name.startswith(u"_")]) + u" ||" 
Author: plasticityai, Project: magnitude, Lines: 10, Source: trainer.py

Example 13: http_get

# Required module import: from allennlp.common.tqdm import Tqdm [as alias]
# Or: from allennlp.common.tqdm.Tqdm import tqdm [as alias]
def http_get(url, temp_file):
    req = requests.get(url, stream=True)
    content_length = req.headers.get(u'Content-Length')
    total = int(content_length) if content_length is not None else None
    progress = Tqdm.tqdm(unit=u"B", total=total)
    for chunk in req.iter_content(chunk_size=1024):
        if chunk: # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()


# TODO(joelgrus): do we want to do checksums or anything like that? 
Author: plasticityai, Project: magnitude, Lines: 15, Source: file_utils.py
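
A hedged sketch of calling it; the URL is a placeholder:

# Stream a (hypothetical) remote file into a named temporary file.
import tempfile

with tempfile.NamedTemporaryFile(suffix=".tar.gz", delete=False) as temp_file:
    http_get("https://example.com/model.tar.gz", temp_file)
    print("saved to", temp_file.name)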

Example 14: from_instances

# Required module import: from allennlp.common.tqdm import Tqdm [as alias]
# Or: from allennlp.common.tqdm.Tqdm import tqdm [as alias]
def from_instances(cls,
                       instances: Iterable['adi.Instance'],
                       min_count: Dict[str, int] = None,
                       max_vocab_size: Union[int, Dict[str, int]] = None,
                       non_padded_namespaces: Iterable[str] = DEFAULT_NON_PADDED_NAMESPACES,
                       pretrained_files: Optional[Dict[str, str]] = None,
                       only_include_pretrained_words: bool = False,
                       tokens_to_add: Dict[str, List[str]] = None,
                       min_pretrained_embeddings: Dict[str, int] = None,
                       instances_aux: Optional[Iterable['adi.Instance']] = None) -> 'Vocabulary':
        """
        Constructs a vocabulary given a collection of `Instances` and some parameters.
        We count all of the vocabulary items in the instances, then pass those counts
        and the other parameters, to :func:`__init__`.  See that method for a description
        of what the other parameters do.
        """
        logger.info("Fitting token dictionary from dataset.")
        namespace_token_counts: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))
        for instance in Tqdm.tqdm(instances):
            instance.count_vocab_items(namespace_token_counts)

        if instances_aux is not None:
            logger.info("Fitting token dictionary from auxillary dataset.")
            for instance in Tqdm.tqdm(instances_aux):
                instance.count_vocab_items(namespace_token_counts)

        return VocabularyMultitask(counter=namespace_token_counts,
                          min_count=min_count,
                          max_vocab_size=max_vocab_size,
                          non_padded_namespaces=non_padded_namespaces,
                          pretrained_files=pretrained_files,
                          only_include_pretrained_words=only_include_pretrained_words,
                          tokens_to_add=tokens_to_add,
                          min_pretrained_embeddings=min_pretrained_embeddings)

    # There's enough logic here to require a custom from_params. 
Author: allenai, Project: scicite, Lines: 38, Source: vocabulary_multitask.py

Example 15: extend_from_instances

# Required module import: from allennlp.common.tqdm import Tqdm [as alias]
# Or: from allennlp.common.tqdm.Tqdm import tqdm [as alias]
def extend_from_instances(self,
                              params: Params,
                              instances: Iterable['adi.Instance'] = ()) -> None:
        """
        Extends an already generated vocabulary using a collection of instances.
        """
        min_count = params.pop("min_count", None)
        max_vocab_size = pop_max_vocab_size(params)
        non_padded_namespaces = params.pop("non_padded_namespaces", DEFAULT_NON_PADDED_NAMESPACES)
        pretrained_files = params.pop("pretrained_files", {})
        min_pretrained_embeddings = params.pop("min_pretrained_embeddings", None)
        only_include_pretrained_words = params.pop_bool("only_include_pretrained_words", False)
        tokens_to_add = params.pop("tokens_to_add", None)
        params.assert_empty("Vocabulary - from dataset")

        logger.info("Fitting token dictionary from dataset.")
        namespace_token_counts: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))
        for instance in Tqdm.tqdm(instances):
            instance.count_vocab_items(namespace_token_counts)
        self._extend(counter=namespace_token_counts,
                     min_count=min_count,
                     max_vocab_size=max_vocab_size,
                     non_padded_namespaces=non_padded_namespaces,
                     pretrained_files=pretrained_files,
                     only_include_pretrained_words=only_include_pretrained_words,
                     tokens_to_add=tokens_to_add,
                     min_pretrained_embeddings=min_pretrained_embeddings) 
Author: allenai, Project: scicite, Lines: 29, Source: vocabulary_multitask.py
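
A hedged sketch of extending an existing vocabulary with this Params-based variant; Params comes from allennlp.common.params, while vocab and more_instances are assumed to exist:

# Extend an already-built vocabulary; min_count here is purely illustrative.
from allennlp.common.params import Params

vocab.extend_from_instances(
    Params({"min_count": {"tokens": 2}}),
    instances=more_instances,
)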


Note: The allennlp.common.tqdm.Tqdm.tqdm examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors; for distribution and use, consult the corresponding project's license. Do not reproduce without permission.