本文整理汇总了Python中loguru.logger.success方法的典型用法代码示例。如果您正苦于以下问题:Python logger.success方法的具体用法?Python logger.success怎么用?Python logger.success使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类loguru.logger
的用法示例。
在下文中一共展示了logger.success方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: match_clinvar
# 需要导入模块: from loguru import logger [as 别名]
# 或者: from loguru.logger import success [as 别名]
def match_clinvar(self) -> None:
    """Match the input variant with the ClinVar table.

    For every entry in :attr:`results`, look up the variant in the ClinVar
    tabix table and, on a hit, store the record on
    :attr:`CharGerResult.clinvar` via :meth:`_match_clinvar_one_variant`.
    Skips entirely when no ClinVar table is configured.
    """
    if self.config.clinvar_table is None:
        logger.info("Skip matching ClinVar")
        return
    logger.info(
        f"Match input variants with ClinVar table at {self.config.clinvar_table}"
    )
    matched_total = 0
    with TabixFile(str(self.config.clinvar_table), encoding="utf8") as tabix:
        # First header line is '#col1\tcol2\t...'; drop the leading '#'.
        columns = tabix.header[0][len("#") :].split("\t")
        for res in self.results:
            hit = self._match_clinvar_one_variant(res.variant, tabix, columns)
            if hit is None:
                continue
            res.clinvar = hit
            matched_total += 1
    logger.success(
        f"Matched {matched_total:,d} out of {len(self.input_variants):,d} input variants to a ClinVar record"
    )
示例2: test_heavily_threaded_logging
# 需要导入模块: from loguru import logger [as 别名]
# 或者: from loguru.logger import success [as 别名]
def test_heavily_threaded_logging(capsys):
    """Stress loguru with ten threads that each add a slow sink, log at
    three levels, and remove the sink again; nothing may leak to the
    captured stdout/stderr."""
    logger.remove()

    def worker():
        sink_id = logger.add(NonSafeSink(0.1), format="{message}", catch=False)
        logger.debug("AAA")
        logger.info("BBB")
        logger.success("CCC")
        logger.remove(sink_id)

    workers = [Thread(target=worker) for _ in range(10)]
    for t in workers:
        t.start()
    for t in workers:
        t.join()

    logger.remove()
    captured = capsys.readouterr()
    assert captured.out == ""
    assert captured.err == ""
示例3: _read_input_variants
# 需要导入模块: from loguru import logger [as 别名]
# 或者: from loguru.logger import success [as 别名]
def _read_input_variants(self) -> None:
    """Read the input VCF and set up one result template per variant.

    Populates :attr:`input_variants` from
    :attr:`self.config.input <.CharGerConfig.input>` and appends a matching
    :class:`CharGerResult` template to :attr:`results` for each variant.

    Raises:
        ValueError: If no input file is given in the config.
    """
    if self.config.input is None:
        # Fixed F541: the message has no placeholders, so the f-prefix
        # was redundant.
        raise ValueError("No input file is given in the config")
    logger.info(f"Read input VCF from {self.config.input}")
    # TODO: Skip variants with a non-PASS filter or with high allele
    # frequency, and count the skipped variants per reason
    # (e.g. num_skipped_variants: Dict[str, int] = {"has_filter": 0}).
    for variant in Variant.read_and_parse_vcf(self.config.input):
        self.input_variants.append(variant)
        # Every input variant gets a result template for downstream
        # annotation modules to fill in.
        self.results.append(CharGerResult(variant))
    logger.success(
        f"Read total {len(self.input_variants):,d} variants from the input VCF"
    )
示例4: configure_logging
# 需要导入模块: from loguru import logger [as 别名]
# 或者: from loguru.logger import success [as 别名]
def configure_logging(
    modifier=0,
    *,
    username=None,
    debug=False,
    log_to_stdout=True,
    log_to_file=False
):
    """(Re)configure loguru sinks for this run.

    ``modifier`` shifts the base verbosity of 3 up or down; the result is
    clamped to the valid index range of ``VERBOSITY_LOG_LEVELS`` (0-8).
    ``debug`` re-enables logging for the bundled libraries; ``log_to_stdout``
    and ``log_to_file`` select the sinks, the latter writing a timestamped
    file under the per-user log directory.
    """
    logger.remove()

    if debug:
        for lib in (
            'audio_metadata',
            'google_music',
            'google_music-proto',
            'google_music_utils',
        ):
            logger.enable(lib)

    # Clamp 3 + modifier into [0, 8], the valid VERBOSITY_LOG_LEVELS range.
    verbosity = max(0, min(8, 3 + modifier))
    log_level = VERBOSITY_LOG_LEVELS[verbosity]

    if log_to_stdout:
        logger.add(
            sys.stdout,
            level=log_level,
            format=LOG_FORMAT,
            backtrace=False
        )

    if log_to_file:
        log_dir = ensure_log_dir(username=username)
        timestamp = time.strftime('%Y-%m-%d_%H-%M-%S')
        log_file = (log_dir / timestamp).with_suffix('.log')
        logger.success("Logging to file: {}", log_file)
        logger.add(
            log_file,
            level=log_level,
            format=LOG_FORMAT,
            backtrace=False,
            encoding='utf8',
            newline='\n'
        )
示例5: train
# 需要导入模块: from loguru import logger [as 别名]
# 或者: from loguru.logger import success [as 别名]
def train(
    root=True,
    binary=False,
    bert="bert-large-uncased",
    epochs=30,
    batch_size=32,
    save=False,
):
    """Fine-tune a BERT sequence classifier on the SST dataset.

    Args:
        root: Use root (sentence-level) nodes only; otherwise all nodes.
        binary: Binary sentiment labels; otherwise fine-grained (5 classes).
        bert: Hugging Face pretrained model name.
        epochs: Number of training epochs to run.
        batch_size: Mini-batch size for training and evaluation.
        save: If True, pickle the model after every epoch.
    """
    trainset = SSTDataset("train", root=root, binary=binary)
    devset = SSTDataset("dev", root=root, binary=binary)
    testset = SSTDataset("test", root=root, binary=binary)

    config = BertConfig.from_pretrained(bert)
    if not binary:
        # Fine-grained SST uses 5 sentiment classes.
        config.num_labels = 5
    model = BertForSequenceClassification.from_pretrained(bert, config=config)
    model = model.to(device)

    lossfn = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)

    # Fixed off-by-one: range(1, epochs) ran only epochs - 1 epochs;
    # epochs are numbered 1..epochs inclusive for logging/checkpoints.
    for epoch in range(1, epochs + 1):
        train_loss, train_acc = train_one_epoch(
            model, lossfn, optimizer, trainset, batch_size=batch_size
        )
        val_loss, val_acc = evaluate_one_epoch(
            model, lossfn, optimizer, devset, batch_size=batch_size
        )
        test_loss, test_acc = evaluate_one_epoch(
            model, lossfn, optimizer, testset, batch_size=batch_size
        )
        logger.info(f"epoch={epoch}")
        logger.info(
            f"train_loss={train_loss:.4f}, val_loss={val_loss:.4f}, test_loss={test_loss:.4f}"
        )
        logger.info(
            f"train_acc={train_acc:.3f}, val_acc={val_acc:.3f}, test_acc={test_acc:.3f}"
        )
        if save:
            label = "binary" if binary else "fine"
            nodes = "root" if root else "all"
            torch.save(model, f"{bert}__{nodes}__{label}__e{epoch}.pickle")
    logger.success("Done!")