本文整理汇总了Python中utils.Config方法的典型用法代码示例。如果您正苦于以下问题:Python utils.Config方法的具体用法?Python utils.Config怎么用?Python utils.Config使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类utils
的用法示例。
在下文中一共展示了utils.Config方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: load_data
# 需要导入模块: import utils [as 别名]
# 或者: from utils import Config [as 别名]
def load_data(train_file, test_file, eval_dev=-1):
    """Load serialized tuning records from text files into DataItem lists.

    Each non-blank line has the form ``<key>:<json>`` where ``<key>`` is
    ``<op>_<x>_[d0, d1, ...]_<target>(<dev_id>)`` — four underscore-separated
    fields — and ``<json>`` is a two-element config list.

    Args:
        train_file: path to the training records, or None for an empty list.
        test_file: path to the test records, or None for an empty list.
        eval_dev: when >= 0, overrides the device id stored in every record.

    Returns:
        A ``(train_items, test_items)`` pair of lists of DataItem.

    Raises:
        RuntimeError: if a non-None filename does not refer to an existing file.
    """
    for filename in [train_file, test_file]:
        if filename is None:
            continue
        if not (os.path.exists(filename) and os.path.isfile(filename)):
            raise RuntimeError("File not found when loading data from %s" % filename)

    def _load(filename):
        if filename is None:
            return []
        ret = []
        with open(filename, "r") as fin:
            for line in fin:
                # BUGFIX: lines read from a file keep their trailing "\n", so a
                # blank line used to pass the old `if line:` truthiness test and
                # crash on the tuple unpacking below. Strip and skip instead.
                line = line.strip()
                if not line:
                    continue
                key, string = line.split(":", 1)
                op, _, shape_str, target_str = key.split("_")
                # shape_str looks like "[d0, d1, ...]" — drop the brackets.
                shape = [int(x) for x in shape_str[1:-1].split(", ")]
                # target_str looks like "cuda(0)" — drop ")" then split on "(".
                target, dev_id_str = target_str[:-1].split("(")
                dev_id = int(dev_id_str) if eval_dev < 0 else eval_dev
                config = json.loads(string)
                ret.append(DataItem(
                    op=op,
                    shape=shape,
                    target=TargetItem(target=target, dev_id=dev_id),
                    config=utils.Config(config[0], config[1])))
        return ret

    return _load(train_file), _load(test_file)
示例2: get_estimator
# 需要导入模块: import utils [as 别名]
# 或者: from utils import Config [as 别名]
def get_estimator(**kwargs):
    """Construct a TPUEstimator or a plain Estimator from keyword config."""
    cfg = utils.Config(kwargs)

    # Prefer a TPU cluster resolver when a TPU name is configured; otherwise
    # fall back to a directly-specified master address.
    if cfg.tpu.get('name'):
        tf.logging.info('Using cluster resolver.')
        cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
            cfg.tpu.name, zone=cfg.tpu.zone, project=cfg.tpu.gcp_project)
        master = None
    else:
        cluster_resolver = None
        master = cfg.master

    tf.logging.info('Config:\n %s' % cfg)

    if not cfg.tpu.enable:
        # CPU/GPU path: a vanilla Estimator is enough.
        return tf.estimator.Estimator(
            model_fn=model_fn, model_dir=cfg.model_dir, params=cfg)

    if not cfg.steps_per_epoch:
        raise ValueError('steps_per_epoch must be nonzero on TPU.')
    return tf.contrib.tpu.TPUEstimator(
        model_fn=model_fn,
        config=tf.contrib.tpu.RunConfig(
            cluster=cluster_resolver,
            master=master,
            model_dir=cfg.model_dir,
            tpu_config=tf.contrib.tpu.TPUConfig(
                iterations_per_loop=cfg.steps_per_epoch)),
        use_tpu=True,
        eval_on_tpu=False,
        # TPU requires these args, but they are ignored inside the input
        # function, which directly get train_batch_size or eval_batch_size.
        train_batch_size=cfg.dataset.train_batch_size,
        eval_batch_size=cfg.dataset.eval_batch_size,
        params=cfg,
    )
示例3: main
# 需要导入模块: import utils [as 别名]
# 或者: from utils import Config [as 别名]
def main(args):
    """Evaluate a restored SenCNN checkpoint on the split named by ``args.data``."""
    dataset_config = Config(args.dataset_config)
    model_config = Config(args.model_config)
    run_name = (
        f"epochs_{args.epochs}_batch_size_{args.batch_size}"
        f"_learning_rate_{args.learning_rate}"
    )
    exp_dir = Path("experiments") / model_config.type / run_name

    tokenizer = get_tokenizer(dataset_config, model_config)

    # restore the best checkpoint into a freshly built model
    checkpoint = CheckpointManager(exp_dir).load_checkpoint("best.tar")
    model = SenCNN(num_classes=model_config.num_classes, vocab=tokenizer.vocab)
    model.load_state_dict(checkpoint["model_state_dict"])

    # evaluation pipeline
    summary_manager = SummaryManager(exp_dir)
    ds = Corpus(getattr(dataset_config, args.data), tokenizer.split_and_transform)
    dl = DataLoader(ds, batch_size=args.batch_size, num_workers=4)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    summary = evaluate(model, dl, {"loss": nn.CrossEntropyLoss(), "acc": acc}, device)

    # merge the metrics for this split into the persisted summary file
    summary_manager.load("summary.json")
    summary_manager.update({str(args.data): summary})
    summary_manager.save("summary.json")
    print(f"loss: {summary['loss']:.3f}, acc: {summary['acc']:.2%}")
示例4: main
# 需要导入模块: import utils [as 别名]
# 或者: from utils import Config [as 别名]
def main(args):
    """Evaluate a restored PairwiseClassifier on the split named by ``args.data``."""
    dataset_config = Config(args.dataset_config)
    model_config = Config(args.model_config)
    ptr_config_info = Config(f"conf/pretrained/{model_config.type}.json")
    run_name = (
        f"epochs_{args.epochs}_batch_size_{args.batch_size}"
        f"_learning_rate_{args.learning_rate}_weight_decay_{args.weight_decay}"
    )
    exp_dir = Path("experiments") / model_config.type / run_name

    preprocessor = get_preprocessor(ptr_config_info, model_config)
    with open(ptr_config_info.config, mode="r") as io:
        ptr_config = json.load(io)

    # restore the fine-tuned weights on top of the pretrained BERT config
    checkpoint = CheckpointManager(exp_dir).load_checkpoint("best.tar")
    config = BertConfig()
    config.update(ptr_config)
    model = PairwiseClassifier(config, num_classes=model_config.num_classes,
                               vocab=preprocessor.vocab)
    model.load_state_dict(checkpoint["model_state_dict"])

    # evaluation pipeline
    ds = Corpus(getattr(dataset_config, args.data), preprocessor.preprocess)
    dl = DataLoader(ds, batch_size=args.batch_size, num_workers=4)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)

    summary_manager = SummaryManager(exp_dir)
    summary = evaluate(model, dl, {"loss": nn.CrossEntropyLoss(), "acc": acc}, device)
    summary_manager.load("summary.json")
    summary_manager.update({str(args.data): summary})
    summary_manager.save("summary.json")
    print(f"loss: {summary['loss']:.3f}, acc: {summary['acc']:.2%}")
示例5: main
# 需要导入模块: import utils [as 别名]
# 或者: from utils import Config [as 别名]
def main(args):
    """Evaluate a restored ConvRec model on the split named by ``args.data``."""
    dataset_config = Config(args.dataset_config)
    model_config = Config(args.model_config)
    run_name = (
        f"epochs_{args.epochs}_batch_size_{args.batch_size}"
        f"_learning_rate_{args.learning_rate}"
    )
    exp_dir = Path("experiments") / model_config.type / run_name

    tokenizer = get_tokenizer(dataset_config)

    # restore the best checkpoint into a freshly built model
    checkpoint = CheckpointManager(exp_dir).load_checkpoint("best.tar")
    model = ConvRec(num_classes=model_config.num_classes,
                    embedding_dim=model_config.embedding_dim,
                    hidden_dim=model_config.hidden_dim,
                    vocab=tokenizer.vocab)
    model.load_state_dict(checkpoint["model_state_dict"])

    # evaluation pipeline; sequences shorter than min_length are padded with ' '
    summary_manager = SummaryManager(exp_dir)
    ds = Corpus(getattr(dataset_config, args.data), tokenizer.split_and_transform,
                min_length=model_config.min_length,
                pad_val=tokenizer.vocab.to_indices(' '))
    dl = DataLoader(ds, batch_size=args.batch_size, collate_fn=batchify)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    summary = evaluate(model, dl, {"loss": nn.CrossEntropyLoss(), "acc": acc}, device)

    # merge the metrics for this split into the persisted summary file
    summary_manager.load("summary.json")
    summary_manager.update({str(args.data): summary})
    summary_manager.save("summary.json")
    print(f"loss: {summary['loss']:.3f}, acc: {summary['acc']:.2%}")
示例6: main
# 需要导入模块: import utils [as 别名]
# 或者: from utils import Config [as 别名]
def main(args):
    """Evaluate a restored VDCNN model on the split named by ``args.data``."""
    dataset_config = Config(args.dataset_config)
    model_config = Config(args.model_config)
    run_name = (
        f"epochs_{args.epochs}_batch_size_{args.batch_size}"
        f"_learning_rate_{args.learning_rate}"
    )
    exp_dir = Path("experiments") / model_config.type / run_name

    tokenizer = get_tokenizer(dataset_config, model_config)

    # restore the best checkpoint into a freshly built model
    checkpoint = CheckpointManager(exp_dir).load_checkpoint("best.tar")
    model = VDCNN(num_classes=model_config.num_classes,
                  embedding_dim=model_config.embedding_dim,
                  k_max=model_config.k_max,
                  vocab=tokenizer.vocab)
    model.load_state_dict(checkpoint["model_state_dict"])

    # evaluation pipeline
    summary_manager = SummaryManager(exp_dir)
    ds = Corpus(getattr(dataset_config, args.data), tokenizer.split_and_transform)
    dl = DataLoader(ds, batch_size=args.batch_size, num_workers=4)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    summary = evaluate(model, dl, {"loss": nn.CrossEntropyLoss(), "acc": acc}, device)

    # merge the metrics for this split into the persisted summary file
    summary_manager.load("summary.json")
    summary_manager.update({str(args.data): summary})
    summary_manager.save("summary.json")
    print(f"loss: {summary['loss']:.3f}, acc: {summary['acc']:.2%}")
示例7: main
# 需要导入模块: import utils [as 别名]
# 或者: from utils import Config [as 别名]
def main(args):
    """Evaluate a restored self-attentive network (SAN) on the split named by ``args.data``."""
    dataset_config = Config(args.dataset_config)
    model_config = Config(args.model_config)
    run_name = (
        f"epochs_{args.epochs}_batch_size_{args.batch_size}"
        f"_learning_rate_{args.learning_rate}"
    )
    exp_dir = Path("experiments") / model_config.type / run_name

    tokenizer = get_tokenizer(dataset_config)

    # restore the best checkpoint into a freshly built model
    checkpoint = CheckpointManager(exp_dir).load_checkpoint("best.tar")
    model = SAN(num_classes=model_config.num_classes,
                lstm_hidden_dim=model_config.lstm_hidden_dim,
                da=model_config.da, r=model_config.r,
                hidden_dim=model_config.hidden_dim,
                vocab=tokenizer.vocab)
    model.load_state_dict(checkpoint["model_state_dict"])

    # evaluation pipeline
    summary_manager = SummaryManager(exp_dir)
    ds = Corpus(getattr(dataset_config, args.data), tokenizer.split_and_transform)
    dl = DataLoader(ds, batch_size=args.batch_size, num_workers=4, collate_fn=batchify)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    summary = evaluate(model, dl, {"loss": nn.CrossEntropyLoss(), "acc": acc}, device)

    # merge the metrics for this split into the persisted summary file
    summary_manager.load("summary.json")
    summary_manager.update({str(args.data): summary})
    summary_manager.save("summary.json")
    print(f"loss: {summary['loss']:.3f}, acc: {summary['acc']:.2%}")
示例8: main
# 需要导入模块: import utils [as 别名]
# 或者: from utils import Config [as 别名]
def main(args):
    """Evaluate a restored SentenceClassifier on the split named by ``args.data``."""
    dataset_config = Config(args.dataset_config)
    model_config = Config(args.model_config)
    ptr_config_info = Config(f"conf/pretrained/{model_config.type}.json")
    run_name = (
        f"epochs_{args.epochs}_batch_size_{args.batch_size}"
        f"_learning_rate_{args.learning_rate}_weight_decay_{args.weight_decay}"
    )
    exp_dir = Path("experiments") / model_config.type / run_name

    preprocessor = get_preprocessor(ptr_config_info, model_config)
    with open(ptr_config_info.config, mode="r") as io:
        ptr_config = json.load(io)

    # restore the fine-tuned weights on top of the pretrained BERT config
    checkpoint = CheckpointManager(exp_dir).load_checkpoint("best.tar")
    config = BertConfig()
    config.update(ptr_config)
    model = SentenceClassifier(config, num_classes=model_config.num_classes,
                               vocab=preprocessor.vocab)
    model.load_state_dict(checkpoint["model_state_dict"])

    # evaluation pipeline
    ds = Corpus(getattr(dataset_config, args.data), preprocessor.preprocess)
    dl = DataLoader(ds, batch_size=args.batch_size, num_workers=4)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)

    summary_manager = SummaryManager(exp_dir)
    summary = evaluate(model, dl, {"loss": nn.CrossEntropyLoss(), "acc": acc}, device)
    summary_manager.load("summary.json")
    summary_manager.update({str(args.data): summary})
    summary_manager.save("summary.json")
    print(f"loss: {summary['loss']:.3f}, acc: {summary['acc']:.2%}")
示例9: main
# 需要导入模块: import utils [as 别名]
# 或者: from utils import Config [as 别名]
def main(args):
    """Evaluate a restored SAN model (morph + jamo inputs) on the split named by ``args.data``."""
    dataset_config = Config(args.dataset_config)
    model_config = Config(args.model_config)
    run_name = (
        f"epochs_{args.epochs}_batch_size_{args.batch_size}"
        f"_learning_rate_{args.learning_rate}"
    )
    exp_dir = Path("experiments") / model_config.type / run_name

    preprocessor = get_preprocessor(dataset_config,
                                    coarse_split_fn=split_morphs,
                                    fine_split_fn=split_jamos)

    # restore the best checkpoint into a freshly built model
    checkpoint = CheckpointManager(exp_dir).load_checkpoint("best.tar")
    model = SAN(model_config.num_classes, preprocessor.coarse_vocab,
                preprocessor.fine_vocab, model_config.fine_embedding_dim,
                model_config.hidden_dim, model_config.multi_step,
                model_config.prediction_drop_ratio)
    model.load_state_dict(checkpoint["model_state_dict"])

    # evaluation pipeline
    ds = Corpus(getattr(dataset_config, args.data), preprocessor.preprocess)
    dl = DataLoader(ds, batch_size=args.batch_size, num_workers=4, collate_fn=batchify)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)

    summary_manager = SummaryManager(exp_dir)
    summary = evaluate(model, dl, {"loss": log_loss, "acc": acc}, device)
    summary_manager.load("summary.json")
    summary_manager.update({str(args.data): summary})
    summary_manager.save("summary.json")
    print(f"loss: {summary['loss']:.3f}, acc: {summary['acc']:.2%}")
示例10: main
# 需要导入模块: import utils [as 别名]
# 或者: from utils import Config [as 别名]
def main(args):
    """Evaluate a restored CharCNN model on the split named by ``args.data``."""
    dataset_config = Config(args.dataset_config)
    model_config = Config(args.model_config)
    run_name = (
        f"epochs_{args.epochs}_batch_size_{args.batch_size}"
        f"_learning_rate_{args.learning_rate}"
    )
    exp_dir = Path("experiments") / model_config.type / run_name

    tokenizer = get_tokenizer(dataset_config, model_config)

    # restore the best checkpoint into a freshly built model
    checkpoint = CheckpointManager(exp_dir).load_checkpoint("best.tar")
    model = CharCNN(num_classes=model_config.num_classes,
                    embedding_dim=model_config.embedding_dim,
                    vocab=tokenizer.vocab)
    model.load_state_dict(checkpoint["model_state_dict"])

    # evaluation pipeline
    summary_manager = SummaryManager(exp_dir)
    ds = Corpus(getattr(dataset_config, args.data), tokenizer.split_and_transform)
    dl = DataLoader(ds, batch_size=args.batch_size, num_workers=4)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    summary = evaluate(model, dl, {"loss": nn.CrossEntropyLoss(), "acc": acc}, device)

    # merge the metrics for this split into the persisted summary file
    summary_manager.load("summary.json")
    summary_manager.update({str(args.data): summary})
    summary_manager.save("summary.json")
    print(f"loss: {summary['loss']:.3f}, acc: {summary['acc']:.2%}")
示例11: main
# 需要导入模块: import utils [as 别名]
# 或者: from utils import Config [as 别名]
def main(args):
    """Evaluate a restored MaLSTM model on the split named by ``args.data``."""
    dataset_config = Config(args.dataset_config)
    model_config = Config(args.model_config)
    run_name = (
        f"epochs_{args.epochs}_batch_size_{args.batch_size}"
        f"_learning_rate_{args.learning_rate}"
    )
    exp_dir = Path("experiments") / model_config.type / run_name

    tokenizer = get_tokenizer(dataset_config, split_fn=split_morphs)

    # restore the best checkpoint into a freshly built model
    checkpoint = CheckpointManager(exp_dir).load_checkpoint("best.tar")
    model = MaLSTM(num_classes=model_config.num_classes,
                   hidden_dim=model_config.hidden_dim,
                   vocab=tokenizer.vocab)
    model.load_state_dict(checkpoint["model_state_dict"])

    # evaluation pipeline
    ds = Corpus(getattr(dataset_config, args.data), tokenizer.split_and_transform)
    dl = DataLoader(ds, batch_size=args.batch_size, num_workers=4, collate_fn=batchify)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)

    summary_manager = SummaryManager(exp_dir)
    summary = evaluate(model, dl, {"loss": nn.CrossEntropyLoss(), "acc": acc}, device)
    summary_manager.load("summary.json")
    summary_manager.update({str(args.data): summary})
    summary_manager.save("summary.json")
    print(f"loss: {summary['loss']:.3f}, acc: {summary['acc']:.2%}")