This article collects typical usage examples of the FileStorageObserver.create method from Python's sacred.observers module. If you have been wondering how FileStorageObserver.create is used in practice, or what a call to it looks like, the curated code examples below may help. You can also explore further usage examples of its containing class, sacred.observers.FileStorageObserver.
The following shows 8 code examples of FileStorageObserver.create, sorted by popularity by default.
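Before the examples, here is a minimal, self-contained sketch of the pattern they all build on: create a Sacred Experiment, attach a FileStorageObserver so each run is written to disk, and define a main function. The experiment name 'demo' and the directory 'my_runs' are placeholders, not taken from any of the examples below.

from sacred import Experiment
from sacred.observers import FileStorageObserver

ex = Experiment('demo')  # 'demo' is a placeholder name
# Every run will be written to a numbered subdirectory of ./my_runs/
ex.observers.append(FileStorageObserver.create('my_runs'))

@ex.automain
def main():
    print('Hello from a Sacred run')

Running this script once creates a numbered run directory under my_runs/.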
Example 1: run_single_experiment
# Required module import: from sacred.observers import FileStorageObserver [as alias]
# Or: from sacred.observers.FileStorageObserver import create [as alias]
import os
import shutil
from typing import Any, Dict, List

from sacred.observers import FileStorageObserver

def run_single_experiment(dataset: str,
                          savedir: str,
                          named_configs: List,
                          config_updates: Dict[str, Any]):
    from tape.__main__ import proteins
    config_updates.update({
        'training': {'learning_rate': 1e-4, 'use_memory_saving_gradients': True},
        'num_epochs': 1000,
        'steps_per_epoch': 200,
        'tasks': dataset})
    if not os.path.exists(savedir):
        os.mkdir(savedir)
    # Swap the experiment's default observer for one rooted at savedir/dataset
    shutil.rmtree(proteins.observers[0].basedir)
    proteins.observers[0] = FileStorageObserver.create(
        os.path.join(savedir, dataset))
    proteins.run(
        named_configs=named_configs,
        config_updates=config_updates)
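As a usage sketch, the function above might be invoked as follows; the task name, directory, and config values here are hypothetical, not taken from the original source.

# Hypothetical invocation; all argument values below are made up.
run_single_experiment(
    dataset='secondary_structure',
    savedir='./results',
    named_configs=['transformer'],
    config_updates={'batch_size': 32})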
Example 2: config
# Required module import: from sacred.observers import FileStorageObserver [as alias]
# Or: from sacred.observers.FileStorageObserver import create [as alias]
import inspect
from pathlib import Path

import dlp_mpi
from sacred.observers import FileStorageObserver

# `experiment` and `get_enhancer` are defined elsewhere in the source module;
# this is a Sacred config function (presumably registered with @experiment.config
# there), so its local variables become config entries.
def config():
    locals().update({k: v.default for k, v in inspect.signature(get_enhancer).parameters.items()})
    session_id = 'dev'
    storage_dir: str = None
    database_rttm: str = None
    activity_rttm: str = database_rttm
    job_id = 1
    number_of_jobs = 1
    assert storage_dir is not None, (storage_dir, 'overwrite the storage_dir from the command line')
    assert database_rttm is not None, (database_rttm, 'overwrite the database_rttm from the command line')
    assert activity_rttm is not None, (activity_rttm, 'overwrite the activity_rttm from the command line')
    if dlp_mpi.IS_MASTER:
        experiment.observers.append(FileStorageObserver.create(str(
            Path(storage_dir).expanduser().resolve() / 'sacred'
        )))
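The asserts above force the caller to supply storage_dir and database_rttm at launch time. With Sacred this is typically done via config updates, either on the command line (python script.py with storage_dir=... database_rttm=...) or programmatically; a minimal sketch with placeholder paths:

# Programmatic equivalent of command-line `with ...` updates; paths are placeholders.
experiment.run(config_updates={
    'storage_dir': '/tmp/enhanced',
    'database_rttm': '/data/dev.rttm',
})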
Example 3: config
# Required module import: from sacred.observers import FileStorageObserver [as alias]
# Or: from sacred.observers.FileStorageObserver import create [as alias]
# Same pattern as Example 2, with a flag to select the CHiME-6 enhancer.
def config():
    chime6 = False
    if chime6:
        locals().update({k: v.default for k, v in inspect.signature(get_enhancer_chime6).parameters.items()})
    else:
        locals().update({k: v.default for k, v in inspect.signature(get_enhancer).parameters.items()})
session_id = 'dev'
storage_dir: str = None
job_id = 1
number_of_jobs = 1
assert storage_dir is not None, (storage_dir, 'overwrite the storage_dir from the command line')
if dlp_mpi.IS_MASTER:
experiment.observers.append(FileStorageObserver.create(str(
Path(storage_dir).expanduser().resolve() / 'sacred'
)))
Example 4: setup_file_observer
# Required module import: from sacred.observers import FileStorageObserver [as alias]
# Or: from sacred.observers.FileStorageObserver import create [as alias]
def setup_file_observer():
    # `results_path`, `logger`, and `ex` come from the surrounding module.
    file_obs_path = os.path.join(results_path, "sacred")
    logger.info("FileStorageObserver path: {}".format(file_obs_path))
    logger.info("Using the FileStorageObserver in results/sacred")
    ex.observers.append(FileStorageObserver.create(file_obs_path))
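Once the observer is attached, each run gets its own numbered subdirectory under the given path, containing config.json, run.json, metrics.json, and the captured stdout (cout.txt). A small sketch for inspecting a finished run; the run id '1' is a placeholder:

import json
from pathlib import Path

run_dir = Path('results/sacred') / '1'  # '1' is a placeholder run id
config = json.loads((run_dir / 'config.json').read_text())
print(config)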
Example 5: single_run
# Required module import: from sacred.observers import FileStorageObserver [as alias]
# Or: from sacred.observers.FileStorageObserver import create [as alias]
def single_run(config_updates, rundir, _id):
    # `single_exp` is a sacred.Experiment defined in the surrounding module;
    # `_create_run` is a private Sacred API that builds a Run without starting it.
    for i in range(3):
        try:
            run = single_exp._create_run(config_updates=config_updates)
            observer = FileStorageObserver.create(basedir=rundir)
            run._id = _id
            run.observers = [observer]
            run()
            break
        except TypeError:
            if i < 2:
                print("Run %i failed at start, retrying..." % _id)
            else:
                print("Giving up %i" % _id)
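single_run is designed to be called repeatedly with different config_updates and ids, for example from a small sweep loop. A hedged sketch; the run directory and the swept seed values are assumptions:

# Hypothetical sweep driver; 'runs/' and the seed values are made up.
for _id, seed in enumerate([0, 1, 2]):
    single_run(config_updates={'seed': seed}, rundir='runs/', _id=_id)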
Example 6: single_run
# Required module import: from sacred.observers import FileStorageObserver [as alias]
# Or: from sacred.observers.FileStorageObserver import create [as alias]
def single_run(config_updates, rundir, _id):
    run = single_exp._create_run(config_updates=config_updates)
    observer = FileStorageObserver.create(basedir=rundir)
    run._id = _id
    run.observers = [observer]
    try:
        run()
    except Exception:
        print('Run %i failed' % _id)
Example 7: polish
# Required module import: from sacred.observers import FileStorageObserver [as alias]
# Or: from sacred.observers.FileStorageObserver import create [as alias]
def polish(trial):
    """Load model from checkpoint, then fix the order of the factor
    matrices (using the largest logits), and re-optimize using L-BFGS to find
    the nearest local optimum.
    """
    # Hack: create a new instance without calling __init__, since Trainable.__init__
    # creates result_dir and log_dir in the wrong place (~/ray_results)
    trainable_cls = TrainableBP
    trainable = trainable_cls.__new__(trainable_cls)
    trainable._setup(trial.config)
    trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
    loss = trainable.polish(N_LBFGS_STEPS, save_to_self_model=True)
    checkpoint_path = Path(trial.logdir) / trial._checkpoint.value
    torch.save(trainable.model.state_dict(),
               str(checkpoint_path.parent / 'polished_model.pth'))
    return loss
Example 8: config
# Required module import: from sacred.observers import FileStorageObserver [as alias]
# Or: from sacred.observers.FileStorageObserver import create [as alias]
from datetime import datetime

import torch
from sacred.observers import FileStorageObserver

# `ex` is the sacred.Experiment defined in the surrounding module.
def config():
    logdir = 'runs/transcriber-' + datetime.now().strftime('%y%m%d-%H%M%S')
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    iterations = 500000
    resume_iteration = None
    checkpoint_interval = 1000
    train_on = 'MAESTRO'
    batch_size = 8
    sequence_length = 327680
    model_complexity = 48

    # Halve batch size and sequence length on GPUs with less than ~10 GB of memory
    if torch.cuda.is_available() and \
            torch.cuda.get_device_properties(torch.cuda.current_device()).total_memory < 10e9:
        batch_size //= 2
        sequence_length //= 2
        print(f'Reducing batch size to {batch_size} and sequence_length to {sequence_length} to save memory')

    learning_rate = 0.0006
    learning_rate_decay_steps = 10000
    learning_rate_decay_rate = 0.98
    leave_one_out = None
    clip_gradient_norm = 3
    validation_length = sequence_length
    validation_interval = 500

    ex.observers.append(FileStorageObserver.create(logdir))
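A closing note: all examples on this page use the FileStorageObserver.create classmethod. In newer Sacred releases (0.8 and later) this classmethod is deprecated in favor of the plain constructor, which takes the same base directory; the equivalent modern form of the last line above, reusing ex and logdir from Example 8, would be:

# Constructor form, preferred on Sacred >= 0.8; create() still works but warns.
ex.observers.append(FileStorageObserver(logdir))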