本文整理汇总了Python中torch.manual_seed方法的典型用法代码示例。如果您正苦于以下问题:Python torch.manual_seed方法的具体用法?Python torch.manual_seed怎么用?Python torch.manual_seed使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类torch
的用法示例。
在下文中一共展示了torch.manual_seed方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: set_random_seed
# 需要导入模块: import torch [as 别名]
# 或者: from torch import manual_seed [as 别名]
def set_random_seed(seed, deterministic=False):
    """Seed Python, NumPy and PyTorch RNGs for reproducible runs.

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
            to True and `torch.backends.cudnn.benchmark` to False.
            Default: False.
    """
    # Seed every RNG source in one pass (CPU, NumPy, all CUDA devices).
    for seeder in (random.seed, np.random.seed,
                   torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    if deterministic:
        # Deterministic kernels + no autotuning: reproducible but slower.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
示例2: construct_graph
# 需要导入模块: import torch [as 别名]
# 或者: from torch import manual_seed [as 别名]
def construct_graph(self):
    """Build the detection network, its SGD optimizer and the tensorboard writer.

    Returns:
        tuple: `(lr, optimizer)` — the base learning rate and the SGD optimizer.
    """
    # Fix the random seed so graph construction is reproducible.
    torch.manual_seed(cfg.RNG_SEED)
    # Build the main computation graph.
    self.net.create_architecture(self.imdb.num_classes, tag='default')
    # Collect trainable parameters. Biases may use a doubled learning rate
    # and, depending on the config, no weight decay.
    lr = cfg.TRAIN.LEARNING_RATE
    params = []
    for name, param in self.net.named_parameters():
        if not param.requires_grad:
            continue
        if 'bias' in name:
            params.append({
                'params': [param],
                'lr': lr * (cfg.TRAIN.DOUBLE_BIAS + 1),
                'weight_decay': cfg.TRAIN.WEIGHT_DECAY if cfg.TRAIN.BIAS_DECAY else 0,
            })
        else:
            params.append({
                'params': [param],
                'lr': lr,
                'weight_decay': cfg.TRAIN.WEIGHT_DECAY,
            })
    self.optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM)
    # Write the train information to tensorboard.
    self.writer = tb.writer.FileWriter(self.tbdir)
    # self.valwriter = tb.writer.FileWriter(self.tbvaldir)
    return lr, self.optimizer
开发者ID:Sunarker,项目名称:Collaborative-Learning-for-Weakly-Supervised-Object-Detection,代码行数:24,代码来源:train_val.py
示例3: init
# 需要导入模块: import torch [as 别名]
# 或者: from torch import manual_seed [as 别名]
def init(args):
    """Set up logging, validate/create the output directory, and seed all RNGs.

    Args:
        args: Namespace with `log_file` (str or None), `output_dir`
            (pathlib.Path) and `seed` (int) attributes.
    """
    # init logger: stderr by default, a file handler when one is configured.
    log_format = '%(asctime)-10s: %(message)s'
    if args.log_file is None or args.log_file == "":
        logging.basicConfig(level=logging.INFO, format=log_format)
    else:
        Path(args.log_file).parent.mkdir(parents=True, exist_ok=True)
        logging.basicConfig(level=logging.INFO, filename=args.log_file,
                            filemode='w', format=log_format)
        logging.warning(f'This will get logged to file: {args.log_file}')
    # create output dir, warning when it already holds files.
    out_dir = args.output_dir
    if out_dir.is_dir() and next(out_dir.iterdir(), None) is not None:
        logging.warning(f"Output directory ({out_dir}) already exists and is not empty!")
    assert 'bert' in out_dir.name, \
        '''Output dir name has to contain `bert` or `roberta` for AutoModel.from_pretrained to correctly infer the model type'''
    out_dir.mkdir(parents=True, exist_ok=True)
    # set random seeds for python, numpy and torch.
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(args.seed)
示例4: load_checkpoint
# 需要导入模块: import torch [as 别名]
# 或者: from torch import manual_seed [as 别名]
def load_checkpoint(self, file_name):
    """Restore GAN training state from a saved checkpoint, if one exists.

    Args:
        file_name (str): Checkpoint file name, resolved against
            `self.config.checkpoint_dir`.
    """
    filename = self.config.checkpoint_dir + file_name
    try:
        self.logger.info(f"Loading checkpoint '{filename}'")
        checkpoint = torch.load(filename)
        # Restore training progress counters.
        self.current_epoch = checkpoint['epoch']
        self.current_iteration = checkpoint['iteration']
        # Restore both networks and their optimizers.
        self.netG.load_state_dict(checkpoint['G_state_dict'])
        self.optimG.load_state_dict(checkpoint['G_optimizer'])
        self.netD.load_state_dict(checkpoint['D_state_dict'])
        self.optimD.load_state_dict(checkpoint['D_optimizer'])
        # Restore the fixed sampling noise and seed so runs stay comparable.
        self.fixed_noise = checkpoint['fixed_noise']
        self.manual_seed = checkpoint['manual_seed']
        self.logger.info(
            f"Checkpoint loaded successfully from '{self.config.checkpoint_dir}' "
            f"at (epoch {checkpoint['epoch']}) at (iteration {checkpoint['iteration']})\n")
    except OSError:
        # No checkpoint on disk: start from scratch rather than failing.
        self.logger.info(f"No checkpoint exists from '{self.config.checkpoint_dir}'. Skipping...")
        self.logger.info("**First time to train**")
示例5: main
# 需要导入模块: import torch [as 别名]
# 或者: from torch import manual_seed [as 别名]
def main():
    """Entry point: seed all RNGs, assemble training components, dispatch by mode."""
    args = Parameters().parse()
    # Seed every RNG source from the configured seed.
    np.random.seed(args.random_seed)
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed_all(args.random_seed)
    # cuDNN flags. NOTE(review): benchmark=True lets cuDNN autotune
    # algorithms non-deterministically, which can defeat deterministic=True
    # — confirm this combination is intended.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.enabled = True
    # Assemble dataset, network, loss, optimizer and LR scheduler.
    datasets = create_datasets(args)
    net = create_network(args)
    criterion = create_lossfunc(args, net)
    optimizer = create_optimizer(args, create_params(args, net))
    scheduler = create_scheduler(args, optimizer, datasets)
    # Dispatch on the requested mode.
    if args.mode == 'train':
        train(args, net, datasets, criterion, optimizer, scheduler)
    elif args.mode == 'test':
        test(args, net, datasets)
示例6: __init__
# 需要导入模块: import torch [as 别名]
# 或者: from torch import manual_seed [as 别名]
def __init__(self, thresh=1e-8, projDim=8192, input_dim=512):
    """Set up the Compact Bilinear Pooling count-sketch projections.

    Args:
        thresh (float): Numerical threshold stored for downstream use.
        projDim (int): Output dimensionality of the sketch projection.
        input_dim (int): Dimensionality of the incoming features.
    """
    super(CBP, self).__init__()
    self.thresh = thresh
    self.projDim = projDim
    self.input_dim = input_dim
    self.output_dim = projDim
    # Fixed seed: both sketch hashes must be identical across runs/instances.
    torch.manual_seed(1)
    # Two random hash index vectors h (column targets) ...
    self.h_ = [torch.randint(0, self.output_dim, (self.input_dim,), dtype=torch.long)
               for _ in range(2)]
    # ... and two random sign vectors in {-1, +1}.
    self.weights_ = [(2 * torch.randint(0, 2, (self.input_dim,)) - 1).float()
                     for _ in range(2)]
    # Materialize each count-sketch as a dense (input_dim, output_dim)
    # matrix built from (row index, hashed column) coordinate pairs.
    dense_sketches = []
    for h_vec, sign_vec in zip(self.h_, self.weights_):
        coords = torch.cat((torch.arange(input_dim, dtype=torch.long).reshape(1, -1),
                            h_vec.reshape(1, -1)), dim=0)
        dense_sketches.append(
            torch.sparse.FloatTensor(coords, sign_vec,
                                     torch.Size([self.input_dim, self.output_dim])).to_dense())
    self.sparseM = dense_sketches
示例7: __init__
# 需要导入模块: import torch [as 别名]
# 或者: from torch import manual_seed [as 别名]
def __init__(self,
             archive_file=DEFAULT_ARCHIVE_FILE,
             model_file=None):
    """Load the Sequicity policy model, extracting the archive on first use.

    Args:
        archive_file (str): Path to the local model archive.
        model_file (str): Remote model location fetched via `cached_path`
            when the local archive is absent.

    Raises:
        Exception: If neither a local archive nor a model_file is available.
    """
    SysPolicy.__init__(self)
    # Fall back to downloading the model when no local archive exists.
    if not os.path.isfile(archive_file):
        if not model_file:
            raise Exception("No model for Sequicity is specified!")
        archive_file = cached_path(model_file)
    model_dir = os.path.dirname(os.path.abspath(__file__))
    # Unpack the data directory next to this module, once.
    if not os.path.exists(os.path.join(model_dir, 'data')):
        # Use a context manager so the archive file handle is always
        # closed (the original leaked the open ZipFile).
        with zipfile.ZipFile(archive_file, 'r') as archive:
            archive.extractall(model_dir)
    cfg.init_handler('tsdf-multiwoz')
    # Seed all RNGs from the model config for reproducible decoding.
    torch.manual_seed(cfg.seed)
    torch.cuda.manual_seed(cfg.seed)
    random.seed(cfg.seed)
    np.random.seed(cfg.seed)
    self.m = Model('multiwoz')
    self.m.count_params()
    self.m.load_model()
    self.reset()
示例8: main
# 需要导入模块: import torch [as 别名]
# 或者: from torch import manual_seed [as 别名]
def main():
    """Horovod entry point: optionally seed training, then run the local worker."""
    args = parser.parse_args()
    if args.seed is not None:
        # Reproducibility requested: seed RNGs and force deterministic cuDNN.
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    # One process per GPU: bind this process to its local device.
    hvd.init()
    rank = hvd.local_rank()
    torch.cuda.set_device(rank)
    main_worker(rank, 4, args)
示例9: main
# 需要导入模块: import torch [as 别名]
# 或者: from torch import manual_seed [as 别名]
def main():
    """SLURM entry point: optionally seed, read the cluster layout, spawn workers."""
    args = parser.parse_args()
    if args.seed is not None:
        # Reproducibility requested: seed RNGs and force deterministic cuDNN.
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        # torch.backends.cudnn.enabled = False
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    # Process topology comes from the SLURM environment.
    args.local_rank = int(os.environ["SLURM_PROCID"])
    args.world_size = int(os.environ["SLURM_NPROCS"])
    ngpus_per_node = torch.cuda.device_count()
    # Job-unique rendezvous file for distributed init.
    args.dist_url = "file://{}.{}".format(os.path.realpath(args.dist_file),
                                          os.environ["SLURM_JOBID"])
    mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
示例10: reset_parameters
# 需要导入模块: import torch [as 别名]
# 或者: from torch import manual_seed [as 别名]
def reset_parameters(self,
                     init_shared=lambda x: normal(x, std=0.1),
                     init_importance=lambda x: normal(x, std=0.0005)):
    """Resets the trainable parameters.

    Args:
        init_shared (callable): Initializer applied to the shared
            embedding weight; receives and returns a Parameter.
        init_importance (callable): Initializer applied to the
            importance weight; receives and returns a Parameter.
    """
    def set_constant_row(parameters, iRow=0, value=0):
        """Return `parameters` with row `iRow` as a constant `value`."""
        data = parameters.data
        data[iRow, :] = value
        return torch.nn.Parameter(data, requires_grad=parameters.requires_grad)

    # Fix: only seed when a seed is configured. Previously NumPy's global
    # RNG was re-seeded even for seed=None, clobbering global RNG state as
    # an unintended side effect.
    if self.seed is not None:
        np.random.seed(self.seed)
        torch.manual_seed(self.seed)
    self.shared_embeddings.weight = init_shared(self.shared_embeddings.weight)
    self.importance_weights.weight = init_importance(self.importance_weights.weight)
    if self.padding_idx is not None:
        # Unfortunately has to set weight to 0 even when paddingIdx = 0
        # NOTE(review): row 0 is zeroed regardless of the actual value of
        # self.padding_idx — confirm padding_idx is always 0 here.
        self.shared_embeddings.weight = set_constant_row(self.shared_embeddings.weight)
        self.importance_weights.weight = set_constant_row(self.importance_weights.weight)
    # Honor the configured trainability flags.
    self.shared_embeddings.weight.requires_grad = self.train_sharedEmbed
    self.importance_weights.weight.requires_grad = self.train_weight
示例11: eval_on_test
# 需要导入模块: import torch [as 别名]
# 或者: from torch import manual_seed [as 别名]
def eval_on_test(batch_size, num_workers, seed, _log):
    """Evaluate the trained flow's log density on the held-out test set."""
    # Seed for a reproducible evaluation pass.
    torch.manual_seed(seed)
    np.random.seed(seed)
    device = set_device()
    test_dataset, (c, h, w) = get_test_data()
    _log.info(f'Test dataset size: {len(test_dataset)}')
    _log.info(f'Image dimensions: {c}x{h}x{w}')
    flow = create_flow(c, h, w).to(device)
    flow.eval()

    def log_prob_fn(batch):
        # Move each batch to the evaluation device before scoring.
        return flow.log_prob(batch.to(device))

    loader = tqdm(DataLoader(dataset=test_dataset,
                             batch_size=batch_size,
                             num_workers=num_workers))
    mean, err = autils.eval_log_density_2(log_prob_fn=log_prob_fn,
                                          data_loader=loader,
                                          c=c, h=h, w=w)
    print('Test log probability (bits/dim): {:.2f} +/- {:.4f}'.format(mean, err))
示例12: sample
# 需要导入模块: import torch [as 别名]
# 或者: from torch import manual_seed [as 别名]
def sample(seed, num_bits, num_samples, samples_per_row, _log, output_path=None):
    """Draw samples from the trained flow and save them as an image grid.

    Args:
        output_path: Destination image file; defaults to 'samples.png'.
    """
    # Sampling only — no gradients needed anywhere.
    torch.set_grad_enabled(False)
    if output_path is None:
        output_path = 'samples.png'
    torch.manual_seed(seed)
    np.random.seed(seed)
    device = set_device()
    _, _, (c, h, w) = get_train_valid_data()
    flow = create_flow(c, h, w).to(device)
    flow.eval()
    # Samples come out in preprocessed space; invert back to pixels.
    images = Preprocess(num_bits).inverse(flow.sample(num_samples))
    save_image(images.cpu(), output_path,
               nrow=samples_per_row,
               padding=0)
示例13: main
# 需要导入模块: import torch [as 别名]
# 或者: from torch import manual_seed [as 别名]
def main(seed, _log):
    """Train the flow on the training split, validating when a split exists."""
    torch.manual_seed(seed)
    np.random.seed(seed)
    device = set_device()
    train_dataset, val_dataset, (c, h, w) = get_train_valid_data()
    _log.info(f'Training dataset size: {len(train_dataset)}')
    if val_dataset is not None:
        _log.info(f'Validation dataset size: {len(val_dataset)}')
    else:
        _log.info('No validation dataset')
    _log.info(f'Image dimensions: {c}x{h}x{w}')
    flow = create_flow(c, h, w)
    train_flow(flow, train_dataset, val_dataset, (c, h, w), device)
示例14: test_adam_lorentz
# 需要导入模块: import torch [as 别名]
# 或者: from torch import manual_seed [as 别名]
def test_adam_lorentz(params):
    """RiemannianAdam should drive a Lorentz-manifold parameter to its target."""
    manifold = geoopt.manifolds.Lorentz(k=torch.Tensor([1.0]))
    torch.manual_seed(42)
    with torch.no_grad():
        # Random start projected onto the manifold, and a projected target.
        point = geoopt.ManifoldParameter(torch.randn(20, 10), manifold=manifold).proj_()
        target = torch.randn(20, 10)
        target.set_(manifold.projx(target))

    def closure():
        optim.zero_grad()
        loss = (target - point).pow(2).sum()
        loss.backward()
        return loss.item()

    optim = geoopt.optim.RiemannianAdam([point], stabilize=4500, **params)
    for _ in range(10000):
        # Stop early once the parameter is close enough to the target.
        if (target - point).norm() < 1e-5:
            break
        optim.step(closure)
    assert point.is_contiguous()
    np.testing.assert_allclose(point.data, target, atol=1e-5, rtol=1e-5)
    # Optimizer state must round-trip through (de)serialization and keep stepping.
    optim.load_state_dict(optim.state_dict())
    optim.step(closure)
示例15: test_adam_poincare
# 需要导入模块: import torch [as 别名]
# 或者: from torch import manual_seed [as 别名]
def test_adam_poincare():
    """RiemannianAdam should converge to a fixed target on the Poincare ball."""
    torch.manual_seed(44)
    ball = geoopt.PoincareBall()
    target = torch.tensor([0.5, 0.5])
    # Random start mapped onto the ball via the exponential map at the origin.
    start = geoopt.ManifoldParameter(ball.expmap0(torch.randn(2) / 2),
                                     manifold=ball)

    def closure():
        optim.zero_grad()
        loss = ball.dist(start, target) ** 2
        loss.backward()
        return loss.item()

    optim = geoopt.optim.RiemannianAdam([start], lr=1e-2)
    for _ in range(2000):
        optim.step(closure)
    np.testing.assert_allclose(start.data, target, atol=1e-5, rtol=1e-5)