This article collects typical usage examples of the Python function torch.manual_seed. If you have been wondering what manual_seed does, how to call it, or what real-world usage looks like, the curated examples below should help.
Fifteen code examples of manual_seed are shown, ordered roughly by popularity.
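Before working through the examples, here is a minimal sketch of the basic guarantee manual_seed provides: re-seeding PyTorch's global random number generator with the same value restarts the identical sequence of random draws.

import torch

# Seeding the global RNG makes subsequent random ops reproducible.
torch.manual_seed(42)
a = torch.randn(3)

torch.manual_seed(42)  # re-seeding restarts the same sequence
b = torch.randn(3)

assert torch.equal(a, b)  # same seed, same draws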
Example 1: test_bilstm_word_embeds_d4_1
def test_bilstm_word_embeds_d4_1():
""" 1 point(s) / 0.5 point(s) (section dependent) """
global test_sent, word_to_ix, vocab
torch.manual_seed(1)
embedder = BiLSTMWordEmbeddingLookup(word_to_ix, TEST_EMBEDDING_DIM, TEST_EMBEDDING_DIM, 1, 0.0)
embeds = embedder(test_sent)
assert len(embeds) == len(test_sent)
assert isinstance(embeds, list)
assert isinstance(embeds[0], ag.Variable)
assert embeds[0].size() == (1, TEST_EMBEDDING_DIM)
embeds_list = make_list(embeds)
true = (
    [0.4916, -0.0168, 0.1719, 0.6615],
    [0.3756, -0.0610, 0.1851, 0.2604],
    [-0.2655, -0.1289, 0.1009, -0.0016],
    [-0.1070, -0.3971, 0.2414, -0.2588],
    [-0.1717, -0.4475, 0.2739, -0.0465],
    [0.0684, -0.2586, 0.2123, -0.1832],
    [-0.0775, -0.4308, 0.1844, -0.1146],
    [0.4366, -0.0507, 0.1018, 0.4015],
    [-0.1265, -0.2192, 0.0481, 0.1551],
)
pairs = zip(embeds_list, true)
check_tensor_correctness(pairs)
Example 2: setUp
def setUp(self, length=3, factor=10, count=1000000,
seed=None, dtype=torch.float64, device=None):
'''Set up the test values.

Args:
    length: Size of the vectors.
    factor: Multiplier applied to the mean and standard deviation.
    count: Number of samples for the Monte-Carlo estimate.
    seed: Seed for the random number generator.
    dtype: The data type.
    device: Device on which to allocate the tensors.
'''
if seed is not None:
torch.manual_seed(seed)
# variables
self.A = torch.randn(length, length, dtype=dtype, device=device)
self.b = torch.randn(length, dtype=dtype, device=device)
# input mean and covariance
self.mu = torch.randn(length, dtype=dtype, device=device) * factor
self.cov = rand.definite(length, dtype=dtype, device=device,
positive=True, semi=False, norm=factor**2)
# Monte-Carlo estimation of the output mean and variance
normal = torch.distributions.MultivariateNormal(self.mu, self.cov)
samples = normal.sample((count,))
out_samples = samples.matmul(self.A.t()) + self.b
self.mc_mu = torch.mean(out_samples, dim=0)
self.mc_var = torch.var(out_samples, dim=0)
self.mc_cov = cov(out_samples)
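What the Monte-Carlo machinery above converges to is the closed form for an affine transform of a Gaussian: if x ~ N(mu, cov), then y = A x + b has mean A mu + b and covariance A cov Aᵀ. Below is a self-contained sketch of that identity, using plain torch in place of the project-specific rand.definite and cov helpers:

import torch

torch.manual_seed(0)
n = 3
A = torch.randn(n, n, dtype=torch.float64)
b = torch.randn(n, dtype=torch.float64)
mu = torch.randn(n, dtype=torch.float64)
# Build a positive-definite covariance (stand-in for rand.definite).
L = torch.randn(n, n, dtype=torch.float64)
sigma = L @ L.t() + n * torch.eye(n, dtype=torch.float64)

samples = torch.distributions.MultivariateNormal(mu, sigma).sample((1000000,))
out = samples @ A.t() + b

# The Monte-Carlo estimates converge to the analytic values.
assert torch.allclose(out.mean(dim=0), A @ mu + b, atol=0.05)
assert torch.allclose(torch.cov(out.t()), A @ sigma @ A.t(), atol=0.5)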
Example 3: main
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
Example 4: train_model
def train_model(args):
"""Load the data, train the model, test the model, export / save the model
"""
torch.manual_seed(args.seed)
# Open our dataset
train_loader, test_loader = data_utils.load_data(args.test_split,
args.batch_size)
# Create the model
net = model.SonarDNN().double()
optimizer = optim.SGD(net.parameters(), lr=args.lr,
momentum=args.momentum, nesterov=False)
# Train / Test the model
for epoch in range(1, args.epochs + 1):
train(net, train_loader, optimizer, epoch)
test(net, test_loader)
# Export the trained model
torch.save(net.state_dict(), args.model_name)
if args.model_dir:
# Save the model to GCS
data_utils.save_model(args.model_dir, args.model_name)
Example 5: prepare_environment
def prepare_environment(params: Params):
"""
Sets random seeds for reproducible experiments. This may not work as expected
if you use this from within a python project in which you have already imported Pytorch.
If you use the scripts/run_model.py entry point to training models with this library,
your experiments should be reasonably reproducible. If you are using this from your own
project, you will want to call this function before importing Pytorch. Complete determinism
is very difficult to achieve with libraries doing optimized linear algebra due to massively
parallel execution, which is exacerbated by using GPUs.
Parameters
----------
params: Params object or dict, required.
A ``Params`` object or dict holding the json parameters.
"""
seed = params.pop_int("random_seed", 13370)
numpy_seed = params.pop_int("numpy_seed", 1337)
torch_seed = params.pop_int("pytorch_seed", 133)
if seed is not None:
random.seed(seed)
if numpy_seed is not None:
numpy.random.seed(numpy_seed)
if torch_seed is not None:
torch.manual_seed(torch_seed)
# Seed all GPUs with the same seed if available.
if torch.cuda.is_available():
torch.cuda.manual_seed_all(torch_seed)
log_pytorch_version_info()
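If you are calling this from your own project, usage might look like the following hypothetical snippet (it assumes the AllenNLP ``Params`` class; the import path is an assumption):

from allennlp.common import Params  # assumed import path

prepare_environment(Params({"random_seed": 1, "numpy_seed": 2, "pytorch_seed": 3}))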
Example 6: __init__
def __init__(self, seed=1):
super(NN_drop, self).__init__()
torch.manual_seed(seed)
self.input_size = 1
self.output_size = 1
h_size = 50
# #this samples a mask for each datapoint in the batch
# self.net = nn.Sequential(
# nn.Linear(self.input_size,h_size),
# nn.ReLU(),
# nn.Dropout(p=0.5),
# nn.Linear(h_size,self.output_size)
# )
# want to keep the dropout mask constant across the batch
self.l1 = nn.Linear(self.input_size,h_size)
self.a1 = nn.ReLU()
# nn.Dropout(p=0.5),
self.l2 = nn.Linear(h_size,self.output_size)
self.optimizer = optim.Adam(self.parameters(), lr=.01)
Example 7: seed_everything
def seed_everything(seed=1029):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)  # note: affects subprocesses only; this interpreter's hash seed is fixed at startup
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
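A quick sanity check for a helper like this (a hypothetical snippet, reusing the seed_everything defined above): re-seeding must reproduce the draws of every library it touches.

seed_everything(1029)
first_np, first_torch = np.random.rand(2), torch.rand(2)
seed_everything(1029)
again_np, again_torch = np.random.rand(2), torch.rand(2)
assert np.allclose(first_np, again_np) and torch.equal(first_torch, again_torch)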
Example 8: test_horovod_allreduce_inplace
def test_horovod_allreduce_inplace(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = [torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
tensor = tensor.type(dtype)
multiplied = tensor * size
hvd.allreduce_(tensor, average=False)
max_difference = tensor.sub(multiplied).max()
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
    # Accumulated floating-point error grows with the number of ranks; skip the check.
    break
assert max_difference <= threshold, 'hvd.allreduce produces incorrect results'
Example 9: predict_fn
def predict_fn(input_data, model):
logger.info('Generating text based on input parameters.')
corpus = model['corpus']
model = model['model']
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logger.info('Current device: {}'.format(device))
torch.manual_seed(input_data['seed'])
ntokens = len(corpus.dictionary)
input = torch.randint(ntokens, (1, 1), dtype=torch.long).to(device)
hidden = model.init_hidden(1)
logger.info('Generating {} words.'.format(input_data['words']))
result = []
with torch.no_grad(): # no tracking history
for i in range(input_data['words']):
output, hidden = model(input, hidden)
word_weights = output.squeeze().div(input_data['temperature']).exp().cpu()
word_idx = torch.multinomial(word_weights, 1)[0]
input.fill_(word_idx)
word = corpus.dictionary.idx2word[word_idx]
word = word if isinstance(word, str) else word.decode()
if word == '<eos>':
word = '\n'
elif i % 12 == 11:
word = word + '\n'
else:
word = word + ' '
result.append(word)
return ''.join(result)
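The generation loop above implements temperature sampling: the output scores are divided by a temperature before exponentiation, so values below 1 sharpen the distribution toward the highest-scoring word while values above 1 flatten it toward uniform. A standalone sketch of just that step:

import torch

torch.manual_seed(0)
log_scores = torch.tensor([2.0, 1.0, 0.1])
for temperature in (0.5, 1.0, 2.0):
    weights = log_scores.div(temperature).exp()  # unnormalized probabilities
    word_idx = torch.multinomial(weights, num_samples=1).item()
    print(temperature, word_idx)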
Example 10: test_horovod_allreduce_error
def test_horovod_allreduce_error(self):
"""Test that the allreduce raises an error if different ranks try to
send tensors of different rank or dimension."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
return
# Same rank, different dimension
torch.manual_seed(1234)
dims = [17 + rank] * 3
tensor = torch.FloatTensor(*dims).random_(-100, 100)
try:
hvd.allreduce(tensor)
assert False, 'hvd.allreduce did not throw error'
except torch.FatalError:
pass
# Same number of elements, different rank
torch.manual_seed(1234)
if rank == 0:
dims = [17, 23 * 57]
else:
dims = [17, 23, 57]
tensor = torch.FloatTensor(*dims).random_(-100, 100)
try:
hvd.allreduce(tensor)
assert False, 'hvd.allreduce did not throw error'
except torch.FatalError:
pass
Example 11: test_horovod_allreduce_grad
def test_horovod_allreduce_grad(self):
"""Test the correctness of the allreduce gradient."""
hvd.init()
size = hvd.size()
dtypes = [torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
tensor = tensor.type(dtype)
tensor = torch.autograd.Variable(tensor, requires_grad=True)
summed = hvd.allreduce(tensor, average=False)
summed.backward(torch.ones([17] * dim))
grad_out = tensor.grad.data.numpy()
expected = np.ones([17] * dim) * size
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
Example 12: train_step
def train_step(self, sample, update_params=True, dummy_batch=False):
"""Do forward, backward and parameter update."""
# Set seed based on args.seed and the update number so that we get
# reproducible results when resuming from checkpoints
seed = self.args.seed + self.get_num_updates()
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
if not dummy_batch:
self.meters['train_wall'].start()
# forward and backward pass
sample = self._prepare_sample(sample)
loss, sample_size, logging_output, oom_fwd = self._forward(sample)
oom_bwd = self._backward(loss)
# buffer stats and logging outputs
self._buffered_stats['sample_sizes'].append(sample_size)
self._buffered_stats['logging_outputs'].append(logging_output)
self._buffered_stats['ooms_fwd'].append(oom_fwd)
self._buffered_stats['ooms_bwd'].append(oom_bwd)
# update parameters
if update_params:
agg_logging_output = self._update_params()
else:
agg_logging_output = None # buffering updates
if not dummy_batch:
self.meters['train_wall'].stop()
return agg_logging_output
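The seed = args.seed + num_updates pattern above is what makes resumption reproducible: the RNG state at update k depends only on the base seed and k, not on every draw made since the start of training. A minimal sketch of that property:

import torch

def draws_at_update(base_seed, num_updates):
    torch.manual_seed(base_seed + num_updates)
    return torch.rand(3)  # stand-in for dropout masks, sampling, etc.

fresh = draws_at_update(7, 100)    # reached update 100 from scratch
resumed = draws_at_update(7, 100)  # reached update 100 after a checkpoint reload
assert torch.equal(fresh, resumed)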
Example 13: __init__
def __init__(self, seed=1):
super(NN, self).__init__()
torch.manual_seed(seed)
self.input_size = 1
self.output_size = 1
h_size = 50
# self.net = nn.Sequential(
# nn.Linear(self.input_size,h_size),
# nn.ReLU(),
# nn.Linear(h_size,self.output_size)
# )
self.net = nn.Sequential(
nn.Linear(self.input_size,h_size),
# nn.Tanh(),
# nn.Linear(h_size,h_size),
nn.Tanh(),
nn.Linear(h_size,self.output_size),
# nn.Tanh(),
# nn.Linear(h_size,self.output_size)
)
# self.optimizer = optim.Adam(self.parameters(), lr=.01)
self.optimizer = optim.Adam(self.parameters(), lr=.0004)
Example 14: main
def main(argv):
(opt, args) = parser.parse_args(argv)
config = get_config(opt.config)
print(opt)
if opt.manualSeed is None:
opt.manualSeed = random.randint(1, 10000)
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if config['cuda']:
torch.cuda.manual_seed_all(opt.manualSeed)
torch.cuda.set_device(opt.gpu_ids)
cudnn.benchmark = True
transform = transforms.Compose([transforms.Resize((512, 512)),
transforms.ToTensor(),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
dataset = Aligned_Dataset(config['datapath'], subfolder='test', direction='AtoB', transform=transform)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,
                                         shuffle=False, num_workers=2)
model_dir = '/media/scw4750/AIwalker/stackgan-like/checkpoints/generator_epoch_160.pkl'
trainer = GAN_Trainer(config, dataloader)
# load the model
trainer.G.load_state_dict(torch.load(model_dir))
trainer.test()
return
Example 15: __init__
def __init__(self, hyper_config, seed=1):
super(VAE, self).__init__()
torch.manual_seed(seed)
self.z_size = hyper_config['z_size']
self.x_size = hyper_config['x_size']
self.act_func = hyper_config['act_func']
self.flow_bool = hyper_config['flow_bool']
self.q_dist = hyper_config['q_dist'](self, hyper_config=hyper_config)
if torch.cuda.is_available():
self.dtype = torch.cuda.FloatTensor
self.q_dist.cuda()
else:
self.dtype = torch.FloatTensor
# Decoder
self.fc4 = nn.Linear(self.z_size, 200)
self.fc5 = nn.Linear(200, 200)
self.fc6 = nn.Linear(200, self.x_size)