

Python Variable.type Method Code Examples

This article collects typical usage examples of the Python method torch.autograd.Variable.type. If you are wondering what Variable.type does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of torch.autograd.Variable, the class this method belongs to.


The following shows 15 code examples of the Variable.type method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
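Before the individual examples, here is a minimal, self-contained sketch of the pattern nearly all of them share: cast a tensor to a target dtype with .type() and wrap it in a Variable. It is written against the pre-0.4 torch.autograd.Variable API that these examples target (in PyTorch 0.4 and later, Variable and Tensor are merged and the wrapper is a no-op); the array values and the dtype choice are illustrative only, not taken from any of the projects below.

import numpy as np
import torch
from torch.autograd import Variable

dtype = torch.FloatTensor            # switch to torch.cuda.FloatTensor to run on a GPU

# torch.from_numpy() yields a DoubleTensor here; .type(dtype) casts it to a FloatTensor
x = torch.from_numpy(np.ones((4, 3)))
x = Variable(x.type(dtype), requires_grad=False)

print(x.data.type())                 # 'torch.FloatTensor'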

Example 1: gaussian_kernel

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import type [as alias]
	def gaussian_kernel(self, x, σ):			#Each row is a sample
		bs = x.shape[0]
		K = self.db['dataType'](bs, bs)
		K = Variable(K.type(self.db['dataType']), requires_grad=False)		

		for i in range(bs):
			dif = x[i,:] - x
			K[i,:] = torch.exp(-torch.sum(dif*dif, dim=1)/(2*σ*σ))

		return K
Developer: juliaprocess, Project: ml_examples, Lines: 12, Source: identity_net.py

Example 2: optimize

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import type [as alias]
	def optimize(self):
		db = self.db
		lr = db['learning_rate']
		optimizer = db['model'].get_optimizer(lr)
		Dloader = db['data_loader']

		for epoch in range(db['epoc_loop']):
			for i, data in enumerate(Dloader, 0):
				inputs, labels = data
				
				inputs = Variable(inputs.type(db['dataType']), requires_grad=False)
				labels = Variable(labels.type(db['dataType']), requires_grad=False)

				y_pred = db['model'](inputs)
				loss = db['model'].compute_loss(labels, y_pred)

				db['model'].zero_grad()	
				optimizer.zero_grad()
				loss.backward()
				optimizer.step()

				print(loss.grad)
				import pdb; pdb.set_trace()	

				#lr = lr*0.999
				#optimizer = db['model'].get_optimizer(lr)
				#print lr

				#if loss.data[0] < 0.01: Dloader = self.db['data_loader_full']


			#if np.random.rand() > 0.99:
			#	lr = lr*0.90
			#	print '\t-----------------' , lr
			#	#lr = lr / (epoch + 1)
			#	#print '\t' , lr
			#	#for param_group in optimizer.param_groups: param_group['lr'] = lr


			if db['print_loss']: print(epoch, loss.data[0])
			if loss.data[0] < 0.001: break
Developer: juliaprocess, Project: ml_examples, Lines: 43, Source: pytorch_base.py

Example 3: initialize_network

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import type [as alias]
	def initialize_network(self):
		db = self.db

		atom = np.array([[1],[-1]])
		col = np.matlib.repmat(atom, self.width_scale, 1)
		z = np.zeros(((self.input_size-1)*2*self.width_scale, 1))
		one_column = np.vstack((col, z))
		original_column = np.copy(one_column)

		eyeMatrix = torch.eye(self.net_width)
		eyeMatrix = Variable(eyeMatrix.type(self.db['dataType']), requires_grad=False)		

		for i in range(self.input_size-1):
			one_column = np.roll(one_column, 2*self.width_scale)
			original_column = np.hstack((original_column, one_column))

		original_column = torch.tensor(original_column)
		original_column = Variable(original_column.type(self.db['dataType']), requires_grad=False)		


		for i, param in enumerate(self.parameters()):
			if len(param.data.shape) == 1:
				param.data = torch.zeros(param.data.size())
			else:
				if param.data.shape[1] == self.input_size:
					param.data = (1.0/self.width_scale)*original_column
				elif param.data.shape[0] == self.input_size:
					param.data = original_column.t()
				else:
					param.data = eyeMatrix

		#for i, param in enumerate(self.parameters()):
		#	print(param.data)


		self.num_of_linear_layers = 0
		for m in self.children():
			if type(m) == torch.nn.Linear:
				self.num_of_linear_layers += 1
Developer: juliaprocess, Project: ml_examples, Lines: 41, Source: identity_net.py

Example 4: get_loss

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import type [as alias]
	def get_loss(ckernel_net, data_loader):
		#	Compute final average loss
		loss_sum = 0
		for idx, data in enumerate(data_loader):
			data = Variable(data.type(dtype), requires_grad=False)
			try:
				loss = ckernel_net.CAE_compute_loss(data)
			except:
				import pdb; pdb.set_trace()
			loss_sum += loss
		
		avgL = loss_sum/idx
		return avgL.cpu().data.numpy()[0]
Developer: juliaprocess, Project: ml_examples, Lines: 15, Source: cnn_kernel_net.py

Example 5: get_loss

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import type [as alias]
def get_loss(ckernel_net, data_loader):
	#	Compute final average loss
	for idx, (data, target) in enumerate(data_loader):
		data = Variable(data.type(db['dataType']))
		loss = ckernel_net.CAE_compute_loss(data)


	dataOut = ckernel_net(data)
	dataOut = dataOut.cpu().data.numpy()

	allocation = KMeans(10).fit_predict(dataOut)
	nmi = normalized_mutual_info_score(allocation, target.numpy())
	return [loss.cpu().data.numpy()[0], nmi]
Developer: juliaprocess, Project: ml_examples, Lines: 15, Source: mnist.py

Example 6: compute_a_batch

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import type [as alias]
def compute_a_batch(batch, my_model, eval_mode, loss_criterion=None, add_graph=False, log_dir=None):

    obs_res = batch['ans_scores']
    obs_res = Variable(obs_res.type(torch.FloatTensor))
    if use_cuda:
        obs_res = obs_res.cuda()

    n_sample = obs_res.size(0)
    logit_res = one_stage_run_model(batch, my_model, add_graph, log_dir, eval_mode)
    predicted_scores = torch.sum(compute_score_with_logits(logit_res, obs_res.data))

    total_loss = None if loss_criterion is None else loss_criterion(logit_res, obs_res)

    return predicted_scores, total_loss, n_sample
Developer: xiaojie18, Project: pythia, Lines: 16, Source: Engineer.py

Example 7: view_xout

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import type [as alias]
def view_xout():
	[face_data, data_loader, result] = load_data()
	ckernel_net = result['kernel_net']
	
	for idx, data in enumerate(data_loader):
		data = Variable(data.type(dtype), requires_grad=False)
		xout = ckernel_net.CAE_forward(data)

		#xout = ckernel_net(data[1,:,:,:])
		#print(data[1,:,:,:].unsqueeze(dim=0))
		#single_data = data[1,:,:,:].unsqueeze(dim=0)
		#xout = ckernel_net(single_data)
		#import pdb; pdb.set_trace()
		#face_data.display_image(xout[1,0,:,:].cpu().data.numpy())
		import pdb; pdb.set_trace()
Developer: juliaprocess, Project: ml_examples, Lines: 17, Source: load_kernel.py

Example 8: gaussian_kernel

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import type [as alias]
	def gaussian_kernel(self, x):			#Each row is a sample
		bs = x.shape[0]
		s = self.sigma

		if bs < 50:	# Compute raw RBF kernel
			K = self.db['dataType'](bs, bs)
			K = Variable(K.type(self.db['dataType']), requires_grad=False)		
	
			for i in range(bs):
				for j in range(bs):
					tmpY = (x[i,:] - x[j,:]).unsqueeze(0)
					eVal = -(torch.mm(tmpY, tmpY.transpose(0,1)))/(2*s*s)
					K[i,j] = torch.exp(eVal)
		else: # use RFF
			K = self.rff.get_rbf(x, s, True)
			
		return K
Developer: juliaprocess, Project: ml_examples, Lines: 19, Source: cnn_kernel_net.py

Example 9: sample_images

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import type [as alias]
def sample_images(batches_done):
    """Saves a generated sample from the validation set"""
    imgs = next(iter(val_dataloader))
    img_samples = None
    for img_A, img_B in zip(imgs['A'], imgs['B']):
        # Repeat input image by number of channels
        real_A = img_A.view(1, *img_A.shape).repeat(8, 1, 1, 1)
        real_A = Variable(real_A.type(Tensor))
        # Get interpolated noise [-1, 1]
        sampled_z = np.repeat(np.linspace(-1, 1, 8)[:, np.newaxis], opt.latent_dim, 1)
        sampled_z = Variable(Tensor(sampled_z))
        # Generator samples
        fake_B = generator(real_A, sampled_z)
        # Concatenate samples horizontally
        fake_B = torch.cat([x for x in fake_B.data.cpu()], -1)
        img_sample = torch.cat((img_A, fake_B), -1)
        img_sample = img_sample.view(1, *img_sample.shape)
        # Concatenate with previous samples vertically
        img_samples = img_sample if img_samples is None else torch.cat((img_samples, img_sample), -2)
    save_image(img_samples, 'images/%s/%s.png' % (opt.dataset_name, batches_done), nrow=5, normalize=True)
Developer: hjpwhu, Project: PyTorch-GAN, Lines: 22, Source: bicyclegan.py

Example 10: sample_images

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import type [as alias]
def sample_images(batches_done):
    """Saves a generated sample from the validation set"""
    imgs = next(iter(val_dataloader))
    img_samples = None
    for img1, img2 in zip(imgs['A'], imgs['B']):
        # Create copies of image
        X1 = img1.unsqueeze(0).repeat(opt.style_dim, 1, 1, 1)
        X1 = Variable(X1.type(Tensor))
        # Get interpolated style codes
        s_code = np.repeat(np.linspace(-1, 1, opt.style_dim)[:, np.newaxis], opt.style_dim, 1)
        s_code = Variable(Tensor(s_code))
        # Generate samples
        c_code_1, _ = Enc1(X1)
        X12 = Dec2(c_code_1, s_code)
        # Concatenate samples horizontally
        X12 = torch.cat([x for x in X12.data.cpu()], -1)
        img_sample = torch.cat((img1, X12), -1).unsqueeze(0)
        # Concatenate with previous samples vertically
        img_samples = img_sample if img_samples is None else torch.cat((img_samples, img_sample), -2)
    save_image(img_samples, 'images/%s/%s.png' % (opt.dataset_name, batches_done), nrow=5, normalize=True)
Developer: hjpwhu, Project: PyTorch-GAN, Lines: 22, Source: munit.py

Example 11: rescale

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import type [as alias]
def rescale(data, db):

	Dnumpy = data.numpy()
	full_stack = None
	for m in range(Dnumpy.shape[0]):
		image = transform.resize(Dnumpy[m,0,:,:], (29,29), mode='constant')

		#plt.imshow(image, cmap='gray')
		#plt.show()
		#import pdb; pdb.set_trace()
	
		image = torch.from_numpy(image)
		image = image.unsqueeze(dim=0).unsqueeze(dim=0)
	
		if full_stack is None:
			full_stack = image
		else:
			full_stack = torch.cat((full_stack, image), dim=0)

	full_stack = Variable(full_stack.type(db['dataType']))
	return full_stack
Developer: juliaprocess, Project: ml_examples, Lines: 23, Source: mnist.py

Example 12: __len__

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import type [as alias]
	def __len__(self):
		return len(self.image_files)

	def __getitem__(self, idx):
		img_name = os.path.join(self.root_dir, self.image_files[idx])
		image = io.imread(img_name)
		image = transform.resize(image, (29,29), mode='constant')

		if len(image.shape) == 2:
			image = np.expand_dims(image,0)
		elif len(image.shape) == 3:
			image = np.moveaxis(image, -1, 0)

		image = torch.from_numpy(image)
		return image


if __name__ == '__main__':
	face_data = image_datasets('../../dataset/faces/', 'face_img')
	data_loader = DataLoader(face_data, batch_size=5, shuffle=True, num_workers=4)
	conv1 = nn.Conv2d(1, 10, kernel_size=5)
	
	for i, data in enumerate(data_loader, 0):
		print(data.shape)
		data = Variable(data.type(torch.FloatTensor), requires_grad=False)
		print(data.shape)
		x = conv1(data)
		print(x.shape)
		import pdb; pdb.set_trace()	

Developer: juliaprocess, Project: ml_examples, Lines: 31, Source: img_load.py

Example 13: load_classes

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import type [as alias]
classes = load_classes(opt.class_path) # Extracts class labels from file

Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor

imgs = []           # Stores image paths
img_detections = [] # Stores detections for each image index
label = []
img_path = []

print ('\nPerforming object detection:')
prev_time = time.time()
for batch_i, (img_paths, input_imgs) in enumerate(dataloader):
    print(img_paths)
    # Configure input
    input_imgs = Variable(input_imgs.type(Tensor))

    # Get detections
    with torch.no_grad():
        detections = model(input_imgs)
        detections = non_max_suppression(detections, 80, opt.conf_thres, opt.nms_thres)


    # Log progress
    current_time = time.time()
    inference_time = datetime.timedelta(seconds=current_time - prev_time)
    prev_time = current_time
    print ('\t+ Batch %d, Inference Time: %s' % (batch_i, inference_time))

    # Save image and detections
    imgs.extend(img_paths)
Developer: cf904c27, Project: PyTorch-YOLOv3, Lines: 32, Source: detect.py

Example 14: Variable

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import type [as alias]


d = 3
hidden_d = 4
output_d = 1
#dtype = torch.FloatTensor		# Use this dtype to run on CPU
dtype = torch.cuda.FloatTensor	# This dtype runs on GPU

learning_rate = 1

x = np.array([[1,0,0],[1,0,0],[0,1,0],[0,1,0],[0,0,1],[0,0,1]])
y = np.array([[0.3],[0.3],[0.6],[0.6],[1],[1]])

x = torch.from_numpy(x)
x = Variable(x.type(dtype), requires_grad=True)		# True because x is treated as a variable

y = torch.from_numpy(y)
y = Variable(y.type(dtype), requires_grad=False)	# False because y is a constant; if this changes, the network needs to be rebuilt

#	Network structure
NN = torch.nn.Sequential(
	torch.nn.Linear(d, hidden_d, bias=True),
	torch.nn.ReLU(),
	torch.nn.Linear(hidden_d, hidden_d, bias=True),
	torch.nn.ReLU(),
	torch.nn.Linear(hidden_d, output_d, bias=True),
	torch.nn.Sigmoid(),
)
NN = NN.cuda()
Developer: juliaprocess, Project: ml_examples, Lines: 31, Source: basic_NN_cuda.py

Example 15: Variable

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import type [as alias]
#	Optimize x^T A x + b^T x
#	A = [1 0;0 2] , b = [1, 2] , solution = -[1/2 1/2]


import torch
from torch.autograd import Variable
import numpy as np
from minConf_PQN import *

dtype = torch.FloatTensor
#dtype = torch.cuda.FloatTensor # Uncomment this to run on GPU

learning_rate = 0.1

x = torch.from_numpy(np.ones((2,1)))
x = Variable(x.type(dtype), requires_grad=True)

A = torch.from_numpy(np.array([[1,0],[0,2]]))
A = Variable(A.type(dtype), requires_grad=False)

b = torch.from_numpy(np.array([[1],[2]]))
b = Variable(b.type(dtype), requires_grad=False)

for m in range(30):
	opt1 = torch.mm(x.transpose(0,1), A)
	loss = torch.mm(opt1, x) + torch.mm(b.transpose(0,1),x) 
	loss.backward()


	minConf_PQN(funObj, x, funProj, options=None)
Developer: juliaprocess, Project: chieh_libs, Lines: 32, Source: autoGrad.py


Note: The torch.autograd.Variable.type examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding project's license. Please do not reproduce without permission.