

Python Variable.t Method Code Examples

This article collects typical usage examples of the Python method torch.autograd.Variable.t. If you are wondering what Variable.t does, how to use it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the containing class, torch.autograd.Variable.


The following presents 12 code examples of the Variable.t method, ordered by popularity by default.
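
Before the collected examples, here is a minimal orientation sketch (not part of the original collection; the tensor values are illustrative). Variable.t() belongs to the legacy, pre-0.4 Variable API: it returns the transpose of a 2-D tensor, and the transpose participates in autograd like any other operation.

import torch
from torch.autograd import Variable

# Wrap a 2x3 tensor in a Variable (legacy autograd API, PyTorch < 0.4).
x = Variable(torch.FloatTensor([[1, 2, 3], [4, 5, 6]]), requires_grad=True)

# Variable.t() transposes a 2-D tensor: shape (2, 3) -> (3, 2).
y = x.t()
print(y.data)

# The transpose is differentiable like any other op.
y.sum().backward()
print(x.grad)  # all-ones gradient of shape (2, 3)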

Example 1: l2l_validate

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import t [as alias]
def l2l_validate(model, cluster_center, n_epoch=100):
    val_accuracy = []
    for epoch in range(n_epoch):
        data_l = generate_data_l(cluster_center)
        data_n = generate_data_n(cluster_center, model.n_class_n)
        x_l, y_l = Variable(torch.from_numpy(data_l[0])).float(), Variable(
            torch.from_numpy(data_l[1]))
        x_n, y_n = Variable(torch.from_numpy(data_n[0])).float(), Variable(
            torch.from_numpy(data_n[1]))
        pred_ll, pred_nl, w, b = model(x_l, x_n)
        M = Variable(torch.zeros(model.n_class_n, model.n_dim))
        B = Variable(torch.zeros(model.n_class_n))
        for k in range(model.n_class_n):
            # Average the weights/biases of the samples belonging to new class k
            # (new-class labels are offset by model.n_class_l; assumes n_dim == 2).
            M[k] = torch.cat((w[:, 0][y_n == model.n_class_l + k].view(-1, 1),
                              w[:, 1][y_n == model.n_class_l + k].view(-1, 1)), 1).mean(0)
            B[k] = b[y_n == model.n_class_l + k].mean()
        pred_ln = torch.mm(x_l, M.t()) + B.view(1, -1).expand_as(torch.mm(x_l, M.t()))
        pred_nn = torch.mm(x_n, M.t()) + B.view(1, -1).expand_as(torch.mm(x_n, M.t()))
        pred = torch.cat((torch.cat((pred_ll, pred_nl)), torch.cat((pred_ln, pred_nn))), 1)
        pred = pred.data.max(1)[1]
        y = torch.cat((y_l, y_n))
        accuracy = pred.eq(y.data).cpu().sum() * 1.0 / y.size()[0]
        # print('accuracy: %.2f' % accuracy)
        val_accuracy.append(accuracy)
        # Assumes each batch holds 100 lifelong-class samples followed by 50 new-class samples.
        acc_l = pred.eq(y.data).cpu()[0:100].sum() * 1.0 / 100
        acc_n = pred.eq(y.data).cpu()[100:150].sum() * 1.0 / 50
        print('accuracy: %.2f, lifelong accuracy: %.2f, new accuracy: %.2f' % (accuracy, acc_l, acc_n))

    return numpy.mean(numpy.asarray(val_accuracy))
Author: yangyi02, Project: my-scripts, Lines: 31, Source: learning_to_learn_lifelong_newclass_trunc.py

Example 2: test_remote_var_binary_methods

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import t [as alias]
    def test_remote_var_binary_methods(self):
        ''' Unit tests for methods mentioned on issue 1385
            https://github.com/OpenMined/PySyft/issues/1385'''
        hook = TorchHook(verbose=False)
        local = hook.local_worker
        remote = VirtualWorker(hook, 1)
        local.add_worker(remote)

        x = Var(torch.FloatTensor([1, 2, 3, 4])).send(remote)
        y = Var(torch.FloatTensor([[1, 2, 3, 4]])).send(remote)
        z = torch.matmul(x, y.t())
        assert (torch.equal(z.get(), Var(torch.FloatTensor([30]))))
        z = torch.add(x, y)
        assert (torch.equal(z.get(), Var(torch.FloatTensor([[2, 4, 6, 8]]))))
        x = Var(torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])).send(remote)
        y = Var(torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])).send(remote)
        z = torch.cross(x, y, dim=1)
        assert (torch.equal(z.get(), Var(torch.FloatTensor([[0, 0, 0], [0, 0, 0], [0, 0, 0]]))))
        x = Var(torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])).send(remote)
        y = Var(torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])).send(remote)
        z = torch.dist(x, y)
        assert (torch.equal(z.get(), Var(torch.FloatTensor([0.]))))
        x = Var(torch.FloatTensor([1, 2, 3])).send(remote)
        y = Var(torch.FloatTensor([1, 2, 3])).send(remote)
        z = torch.dot(x, y)
        assert (torch.equal(z.get(), Var(torch.FloatTensor([14]))))
        z = torch.eq(x, y)
        assert (torch.equal(z.get(), Var(torch.ByteTensor([1, 1, 1]))))
        z = torch.ge(x, y)
        assert (torch.equal(z.get(), Var(torch.ByteTensor([1, 1, 1]))))
Author: TanayGahlot, Project: PySyft, Lines: 32, Source: torch_test.py

Example 3: backward

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import t [as alias]
    def backward(ctx, grad_output):
        input1, input2, weight, bias = ctx.saved_variables
        grad_input1 = grad_input2 = grad_weight = grad_bias = None

        buff = Variable(input1.data.new())

        if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
            # Slice 0 of the bilinear weight: gradients w.r.t. both inputs.
            grad_input1 = torch.mm(input2, weight[0].t())
            grad_input1 = grad_input1.mul(grad_output.narrow(1, 0, 1).expand(grad_input1.size()))
            grad_input2 = torch.mm(input1, weight[0])
            grad_input2 = grad_input2.mul(grad_output.narrow(1, 0, 1).expand(grad_input2.size()))

            # Accumulate the remaining K-1 weight slices into the input gradients.
            for k in range(1, weight.size(0)):
                buff = input2.mm(weight[k].t())
                buff = buff.mul(grad_output.narrow(1, k, 1).expand(grad_input1.size()))
                grad_input1.add_(buff)

                buff = input1.mm(weight[k])
                buff = buff.mul(grad_output.narrow(1, k, 1).expand(grad_input2.size()))
                grad_input2.add_(buff)

        grad_weight = Variable(weight.data.new(weight.size()))
        if ctx.needs_input_grad[2]:
            # accumulate parameter gradients:
            for k in range(weight.size(0)):
                buff = input1.mul(grad_output.narrow(1, k, 1).expand_as(input1))
                grad_weight[k] = torch.mm(buff.t(), input2)

        if bias is not None and ctx.needs_input_grad[3]:
            grad_bias = grad_output.sum(0, keepdim=False)

        return grad_input1, grad_input2, grad_weight, grad_bias
Author: Jsmilemsj, Project: pytorch, Lines: 34, Source: linear.py

Example 4: l2l_validate

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import t [as alias]
def l2l_validate(model, cluster_center, n_epoch=100):
    val_accuracy = []
    for epoch in range(n_epoch):
        batch = generate_data(cluster_center)
        x, y = Variable(torch.from_numpy(batch[0])).float(), Variable(torch.from_numpy(batch[1]))
        w, b = model(x)
        M = Variable(torch.zeros(model.n_class, model.n_dim))
        B = Variable(torch.zeros(model.n_class))
        for k in range(model.n_class):
            M[k] = torch.cat((w[:, 0][y == k].view(-1, 1), w[:, 1][y == k].view(-1, 1)), 1).mean(0)
            B[k] = b[y == k].mean()
        pred = torch.mm(x, M.t()) + B.view(1, -1).expand_as(torch.mm(x, M.t()))
        pred = pred.data.max(1)[1]
        accuracy = pred.eq(y.data).cpu().sum() * 1.0 / y.size()[0]  # * 1.0 forces float division
        print('accuracy: %.2f' % accuracy)
        val_accuracy.append(accuracy)
    return numpy.mean(numpy.asarray(val_accuracy))
Author: yangyi02, Project: my-scripts, Lines: 19, Source: learning_to_learn.py

Example 5: l2l_train

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import t [as alias]
def l2l_train(model, cluster_center, n_epoch=10000):
    optimizer = optim.Adam(model.parameters(), lr=0.01)
    for epoch in range(n_epoch):
        batch = generate_data(cluster_center)
        x, y = Variable(torch.from_numpy(batch[0])).float(), Variable(torch.from_numpy(batch[1]))
        optimizer.zero_grad()
        w, b = model(x)
        M = Variable(torch.zeros(model.n_class, model.n_dim))
        B = Variable(torch.zeros(model.n_class))
        for k in range(model.n_class):
            M[k] = torch.cat((w[:, 0][y == k].view(-1, 1), w[:, 1][y == k].view(-1, 1)), 1).mean(0)
            B[k] = b[y == k].mean()
        pred = torch.mm(x, M.t()) + B.view(1, -1).expand_as(torch.mm(x, M.t()))
        loss = F.cross_entropy(pred, y)
        loss.backward()
        optimizer.step()
        print('Train Epoch: {}\tLoss: {:.6f}'.format(epoch, loss.data[0]))
Author: yangyi02, Project: my-scripts, Lines: 19, Source: learning_to_learn.py

Example 6: _calc_deriv_sym

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import t [as alias]
    def _calc_deriv_sym(self, A, L, upper):
        # Reverse mode: seed backward with a random lower-triangular gradient.
        Lbar = Variable(torch.rand(5, 5).tril())
        if upper:
            Lbar = Lbar.t()
        L.backward(Lbar)
        Abar = A.grad

        return Abar, Lbar
Author: Northrend, Project: pytorch, Lines: 11, Source: test_potrf.py

Example 7: l2l_validate

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import t [as alias]
def l2l_validate(model, n_epoch=100):
    val_accuracy = []
    for epoch in range(n_epoch):
        data_train, data_test, cluster_center = generate_data(model.n_class, model.n_dim)
        x, y = Variable(torch.from_numpy(data_train[0])).float(), Variable(
            torch.from_numpy(data_train[1]))
        w, b = model(x)
        M = Variable(torch.zeros(model.n_class, model.n_dim))
        B = Variable(torch.zeros(model.n_class))
        for k in range(model.n_class):
            M[k] = torch.cat((w[:, 0][y == k].view(-1, 1), w[:, 1][y == k].view(-1, 1)), 1).mean(0)
            B[k] = b[y == k].mean()
        x_test, y_test = Variable(torch.from_numpy(data_test[0])).float(), Variable(
            torch.from_numpy(data_test[1]))
        pred = torch.mm(x_test, M.t()) + B.view(1, -1).expand_as(torch.mm(x_test, M.t()))
        pred = pred.data.max(1)[1]
        accuracy = pred.eq(y_test.data).cpu().sum() * 1.0 / y_test.size()[0]  # * 1.0 forces float division
        print('accuracy: %.2f' % accuracy)
        if accuracy < 0.99:
            print(numpy.concatenate((cluster_center, M.data.numpy()), 1))
        val_accuracy.append(accuracy)
    return numpy.mean(numpy.asarray(val_accuracy))
Author: yangyi02, Project: my-scripts, Lines: 24, Source: learning_to_learn_withtest.py

Example 8: translate

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import t [as alias]
    def translate(self, src, max_decode_len=2):
        """
        Translate a single input sequence using greedy decoding..

        Parameters:
        -----------

        src: torch.LongTensor (seq_len x 1)
        """
        pad = self.src_dict.get_pad()
        eos = self.src_dict.get_eos()
        bos = self.src_dict.get_bos()
        gpu = src.is_cuda
        # encode
        emb = self.src_embeddings(src)
        enc_outs, enc_hidden = self.encoder(
            emb, compute_mask=False, mask_symbol=pad)
        # decode
        dec_hidden = self.decoder.init_hidden_for(enc_hidden)
        dec_out, enc_att = None, None
        if self.decoder.att_type == 'Bahdanau':
            enc_att = self.decoder.attn.project_enc_outs(enc_outs)
        scores, hyp, atts = [], [], []
        prev = Variable(src.data.new([bos]), volatile=True).unsqueeze(0)
        if gpu: prev = prev.cuda()
        for _ in range(len(src) * max_decode_len):
            prev_emb = self.trg_embeddings(prev).squeeze(0)
            dec_out, dec_hidden, att_weights = self.decoder(
                prev_emb, dec_hidden, enc_outs, out=dec_out, enc_att=enc_att)
            # (batch x vocab_size)
            outs = self.project(dec_out)
            # (1 x batch) argmax over log-probs (take idx across batch dim)
            best_score, prev = outs.max(1)
            prev = prev.t()
            # concat of step vectors along seq_len dim
            scores.append(best_score.squeeze().data[0])
            atts.append(att_weights.squeeze().data.cpu().numpy().tolist())
            hyp.append(prev.squeeze().data[0])
            # termination criterion: decoding <eos>
            if prev.data.eq(eos).nonzero().nelement() > 0:
                break
        # add singleton dimension for compatibility with other decoding
        return [sum(scores)], [hyp], [atts]
Author: mikekestemont, Project: seqmod, Lines: 45, Source: encoder_decoder.py

Example 9: initialize_network

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import t [as alias]
	def initialize_network(self):
		db = self.db

		atom = np.array([[1],[-1]])
		col = np.matlib.repmat(atom, self.width_scale, 1)  # requires: import numpy.matlib
		z = np.zeros(((self.input_size-1)*2*self.width_scale, 1))
		one_column = np.vstack((col, z))
		original_column = np.copy(one_column)

		eyeMatrix = torch.eye(self.net_width)
		eyeMatrix = Variable(eyeMatrix.type(self.db['dataType']), requires_grad=False)		

		for i in range(self.input_size-1):
			one_column = np.roll(one_column, 2*self.width_scale)
			original_column = np.hstack((original_column, one_column))

		original_column = torch.tensor(original_column)
		original_column = Variable(original_column.type(self.db['dataType']), requires_grad=False)		


		for i, param in enumerate(self.parameters()):
			if len(param.data.shape) == 1:
				param.data = torch.zeros(param.data.size())
			else:
				if param.data.shape[1] == self.input_size:
					param.data = (1.0/self.width_scale)*original_column
				elif param.data.shape[0] == self.input_size:
					param.data = original_column.t()
				else:
					param.data = eyeMatrix

		#for i, param in enumerate(self.parameters()):
		#	print(param.data)


		self.num_of_linear_layers = 0
		for m in self.children():
			if type(m) == torch.nn.Linear:
				self.num_of_linear_layers += 1
Author: juliaprocess, Project: ml_examples, Lines: 41, Source: identity_net.py

Example 10: Variable

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import t [as alias]
import torch
import pandas as pd
from torch.autograd import Variable
from numpy.random import randint
from sklearn import metrics
trainData = pd.read_csv('ml100k.train.rating', header=None, names=['user','item','rate'], sep='\t')
testData = pd.read_csv('ml100k.test.rating', header=None, names=['user','item','rate'], sep='\t')

userIdx = trainData.user.values
itemIdx = trainData.item.values
rates = trainData.rate.values

K=20
lambd = 0.00001
learning_rate =1e-6
U = Variable(torch.randn([len(set(userIdx)),K]), requires_grad=True)
P = Variable(torch.randn([len(set(itemIdx)),K]), requires_grad=True)
R = torch.mm(U,P.t())
ratesPred = torch.gather(R.view(1,-1)[0], 0, Variable(torch.LongTensor(userIdx * len(set(itemIdx)) + itemIdx)))
diff_op = ratesPred - Variable(torch.FloatTensor(rates))
baseLoss = diff_op.pow(2).sum()

#regularizer = lambd* (U.abs().sum()+P.abs().sum())
regularizer = lambd* (U.pow(2).sum()+P.pow(2).sum())
loss = baseLoss + regularizer 

#optimizer = torch.optim.Adam([U,P], lr = learning_rate)
optimizer = torch.optim.SGD([U,P], lr = learning_rate,momentum = 0.9)
print('Training')
for i in range(250):
    # Recompute the loss every iteration: backward() frees the graph,
    # and gradients must be zeroed before each optimizer step.
    optimizer.zero_grad()
    R = torch.mm(U, P.t())
    ratesPred = torch.gather(R.view(1,-1)[0], 0, Variable(torch.LongTensor(userIdx * len(set(itemIdx)) + itemIdx)))
    diff_op = ratesPred - Variable(torch.FloatTensor(rates))
    loss = diff_op.pow(2).sum() + lambd * (U.pow(2).sum() + P.pow(2).sum())
    loss.backward()
    optimizer.step()
R = torch.mm(U, P.t())  # final reconstruction after training
Author: robotsl, Project: pytorchMF, Lines: 33, Source: pytorchMF.py

Example 11: enumerate

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import t [as alias]
        total_acc = 0.0  # running training accuracy (reset again before the testing epoch)
        total_loss = 0.0
        total = 0.0
        for iter, traindata in enumerate(train_loader):
            train_inputs, train_labels = traindata
            #print(train_inputs)
            #print(train_labels)
            train_labels = torch.squeeze(train_labels)

            if use_gpu:
                train_inputs, train_labels = Variable(train_inputs.cuda()), train_labels.cuda()
            else: train_inputs = Variable(train_inputs)

            model.zero_grad()
            model.batch_size = len(train_labels)
            model.hidden = model.init_hidden()
            output = model(train_inputs.t())

            loss = loss_function(output, Variable(train_labels))
            loss.backward()
            optimizer.step()

            # calc training acc
            _, predicted = torch.max(output.data, 1)
            total_acc += (predicted == train_labels).sum()
            total += len(train_labels)
            total_loss += loss.data[0]

        train_loss_.append(total_loss / total)
        train_acc_.append(total_acc / total)
        ## testing epoch
        total_acc = 0.0
Author: hjpwhu, Project: Python, Lines: 33, Source: semeval_task1a.py

Example 12: print

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import t [as alias]
        print("Val acc, prec, ppl", acc, prec, ppl)
else:
    model.load_state_dict(torch.load(args.model_file))
    acc, prec, ppl = validate()
    print("Val acc, prec, ppl", acc, prec, ppl)

model.eval()
with open("interpolated_ensemble_predictions.csv", "w") as f:
    writer = csv.writer(f)
    writer.writerow(['id','word'])
    for i, l in enumerate(open("input.txt"),1):
        words = [TEXT.vocab.stoi[word] for word in l.split(' ')]
        words = Variable(torch.cuda.LongTensor(words).unsqueeze(1))
        LSTMhidden = fLSTM.initHidden(1)
        GRUhidden = fGRU.initHidden(1)
        LSTMout, LSTMhidden = fLSTM(words, LSTMhidden)
        GRUout, GRUhidden = fGRU(words, GRUhidden)
        # n is the NNLM's n-gram order, defined earlier in the script; pad with n-1 zeros.
        pads = Variable(torch.zeros(n-1,words.size(1))).type(torch.cuda.LongTensor)
        padwords = torch.cat([pads,words],dim=0)
        NNLMout = fNNLM(torch.cat([ padwords[:,0:1][b:b+n,:] for b in range(words.size(0)) ],dim=1).t()).unsqueeze(1)
        #print(NNLMout.size())
        out = model(words.t(),LSTMout,GRUout,NNLMout)
        out = out.view(-1,len(TEXT.vocab))[-2]
        #out = out.squeeze(1)[-2] # |V|
        out = F.softmax(out,dim=0)
        _, predicted = torch.sort(out,descending=True)
        predicted = predicted[:20].data.tolist()
        predwords = [TEXT.vocab.itos[x] for x in predicted]
        writer.writerow([i,' '.join(predwords)])
Author: anihamde, Project: cs287-s18, Lines: 32, Source: interpolated_ensemble.py


Note: The torch.autograd.Variable.t examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors. Refer to each project's license before distributing or reusing the code; do not reproduce without permission.