

Python torch.t Method Code Examples

This article collects typical usage examples of the Python method torch.t. If you are wondering what exactly torch.t does, how to call it, or what it looks like in practice, the curated code examples below may help. You can also explore further usage examples from the torch module, where this method lives.


Fifteen code examples of the torch.t method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
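Before the examples, a minimal standalone sketch (not taken from any of the projects below) of what torch.t does: it transposes a 2-D tensor, while 0-D and 1-D tensors are returned unchanged.

import torch

x = torch.arange(6).reshape(2, 3)   # shape (2, 3)
y = torch.t(x)                      # shape (3, 2); equivalent to x.T for 2-D tensors
print(y.shape)                      # torch.Size([3, 2])

v = torch.tensor([1.0, 2.0, 3.0])
print(torch.t(v).shape)             # 1-D input is returned as-is: torch.Size([3])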

Example 1: r_duvenaud

# Required import: import torch [as alias]
# Alternatively: from torch import t [as alias]
def r_duvenaud(self, h):
        # layers
        aux = []
        for l in range(len(h)):
            param_sz = self.learn_args[l].size()
            parameter_mat = torch.t(self.learn_args[l])[None, ...].expand(h[l].size(0), param_sz[1], param_sz[0])

            aux.append(torch.transpose(torch.bmm(parameter_mat, torch.transpose(h[l], 1, 2)), 1, 2))

            for j in range(0, aux[l].size(1)):
                # Mask whole 0 vectors
                aux[l][:, j, :] = nn.Softmax(dim=1)(aux[l][:, j, :].clone()) * (torch.sum(aux[l][:, j, :] != 0, 1, keepdim=True) > 0).expand_as(aux[l][:, j, :]).type_as(aux[l])

        aux = torch.sum(torch.sum(torch.stack(aux, 3), 3), 1)
        return self.learn_modules[0](torch.squeeze(aux)) 
Author: priba, Project: nmp_qc, Lines: 18, Source file: ReadoutFunction.py
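As a side note, here is a minimal sketch (with hypothetical shapes, not code from the project above) of the transpose-then-expand pattern this example uses to apply a single weight matrix across a batch with torch.bmm:

import torch

B, N, in_f, out_f = 4, 7, 5, 3
W = torch.randn(in_f, out_f)                            # learned parameter
h = torch.randn(B, N, in_f)                             # batched node features
Wt = torch.t(W)[None, ...].expand(B, out_f, in_f)       # broadcast (out_f, in_f) over the batch
out = torch.bmm(Wt, h.transpose(1, 2)).transpose(1, 2)  # (B, N, out_f)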

Example 2: forward

# Required import: import torch [as alias]
# Alternatively: from torch import t [as alias]
def forward(self,iput):

		bin_a=None
		level1_rep=None
		[batch_size,_,_]=iput.size()

		for hm,hm_encdr in enumerate(self.rnn_hms):
			hmod=iput[:,:,hm].contiguous()
			hmod=torch.t(hmod).unsqueeze(2)

			op,a= hm_encdr(hmod)
			if level1_rep is None:
				level1_rep=op
				bin_a=a
			else:
				level1_rep=torch.cat((level1_rep,op),1)
				bin_a=torch.cat((bin_a,a),1)
		level1_rep=level1_rep.permute(1,0,2)
		final_rep_1,hm_level_attention_1=self.hm_level_rnn_1(level1_rep)
		final_rep_1=final_rep_1.squeeze(1)
		prediction_m=((self.fdiff1_1(final_rep_1)))
		
		return torch.sigmoid(prediction_m) 
Author: kipoi, Project: models, Lines: 25, Source file: models.py
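In this example, torch.t flips each (batch, bins) slice of the input into the sequence-first (bins, batch) layout expected by the per-channel RNN encoder, before unsqueeze(2) adds a trailing feature dimension.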

Example 3: calculate_positive_embedding_loss

# Required import: import torch [as alias]
# Alternatively: from torch import t [as alias]
def calculate_positive_embedding_loss(self, z, positive_edges):
        """
        Calculating the loss on the positive edge embedding distances
        :param z: Hidden vertex representation.
        :param positive_edges: Positive training edges.
        :return loss_term: Loss value on positive edge embedding.
        """
        self.positive_surrogates = [random.choice(self.nodes) for node in range(positive_edges.shape[1])]
        self.positive_surrogates = torch.from_numpy(np.array(self.positive_surrogates, dtype=np.int64).T)
        self.positive_surrogates = self.positive_surrogates.type(torch.long).to(self.device)
        positive_edges = torch.t(positive_edges)
        self.positive_z_i = z[positive_edges[:, 0], :]
        self.positive_z_j = z[positive_edges[:, 1], :]
        self.positive_z_k = z[self.positive_surrogates, :]
        norm_i_j = torch.norm(self.positive_z_i-self.positive_z_j, 2, 1, True).pow(2)
        norm_i_k = torch.norm(self.positive_z_i-self.positive_z_k, 2, 1, True).pow(2)
        term = norm_i_j-norm_i_k
        term[term < 0] = 0
        loss_term = term.mean()
        return loss_term 
Author: benedekrozemberczki, Project: SGCN, Lines: 22, Source file: sgcn.py
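In effect, this method computes the hinge-style term mean(max(0, ‖z_i − z_j‖² − ‖z_i − z_k‖²)) over positive edges (i, j) and randomly sampled surrogate nodes k, so connected nodes end up closer in the embedding than random pairs; torch.t simply converts the (2, num_edges) edge list into (num_edges, 2) rows for indexing.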

Example 4: calculate_negative_embedding_loss

# Required import: import torch [as alias]
# Alternatively: from torch import t [as alias]
def calculate_negative_embedding_loss(self, z, negative_edges):
        """
        Calculating the loss on the negative edge embedding distances
        :param z: Hidden vertex representation.
        :param negative_edges: Negative training edges.
        :return loss_term: Loss value on negative edge embedding.
        """
        self.negative_surrogates = [random.choice(self.nodes) for node in range(negative_edges.shape[1])]
        self.negative_surrogates = torch.from_numpy(np.array(self.negative_surrogates, dtype=np.int64).T)
        self.negative_surrogates = self.negative_surrogates.type(torch.long).to(self.device)
        negative_edges = torch.t(negative_edges)
        self.negative_z_i = z[negative_edges[:, 0], :]
        self.negative_z_j = z[negative_edges[:, 1], :]
        self.negative_z_k = z[self.negative_surrogates, :]
        norm_i_j = torch.norm(self.negative_z_i-self.negative_z_j, 2, 1, True).pow(2)
        norm_i_k = torch.norm(self.negative_z_i-self.negative_z_k, 2, 1, True).pow(2)
        term = norm_i_k-norm_i_j
        term[term < 0] = 0
        loss_term = term.mean()
        return loss_term 
Author: benedekrozemberczki, Project: SGCN, Lines: 22, Source file: sgcn.py

Example 5: __init__

# Required import: import torch [as alias]
# Alternatively: from torch import t [as alias]
def __init__(self, dir, transform=None):
        self.dir = dir
        box_data = torch.from_numpy(loadmat(self.dir+'/box_data.mat')['boxes']).float()
        op_data = torch.from_numpy(loadmat(self.dir+'/op_data.mat')['ops']).int()
        sym_data = torch.from_numpy(loadmat(self.dir+'/sym_data.mat')['syms']).float()
        #weight_list = torch.from_numpy(loadmat(self.dir+'/weights.mat')['weights']).float()
        num_examples = op_data.size()[1]
        box_data = torch.chunk(box_data, num_examples, 1)
        op_data = torch.chunk(op_data, num_examples, 1)
        sym_data = torch.chunk(sym_data, num_examples, 1)
        #weight_list = torch.chunk(weight_list, num_examples, 1)
        self.transform = transform
        self.trees = []
        for i in range(len(op_data)):
            boxes = torch.t(box_data[i])
            ops = torch.t(op_data[i])
            syms = torch.t(sym_data[i])
            tree = Tree(boxes, ops, syms)
            self.trees.append(tree) 
Author: kevin-kaixu, Project: grass_pytorch, Lines: 21, Source file: grassdata.py

Example 6: __init__ (Python 2 variant of Example 5)

# Required import: import torch [as alias]
# Alternatively: from torch import t [as alias]
def __init__(self, dir, transform=None):
        self.dir = dir
        box_data = torch.from_numpy(loadmat(self.dir+u'/box_data.mat')[u'boxes']).float()
        op_data = torch.from_numpy(loadmat(self.dir+u'/op_data.mat')[u'ops']).int()
        sym_data = torch.from_numpy(loadmat(self.dir+u'/sym_data.mat')[u'syms']).float()
        #weight_list = torch.from_numpy(loadmat(self.dir+'/weights.mat')['weights']).float()
        num_examples = op_data.size()[1]
        box_data = torch.chunk(box_data, num_examples, 1)
        op_data = torch.chunk(op_data, num_examples, 1)
        sym_data = torch.chunk(sym_data, num_examples, 1)
        #weight_list = torch.chunk(weight_list, num_examples, 1)
        self.transform = transform
        self.trees = []
        for i in xrange(len(op_data)):
            boxes = torch.t(box_data[i])
            ops = torch.t(op_data[i])
            syms = torch.t(sym_data[i])
            tree = Tree(boxes, ops, syms)
            self.trees.append(tree) 
Author: kevin-kaixu, Project: grass_pytorch, Lines: 21, Source file: grassdata.py

Example 7: __getitem__

# Required import: import torch [as alias]
# Alternatively: from torch import t [as alias]
def __getitem__(self, index):
        """
        Returns a single noisy sample. Multiple samples are fed to the collater
        to create a noising dataset batch.
        """
        src_tokens = self.src_dataset[index]
        src_lengths = torch.LongTensor([len(src_tokens)])
        src_tokens = src_tokens.unsqueeze(0)

        # Transpose src tokens to fit expected shape of x in noising function
        # (batch size, sequence length) -> (sequence length, batch size)
        src_tokens_t = torch.t(src_tokens)

        with data_utils.numpy_seed(self.seed + index):
            noisy_src_tokens = self.noiser.noising(src_tokens_t, src_lengths)

        # Transpose back to expected src_tokens format
        # (sequence length, 1) -> (1, sequence length)
        noisy_src_tokens = torch.t(noisy_src_tokens)
        return noisy_src_tokens[0] 
Author: pytorch, Project: fairseq, Lines: 22, Source file: noising.py
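Note that for a single sequence, torch.t turns the (1, sequence_length) token tensor into the (sequence_length, 1) shape the noising function expects, and the second torch.t transposes the result back.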

Example 8: predict

# Required import: import torch [as alias]
# Alternatively: from torch import t [as alias]
def predict(self, x):
        batch_size, dims = x.size()
        query = F.normalize(self.query_proj(x), dim=1)

        # Find the k-nearest neighbors of the query
        scores = torch.matmul(query, torch.t(self.keys_var))
        cosine_similarity, topk_indices_var = torch.topk(scores, self.top_k, dim=1)

        # softmax over the scaled cosine similarities
        softmax_score = F.softmax(self.softmax_temperature * cosine_similarity, dim=1)

        # retrieve memory values - prediction
        y_hat_indices = topk_indices_var.data[:, 0]
        y_hat = self.values[y_hat_indices]

        return y_hat, softmax_score 
Author: RUSH-LAB, Project: LSH_Memory, Lines: 18, Source file: memory.py
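A minimal self-contained sketch (with a hypothetical key store, not the module above) of the scoring step: once queries and keys are L2-normalized, cosine similarity reduces to a matrix product against the transposed key matrix:

import torch
import torch.nn.functional as F

queries = F.normalize(torch.randn(8, 16), dim=1)   # (batch, dim), unit norm
keys = F.normalize(torch.randn(100, 16), dim=1)    # (memory_size, dim), unit norm
scores = torch.matmul(queries, torch.t(keys))      # (batch, memory_size) cosine similarities
similarity, indices = torch.topk(scores, k=5, dim=1)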

Example 9: evaluate

# Required import: import torch [as alias]
# Alternatively: from torch import t [as alias]
def evaluate(data_source):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0.
    ntokens = len(corpus.dictionary)
    memory = model.module.initial_state(eval_batch_size, trainable=False).to(device)

    with torch.no_grad():
        for i in range(0, data_source.size(0) - 1, args.bptt):
            data, targets = get_batch(data_source, i)
            data = torch.t(data)

            loss, memory = model(data, memory, targets)
            loss = torch.mean(loss)

            # data has shape [T * B, N]
            total_loss += args.bptt * loss.item()

    return total_loss / len(data_source) 
Author: L0SG, Project: relational-rnn-pytorch, Lines: 21, Source file: train_rmc.py

Example 10: zca_matrix

# Required import: import torch [as alias]
# Alternatively: from torch import t [as alias]
def zca_matrix(data_tensor):
    """
    Helper function: compute ZCA whitening matrix across a dataset ~ (N, C, H, W).
    """
    # 1. flatten dataset:
    X = data_tensor.view(data_tensor.shape[0], -1)
    
    # 2. zero-center the matrix:
    X = rescale(X, -1., 1.)
    
    # 3. compute covariances:
    cov = torch.t(X) @ X

    # 4. compute ZCA(X) == U @ (diag(1/S)) @ torch.t(V) where U, S, V = SVD(cov):
    U, S, V = torch.svd(cov)
    return (U @ torch.diag(torch.reciprocal(S)) @ torch.t(V)) 
Author: paultsw, Project: nice_pytorch, Lines: 18, Source file: make_datasets.py
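Here torch.t appears twice: once to form the Gram matrix Xᵀ X, and once for the Vᵀ factor of the whitening matrix U · diag(1/S) · Vᵀ built from the SVD of the covariance. (As written, the helper inverts the full singular values; textbook ZCA whitening uses the inverse square root diag(1/√S), so treat this as the project's own variant.)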

Example 11: score_snippets

# Required import: import torch [as alias]
# Alternatively: from torch import t [as alias]
def score_snippets(snippets, scorer):
    """ Scores snippets given a scorer.

    Inputs:
        snippets (list of Snippet): The snippets to score.
        scorer (dy.Expression): Dynet vector against which to score  the snippets.

    Returns:
        dy.Expression, list of str, where the first is the scores and the second
            is the names of the snippets that were scored.
    """
    snippet_expressions = [snippet.embedding for snippet in snippets]
    all_snippet_embeddings = torch.stack(snippet_expressions, dim=1)

    scores = torch.t(torch.mm(torch.t(scorer), all_snippet_embeddings))

    if scores.size()[0] != len(snippets):
        raise ValueError("Got " + str(scores.size()[0]) + " scores for " + str(len(snippets)) + " snippets")

    return scores, [snippet.name for snippet in snippets] 
Author: ryanzhumich, Project: editsql, Lines: 22, Source file: token_predictor.py

Example 12: linspace_vector

# Required import: import torch [as alias]
# Alternatively: from torch import t [as alias]
def linspace_vector(start, end, n_points):
	# start is either one value or a vector
	size = np.prod(start.size())

	assert(start.size() == end.size())
	if size == 1:
		# start and end are 1d-tensors
		res = torch.linspace(start, end, n_points)
	else:
		# start and end are vectors
		res = torch.Tensor()
		for i in range(0, start.size(0)):
			res = torch.cat((res, 
				torch.linspace(start[i], end[i], n_points)),0)
		res = torch.t(res.reshape(start.size(0), n_points))
	return res 
Author: YuliaRubanova, Project: latent_ode, Lines: 18, Source file: utils.py
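A brief usage sketch (assuming linspace_vector as defined above, which also requires numpy imported as np):

import torch

start = torch.tensor([0.0, 10.0])
end = torch.tensor([1.0, 20.0])
pts = linspace_vector(start, end, 5)   # shape (5, 2); torch.t flips (dim, n_points) to (n_points, dim)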

Example 13: normalize_data

# Required import: import torch [as alias]
# Alternatively: from torch import t [as alias]
def normalize_data(data):
	reshaped = data.reshape(-1, data.size(-1))

	att_min = torch.min(reshaped, 0)[0]
	att_max = torch.max(reshaped, 0)[0]

	# we don't want to divide by zero
	att_max[ att_max == 0.] = 1.

	if (att_max != 0.).all():
		data_norm = (data - att_min) / att_max
	else:
		raise Exception("Zero!")

	if torch.isnan(data_norm).any():
		raise Exception("nans!")

	return data_norm, att_min, att_max 
Author: YuliaRubanova, Project: latent_ode, Lines: 20, Source file: utils.py

Example 14: normalize_masked_data

# Required import: import torch [as alias]
# Alternatively: from torch import t [as alias]
def normalize_masked_data(data, mask, att_min, att_max):
	# we don't want to divide by zero
	att_max[ att_max == 0.] = 1.

	if (att_max != 0.).all():
		data_norm = (data - att_min) / att_max
	else:
		raise Exception("Zero!")

	if torch.isnan(data_norm).any():
		raise Exception("nans!")

	# set masked out elements back to zero 
	data_norm[mask == 0] = 0

	return data_norm, att_min, att_max 
Author: YuliaRubanova, Project: latent_ode, Lines: 18, Source file: utils.py

Example 15: mutual_cost_mat

# Required import: import torch [as alias]
# Alternatively: from torch import t [as alias]
def mutual_cost_mat(self, index1, index2):
        embs1 = self.emb_model[0](index1)  # (batch_size1, dim)
        embs2 = self.emb_model[1](index2)  # (batch_size2, dim)
        if self.cost_type == 'cosine':
            # cosine similarity
            energy1 = torch.sqrt(torch.sum(embs1 ** 2, dim=1, keepdim=True))  # (batch_size1, 1)
            energy2 = torch.sqrt(torch.sum(embs2 ** 2, dim=1, keepdim=True))  # (batch_size2, 1)
            cost = 1-torch.exp(-(1-torch.matmul(embs1, torch.t(embs2))/(torch.matmul(energy1, torch.t(energy2))+1e-5)))
        else:
            # Euclidean distance
            embs = torch.matmul(embs1, torch.t(embs2))  # (batch_size1, batch_size2)
            # (batch_size1, batch_size2)
            embs_diag1 = torch.diag(torch.matmul(embs1, torch.t(embs1))).view(-1, 1).repeat(1, embs2.size(0))
            # (batch_size2, batch_size1)
            embs_diag2 = torch.diag(torch.matmul(embs2, torch.t(embs2))).view(-1, 1).repeat(1, embs1.size(0))
            cost = 1-torch.exp(-(embs_diag1 + torch.t(embs_diag2) - 2 * embs)/embs1.size(1))
        return cost 
Author: HongtengXu, Project: gwl, Lines: 19, Source file: GromovWassersteinLearning.py
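For the cosine branch, the pairwise cost reduces to 1 − exp(−(1 − cos(e₁, e₂))); for the Euclidean branch, to 1 − exp(−‖e₁ − e₂‖² / dim), since embs_diag1 + torch.t(embs_diag2) − 2·embs expands the squared distance. In both cases torch.t supplies the transposes needed to broadcast over all (batch_size1, batch_size2) pairs.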


Note: The torch.t examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Refer to each project's License before distributing or using the code. Do not repost without permission.