

Python cuda.LongTensor Method Code Examples

This article collects typical usage examples of the Python method torch.cuda.LongTensor. If you are wondering how cuda.LongTensor is used in practice, what it is for, or where to find real-world examples, the curated code samples below may help. You can also browse further usage examples from the torch.cuda module.


The sections below present 8 code examples of the cuda.LongTensor method, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
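Before the examples, here is a minimal sketch (not taken from any of the projects below) of what torch.cuda.LongTensor is: the legacy constructor for 64-bit integer tensors allocated on the GPU, typically used for index tensors. Current PyTorch releases prefer torch.tensor(..., dtype=torch.long, device="cuda"), but the old constructor still works.

import torch
from torch.cuda import LongTensor

if torch.cuda.is_available():
    # Legacy constructor: a 64-bit integer tensor allocated on the GPU.
    idx = LongTensor([0, 2, 4])
    # Equivalent modern spelling, preferred in current PyTorch releases.
    idx_modern = torch.tensor([0, 2, 4], dtype=torch.long, device="cuda")
    # Typical use: integer indices for index_select / embedding lookups.
    data = torch.randn(6, 3, device="cuda")
    rows = torch.index_select(data, dim=0, index=idx)
    print(rows.shape)  # torch.Size([3, 3])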

Example 1: interpolation

# Required import: from torch import cuda [as alias]
# Or: from torch.cuda import LongTensor [as alias]
def interpolation(self, uvm, image, index):
    # Channels 3*index and 3*index+1 of uvm hold the x/y displacement maps.
    u, v = torch.index_select(uvm, dim=1, index=LongTensor([0+3*index,
                              1+3*index])).permute(0, 2, 3, 1).split(1, dim=3)
    row_num = FloatTensor()
    col_num = FloatTensor()
    im_size = image.shape[2:4]
    torch.arange(im_size[0], out=row_num)
    torch.arange(im_size[1], out=col_num)
    row_num = row_num.view(1, im_size[0], 1, 1)
    col_num = col_num.view(1, 1, im_size[1], 1)
    # Normalize absolute pixel coordinates to the [-1, 1] range expected by grid_sample.
    x_norm = 2*(u+col_num)/(im_size[1]-1)-1
    y_norm = 2*(v+row_num)/(im_size[0]-1)-1
    xy_norm = torch.clamp(torch.cat((x_norm, y_norm), dim=3), -1, 1)
    interp = nn.functional.grid_sample(image, xy_norm)
    # Channel 3*index+2 (shifted by 0.5) is returned as a per-pixel weight map.
    w = torch.index_select(uvm, dim=1, index=LongTensor([3*index+2]))+0.5
    return interp, w, u, v 
Author: MortenHannemose, Project: pytorch-vfi-cft, Lines: 18, Source: network.py
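For context, the sketch below (illustrative names only, not from the repository) isolates the warping idea used above: absolute pixel coordinates plus a displacement are rescaled to [-1, 1] and passed to grid_sample. Dividing by (size - 1) corresponds to grid_sample's align_corners=True convention.

import torch
import torch.nn.functional as F

def warp_with_flow(image, flow):
    # Backward-warp `image` (N, C, H, W) by a per-pixel flow (N, 2, H, W).
    # Illustrative sketch only; the names are not taken from the repository.
    _, _, h, w = image.shape
    ys = torch.arange(h, dtype=image.dtype, device=image.device).view(1, h, 1)
    xs = torch.arange(w, dtype=image.dtype, device=image.device).view(1, 1, w)
    x = xs + flow[:, 0]              # source x coordinate, (N, H, W)
    y = ys + flow[:, 1]              # source y coordinate, (N, H, W)
    x_norm = 2 * x / (w - 1) - 1     # rescale to [-1, 1]
    y_norm = 2 * y / (h - 1) - 1
    grid = torch.stack((x_norm, y_norm), dim=3).clamp(-1, 1)
    return F.grid_sample(image, grid, align_corners=True)

# Usage: an all-zero flow reproduces the input image.
img = torch.randn(1, 3, 8, 8)
same = warp_with_flow(img, torch.zeros(1, 2, 8, 8))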

Example 2: knn_indices_func_cpu

# Required import: from torch import cuda [as alias]
# Or: from torch.cuda import LongTensor [as alias]
def knn_indices_func_cpu(rep_pts : FloatTensor,  # (N, pts, dim)
                         pts : FloatTensor,      # (N, x, dim)
                         K : int, D : int
                        ) -> LongTensor:         # (N, pts, K)
    """
    CPU-based Indexing function based on K-Nearest Neighbors search.
    :param rep_pts: Representative points.
    :param pts: Point cloud to get indices from.
    :param K: Number of nearest neighbors to collect.
    :param D: "Spread" of neighboring points.
    :return: Array of indices, P_idx, into pts such that pts[n][P_idx[n],:]
    is the set of k-nearest neighbors for the representative points in pts[n].
    """
    rep_pts = rep_pts.data.numpy()
    pts = pts.data.numpy()
    region_idx = []

    for n, p in enumerate(rep_pts):
        P_particular = pts[n]
        nbrs = NearestNeighbors(n_neighbors = D*K + 1, algorithm = "ball_tree").fit(P_particular)
        indices = nbrs.kneighbors(p)[1]
        region_idx.append(indices[:,1::D])

    region_idx = torch.from_numpy(np.stack(region_idx, axis = 0))
    return region_idx 
Author: hxdengBerkeley, Project: PointCNN.Pytorch, Lines: 27, Source: util_funcs.py
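A minimal usage sketch for the function above (the shapes are invented for illustration, and the imports it needs — torch, numpy and sklearn.neighbors.NearestNeighbors — are assumed to be in place, as the header comments of the snippet indicate):

import torch

pts = torch.rand(2, 128, 3)   # 2 clouds, 128 points each, 3-D coordinates
rep_pts = pts[:, :16, :]      # 16 representative points per cloud

idx = knn_indices_func_cpu(rep_pts, pts, K=8, D=2)
print(idx.shape)              # torch.Size([2, 16, 8]): 8 dilated neighbors per representative point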

Example 3: knn_indices_func_cpu

# Required import: from torch import cuda [as alias]
# Or: from torch.cuda import LongTensor [as alias]
def knn_indices_func_cpu(rep_pts : FloatTensor,  # (N, pts, dim)
                         pts : FloatTensor,      # (N, x, dim)
                         K : int, D : int
                        ) -> LongTensor:         # (N, pts, K)
    """
    CPU-based Indexing function based on K-Nearest Neighbors search.
    :param rep_pts: Representative points.
    :param pts: Point cloud to get indices from.
    :param K: Number of nearest neighbors to collect.
    :param D: "Spread" of neighboring points.
    :return: Array of indices, P_idx, into pts such that pts[n][P_idx[n],:]
    is the set of k-nearest neighbors for the representative points in pts[n].
    """
    if rep_pts.is_cuda:
        rep_pts = rep_pts.cpu()
    if pts.is_cuda:
        pts = pts.cpu()
    rep_pts = rep_pts.data.numpy()
    pts = pts.data.numpy()

    region_idx = []

    for n, p in enumerate(rep_pts):
        P_particular = pts[n]
        nbrs = NearestNeighbors(n_neighbors = D*K + 1, algorithm = "auto").fit(P_particular)
        indices = nbrs.kneighbors(p)[1]
        region_idx.append(indices[:,1::D])

    region_idx = torch.from_numpy(np.stack(region_idx, axis = 0))
    return region_idx 
Author: agarret7, Project: PointCNN, Lines: 32, Source: util_funcs.py
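This version differs from Example 2 only in the explicit .cpu() guards and the "auto" algorithm choice, so it can also be called with CUDA tensors; the usage sketch shown after Example 2 applies unchanged.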

Example 4: knn_indices_func_approx

# Required import: from torch import cuda [as alias]
# Or: from torch.cuda import LongTensor [as alias]
def knn_indices_func_approx(rep_pts : FloatTensor,  # (N, pts, dim)
                            pts : FloatTensor,      # (N, x, dim)
                            K : int, D : int
                           ) -> LongTensor:         # (N, pts, K)
    """
    Approximate CPU-based Indexing function based on K-Nearest Neighbors search.
    :param rep_pts: Representative points.
    :param pts: Point cloud to get indices from.
    :param K: Number of nearest neighbors to collect.
    :param D: "Spread" of neighboring points.
    :return: Array of indices, P_idx, into pts such that pts[n][P_idx[n],:]
    is the set of k-nearest neighbors for the representative points in pts[n].
    """
    if rep_pts.is_cuda:
        rep_pts = rep_pts.cpu()
    if pts.is_cuda:
        pts = pts.cpu()
    rep_pts = rep_pts.data.numpy()
    pts = pts.data.numpy()

    region_idx = []

    for n, p in enumerate(rep_pts):
        P_particular = pts[n]
        lshf = LSHForest(n_estimators = 20, n_candidates = 100, n_neighbors = D*K + 1)
        lshf.fit(P_particular)
        indices = lshf.kneighbors(p, return_distance = False)
        region_idx.append(indices[:,1::D])

    region_idx = torch.from_numpy(np.stack(region_idx, axis = 0))
    return region_idx 
Author: agarret7, Project: PointCNN, Lines: 30, Source: util_funcs.py
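Note that sklearn.neighbors.LSHForest was deprecated in scikit-learn 0.19 and removed in 0.21, so this approximate variant only runs against older scikit-learn releases; on current versions, the exact CPU function from Example 3 is the drop-in alternative.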

Example 5: knn_indices_func_gpu

# Required import: from torch import cuda [as alias]
# Or: from torch.cuda import LongTensor [as alias]
def knn_indices_func_gpu(rep_pts : cuda.FloatTensor,  # (N, pts, dim)
                         pts : cuda.FloatTensor,      # (N, x, dim)
                         K : int, D : int
                        ) -> cuda.LongTensor:         # (N, pts, K)
    """
    GPU-based Indexing function based on K-Nearest Neighbors search.
    Very memory intensive, and thus suboptimal for large numbers of points.
    :param rep_pts: Representative points.
    :param pts: Point cloud to get indices from.
    :param K: Number of nearest neighbors to collect.
    :param D: "Spread" of neighboring points.
    :return: Array of indices, P_idx, into pts such that pts[n][P_idx[n],:]
    is the set of k-nearest neighbors for the representative points in pts[n].
    """
    region_idx = []

    for n, qry in enumerate(rep_pts):
        qry = qry.half()
        ref = pts[n].half()
        r_A = torch.sum(qry * qry, dim = 1, keepdim = True)
        r_B = torch.sum(ref * ref, dim = 1, keepdim = True)
        dist2 = r_A - 2 * torch.matmul(qry, torch.t(ref)) + torch.t(r_B)
        _, inds = torch.topk(dist2, D*K + 1, dim = 1, largest = False)
        region_idx.append(inds[:,1::D])

    region_idx = torch.stack(region_idx, dim = 0)

    return region_idx 
Author: agarret7, Project: PointCNN, Lines: 30, Source: util_funcs.py
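A minimal usage sketch for the GPU variant (requires a CUDA device; shapes are illustrative). Because the distances are computed in half precision, the neighbor ordering can differ slightly from the CPU versions on nearly equidistant points.

import torch

if torch.cuda.is_available():
    pts = torch.rand(2, 128, 3, device="cuda")
    rep_pts = pts[:, :16, :]
    idx = knn_indices_func_gpu(rep_pts, pts, K=8, D=2)
    print(idx.shape, idx.dtype)  # torch.Size([2, 16, 8]) torch.int64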

Example 6: knn_indices_func_gpu

# Required import: from torch import cuda [as alias]
# Or: from torch.cuda import LongTensor [as alias]
def knn_indices_func_gpu(rep_pts : cuda.FloatTensor,  # (N, pts, dim)
                         pts : cuda.FloatTensor,      # (N, x, dim)
                         K : int, D : int
                        ) -> cuda.LongTensor:         # (N, pts, K)
    """
    GPU-based Indexing function based on K-Nearest Neighbors search.
    Very memory intensive, and thus suboptimal for large numbers of points.
    :param rep_pts: Representative points.
    :param pts: Point cloud to get indices from.
    :param K: Number of nearest neighbors to collect.
    :param D: "Spread" of neighboring points.
    :return: Array of indices, P_idx, into pts such that pts[n][P_idx[n],:]
    is the set of k-nearest neighbors for the representative points in pts[n].
    """
    region_idx = []

    for n, qry in enumerate(rep_pts):
        ref = pts[n]
        num_ref, dim = ref.size()
        num_qry, _ = qry.size()
        # Materialize all pairwise differences, then reduce to squared distances.
        mref = ref.expand(num_qry, num_ref, dim)
        mqry = qry.expand(num_ref, num_qry, dim).transpose(0, 1)
        dist2 = torch.sum((mqry - mref)**2, 2).squeeze()
        # Keep K neighbors with dilation D, skipping the point itself at index 0.
        _, inds = torch.topk(dist2, K*D + 1, dim = 1, largest = False)
        region_idx.append(inds[:, 1::D])

    region_idx = torch.stack(region_idx, dim = 0)
    return region_idx 
Author: hxdengBerkeley, Project: PointCNN.Pytorch, Lines: 30, Source: util_funcs.py
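Both GPU variants compute all pairwise squared distances per batch element; the difference is that Example 5 uses the ||a||^2 - 2*a.b + ||b||^2 expansion in half precision, while this version allocates an (m, n, dim) intermediate through the (mqry - mref)**2 subtraction, so it exhausts GPU memory sooner on large point clouds.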

Example 7: evaluate_model

# Required import: from torch import cuda [as alias]
# Or: from torch.cuda import LongTensor [as alias]
def evaluate_model(train_matrix, test_set, item_word_seq, item_neighbor_index, GAT, batch_size):
    num_items, num_users = train_matrix.shape
    num_batches = int(num_items / batch_size) + 1
    item_indexes = np.arange(num_items)
    pred_matrix = None

    for batchID in range(num_batches):
        start = batchID * batch_size
        end = start + batch_size

        if batchID == num_batches - 1:
            if start < num_items:
                end = num_items
            else:
                break

        batch_item_index = item_indexes[start:end]

        # get mini-batch data
        batch_x = train_matrix[batch_item_index].toarray()
        batch_word_seq = item_word_seq[batch_item_index]
        batch_neighbor_index = item_neighbor_index[batch_item_index]

        batch_item_index = Variable(torch.from_numpy(batch_item_index).type(T.LongTensor), requires_grad=False)
        batch_word_seq = Variable(torch.from_numpy(batch_word_seq).type(T.LongTensor), requires_grad=False)
        batch_neighbor_index = Variable(torch.from_numpy(batch_neighbor_index).type(T.LongTensor), requires_grad=False)
        batch_x = Variable(torch.from_numpy(batch_x.astype(np.float32)).type(T.FloatTensor), requires_grad=False)

        # Forward pass: Compute predicted y by passing x to the model
        rating_pred = GAT(batch_item_index, batch_x, batch_word_seq, batch_neighbor_index)
        rating_pred = rating_pred.cpu().data.numpy().copy()
        if batchID == 0:
            pred_matrix = rating_pred.copy()
        else:
            pred_matrix = np.append(pred_matrix, rating_pred, axis=0)

    topk = 50
    pred_matrix[train_matrix.nonzero()] = 0
    pred_matrix = pred_matrix.transpose()
    # reference: https://stackoverflow.com/a/23734295, https://stackoverflow.com/a/20104162
    ind = np.argpartition(pred_matrix, -topk)
    ind = ind[:, -topk:]
    arr_ind = pred_matrix[np.arange(len(pred_matrix))[:, None], ind]
    arr_ind_argsort = np.argsort(arr_ind)[np.arange(len(pred_matrix)), ::-1]
    pred_list = ind[np.arange(len(pred_matrix))[:, None], arr_ind_argsort]

    precision, recall, MAP, ndcg = [], [], [], []
    for k in [5, 10, 15, 20, 30, 40, 50]:
        precision.append(precision_at_k(test_set, pred_list, k))
        recall.append(recall_at_k(test_set, pred_list, k))
        MAP.append(mapk(test_set, pred_list, k))
        ndcg.append(ndcg_k(test_set, pred_list, k))

    return precision, recall, MAP, ndcg 
Author: allenjack, Project: GATE, Lines: 56, Source: run.py
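In this snippet, T is presumably the alias under which torch.cuda is imported (see the required-import comment above), so .type(T.LongTensor) moves each NumPy-backed index array onto the GPU as 64-bit integers; Variable is a legacy wrapper from pre-0.4 PyTorch and in current releases simply yields a plain tensor. A minimal sketch of the modern equivalent of that conversion pattern (device choice is an assumption for illustration):

import numpy as np
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
batch_item_index = np.arange(10)
# Modern equivalent of Variable(torch.from_numpy(a).type(T.LongTensor), requires_grad=False)
batch_item_index = torch.as_tensor(batch_item_index, dtype=torch.long, device=device)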

Example 8: forward

# Required import: from torch import cuda [as alias]
# Or: from torch.cuda import LongTensor [as alias]
def forward(self, batch_item_index, place_correlation):
        """
        The forward pass of the autoencoder.
        :param batch_item_index: a list of arrays; each array stores the place IDs a user has visited
        :param place_correlation: the pairwise POI correlation matrix
        :return: the predicted ratings
        """
        item_vector = self.linear1.weight[:, T.LongTensor(batch_item_index[0].astype(np.int32))]
        # Compute the neighbor inner products
        inner_product = item_vector.t().mm(self.linear4.weight.t())
        item_corr = Variable(
            torch.from_numpy(place_correlation[batch_item_index[0]].toarray()).type(T.FloatTensor))
        inner_product = inner_product * item_corr
        neighbor_product = inner_product.sum(dim=0).unsqueeze(0)

        # Compute the self attention score
        score = F.tanh(self.attention_matrix1.mm(item_vector))
        score = F.softmax(score, dim=1)
        embedding_matrix = score.mm(item_vector.t())
        linear_z = self.self_attention(embedding_matrix.t()).t()

        # print score
        for i in range(1, len(batch_item_index)):
            item_vector = self.linear1.weight[:, T.LongTensor(batch_item_index[i].astype(np.int32))]
            # Compute the neighbor inner products
            inner_product = item_vector.t().mm(self.linear4.weight.t())
            item_corr = Variable(
                torch.from_numpy(place_correlation[batch_item_index[i]].toarray()).type(T.FloatTensor))
            inner_product = inner_product * item_corr
            inner_product = inner_product.sum(dim=0).unsqueeze(0)
            neighbor_product = torch.cat((neighbor_product, inner_product), 0)

            # Compute the self attention score
            score = F.tanh(self.attention_matrix1.mm(item_vector))
            score = F.softmax(score, dim=1)
            embedding_matrix = score.mm(item_vector.t())
            tmp_z = self.self_attention(embedding_matrix.t()).t()
            linear_z = torch.cat((linear_z, tmp_z), 0)

        z = F.tanh(linear_z)
        z = F.dropout(z, training=self.training, p=self.dropout_rate)
        z = F.tanh(self.linear2(z))
        z = F.dropout(z, training=self.training, p=self.dropout_rate)
        d_z = F.tanh(self.linear3(z))
        d_z = F.dropout(d_z, training=self.training, p=self.dropout_rate)
        y_pred = F.sigmoid(self.linear4(d_z) + neighbor_product)

        return y_pred 
Author: allenjack, Project: SAE-NAD, Lines: 50, Source: model.py
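A small portability note on this example: F.tanh and F.sigmoid are deprecated aliases, and torch.tanh / torch.sigmoid are the preferred spellings in current PyTorch; T.LongTensor and T.FloatTensor again presumably refer to the CUDA tensor types named in the required-import comment above.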


Note: The torch.cuda.LongTensor examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and redistribution and use must follow the corresponding project's license. Please do not reproduce without permission.