

Python bottleneck.argpartition Method Code Examples

This article collects typical usage examples of the bottleneck.argpartition method in Python. If you have been wondering how exactly to use bottleneck.argpartition, or what it looks like in real code, the hand-picked examples below should help. You can also explore further usage examples from the bottleneck package, where this method lives.


The following shows 13 code examples of the bottleneck.argpartition method, ordered by popularity by default.
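
Before the examples, here is a minimal orientation sketch (not taken from any of the projects below) of the idiom most of them rely on: bn.argpartition(a, kth) places the kth-smallest value in its sorted position in linear time, so slicing the last k indices yields the k largest values without a full sort.

import numpy as np
import bottleneck as bn

scores = np.array([0.1, 0.9, 0.3, 0.7, 0.5])
k = 2

# indices of the k largest scores (order within the top-k is not guaranteed)
top_k_idx = bn.argpartition(scores, scores.size - k)[-k:]
print(np.sort(top_k_idx))                 # [1 3] -> scores 0.9 and 0.7

# same result via a full O(n log n) sort
print(np.sort(np.argsort(scores)[-k:]))   # [1 3]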

Example 1: evaluate_emb

# Required import: import bottleneck [as alias]
# Or: from bottleneck import argpartition [as alias]
def evaluate_emb(emb, labels):
    """Evaluate embeddings based on Recall@k."""
    d_mat = get_distance_matrix(emb)
    d_mat = d_mat.asnumpy()
    labels = labels.asnumpy()

    names = []
    accs = []
    for k in [1, 2, 4, 8, 16]:
        names.append('Recall@%d' % k)
        correct, cnt = 0.0, 0.0
        for i in range(emb.shape[0]):
            d_mat[i, i] = 1e10
            nns = argpartition(d_mat[i], k)[:k]
            if any(labels[i] == labels[nn] for nn in nns):
                correct += 1
            cnt += 1
        accs.append(correct/cnt)
    return names, accs 
Author: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 21, Source: train.py
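
The example above depends on MXNet-specific helpers (get_distance_matrix and .asnumpy()). The sketch below is a hypothetical pure-NumPy version of the same Recall@k idea, with the pairwise distance matrix built inline, so the role of argpartition (picking the k nearest neighbours without sorting the whole row) is easier to see in isolation.

import numpy as np
import bottleneck as bn

def recall_at_k(emb, labels, k):
    # pairwise squared Euclidean distances
    d_mat = ((emb[:, None, :] - emb[None, :, :]) ** 2).sum(axis=-1)
    np.fill_diagonal(d_mat, np.inf)              # never count a point as its own neighbour
    hits = 0
    for i in range(emb.shape[0]):
        nns = bn.argpartition(d_mat[i], k)[:k]   # k nearest neighbours, unsorted
        hits += any(labels[i] == labels[nn] for nn in nns)
    return hits / emb.shape[0]

emb = np.random.randn(100, 16)
labels = np.random.randint(0, 5, size=100)
print(recall_at_k(emb, labels, k=4))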

Example 2: ndcg

# Required import: import bottleneck [as alias]
# Or: from bottleneck import argpartition [as alias]
def ndcg(X_pred, heldout_batch, k=100):
    '''
    normalized discounted cumulative gain@k for binary relevance
    ASSUMPTIONS: all the 0's in heldout_data indicate 0 relevance
    '''
    batch_users = X_pred.shape[0]
    idx_topk_part = bn.argpartition(-X_pred, k, axis=1)
    topk_part = X_pred[np.arange(batch_users)[:, np.newaxis],
                       idx_topk_part[:, :k]]
    idx_part = np.argsort(-topk_part, axis=1)
    # X_pred[np.arange(batch_users)[:, np.newaxis], idx_topk] is the sorted
    # topk predicted score
    idx_topk = idx_topk_part[np.arange(batch_users)[:, np.newaxis], idx_part]
    # build the discount template
    tp = 1. / np.log2(np.arange(2, k + 2))

    DCG = (heldout_batch[np.arange(batch_users)[:, np.newaxis],
                         idx_topk].toarray() * tp).sum(axis=1)
    IDCG = np.array([(tp[:min(n, k)]).sum()
                     for n in heldout_batch.getnnz(axis=1)])
    return DCG / IDCG 
Author: ilya-shenbin, Project: RecVAE, Lines: 23, Source: utils.py
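
A minimal way to exercise ndcg, assuming the function above is in scope together with the numpy (np), bottleneck (bn), and scipy.sparse imports of its source file: X_pred is a dense users x items score matrix, while heldout_batch is a sparse binary matrix of held-out interactions, which is why the function calls .toarray() and .getnnz(). The data here is synthetic.

import numpy as np
import scipy.sparse as sparse

rng = np.random.default_rng(0)
n_users, n_items = 8, 500

X_pred = rng.standard_normal((n_users, n_items))           # predicted scores
heldout = sparse.csr_matrix(
    (rng.random((n_users, n_items)) < 0.05).astype(np.float64))   # binary relevance

print(ndcg(X_pred, heldout, k=100))   # one NDCG@100 value per user, each in [0, 1]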

Example 3: eval_apk

# Required import: import bottleneck [as alias]
# Or: from bottleneck import argpartition [as alias]
def eval_apk(true_scores, pred_scores, topk):
    if not isinstance(pred_scores, np.ndarray):
        pred_scores = np.array(pred_scores)
    if not isinstance(true_scores, np.ndarray):
        true_scores = np.array(true_scores)
    idx = bottleneck.argpartition(-pred_scores, topk)[:topk]  # indices of the top-k largest predicted scores
    noise = np.random.random(topk)  # random tie-breaker for items with equal scores
    rec = sorted(zip(pred_scores[idx], noise, true_scores[idx]), reverse=True)
    nhits = 0.
    k = topk if topk >= 0 else len(rec)
    sumap = 0.0
    for i in range(len(rec)):
        if (rec[i][-1] != 0.):
            nhits += 1.0
            if i < k:
                sumap += nhits / (i+1.0)
            else:
                break
    nhits = np.sum(true_scores)
    if nhits != 0:
        sumap /= min(nhits, k)
        return sumap
    else:
        return 0. 
Author: chentingpc, Project: NNCF, Lines: 26, Source: metrics_ranking.py
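
A small, hand-checkable call to eval_apk with hypothetical data (assuming the numpy and bottleneck imports of its source file): two items are relevant, the model ranks one of them first within the top 3, so the truncated average precision is 1/min(2, 3) = 0.5.

import numpy as np

true_scores = np.array([1., 0., 0., 1., 0.])
pred_scores = np.array([0.9, 0.2, 0.4, 0.1, 0.8])
print(eval_apk(true_scores, pred_scores, topk=3))   # 0.5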

Example 4: NDCG_binary_at_k_batch

# Required import: import bottleneck [as alias]
# Or: from bottleneck import argpartition [as alias]
def NDCG_binary_at_k_batch(X_pred, heldout_batch, k=100):
    '''
    normalized discounted cumulative gain@k for binary relevance
    ASSUMPTIONS: all the 0's in heldout_data indicate 0 relevance
    '''
    batch_users = X_pred.shape[0]
    idx_topk_part = bn.argpartition(-X_pred, k, axis=1)
    topk_part = X_pred[np.arange(batch_users)[:, np.newaxis],
                       idx_topk_part[:, :k]]
    idx_part = np.argsort(-topk_part, axis=1)
    # X_pred[np.arange(batch_users)[:, np.newaxis], idx_topk] is the sorted
    # topk predicted score
    idx_topk = idx_topk_part[np.arange(batch_users)[:, np.newaxis], idx_part]
    # build the discount template
    tp = 1. / np.log2(np.arange(2, k + 2))

    DCG = (heldout_batch[np.arange(batch_users)[:, np.newaxis],
                         idx_topk].toarray() * tp).sum(axis=1)
    IDCG = np.array([(tp[:min(n, k)]).sum()
                     for n in heldout_batch.getnnz(axis=1)])
    return DCG / IDCG 
Author: MaurizioFD, Project: RecSys2019_DeepLearning_Evaluation, Lines: 23, Source: split_train_validation_test_VAE_CF.py

Example 5: _find_constrained_bicluster

# Required import: import bottleneck [as alias]
# Or: from bottleneck import argpartition [as alias]
def _find_constrained_bicluster(self, data):
    """Find a k x l bicluster."""
    num_rows, num_cols = data.shape

    k = random.randint(1, math.ceil(num_rows / 2))
    l = random.randint(1, math.ceil(num_cols / 2))

    cols = np.random.choice(num_cols, size=l, replace=False)

    old_avg, avg = float('-inf'), 0.0

    while abs(avg - old_avg) > self.tol:
        old_avg = avg

        row_sums = np.sum(data[:, cols], axis=1)
        # usually faster than rows = np.argsort(row_sums)[-k:]
        rows = bn.argpartition(row_sums, num_rows - k)[-k:]

        col_sums = np.sum(data[rows, :], axis=0)
        # usually faster than cols = np.argsort(col_sums)[-l:]
        cols = bn.argpartition(col_sums, num_cols - l)[-l:]

        avg = np.mean(data[np.ix_(rows, cols)])

    return Bicluster(rows, cols)
Author: padilha, Project: biclustlib, Lines: 25, Source: las.py
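
The two argpartition calls above are the performance-sensitive part of the loop: they select the k rows (and l columns) with the largest sums without fully sorting them. A standalone sketch of the row-selection step, with hypothetical data, comparing it against the argsort variant mentioned in the comments:

import numpy as np
import bottleneck as bn

data = np.random.rand(1000, 50)
cols = np.arange(10)          # some current column subset
k = 100

row_sums = np.sum(data[:, cols], axis=1)

# k rows with the largest sums, via a linear-time partial partition ...
rows_fast = bn.argpartition(row_sums, row_sums.size - k)[-k:]
# ... versus a full O(n log n) sort
rows_sorted = np.argsort(row_sums)[-k:]

print(np.array_equal(np.sort(rows_fast), np.sort(rows_sorted)))   # True (values are distinct)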

Example 6: top_n_indexes

# Required import: import bottleneck [as alias]
# Or: from bottleneck import argpartition [as alias]
def top_n_indexes(arr, n):
    idx = bn.argpartition(arr, arr.size - n, axis=None)[-n:]
    width = arr.shape[1]
    return [divmod(i, width) for i in idx]
Author: ramakanth-pasunuru, Project: video_captioning_rl, Lines: 6, Source: seq2seq_atten.py
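
top_n_indexes flattens the array (axis=None), takes the indices of the n largest entries, and maps the flat indices back to (row, col) pairs with divmod. A quick check on a tiny matrix (hypothetical data, assuming numpy as np and bottleneck as bn are imported as in the source file):

import numpy as np

arr = np.array([[1, 9, 3],
                [7, 2, 8]])
print(top_n_indexes(arr, 2))   # positions of 9 and 8: (0, 1) and (1, 2), in unspecified order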

Example 7: top_n_indexes

# Required import: import bottleneck [as alias]
# Or: from bottleneck import argpartition [as alias]
def top_n_indexes(arr, n):
    idx = bn.argpartition(arr, arr.size - n, axis=None)[-n:]
    width = arr.shape[1]
    return [divmod(i, width) for i in idx] 
Author: HaojiHu, Project: Sets2Sets, Lines: 6, Source: Sets2Sets.py

Example 8: evaluate_emb

# Required import: import bottleneck [as alias]
# Or: from bottleneck import argpartition [as alias]
def evaluate_emb(emb, labels):
    """Evaluate embeddings based on Recall@k."""
    d_mat = get_distance_matrix(emb)
    d_mat = d_mat.asnumpy()
    labels = labels.asnumpy()

    names = []
    accs = []
    for k in [1, 2, 4, 8, 16]:
        names.append('Recall@%d' % k)
        correct, cnt = 0.0, 0.0
        for i in range(emb.shape[0]):
            d_mat[i, i] = 1e10
            nns = argpartition(d_mat[i], k)[:k]
            if any(labels[i] == labels[nn] for nn in nns):
                correct += 1
            cnt += 1
        accs.append(correct/cnt)
    return names, accs

#def validate(val_loader, model, criterion, args):
#    outputs = []
#    labels = []
#
#    model.eval()
#    
#    with torch.no_grad():
#        end = time.time()
#        for i, (input, target) in enumerate(val_loader):
#            outputs += model(input)[-1].cpu().tolist()
#            labels += target.cpu().tolist()
# 
Author: suruoxi, Project: DistanceWeightedSampling, Lines: 34, Source: train.py

Example 9: recall

# Required import: import bottleneck [as alias]
# Or: from bottleneck import argpartition [as alias]
def recall(X_pred, heldout_batch, k=100):
    batch_users = X_pred.shape[0]

    idx = bn.argpartition(-X_pred, k, axis=1)
    X_pred_binary = np.zeros_like(X_pred, dtype=bool)
    X_pred_binary[np.arange(batch_users)[:, np.newaxis], idx[:, :k]] = True

    X_true_binary = (heldout_batch > 0).toarray()
    tmp = (np.logical_and(X_true_binary, X_pred_binary).sum(axis=1)).astype(
        np.float32)
    recall = tmp / np.minimum(k, X_true_binary.sum(axis=1))
    return recall 
Author: ilya-shenbin, Project: RecVAE, Lines: 14, Source: utils.py
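
Like ndcg, recall expects a dense score matrix plus a sparse held-out matrix and returns one Recall@k value per user; the denominator is capped at k so users with more than k held-out items can still reach 1.0. A hypothetical call, assuming the function above and its numpy/bottleneck/scipy imports are in scope:

import numpy as np
import scipy.sparse as sparse

rng = np.random.default_rng(1)
X_pred = rng.standard_normal((4, 300))
heldout = sparse.csr_matrix((rng.random((4, 300)) < 0.1).astype(np.float64))

print(recall(X_pred, heldout, k=100))   # array of 4 values, each in [0, 1]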

Example 10: eval_multiple

# Required import: import bottleneck [as alias]
# Or: from bottleneck import argpartition [as alias]
def eval_multiple(true_scores, pred_scores, topk):
    if not isinstance(pred_scores, np.ndarray):
        pred_scores = np.array(pred_scores)
    if not isinstance(true_scores, np.ndarray):
        true_scores = np.array(true_scores)
    idx = bottleneck.argpartition(-pred_scores, topk)[:topk]  # indices of the top-k largest predicted scores
    noise = np.random.random(topk)  # random tie-breaker for items with equal scores
    rec = sorted(zip(pred_scores[idx], noise, true_scores[idx]), reverse=True)
    nhits = 0.
    nhits_topk = 0.
    k = topk if topk >= 0 else len(rec)
    sumap = 0.0
    for i in range(len(rec)):
        if rec[i][-1] != 0.:
            nhits += 1.0
            if i < k:
                nhits_topk += 1
                sumap += nhits / (i+1.0)
    nhits = np.sum(true_scores)
    if nhits != 0:
        sumap /= min(nhits, k)
        map_at_k = sumap
        recall_at_k = nhits_topk / nhits
        precision_at_k = nhits_topk / k
    else:
        map_at_k = 0.
        recall_at_k = 0.
        precision_at_k = 0.

    return map_at_k, recall_at_k, precision_at_k 
Author: chentingpc, Project: NNCF, Lines: 32, Source: metrics_ranking.py

Example 11: Recall_at_k_batch

# Required import: import bottleneck [as alias]
# Or: from bottleneck import argpartition [as alias]
def Recall_at_k_batch(X_pred, heldout_batch, k=100):
    batch_users = X_pred.shape[0]

    idx = bn.argpartition(-X_pred, k, axis=1)
    X_pred_binary = np.zeros_like(X_pred, dtype=bool)
    X_pred_binary[np.arange(batch_users)[:, np.newaxis], idx[:, :k]] = True

    X_true_binary = (heldout_batch > 0).toarray()
    tmp = (np.logical_and(X_true_binary, X_pred_binary).sum(axis=1)).astype(
        np.float32)
    recall = tmp / np.minimum(k, X_true_binary.sum(axis=1))
    return recall
Author: MaurizioFD, Project: RecSys2019_DeepLearning_Evaluation, Lines: 40, Source: split_train_validation_test_VAE_CF.py

Example 12: top_n_indexes

# Required import: import bottleneck [as alias]
# Or: from bottleneck import argpartition [as alias]
def top_n_indexes(arr, n):
    # https://gist.github.com/tomerfiliba/3698403
    try:
        import bottleneck
        idx = bottleneck.argpartition(arr, arr.size - n, axis=None)[-n:]
    except ImportError:
        # fall back to NumPy when bottleneck is not installed
        idx = np.argpartition(arr, arr.size - n, axis=None)[-n:]
    width = arr.shape[1]
    return [divmod(i, width) for i in idx]
Author: pelednoam, Project: mmvt, Lines: 11, Source: utils.py

Example 13: __random_deletion

# Required import: import bottleneck [as alias]
# Or: from bottleneck import argpartition [as alias]
def __random_deletion(self, data, bool_array, msr_array, choice):
    indices = np.where(bool_array)[0]
    n = int(math.ceil(len(msr_array) * self.alpha))
    max_msr_indices = bn.argpartition(msr_array, len(msr_array) - n)[-n:]
    i = indices[np.random.choice(max_msr_indices)]
    bool_array[i] = False
Author: padilha, Project: biclustlib, Lines: 8, Source: cca.py
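
Here argpartition finds the n rows (or columns) with the largest mean squared residue, and one of them is then removed at random, which is the "random" part of the multiple node deletion phase in the CCA algorithm. A standalone sketch of that selection step with hypothetical values:

import numpy as np
import bottleneck as bn

msr_array = np.array([0.2, 1.5, 0.7, 2.1, 0.3, 1.8])
alpha = 0.5
n = int(np.ceil(len(msr_array) * alpha))     # candidate pool: the 3 worst rows

max_msr_indices = bn.argpartition(msr_array, len(msr_array) - n)[-n:]
print(np.sort(max_msr_indices))              # [1 3 5] -> the three largest residues

victim = np.random.choice(max_msr_indices)   # delete one of them at random
print(victim)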


Note: The bottleneck.argpartition examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please follow the corresponding project's license when distributing or using the code, and do not reproduce this article without permission.