

Python numpy.average Method Code Examples

This article collects typical usage examples of the numpy.average method in Python. If you are wondering what numpy.average does, how to call it, or what it looks like in real projects, the hand-picked examples below should help. You can also explore further usage examples from the numpy package that this method belongs to.


The following presents 15 code examples of numpy.average, sorted by popularity by default. Upvoting the examples you like or find useful helps the system recommend better Python code examples.
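
Before the project examples, here is a minimal standalone sketch (not taken from any of the repositories quoted below) illustrating the basic numpy.average signature, including the weights, axis, and returned parameters:

import numpy as np

data = np.array([[1.0, 2.0],
                 [3.0, 4.0]])

# Plain arithmetic mean over all elements
print(np.average(data))                            # 2.5

# Column-wise average (reduce along axis 0)
print(np.average(data, axis=0))                    # [2. 3.]

# Weighted average; weights must match the length of the reduced axis
weights = np.array([0.25, 0.75])
print(np.average(data, axis=0, weights=weights))   # [2.5 3.5]

# returned=True additionally yields the sum of the weights
avg, weight_sum = np.average(data, axis=0, weights=weights, returned=True)

Unlike numpy.mean, numpy.average accepts a weights argument, which is why several of the examples below reach for it even when no weights are passed.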

Example 1: reduce_fit

# Required module: import numpy [as alias]
# Or: from numpy import average [as alias]
def reduce_fit(interface, state, label, inp):
    import numpy as np
    out = interface.output(0)
    out.add("X_names", state["X_names"])

    forest = []
    group_fillins = []
    for i, (k, value) in enumerate(inp):
        if k == "tree":
            forest.append(value)
        elif len(value) > 0:
            group_fillins.append(value)
    out.add("forest", forest)

    fill_in_values = []
    if len(group_fillins) > 0:
        for i, type in enumerate(state["X_meta"]):
            if type == "c":
                fill_in_values.append(np.average([sample[i] for sample in group_fillins]))
            else:
                fill_in_values.append(np.bincount([sample[i] for sample in group_fillins]).argmax())
    out.add("fill_in_values", fill_in_values) 
Author: romanorac, Project: discomll, Lines: 24, Source file: distributed_random_forest.py

Example 2: compute_mean_ci

# Required module: import numpy [as alias]
# Or: from numpy import average [as alias]
# This example also uses the standard-library math module (import math)
def compute_mean_ci(interp_sens, confidence = 0.95):
    sens_mean = np.zeros((interp_sens.shape[1]),dtype = 'float32')
    sens_lb   = np.zeros((interp_sens.shape[1]),dtype = 'float32')
    sens_up   = np.zeros((interp_sens.shape[1]),dtype = 'float32')
    
    Pz = (1.0-confidence)/2.0
    print(interp_sens.shape)
    for i in range(interp_sens.shape[1]):
        # get sorted vector
        vec = interp_sens[:,i]
        vec.sort()

        sens_mean[i] = np.average(vec)
        sens_lb[i] = vec[int(math.floor(Pz*len(vec)))]
        sens_up[i] = vec[int(math.floor((1.0-Pz)*len(vec)))]

    return sens_mean,sens_lb,sens_up 
Author: uci-cbcl, Project: DeepLung, Lines: 19, Source file: noduleCADEvaluationLUNA16.py

Example 3: ensemble_image

# Required module: import numpy [as alias]
# Or: from numpy import average [as alias]
def ensemble_image(files, dirs, ensembling_dir, strategy):
    for file in files:
        images = []
        for dir in dirs:
            file_path = os.path.join(dir, file)
            if os.path.exists(file_path):
                images.append(imread(file_path, mode='L'))
        images = np.array(images)

        if strategy == 'average':
            ensembled = average_strategy(images)
        elif strategy == 'hard_voting':
            ensembled = hard_voting(images)
        else:
            raise ValueError('Unknown ensembling strategy')
        imsave(os.path.join(ensembling_dir, file), ensembled) 
Author: killthekitten, Project: kaggle-carvana-2017, Lines: 18, Source file: ensemble_cpu.py

Example 4: pprint

# Required module: import numpy [as alias]
# Or: from numpy import average [as alias]
def pprint(self, name, window=None, prefix=None):
        str_losses = []
        for key, loss in self.losses.items():
            if loss is None:
                continue
            aver_loss = np.average(loss) if window is None else np.average(loss[-window:])
            if 'nll' in key:
                str_losses.append('{} PPL {:.3f}'.format(key, np.exp(aver_loss)))
            else:
                str_losses.append('{} {:.3f}'.format(key, aver_loss))


        if prefix:
            return '{}: {} {}'.format(prefix, name, ' '.join(str_losses))
        else:
            return '{} {}'.format(name, ' '.join(str_losses)) 
Author: ConvLab, Project: ConvLab, Lines: 18, Source file: main.py

Example 5: validate_rl

# Required module: import numpy [as alias]
# Or: from numpy import average [as alias]
def validate_rl(dialog_eval, ctx_gen, num_episode=200):
    print("Validate on training goals for {} episode".format(num_episode))
    reward_list = []
    agree_list = []
    sent_metric = UniquenessSentMetric()
    word_metric = UniquenessWordMetric()
    for _ in range(num_episode):
        ctxs = ctx_gen.sample()
        conv, agree, rewards = dialog_eval.run(ctxs)
        true_reward = rewards[0] if agree else 0
        reward_list.append(true_reward)
        agree_list.append(float(agree if agree is not None else 0.0))
        for turn in conv:
            if turn[0] == 'System':
                sent_metric.record(turn[1])
                word_metric.record(turn[1])
    results = {'sys_rew': np.average(reward_list),
               'avg_agree': np.average(agree_list),
               'sys_sent_unique': sent_metric.value(),
               'sys_unique': word_metric.value()}
    return results 
Author: ConvLab, Project: ConvLab, Lines: 23, Source file: main.py

Example 6: record_rl_task

# Required module: import numpy [as alias]
# Or: from numpy import average [as alias]
def record_rl_task(n_epsd, dialog, goal_gen, rl_f):
    conv_list = []
    reward_list = []
    sent_metric = UniquenessSentMetric()
    word_metric = UniquenessWordMetric()
    print("Begin RL testing")
    cnt = 0
    for g_key, goal in goal_gen.iter(1):
        cnt += 1
        conv, success = dialog.run(g_key, goal)
        true_reward = success
        reward_list.append(true_reward)
        conv_list.append(conv)
        for turn in conv:
            if turn[0] == 'System':
                sent_metric.record(turn[1])
                word_metric.record(turn[1])

    # json.dump(conv_list, text_f, indent=4)
    aver_reward = np.average(reward_list)
    unique_sent_num = sent_metric.value()
    unique_word_num = word_metric.value()
    rl_f.write('{}\t{}\t{}\t{}\n'.format(n_epsd, aver_reward, unique_sent_num, unique_word_num))
    rl_f.flush()
    print("End RL testing") 
Author: ConvLab, Project: ConvLab, Lines: 27, Source file: record.py

Example 7: _sim_tdcs_pair

# Required module: import numpy [as alias]
# Or: from numpy import average [as alias]
def _sim_tdcs_pair(mesh, cond, ref_electrode, el_surf, el_c, units, solver_options):
    logger.info('Simulating electrode pair {0} - {1}'.format(
        ref_electrode, el_surf))
    S = FEMSystem.tdcs(mesh, cond, [ref_electrode, el_surf], [0., 1.],
                       solver_options=solver_options)
    v = S.solve()
    v = mesh_io.NodeData(v, name='v', mesh=mesh)
    flux = np.array([
        _calc_flux_electrodes(v, cond,
                              [el_surf - 1000, el_surf - 600,
                               el_surf - 2000, el_surf - 1600],
                              units=units),
        _calc_flux_electrodes(v, cond,
                              [ref_electrode - 1000, ref_electrode - 600,
                               ref_electrode - 2000, ref_electrode - 1600],
                              units=units)])
    current = np.average(np.abs(flux))
    error = np.abs(np.abs(flux[0]) - np.abs(flux[1])) / current
    logger.info('Estimated current calibration error: {0:.1%}'.format(error))
    return el_c / current * v.value 
Author: simnibs, Project: simnibs, Lines: 22, Source file: fem.py

Example 8: _lp_variables

# Required module: import numpy [as alias]
# Or: from numpy import average [as alias]
def _lp_variables(l, target_mean, max_total_current, max_el_current):
        n = l.shape[1]
        if max_el_current is None and max_total_current is None:
            raise ValueError(
                'max_el_current and max_total_current cannot be simultaneously None')
        if max_total_current is not None:
            A_ub = [np.ones((1, 2 * n))]
            b_ub = [2 * max_total_current]
        else:
            A_ub = []
            b_ub = []
        #Constraint on target intensity
        l_ = np.hstack([l, -l])
        # the LP will maximize the average of all targets, and limit the electric field
        # at each individual target
        l_avg = np.average(l_, axis=0)
        A_ub = np.vstack(A_ub + [l_])
        b_ub = np.hstack(b_ub + [target_mean])
        A_eq = np.hstack([np.ones((1, n)), -np.ones((1, n))])
        b_eq = np.array([0.])
        bounds = (0, max_el_current)
        return l_avg, A_ub, b_ub, A_eq, b_eq, bounds 
Author: simnibs, Project: simnibs, Lines: 24, Source file: optimization_methods.py

Example 9: test_2_targets_field_component

# Required module: import numpy [as alias]
# Or: from numpy import average [as alias]
def test_2_targets_field_component(self, optimization_variables_avg):
        l, Q, A = optimization_variables_avg
        l2 = l[::-1]
        l = np.vstack([l ,l2])
        m = 2e-3
        m1 = 4e-3
        x = optimization_methods.optimize_field_component(l, max_el_current=m,
                                                          max_total_current=m1)

        l_avg = np.average(l, axis=0)
        x_sp = optimize_comp(l_avg, np.ones_like(l2), max_el_current=m, max_total_current=m1)

        assert np.linalg.norm(x, 1) <= 2 * m1 + 1e-4
        assert np.all(np.abs(x) <= m + 1e-6)
        assert np.isclose(l_avg.dot(x), l_avg.dot(x_sp),
                          rtol=1e-4, atol=1e-4)
        assert np.isclose(np.sum(x), 0) 
Author: simnibs, Project: simnibs, Lines: 19, Source file: test_optimization_methods.py

Example 10: generate_average_feature

# Required module: import numpy [as alias]
# Or: from numpy import average [as alias]
def generate_average_feature(self, labels):
        #extract feature/classifier
        u_feas, fcs = self.get_feature(self.u_data) #2048, 1024

        #images of the same cluster
        label_to_images = {}
        for idx, l in enumerate(labels):
            label_to_images[l] = label_to_images.get(l, []) + [idx]
            # label_to_images: maps each cluster label to the indices of its images;
            # u_data[label_to_images[key]] selects the images assigned to cluster `key`

        # used from u_data to re-arrange them to label index array
        sort_image_by_label = list(itertools.chain.from_iterable([label_to_images[key] for key in sorted(label_to_images.keys())]))
        # USAGE u_data[sort_image_by_label] then the data is sorted according to its class label
        #calculate average feature/classifier of a cluster
        feature_avg = np.zeros((len(label_to_images), len(u_feas[0])))
        fc_avg = np.zeros((len(label_to_images), len(fcs[0])))
        for l in label_to_images:
            feas = u_feas[label_to_images[l]]
            feature_avg[l] = np.mean(feas, axis=0)
            fc_avg[l] = np.mean(fcs[label_to_images[l]], axis=0)
        return u_feas, feature_avg, label_to_images, fc_avg   # [m 2048], [c 2018] [] [c 1024] 
Author: gddingcs, Project: Dispersion-based-Clustering, Lines: 23, Source file: bottom_up.py

Example 11: linkage_calculation

# Required module: import numpy [as alias]
# Or: from numpy import average [as alias]
def linkage_calculation(self, dist, labels, penalty): 
        cluster_num = len(self.label_to_images.keys())
        start_index = np.zeros(cluster_num, dtype=int)
        end_index = np.zeros(cluster_num, dtype=int)
        counts=0
        i=0
        for key in sorted(self.label_to_images.keys()):
            start_index[i] = counts
            end_index[i] = counts + len(self.label_to_images[key])
            counts = end_index[i]
            i=i+1
        dist=dist.numpy()
        linkages = np.zeros([cluster_num, cluster_num])
        for i in range(cluster_num):
            for j in range(i, cluster_num):
                linkage = dist[start_index[i]:end_index[i], start_index[j]:end_index[j]]
                linkages[i,j] = np.average(linkage)

        linkages = linkages.T + linkages - linkages * np.eye(cluster_num)
        intra = linkages.diagonal()
        penalized_linkages = linkages + penalty * ((intra * np.ones_like(linkages)).T + intra).T
        return linkages, penalized_linkages 
Author: gddingcs, Project: Dispersion-based-Clustering, Lines: 26, Source file: bottom_up.py

Example 12: utt_scores

# Required module: import numpy [as alias]
# Or: from numpy import average [as alias]
def utt_scores(scores, scp, utt2label):
    """return predictions and labels per utterance
    """
    utt2len   = ako.read_key_len(scp)
    utt2label = ako.read_key_label(utt2label)
    key_list  = ako.read_all_key(scp)

    preds, labels = [], []
    idx = 0
    for key in key_list:
        frames_per_utt = utt2len[key]
        avg_scores = np.average(scores[idx:idx+frames_per_utt])
        idx = idx + frames_per_utt
        preds.append(avg_scores)
        labels.append(utt2label[key])

    return np.array(preds), np.array(labels) 
Author: jefflai108, Project: Attentive-Filtering-Network, Lines: 19, Source file: v3_validation.py

Example 13: compute_loss

# Required module: import numpy [as alias]
# Or: from numpy import average [as alias]
# This example also requires PyTorch (import torch; import torch.nn.functional as F)
def compute_loss(model, device, data_loader):
    model.eval()
    loss = 0
    correct = 0
    scores  = []

    with torch.no_grad():
        for data, target in data_loader:
            data, target = data.to(device), target.to(device)
            target = target.view(-1,1).float()
            #output, hidden = model(data, None)
            output = model(data)
            loss += F.binary_cross_entropy(output, target, size_average=False)

            scores.append(output.data.cpu().numpy())

    loss /= len(data_loader.dataset) # average loss
    scores = np.vstack(scores) # scores per frame

    return loss, scores 
Author: jefflai108, Project: Attentive-Filtering-Network, Lines: 22, Source file: v3_validation.py

Example 14: compute_loss

# Required module: import numpy [as alias]
# Or: from numpy import average [as alias]
# This example also requires PyTorch (import torch; import torch.nn.functional as F)
def compute_loss(model, device, data_loader, threshold=0.5):
    model.eval()
    loss = 0
    correct = 0
    scores  = []

    with torch.no_grad():
        for data, target in data_loader:
            data, target = data.to(device), target.to(device)
            target = target.view(-1,1).float()
            #output, hidden = model(data, None)
            output = model(data)
            loss += F.binary_cross_entropy(output, target, size_average=False)
            pred = output > threshold
            correct += pred.byte().eq(target.byte()).sum().item() # not really meaningful

            scores.append(output.data.cpu().numpy())

    loss /= len(data_loader.dataset) # average loss
    scores = np.vstack(scores) # scores per frame

    return loss, scores, correct 
Author: jefflai108, Project: Attentive-Filtering-Network, Lines: 24, Source file: v1_validation.py

Example 15: compute_utt_eer

# Required module: import numpy [as alias]
# Or: from numpy import average [as alias]
def compute_utt_eer(scores, scp, utt2label, threshold):
    """utterance-based eer
    """
    utt2len   = ako.read_key_len(scp)
    utt2label = ako.read_key_label(utt2label)
    key_list  = ako.read_all_key(scp)

    preds, labels = [], []
    idx = 0
    for key in key_list:
        frames_per_utt = utt2len[key]
        avg_scores = np.average(scores[idx:idx+frames_per_utt])
        idx = idx + frames_per_utt
        if avg_scores < threshold:
            preds.append(0)
        else: preds.append(1)
        labels.append(utt2label[key])

    eer = compute_eer(labels, preds)
    confuse_mat = compute_confuse(labels, preds)
    return eer, confuse_mat 
Author: jefflai108, Project: Attentive-Filtering-Network, Lines: 23, Source file: v1_prediction.py


Note: The numpy.average examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and redistribution or use should follow each project's license. Please do not reproduce this article without permission.