

Python statistics.mean Function Code Examples

This article collects typical usage examples of Python's statistics.mean function. If you are wondering how statistics.mean is used, what it does, or want to see it in real code, the curated examples below may help.


The following shows 15 code examples of the mean function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
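
A minimal, self-contained sketch of how statistics.mean is typically called is shown below for reference; the sample values are made up purely for illustration.

import statistics

# statistics.mean accepts any non-empty iterable of real numbers
daily_consumption = [1, 2, 2, 3, 5]        # hypothetical sample data
print(statistics.mean(daily_consumption))  # 2.6

# an empty input raises statistics.StatisticsError
try:
    statistics.mean([])
except statistics.StatisticsError as err:
    print("cannot take the mean of an empty sequence:", err)

Most of the examples that follow use the same pattern: collect numeric values into a list, then pass the list to statistics.mean (often alongside statistics.stdev or statistics.pstdev).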

Example 1: show_result2

def show_result2():
    fd_list = db.session.query(Price_History).all()

    # Some simple statistics for sample questions
    GPDALC = []
    GPWALC = []
    MSWALC = []
    MSDALC = []
    for el in fd_list:
        if(el.SCHOOL=='GP'):
            GPWALC.append(el.WALC)
            GPDALC.append(el.DALC)
        elif(el.SCHOOL=='MS'):
            MSWALC.append(el.WALC)
            MSDALC.append(el.DALC)
        else:
            print("School error")

    mean_GPWALC = statistics.mean(GPWALC)
    mean_GPDALC = statistics.mean(GPDALC)

    mean_MSWALC = statistics.mean(MSWALC)
    mean_MSDALC = statistics.mean(MSDALC)

    # Prepare data for google charts
    data = [['GP School Workday Alcohol Consumption', mean_GPDALC], ['GP School Weekend  Alcohol Consumption', mean_GPWALC],
            ['MS School Workday Alcohol Consumption', mean_MSDALC], ['MS School Weekend Alcohol Consumption', mean_MSWALC]]
    return render_template('result2.html', data=data)
Developer: Imielin, Project: git, Lines of code: 28, Source file: dataeng.py

Example 2: sample

    def sample(self, borrowers, threshold, n_iterations=1000, eps=0.0001, target=None):
        """

        :param borrowers: list of borrower (and information about them)
        :type borrowers: list[Borrower]

        :param threshold: big losses threshold
        :type threshold: float

        :param n_iterations: number of simulations
        :type n_iterations: int

        :return:
        """
        weights_matrix, independent_weight, losses, vitality = self.get_parameters(borrowers)
        res = []
        iteration = 0
        for iteration in range(n_iterations):
            res.append(self.one_loss(weights_matrix, independent_weight, losses, vitality, threshold))
            if iteration > 100 and target is not None and abs(target - mean(res)) < eps:
                break
            elif iteration > 100 and (max(res) - min(res)) / (iteration ** 0.5) < eps:
                break
        print("TwoStepSampler break after {} iterations".format(iteration))

        return mean(res)
Developer: DaryaPopova, Project: diplom, Lines of code: 26, Source file: two_step_sampler.py

Example 3: process_result

	def process_result(self, t_frame, r_frame):
		print(t_frame, r_frame) 
		
		try:
			stat_const = float(2.776)  # two-tailed t critical value for a 95% confidence interval with 4 degrees of freedom

			res2 = [] # frame transmission 
			res3 = [] # throughput 
			for i in range(int(self.T[0])):
				# frame transmission
				res2.append(t_frame[i]/r_frame[i])
				res3.append(self.F * r_frame[i] / self.R)

			# print(res2, res3)

			avg_res2 = statistics.mean(res2)
			sd2 = statistics.stdev(res2)
			dif2 = sd2/math.sqrt(int(self.T[0]))*stat_const
			upper_bound2 = avg_res2 + dif2 
			lower_bound2 = avg_res2 - dif2 

			avg_res3 = statistics.mean(res3)
			sd3 = statistics.stdev(res3)
			dif3 = sd3/math.sqrt(int(self.T[0]))*stat_const
			upper_bound3 = avg_res3 + dif3
			lower_bound3 = avg_res3 - dif3 

		except ZeroDivisionError: 
			return float("inf"), float("inf"), float("inf"), 0, 0, 0

		return avg_res2, lower_bound2, upper_bound2, avg_res3, lower_bound3, upper_bound3 
Developer: Hank-TNguyen, Project: W16, Lines of code: 31, Source file: Simulation.py

Example 4: mean_dev

def mean_dev(training_set):
    '''
    Calculate and return, for each attribute, the mean and standard deviation for the classes yes and no of a given training set, along with the prior probability of each class
    '''
    class_yes = []
    class_no = []
    mean_yes = {}
    mean_no = {}
    dev_yes = {}
    dev_no = {}
    for key in training_set[0]:
        for i in range(len(training_set)):
            if training_set[i]['DiabetesClass'] == 'yes':
                class_yes.append(training_set[i][key])
            else:
                class_no.append(training_set[i][key])
        if not key == 'DiabetesClass':
            mean_yes[key] = statistics.mean(class_yes)
            mean_no[key] = statistics.mean(class_no)
            dev_yes[key] = statistics.stdev(class_yes)
            dev_no[key] = statistics.stdev(class_no)
        else:
            prob_yes = float(len(class_yes) / len(training_set))
            prob_no = float(len(class_no) / len(training_set))
        class_yes = []
        class_no = []
    return mean_yes, mean_no, dev_yes, dev_no, prob_yes, prob_no
Developer: pedrotst, Project: trab1-ai, Lines of code: 27, Source file: old_main.py

Example 5: get_parts_closeness

def get_parts_closeness(part1, part2) -> float:
    part1_distances = part1.distances
    part2_distances = part2.distances
    mean1 = statistics.mean(part1_distances)
    mean2 = statistics.mean(part2_distances)
    difference = abs(mean1 - mean2)
    return difference
Developer: notnami, Project: signify, Lines of code: 7, Source file: substring_parser.py

Example 6: main

def main(graph, nbk, delta_max, mu, max_eval, iter, move_operator, tabuSize, logsPath):
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    fh = logging.FileHandler(logsPath + "/tabusearch.log")
    fh.setLevel(logging.INFO)
    frmt = logging.Formatter('%(message)s')
    fh.setFormatter(frmt)
    log.addHandler(fh)

    all_num_evaluations = []
    all_best_score = []
    all_time = []
    log.info("-------RUNNING TABU SEARCH-------")
    for i in range(iter):
        start = timeit.default_timer()
        num_evaluations, best_score, best = test_file_tabusearch(graph, nbk, delta_max, mu, max_eval, move_operator, tabuSize)
        stop = timeit.default_timer()
        log.debug('time : %f' % (stop - start))
        all_num_evaluations.append(num_evaluations)
        all_best_score.append(best_score)
        all_time.append(stop - start)
    log.info("nbS = %d; nbK = %d; delta_max = %d; mu = %r; move_operator= %s; tabu_maxsize = %d" % (graph.get_nbVertices(), nbk, delta_max, mu, move_operator.__name__, tabuSize))
    log.info("for %d iteration with %d max_evaluations each, "
             "\n best score found is %d,"
             "\n total time in sec : %r"
             "\n mean time in sec : %r,"
             "\n mean best_score : %r, EcT : %r"
             "\n mean num_eval : %r"
             % (iter,
                max_eval,
                min(score for score in all_best_score),
                sum(all_time),
                statistics.mean(all_time),
                statistics.mean(all_best_score), statistics.stdev(all_best_score),
                statistics.mean(all_num_evaluations)))
Developer: BaaptM, Project: project_metaheuristics, Lines of code: 34, Source file: test_tabusearch.py

Example 7: calculate_latencies

def calculate_latencies(version_dates):
    linux_latencies = latency(version_dates['linux'], OrderedDict(avo.os_to_kernel))
    set_latex_value('linuxMeanUpdateLatency', ufloat(statistics.mean(linux_latencies.values()),statistics.stdev(linux_latencies.values())))
    openssl_latencies = latency(version_dates['openssl'], OrderedDict(avo.os_to_project['openssl']))
    set_latex_value('opensslMeanUpdateLatency', ufloat(statistics.mean(openssl_latencies.values()),statistics.stdev(openssl_latencies.values())))
    bouncycastle_latencies = latency(version_dates['bouncycastle'], OrderedDict(avo.os_to_project['bouncycastle']))
    set_latex_value('bouncycastleMeanUpdateLatency',ufloat(statistics.mean(bouncycastle_latencies.values()),statistics.stdev(bouncycastle_latencies.values())))
Developer: ucam-cl-dtg, Project: paper-da-securityupdates, Lines of code: 7, Source file: versions.py

Example 8: nutritionfacts

    def nutritionfacts(self):

        # build the legend entry (key) for each plotted series
        svgdata = ""
        frame_x = self.width * self.bins + 100 - 90
        frame_y = (self.graphheight + 700) // 2 + 25 - self.graphheight
        for i, s in enumerate([l for l in self.points if l[2]]):
            mu = "μ = —"
            sigma = "σ = —"
            if len(s[0]) != 0:
                xmean = stat.mean([t[0] for t in s[0]])
                xsigma = stat.pstdev([t[0] for t in s[0]], xmean)

                ymean = stat.mean([t[1] for t in s[0]])
                ysigma = stat.pstdev([t[1] for t in s[0]], ymean)

                mu = "μ = (" + str(round(xmean, 4)) + ", " + str(round(ymean, 4)) + ")"
                sigma = "σ = (" + str(round(xsigma, 4)) + ", " + str(round(ysigma, 4)) + ")"

            line_y = frame_y + i * 65
            svgdata += circle(frame_x - 4, line_y + 3, 2, s[1])
            svgdata += circle(frame_x + 4, line_y + 4, 2, s[1])
            svgdata += circle(frame_x - 1, line_y + 10, 2, s[1])

            svgdata += text(frame_x + 20, line_y + 10, s[2], align=-1, color=s[1], font="Neue Frutiger 65")
            svgdata += text(frame_x + 28, line_y + 25, "n = " + str(len(s[0])), align=-1, color=s[1])
            svgdata += text(frame_x + 28, line_y + 40, mu, align=-1, color=s[1])

            svgdata += text(frame_x + 28, line_y + 55, sigma, align=-1, color=s[1])
        self._frostbyte(svgdata)
Developer: kelvin13, Project: svgplot, Lines of code: 30, Source file: graph.py

Example 9: get_stats_window

def get_stats_window(depth_iterator, length, window_size):
    """Calculate min/max/mean and min/max windowed mean.

    Assumes the depth_iterator will fill in all the implicit zero
    entries which ``samtools depth`` may omit!

    Assumes window_size < number of values in iterator!
    """
    window = deque()
    total_cov = 0
    min_cov = None
    max_cov = 0.0

    assert 1 <= window_size <= length

    prev_pos = 0
    while len(window) < window_size:
        try:
            ref, pos, depth = next(depth_iterator)
        except NoCoverage:
            return 0, 0, 0.0, 0.0, 0.0
        except StopIteration:
            outstr = "Not enough depth values to fill %i window" % window_size
            logger.info(outstr)
            raise ValueError("%s" % outstr)
        prev_pos += 1
        assert pos == prev_pos, "Discontinuity in cov vals for %s position %i" % (ref,
                                                                                  pos)
        total_cov += depth
        if min_cov is None:
            min_cov = depth
        else:
            min_cov = min(min_cov, depth)
        max_cov = max(max_cov, depth)
        window.append(depth)

    assert len(window) == window_size
    min_win = max_win = mean(window)
    for ref, pos, depth in depth_iterator:
        prev_pos += 1
        assert pos == prev_pos, "Discontinuity in cov val for %s position %i" % (ref,
                                                                                 pos)
        total_cov += depth
        min_cov = min(min_cov, depth)
        max_cov = max(max_cov, depth)
        window.popleft()
        window.append(depth)
        assert len(window) == window_size
        win_depth = mean(window)
        min_win = min(min_win, win_depth)
        max_win = max(max_win, win_depth)

    mean_cov = total_cov / float(length)

    assert prev_pos == length, "Missing final coverage?"
    assert len(window) == window_size
    assert min_cov <= mean_cov <= max_cov
    assert min_cov <= min_win <= max_win <= max_cov

    return min_cov, max_cov, mean_cov, min_win, max_win
Developer: Grindell, Project: public_scripts, Lines of code: 60, Source file: Fix_five_prime_CDS.py

Example 10: features_present1

 def features_present1(self, othertmpft):
     a=FeatureFinder()
     a.train(othertmpft)
     j=a.scan_data(othertmpft)
     features=list()
     dre=self.dict_process(othertmpft)
     sendback=list()
     final_list=list()
     del j[0]
     del j[len(j)-1]
     for i in j:
         #print(i.location)
         if i.location<2:
             final_list.append(Feature(i.location, statistics.mean(dre[i.location-1:i.location+3])))
         else:
             final_list.append(Feature(i.location, statistics.mean(dre[i.location-2:i.location+2])))
     for i in self.outline:
        if type(i)==Feature:features.append(i)
     for i in features:
         if len(final_list)>0:l=min(final_list, key=lambda x: abs(i.loc-x.loc))
         else:return [0]*len(self.outline)
         dis=len(othertmpft)-abs(i.loc-l.loc)
         penalize_by=dis/len(othertmpft)
         #print(penalize_by)
         sendback.append(statistics.mean([penalize_by, self.t(abs(i.loc-l.loc))]))
   #  print(sendback)
     #print("I am features1")
     return self.find_outliar(sendback)            
Developer: GGGG1020, Project: KineticEEG, Lines of code: 28, Source file: SLICERZ.py

Example 11: scan

 def scan(self):
     dre=list()
     final=list()
     dre=self.dict_process(self.data)
     pol=[]
     oo=list()
     for d in self.listy:
         r=self.__process(d)
         # both non-empty branches appended the mean, so the test reduces to "not empty"
         if len(r[1])>0:pol.append(statistics.mean(r[1]))
       #  print(pol)
     for i in range(len(pol)):
         final.append(Slope(self.download[i].location, pol[i]))
        ## print(final)
     del self.download[0]
     del self.download[-1]
     last=1
     for i in range(len(self.download)):
         try:
             final.insert(i+last, Feature(self.download[i].location, statistics.mean(dre[self.download[i].location-2:self.download[i].location+2])))
         except statistics.StatisticsError:
             #del  final[i-1]
             pass
         last+=1
       #  print(final)
     self.outline=final
Developer: GGGG1020, Project: KineticEEG, Lines of code: 27, Source file: SLICERZ.py

Example 12: insertNormalizedModelInDB

def insertNormalizedModelInDB(idUser, idString, keystroke, isTest = False):
	insertNormalizedRecord = replaceIfIsTest("INSERT INTO `mdl_user#isTest_keystroke_normalized`(`id_user`, `id_string`) VALUES (%s, %s)", isTest);
	updateNormalizedRecord = replaceIfIsTest("UPDATE `mdl_user#isTest_keystroke_normalized` ", isTest);
	
		
	executeSqlInDB(insertNormalizedRecord, (idUser, idString));
	
	keyDimensionsExtractor = KeystrokeDimensionsExtractor(keystroke);
	
	#extracting dimensions
	timePressed = keyDimensionsExtractor.getTimePressed();
	# getting the average and standard deviation
	timePressedAverage = statistics.mean(timePressed);
	timePressedstandardDeviation = statistics.pstdev(timePressed);
	
	latencies = keyDimensionsExtractor.getLatencies();
	latenciesAverage = statistics.mean(latencies);
	latenciesStandardDeviation = statistics.pstdev(latencies);
	
	dbModel = {
		'id_user': idUser,
		'id_string': idString,
		'press_average': timePressedAverage,
		'latency_avarage': latenciesAverage,
		'press_standard_deviation': timePressedstandardDeviation,
		'latency_standard_deviation': latenciesStandardDeviation,
	}
	
	# update the record inserted above with the computed statistics
	updateNormalizedRecord = updateNormalizedRecord + (" SET `press_average`= %(press_average)s,`latency_avarage`= %(latency_avarage)s, `press_standard_deviation`= %(press_standard_deviation)s,`latency_standard_deviation`= %(latency_standard_deviation)s " 
		" WHERE `id_user`= %(id_user)s AND `id_string`= %(id_string)s");
	executeSqlInDB(updateNormalizedRecord, dbModel);
Developer: MarcoASCruz, Project: LearningPython, Lines of code: 32, Source file: validate.py

Example 13: csv_dict_reader

def csv_dict_reader(file_obj):
    """
    Read a CSV file using csv.DictReader
    """
    reader = csv.DictReader(file_obj, delimiter=',')
    num_likes = []
    num_comments = []
    num_shares = []
    for line in reader:
        p = int(line["num_likes"])
        q = int(line["first_page_comment"])
        r = int(line["comments_beyond_pageone"])
        num_likes.append(p)
        num_comments.append(q)
        num_shares.append(r)
    mean_num_likes = statistics.mean(num_likes)
    stdev_num_likes = statistics.stdev(num_likes)
    mean_num_comments = statistics.mean(num_comments)
    stdev_num_comments = statistics.stdev(num_comments)
    mean_num_shares = statistics.mean(num_shares)
    stdev_num_shares = statistics.stdev(num_shares)
    # note: despite the names, these are coefficients of variation (stdev / mean), not covariances
    covariance_likes = stdev_num_likes / mean_num_likes
    covariance_comments = stdev_num_comments / mean_num_comments
    covariance_shares = stdev_num_shares / mean_num_shares
    w = csv.writer(open("svm_dataset.csv","a"),delimiter=',',quoting=csv.QUOTE_ALL)
    
    w.writerow([mean_num_likes,stdev_num_likes,covariance_likes,mean_num_comments,stdev_num_comments,covariance_comments,mean_num_shares,stdev_num_shares,covariance_shares])
Developer: envious777, Project: Brand-Valuation-using-Social-Media-Data, Lines of code: 27, Source file: save.py

Example 14: show_result3

def show_result3():
    fd_list = db.session.query(Price_History).all()

    # Some simple statistics for sample questions
    MDALC = []
    MWALC = []
    FWALC = []
    FDALC = []
    for el in fd_list:
        if(el.SEX=='M'):
            MWALC.append(el.WALC)
            MDALC.append(el.DALC)
        elif(el.SEX=='F'):
            FWALC.append(el.WALC)
            FDALC.append(el.DALC)
        else:
            print("Sex error")

    mean_MWALC = statistics.mean(MWALC)
    mean_MDALC = statistics.mean(MDALC)

    mean_FWALC = statistics.mean(FWALC)
    mean_FDALC = statistics.mean(FDALC)

    # Prepare data for google charts
    data = [['Female Workday Alcohol Consumption', mean_FDALC], ['Female Weekend  Alcohol Consumption', mean_FWALC],
            ['Male Workday Alcohol Consumption', mean_MDALC], ['Male Weekend Alcohol Consumption', mean_MWALC]]
    return render_template('result3.html', data=data)
Developer: Imielin, Project: git, Lines of code: 28, Source file: dataeng.py

Example 15: threshold

def threshold(imageArray):
    balanceAr=[]
    newAr = imageArray  # note: this is an alias of imageArray, not a copy; pixels are modified in place
    
    # average each pixel's RGB values
    for evryRow in imageArray:
        for evryPix in evryRow:
            avgNum = mean(evryPix[:3])
            balanceAr.append(avgNum)
        
    # average all of the per-pixel averages
    balance = mean(balanceAr)
    for evryRow in newAr:
        for evryPix in evryRow:
            # brighter pixels are made white
            if mean(evryPix[:3]) > balance:
                evryPix[0] = 255
                evryPix[1] = 255
                evryPix[2] = 255
            # darker pixels are made black
            else:
                evryPix[0] = 0
                evryPix[1] = 0
                evryPix[2] = 0
    return newAr
Developer: adamwe1, Project: AdamWendlerProj3, Lines of code: 25, Source file: SVCMethod.py


Note: The statistics.mean examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Please consult each project's license before redistributing or using the code; do not reproduce without permission.