Python statistics.stdev Function Code Examples

This article collects typical usage examples of the statistics.stdev function in Python. If you are wondering how to use stdev, what it does, or what real calls to it look like, the hand-picked code examples below should help.


A total of 15 code examples of the stdev function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
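
Before the project snippets, here is a minimal, self-contained sketch (with made-up data) of the call that every example below relies on; statistics.pstdev is shown only to contrast the sample and population standard deviations.

import statistics

data = [2.5, 3.1, 2.8, 3.6, 2.9]         # hypothetical sample values
sample_sd = statistics.stdev(data)        # sample standard deviation (n - 1 in the denominator)
population_sd = statistics.pstdev(data)   # population standard deviation (n in the denominator)

print(statistics.mean(data), sample_sd, population_sd)
# statistics.stdev raises statistics.StatisticsError when given fewer than two data points.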

Example 1: _aggregate

    def _aggregate(self):
        if not self._modified:
            return
        
        functions = defaultdict(lambda: defaultdict(list))
        for profile in filter(self._validate_profile, self._profiles.values()):
            for function, values in profile["functions"].items():
                functions[function]["num_calls"].append(values["num_calls"])
                functions[function]["total_time"].append(values["total_time"])
                functions[function]["cum_time"].append(values["cum_time"])

        for function, lists in functions.items():
            self._results[function] = Aggregator.Result(
                num_calls_median = int(median(lists["num_calls"])),
                total_time_mean = mean(lists["total_time"]),
                total_time_stdev = stdev(lists["total_time"]),
                cum_time_mean = mean(lists["cum_time"]),
                cum_time_stdev = stdev(lists["cum_time"]))
        
        # a fresh generator on each call, so mean() and stdev() each get their own pass over the data
        run_times = lambda: (p["run_time"] for p in self._profiles.values())
        self._summary = Aggregator.Summary(
            run_time_mean = mean(run_times()),
            run_time_stdev = stdev(run_times()))

        self._modified = False
Developer: c4fcm, Project: CivilServant, Lines: 25, Source: perftest.py

Example 2: csv_dict_reader

def csv_dict_reader(file_obj):
    """
    Read a CSV file using csv.DictReader
    """
    reader = csv.DictReader(file_obj, delimiter=',')
    num_likes = []
    num_comments = []
    num_shares = []
    for line in reader:
        p = int(line["num_likes"])
        q = int(line["first_page_comment"])
        r = int(line["comments_beyond_pageone"])
        num_likes.append(p)
        num_comments.append(q)
        num_shares.append(r)
    mean_num_likes = statistics.mean(num_likes)
    stdev_num_likes = statistics.stdev(num_likes)
    mean_num_comments = statistics.mean(num_comments)
    stdev_num_comments = statistics.stdev(num_comments)
    mean_num_shares = statistics.mean(num_shares)
    stdev_num_shares = statistics.stdev(num_shares)
    # note: despite the names, these ratios are coefficients of variation (stdev / mean), not covariances
    covariance_likes = stdev_num_likes / mean_num_likes
    covariance_comments = stdev_num_comments / mean_num_comments
    covariance_shares = stdev_num_shares / mean_num_shares
    w = csv.writer(open("svm_dataset.csv","a"),delimiter=',',quoting=csv.QUOTE_ALL)
    
    w.writerow([mean_num_likes,stdev_num_likes,covariance_likes,mean_num_comments,stdev_num_comments,covariance_comments,mean_num_shares,stdev_num_shares,covariance_shares])
Developer: envious777, Project: Brand-Valuation-using-Social-Media-Data, Lines: 27, Source: save.py

Example 3: process_result

	def process_result(self, t_frame, r_frame):
		print(t_frame, r_frame) 
		
		try:
			stat_const = float(2.776)  # two-tailed t value for a 95% confidence interval with 4 degrees of freedom (5 replications)

			res2 = [] # frame transmission 
			res3 = [] # throughput 
			for i in range(int(self.T[0])):
				# frame transmission
				res2.append(t_frame[i]/r_frame[i])
				res3.append(self.F * r_frame[i] / self.R)

			# print(res2, res3)

			avg_res2 = statistics.mean(res2)
			sd2 = statistics.stdev(res2)
			dif2 = sd2/math.sqrt(int(self.T[0]))*stat_const
			upper_bound2 = avg_res2 + dif2 
			lower_bound2 = avg_res2 - dif2 

			avg_res3 = statistics.mean(res3)
			sd3 = statistics.stdev(res3)
			dif3 = sd3/math.sqrt(int(self.T[0]))*stat_const
			upper_bound3 = avg_res3 + dif3
			lower_bound3 = avg_res3 - dif3 

		except ZeroDivisionError: 
			return float("inf"), float("inf"), float("inf"), 0, 0, 0

		return avg_res2, lower_bound2, upper_bound2, avg_res3, lower_bound3, upper_bound3 
Developer: Hank-TNguyen, Project: W16, Lines: 31, Source: Simulation.py

Example 4: good_stdev

    def good_stdev(self, current_offer):
        if self.counter < 5:
            return False

        # array of utilities the opponent would get for their offer
        past_utils = [self.utility_for(x) for x in self.opponent_offers]
        old_stdev = statistics.stdev(past_utils)
        old_mean = statistics.mean(past_utils)

        if past_utils[-1] < self.penalty:
            return False

        new_utils = []
        # filter outliers (2 standard deviations above or below)
        for u in past_utils:
            if old_mean - 2*old_stdev < u < old_mean + 2*old_stdev:
                new_utils.append(u)

        if len(new_utils) < 2:
            return False

        # if the utility we get for the offer is greater than the mean + 1 std dev, then return True
        offer_utility = self.utility_for(current_offer)
        new_stdev = statistics.stdev(new_utils)
        new_mean = statistics.mean(new_utils)
        return offer_utility > new_mean + new_stdev
Developer: nealp9084, Project: hw3, Lines: 26, Source: door_in_face_negotiator.py

Example 5: pCalc

def pCalc (movMat, movNumber, n, reviewers):
      
    xVals = [int(x) for i,x in enumerate(movMat[movNumber][1].split(';')) if i in reviewers]
    yVals = [int(x) for i,x in enumerate(movMat[n][1].split(';')) if i in reviewers]
    
    xi = sum(xVals) #get first movie values
    average1 = xi/len(reviewers)
    stdDev1 = statistics.stdev(xVals)
  
    yi = sum(yVals) #get second movie values
    average2 = yi/len(yVals)
    stdDev2 = statistics.stdev(yVals)

    r = 0 		#get r value
    newSum = [((x - average1) / stdDev1) * ((y-average2)/stdDev2) for x,y in zip(xVals, yVals)]
    r = (1/(len(reviewers)-1))*sum(newSum)    
    
    review = []		#append all values to the list
    review.append(r)
    review.append(average1)
    review.append(average2)
    review.append(stdDev1)
    review.append(stdDev2)
    review.append(n)
    review.append(len(reviewers))
         
    return review 
Developer: VoR0220, Project: Artificial-Intelligence-Python, Lines: 27, Source: hw4.py

Example 6: run_simulation

def run_simulation(init_duration, init_stake, samples, player):
    """ Run simulation, print the result to stdout

    """
    wheel = create_wheel()
    table = Table(wheel)
    game = RouletteGame(wheel, table)
    simulator = Simulator(game, player,
                          init_duration=init_duration, samples=samples,
                          init_stake=init_stake)
    simulator.gather()
    durations = simulator.durations
    maxima = simulator.maxima
    print(player)
    print()
    print("Durations")
    print("  min :", min(durations))
    print("  max :", max(durations))
    print("  mean: %.2f" % statistics.mean(durations))
    print("  dev : %.2f" % statistics.stdev(durations))
    print("Maxima")
    print("  min :", min(maxima))
    print("  max :", max(maxima))
    print("  mean: %.2f" % statistics.mean(maxima))
    print("  dev : %.2f" % statistics.stdev(maxima))
Developer: yannicklm, Project: pyroulette, Lines: 25, Source: simulator.py

Example 7: replacePearsonPvalueWithZscore

def replacePearsonPvalueWithZscore():
    all_sample_data={}
    for tissue in tissue_comparison_scores:
        for (r,p,sample) in tissue_comparison_scores[tissue]:
            all_sample_data[sample] = [] ### populate this dictionary and create sub-dictionaries
        break

    for tissue in tissue_comparison_scores:
        for (r,p,sample) in tissue_comparison_scores[tissue]:
            all_sample_data[sample].append(r)

    sample_stats={}
    all_dataset_rho_values=[]
    ### Get average and standard deviation for all sample rho's
    for sample in all_sample_data:
        all_dataset_rho_values+=all_sample_data[sample]
        avg=statistics.avg(all_sample_data[sample]) ### note: `avg` is not in the standard-library statistics module (which provides `mean`), so this project appears to use its own statistics helper
        stdev=statistics.stdev(all_sample_data[sample])
        sample_stats[sample]=avg,stdev
    
    global_rho_avg = statistics.avg(all_dataset_rho_values)
    global_rho_stdev = statistics.stdev(all_dataset_rho_values)
    
    ### Replace the p-value for each rho
    for tissue in tissue_comparison_scores:
        scores = []
        for (r,p,sample) in tissue_comparison_scores[tissue]:
            #u,s=sample_stats[sample]
            #z = (r-u)/s
            z = (r-global_rho_avg)/global_rho_stdev ### Instead of doing this for the sample background, do it relative to all analyzed samples
            scores.append([r,z,sample])
        tissue_comparison_scores[tissue] = scores
Developer: wuxue, Project: altanalyze, Lines: 32, Source: LineageProfiler.py

Example 8: calc_evalmetrics

    def calc_evalmetrics(self):
        """Calculate evaluation metrics: 
           Tour Recall, Tour Precision, Tour F1-score, RMSE of POI visit duration
           Tour Popularity, Tour Interest, Popularity and Interest Rank
        """
        assert(len(self.recommendSeqs) > 0)

        # calculate intersection size of recommended POI set and real POI set
        intersize = dict()
        for k, v in self.recommendSeqs.items():
            intersize[k] = len(set(v) & set(self.sequences[k]))

        # calculate tour recall
        recalls = []
        for k, v in intersize.items():
            recalls.append(v / len(self.sequences[k]))

        # calculate tour precision
        precisions = []
        for k, v in intersize.items():
            precisions.append(v / len(self.recommendSeqs[k]))

        # calculate F1-score
        f1scores = []
        assert(len(recalls) == len(precisions))
        for i in range(len(recalls)):
            f1scores.append(2 * precisions[i] * recalls[i] / (precisions[i] + recalls[i]))

        print('Recall:   ', stat.mean(recalls),    stat.stdev(recalls))
        print('Precision:', stat.mean(precisions), stat.stdev(precisions))
        print('F1-score: ', stat.mean(f1scores),   stat.stdev(f1scores))
Developer: gitter-badger, Project: digbeta, Lines: 31, Source: ijcai15.py

Example 9: mean_dev

def mean_dev(training_set):
    '''
    Calculates and returns the mean and standard deviation for the classes yes and no of a given training set
    '''
    class_yes = []
    class_no = []
    mean_yes = {}
    mean_no = {}
    dev_yes = {}
    dev_no = {}
    for key in training_set[0]:
        for i in range(len(training_set)):
            if training_set[i]['DiabetesClass'] == 'yes':
                class_yes.append(training_set[i][key])
            else:
                class_no.append(training_set[i][key])
        if not key == 'DiabetesClass':
            mean_yes[key] = statistics.mean(class_yes)
            mean_no[key] = statistics.mean(class_no)
            dev_yes[key] = statistics.stdev(class_yes)
            dev_no[key] = statistics.stdev(class_no)
        else:
            prob_yes = float(len(class_yes) / len(training_set))
            prob_no = float(len(class_no) / len(training_set))
        class_yes = []
        class_no = []
    return mean_yes, mean_no, dev_yes, dev_no, prob_yes, prob_no
Developer: pedrotst, Project: trab1-ai, Lines: 27, Source: old_main.py

Example 10: bootstrap_test

    def bootstrap_test(self, nsamples=100, noise=0.2):
        """Returns mean and std. dev. of successful recognitions."""
        boot = {}
        for vec, pat in zip(self.pattern_vectors, self.patterns):
            boot[pat] = {"closest": [], "iterations": [], "full_matches": [], "accuracy": []}
            for sample in range(nsamples):
                recalled, noisy, iterations = self.recall_noisy(vec, noise=noise)
                self.show_pattern(noisy, "{}_{}_noisy_{}".format(
                    noise, pat, sample))
                self.show_pattern(recalled,      "{}_{}_recalled_{}".format(
                    noise, pat, sample))

                # equal to any patterns?
                matches = {}
                full_match = None
                for vec2, pat2 in zip(self.pattern_vectors, self.patterns):
                    matches[pat2] = list(vec2[0] == recalled[0]).count(True)
                    if matches[pat2] == vec2.size:
                        full_match = pat2

                boot[pat]["iterations"].append(iterations)
                boot[pat]["full_matches"].append(full_match)
                boot[pat]["closest"].append(pat == max(matches, key=matches.get))
                boot[pat]["accuracy"].append(matches[pat] / vec.size)
            boot[pat]["iterations"] = (mean(boot[pat]["iterations"]), stdev(boot[pat]["iterations"]))
            boot[pat]["accuracy"] = (mean(boot[pat]["accuracy"]), stdev(boot[pat]["accuracy"]))

            count_matches = lambda l: len(list(filter(lambda f: f is not None, l)))

            boot[pat]["full_matches"] = count_matches(boot[pat]["full_matches"])
            boot[pat]["closest"] = count_matches(boot[pat]["closest"])
        return boot
Developer: hucal, Project: hopfield_tests, Lines: 33, Source: tests.py

Example 11: addDataToPlt

def addDataToPlt(fig, ax, dates, diff, c = 'c', label="raw", isMultiple=True):
    assert len(dates) == len(diff), "Plot and data are of different length"
    label1 = "average of 3"
    label2 = "average of 7"

    # rolling medians of the raw series (despite the "average of N" labels above)
    med3 = [i for i in diff]
    med7 = [i for i in diff]
    for i in range(3, len(diff) - 4):
        if i > 2 and i < len(diff) - 4:
            med7[i] = stats.median(diff[i-3:i+3])
        if i > 0 and i < len(diff) - 2:
            med3[i] = stats.median(diff[i-1:i+2])
        
    marker = "o"
    if len(diff) > 200:
        marker = "."
    if not isMultiple:
        if len(diff) > 1 and stats.stdev(diff) > 0.1:
            logger.error("Why do you have a high stdev?" + str(stats.stdev(diff)))
            marker = "x"

    ax.plot_date(dates, diff, c, xdate=True, marker = marker, linestyle="", label=label)
    if isMultiple:
        ax.plot_date(dates, med3, 'b', xdate=True, marker = ".", linestyle="", label=label1)
        ax.plot_date(dates, med7, 'r', xdate=True, marker = ".", linestyle="", label=label2)
    ax.xaxis.set_major_locator(matplotlib.dates.HourLocator())
    ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter("%H:00"))
    ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator())
    ax.autoscale_view()
    fig.autofmt_xdate()
    ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
Developer: nibrivia, Project: atlas, Lines: 31, Source: graph.py

Example 12: model_analysis

def model_analysis(x, x_matrix, y, line, y_hat, b):
    n = len(x) # number of samples
    s_x = stats.stdev(x) # standard deviation of x values
    s_y = stats.stdev(y) # standard deviation of y values
    s2_x = stats.variance(x) # variance of x values
    s2_y = stats.variance(y) # variance of y values
    s_xy = b * s2_x # covariance of VM
    
    mad_temp = 0
    SSE = 0
    for i in range(len(y)):
        temp = abs(y[i] - y_hat[i])
        mad_temp += temp
        SSE += temp**2 # sum of squares for error
    MAD = mad_temp / n    
    s_err = math.sqrt(SSE / (n - 2)) # standard error of estimate
    s_b = s_err / math.sqrt((n - 1) * s2_x)
    
    r = s_xy / (s_x * s_y) # sample coefficient of correlation
    R_2 = line.score(x_matrix, y) # coefficient of determination 
    R_2calc = s_xy**2 / (s2_x * s2_y)
    t = b / s_b # t-value for slope assuming true slope = 0
    
    f1.write('\nSkew = ' + str(b) + '\n')
    f1.write('Coefficient of correlation (r) = ' + str(r) + '\n')
    #f1.write('Coefficient of determination (R^2) via scikit = ' + str(R_2) + '\n')
    f1.write('Coefficient of determination (R^2) calculate = ' + str(R_2calc) + '\n')
    f1.write('Test statistic for clock skew (t) = ' + str(t) + '\n')
    f1.write('Mean Absolute Deviation (MAD) = ' + str(MAD) + '\n')
    f1.write('Sum of Squares for Forecast Error (SSE) = ' + str(SSE) + '\n')
    
    return
Developer: cjwasek, Project: Traffic-Analysis-Project, Lines: 32, Source: SingleVM_Anal_v2.py

Example 13: main

def main(y):
    print("Give me",y,"numbers")
    r = int(input())
    numbers =[]
    while len(numbers) < y:
        numbers.append(r)
        if len(numbers) < y:
            r = int(input("Give me another number"))
    print (numbers)
    #sum of the 10 numbers
    def function(x):
        name = x
        var = 0
        for i in numbers:
            var += i
        result = var/len(numbers)
        if name == 1:
            return var
        else:
            return result
    #Paul's blog idea -->https://pololarinette.wordpress.com/2015/10/15/wsq10-lists/
    #Standard deviation
    import statistics

    print("The sum of the",y,"numbers is",function(1))
    print("The average of the",y,"numbers is",function(2))
    print("The standard deviation is",statistics.stdev(numbers))
Developer: Jocapear, Project: TC1014, Lines: 28, Source: WSQ10.py

Example 14: cmp

    def cmp(tweet1_counts1, tweet2_counts2):
        (tweet1, counts1) = tweet1_counts1
        (tweet2, counts2) = tweet2_counts2

        "1. Из твитов с разным количеством элементов списков более информативен тот, в котором элементов больше"
        if sum(counts1) > sum(counts2):
            return -1
        elif sum(counts2) > sum(counts1):
            return 1

        """
        2. Из твитов с одинаковой суммой количеств элементов список выбираем тот, где количества элементов более
        сбалансированы (например, [1, 1] лучше, чем [2, 0])
        """
        std1 = statistics.stdev(counts1)
        std2 = statistics.stdev(counts2)
        if std1 < std2:
            return -1
        elif std2 < std1:
            return 1

        "И, наконец, наиболее информативен твит большей длины"
        if len(tweet1) > len(tweet2):
            return -1
        elif len(tweet2) > len(tweet1):
            return 1

        return 0
Developer: themylogin, Project: twitter-overkill, Lines: 28, Source: utils.py

Example 15: bench_throughput_latency

def bench_throughput_latency(samples=3, max_rsd=0.1,
                             *args, **kwargs):
    while True:
        throughputs = []
        latencies = []
        iteration = 1
        errors = 0
        while iteration <= samples:
            sys.stdout.write(' [*] Iteration %d/%d\r' % (iteration,samples))
            sys.stdout.flush()
            try:
                bench = Benchmark(*args, **kwargs)
                bench.run()
                throughputs.append(bench.throughput)
                latencies.append(bench.latency)
                iteration += 1
            except Exception as e:
                errors += 1
                if errors >= samples:
                    raise
                print(' [!] Iteration %d failed: %s\n%s' % \
                    (iteration, str(type(e)), str(e)))
        mean_throughput = statistics.mean(throughputs)
        mean_latency = statistics.mean(latencies)
        stdev_throughput = statistics.stdev(throughputs)
        stdev_latency = statistics.stdev(latencies)
        # relative standard deviation (coefficient of variation) of each metric
        rsd_throughput = stdev_throughput/mean_throughput
        rsd_latency = stdev_latency/mean_latency
        if rsd_throughput > max_rsd or rsd_latency > max_rsd:
            sys.stderr.write(' [!] Discarding experiment: '+\
                'throughput RSD %.2f %%, latency RSD %.2f %%\n' %\
                (rsd_throughput*100, rsd_latency*100))
            continue
        return (mean_throughput, mean_latency,
            rsd_throughput, rsd_latency)
Developer: mtth-bfft, Project: muslkl, Lines: 35, Source: bench_memcached_throughput_latency.py


Note: The statistics.stdev examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors, and distribution or use should follow the license of the corresponding project. Do not reproduce without permission.