

Python statistics.median function code examples

This article collects typical usage examples of the statistics.median function in Python. If you are wondering what statistics.median does, how to call it, or what real-world uses look like, the curated code examples below should help.


Fifteen code examples of the median function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
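Before the real-world examples, here is a minimal sketch of the basic call, assuming Python 3.4 or later (the release that introduced the statistics module):

import statistics

# Odd number of values: the median is the middle element of the sorted data.
print(statistics.median([7, 1, 3]))       # 3

# Even number of values: the median is the average of the two middle elements.
print(statistics.median([1, 3, 5, 7]))    # 4.0

# The input does not need to be pre-sorted, but it must be non-empty;
# an empty iterable raises statistics.StatisticsError.

The same pattern recurs throughout the examples below, usually applied to a list comprehension over some measured quantity.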

Example 1: googlemap_position

def googlemap_position(request, trackerID):
    pk = trackerID.split(":")[0]
    current_user = request.user
    horsetracker = get_object_or_404(HorseTracker, pk=pk)
    horsedata = HorseData.objects.filter(tracker=horsetracker).order_by('-date')
    if( horsetracker.user != current_user ):
        raise Http404
    
    # pull latitude, longitude and temperature columns from the queryset
    latitude = horsedata.values('latitude')
    longitude = horsedata.values('longitude')
    temperature = horsedata.values('temperature')

    
    latitude_list = [x['latitude'] for x in latitude]
    longitude_list = [x['longitude'] for x in longitude]
    temperature_list = [x['temperature'] for x in temperature]

    latCenter = statistics.median(latitude_list)#6216576
    lonCenter = statistics.median(longitude_list)#1717866

    #step = 3
    points=[ dict(latitude=str(latCenter),
                  longitude=str(lonCenter),
                  value2=str(0) )]
    
    counter = 0

    for i in range(len(latitude_list)):
        points.append( dict(latitude=str(latitude_list[i]), longitude=str(longitude_list[i])))
      
    return render(request, 'googlemap_position.html', {
                           'horsetracker': horsetracker, 
                           'horsedatas': horsedata, 
                           'points': points })
Author: hummelgard, Project: mundilfare, Lines: 35, Source: views.py

Example 2: parse_plaintext

def parse_plaintext(pattern):
    lines = repr(pattern).split(r'\r\n')

    x_vals = []
    y_vals = []
    board = new_array()

    cell_block_start = [i for i in range(len(lines)) if lines[i][0:1] != '!' and lines[i][0:2] != "'!"][0]

    for y in range(cell_block_start, len(lines)):
        for x in range(len(lines[y])):
            if lines[y][x] == "O":
                x_vals.append(x)
                y_vals.append(y)

    w_padding = (width - len(set(x_vals))) // 2
    h_padding = (height - len(set(y_vals))) // 2

    if floor(median(x_vals)) < w_padding:
        diff = (floor(median(x_vals)) * -1) + w_padding
        for x in range(len(x_vals.copy())):
            x_vals[x] += diff

    if floor(median(y_vals)) < h_padding:
        diff = (floor(median(y_vals)) * -1) + h_padding
        for y in range(len(y_vals.copy())):
            y_vals[y] += diff

    for i in range(len(x_vals)):
        board[y_vals[i]][x_vals[i]] = 1

    game(0, load_board=board, rescan=True)
Author: anden3, Project: Python, Lines: 32, Source: Game+of+life+-+2d+array.py

Example 3: printLEMranks

def printLEMranks(results_file,LEM_file,fname="LEMranks.txt",use_pldLap=False,plotresults=False,title="11D malaria 40 hr 90 TF, scaling factor 0.05"):
    # use_pldLap = False or 0 means use sqrt loss / root
    if use_pldLap:
        source,target,type_reg,lem_score = fileparsers.parseLEMfile(-1,LEM_file)
    else:
        source,target,type_reg,sqrtlossdroot_score = fileparsers.parseLEMfile_sqrtlossdroot(2,LEM_file)
    totaledges = float(len(source))

    results = json.load(open(results_file,'r'))

    try:
        network_spec_str = results["Network"]
        LEMranks = getLEMranks(network_spec_str,totaledges,source,target,type_reg)
        print "Mean: {}".format(statistics.mean(LEMranks))
        print "Median: {}".format(statistics.median(LEMranks))
        print "% stable FCs: {}".format(float(results["StableFCParameterCount"])/float(results["ParameterCount"]))
        print "% pattern matches: {}".format(float(results["StableFCMatchesParameterCount"])/float(results["ParameterCount"]))
    except:
        stats=[]
        for R in results:
            network_spec_str = R["Network"]
            LEMranks = getLEMranks(network_spec_str,totaledges,source,target,type_reg)
            stats.append( (statistics.mean(LEMranks),statistics.median(LEMranks),float(R["StableFCMatchesParameterCount"])/float(R["ParameterCount"])) )
        with open(fname,'w') as sf:
            for s in stats:
                sf.write('/'.join([str(t) for t in s])+"\n")
        if plotresults:
            plotLEMranks(stats,title,use_pldLap)
Author: breecummins, Project: NetworkBuilder, Lines: 28, Source: LEMranks.py

Example 4: write_result_avg_rand_planner

def write_result_avg_rand_planner(problem, params, simulations, file_obj_avg):

    # About this simulation
    to_write = problem.name
    to_write += "\t{}\t{}\t{}".format(params.planner.name, params.model.__name__, params.coc)

    # Results
    avg_cost = 1.0 * sum([s.get_total_cost() for s in simulations]) /len(simulations)
    to_write += '\t' + str(avg_cost)
    avg_obs = 1.0 * sum([s.total_observations() for s in simulations])/len(simulations)
    to_write += '\t' + str(avg_obs)
    avg_msg_sent = 1.0 * sum([s.total_messages_sent() for s in simulations])/len(simulations)
    to_write += '\t' + str(avg_msg_sent)
    avg_msg_void = 1.0 * sum([s.total_messages_voided() for s in simulations])/len(simulations)
    to_write += '\t' + str(avg_msg_void)
    avg_steps = 1.0 * sum([s.total_steps() for s in simulations])/len(simulations)
    to_write += '\t' + str(avg_steps)


    # Median Results
    median_cost = 1.0 * statistics.median([s.get_total_cost() for s in simulations])
    to_write += '\t' + str(median_cost)
    median_obs = 1.0 * statistics.median([s.total_observations() for s in simulations])
    to_write += '\t' + str(median_obs)
    median_msg_sent = 1.0 * statistics.median([s.total_messages_sent() for s in simulations])
    to_write += '\t' + str(median_msg_sent)
    median_msg_void = 1.0 * statistics.median([s.total_messages_voided() for s in simulations])
    to_write += '\t' + str(median_msg_void)
    median_steps = 1.0 * statistics.median([s.total_steps() for s in simulations])
    to_write += '\t' + str(median_steps)

    to_write += '\t' + str(len(simulations))

    file_obj_avg.write(to_write + '\n')
    file_obj_avg.flush()
Author: ksenglee, Project: meng_pyhop, Lines: 35, Source: run.py

Example 5: numerical_col_processing

def numerical_col_processing(numeric_column_names, dataset_df):

    for col_name in numeric_column_names:
        value_list = list(dataset_df[col_name])
        value_list.sort()
        max_value = dataset_df[col_name].max()
        min_value = dataset_df[col_name].min()
        first_median = statistics.median(value_list)

        break_index = 0
        temp_list1 = []
        for i in value_list:
            if i < first_median:
                temp_list1.append(i)
            elif i == first_median:
                temp_list1.append(i)
                break_index = value_list.index(i)
            else:
                break
        second_median = statistics.median(temp_list1)

        temp_list2 = []
        for j in range(break_index, len(value_list)):
            temp_list2.append(value_list[j])
        third_median = statistics.median(temp_list2)

        bins = [min_value-1, second_median, first_median, third_median, max_value+1]
        group_names = [str(min_value)+"-"+str(second_median), str(second_median)+"-"+str(first_median), str(first_median)+"-"+str(third_median), str(third_median)+"-"+str(max_value)]

        dataset_df[col_name] = pd.cut(dataset_df[col_name], bins, labels=group_names)
    return dataset_df
Author: rohitdandona, Project: Association-Rules, Lines: 31, Source: Dataset_Processor.py

Example 6: getSpeedAfterShock

    def getSpeedAfterShock(self, time = 20, startTime = 0, after = 25, absolute = False):
        "returns direction that the rat travelled 'after' points after shock"
        time = time * 60000
        start = self.findStart(startTime)

        shocks = [content[0] for content in self.data[start:] if
                  content[5] == 2 and content[1] < time]
        if shocks:
            selected = [shocks[0]]
            prev = shocks[0]
            for shock in shocks[1:]:
                if shock - prev > after:
                    selected.append(shock)
                prev = shock
        else:
            return "NA"

        angles = []
        cx, cy = self.centerX, self.centerY
        for shock in selected:
            x1, y1 = self.data[shock][7:9]
            if len(self.data) <= shock + after:
                break
            x2, y2 = self.data[shock + after][7:9]
            angle = ((degrees(atan2(x2 - cx, y2 - cy + 0.0000001)) -
                      degrees(atan2(x1 - cx, y1 - cy + 0.0000001)) + 180) % 360) - 180
            angles.append(angle)

        if absolute:
            return format(median([abs(angle) for angle in angles]), "0.2f")
        else:
            return format(median(angles), "0.2f")
Author: bahniks, Project: CM_Manager_0_3_5, Lines: 32, Source: cm.py

Example 7: find_medians

    def find_medians(self):
        """
        Find true_median, high_median, low_median.
        """
        # find median
        self.true_median = statistics.median(self.data_list)

        # find low_median, high_median
        # If input list is even

        if len(self.data_list) % 2 == 0:
            low_half = [self.data_list[val] for val in
                        range(len(self.data_list) // 2)]
            top_half = [self.data_list[val2] for val2 in
                        range(len(self.data_list) // 2,
                        len(self.data_list))]
            print(low_half, top_half)
            self.low_median = statistics.median(low_half)
            self.high_median = statistics.median(top_half)

        # If input list is odd
        elif len(self.data_list) % 2 != 0:
            low_half = [self.data_list[val] for val in
                        range(len(self.data_list) // 2)]
            top_half = [self.data_list[val] for val in
                        range(len(self.data_list) // 2 + 1,
                        len(self.data_list))]
            print(low_half, top_half, sep='\n')
            self.low_median = statistics.median(low_half)
            self.high_median = statistics.median(top_half)

        return self.true_median, self.low_median, self.high_median
Author: cjc77, Project: MathUtilities, Lines: 32, Source: box_and_whisker.py
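A brief aside, not part of the project above: the low_median and high_median attributes computed by find_medians are medians of the lower and upper halves of the data, i.e. quartile-style values for a box-and-whisker plot. They are different from the standard library's statistics.median_low and statistics.median_high, which operate on the whole data set and always return an actual data point rather than an interpolated midpoint. A minimal sketch of the difference:

import statistics

data = [1, 2, 3, 4]
print(statistics.median(data))        # 2.5 - interpolated between the two middle values
print(statistics.median_low(data))    # 2   - the smaller of the two middle values
print(statistics.median_high(data))   # 3   - the larger of the two middle values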

Example 8: get_pivot

def get_pivot(a: list):
    l = len(a)
    if l >= 9:
        return statistics.median([a[i * (l // 9)] for i in range(9)])
    if l >= 3:
        return statistics.median([a[i * (l // 3)] for i in range(3)])
    return a[0]
Author: sudodoki, Project: prj-algo, Lines: 7, Source: quickSort.py
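For context, here is a hedged usage sketch showing how a median-of-sample pivot like this is typically plugged into quicksort. The quicksort wrapper is illustrative only and is not part of the original project; it assumes get_pivot from Example 8 is defined in the same module:

def quicksort(a: list) -> list:
    # Illustrative three-way quicksort; get_pivot (Example 8) chooses the pivot
    # as the median of a small sample, which avoids worst-case behaviour on
    # already-sorted input.
    if len(a) <= 1:
        return a
    pivot = get_pivot(a)
    less = [x for x in a if x < pivot]
    equal = [x for x in a if x == pivot]
    greater = [x for x in a if x > pivot]
    return quicksort(less) + equal + quicksort(greater)

print(quicksort([5, 3, 8, 1, 9, 2, 7]))   # [1, 2, 3, 5, 7, 8, 9]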

Example 9: parse_life_1_06

def parse_life_1_06(pattern):
    lines = repr(pattern).split(r'\r\n')

    x_vals = []
    y_vals = []
    board = new_array()

    for line in lines[1::]:
        if len(line) > 1:
            x_vals.append(int(line.split()[0]))
            y_vals.append(int(line.split()[1]))

    w_padding = (width - len(set(x_vals))) // 2
    h_padding = (height - len(set(y_vals))) // 2

    if floor(median(x_vals)) < w_padding:
        diff = (floor(median(x_vals)) * -1) + w_padding
        for x in range(len(x_vals.copy())):
            x_vals[x] += diff

    if floor(median(y_vals)) < h_padding:
        diff = (floor(median(y_vals)) * -1) + h_padding
        for y in range(len(y_vals.copy())):
            y_vals[y] += diff

    for i in range(len(x_vals)):
        board[y_vals[i]][x_vals[i]] = 1

    game(0, load_board=board, rescan=True)
Author: anden3, Project: Python, Lines: 29, Source: Game+of+life+-+2d+array.py

Example 10: indexStats_by_year

def indexStats_by_year(year):
    area_repo = AreaRepository(recreate_db=False, config=sqlite_config)
    indicator_repo = IndicatorRepository(recreate_db=False, config=sqlite_config)
    observation_repo = ObservationRepository(recreate_db=False, area_repo=area_repo, indicator_repo=indicator_repo,
                                             config=sqlite_config)
    index_indicator = indicator_repo.find_indicators_index()[0]
    observations = observation_repo.find_tree_observations(index_indicator.indicator, 'ALL', year, 'INDICATOR')
    areas = area_repo.find_countries(order="iso3")

    data = {'year': year, 'stats': OrderedDict()}

    for region in area_repo.find_regions():
        per_region_obs = [o for o in observations if o.value is not None
                          and o.area.iso3 in [c.iso3 for c in region.countries]]
        for indicator_code in sorted(set([o.indicator.indicator for o in observations])):
            per_indicator_obs = [o.value for o in per_region_obs if
                                 o.indicator.indicator == indicator_code and o.value is not None]
            if region.iso3 not in data['stats']:
                data['stats'][region.iso3] = OrderedDict()
            data['stats'][region.iso3][indicator_code] = OrderedDict()
            data['stats'][region.iso3][indicator_code]['mean'] = statistics.mean(per_indicator_obs)
            data['stats'][region.iso3][indicator_code]['median'] = statistics.median(per_indicator_obs)

    data['stats'][':::'] = OrderedDict()
    for indicator_code in sorted(set([o.indicator.indicator for o in observations])):
        per_indicator_obs = [o.value for o in observations if
                             o.indicator.indicator == indicator_code and o.value is not None]
        data['stats'][':::'][indicator_code] = OrderedDict()
        data['stats'][':::'][indicator_code]['mean'] = statistics.mean(per_indicator_obs)
        data['stats'][':::'][indicator_code]['median'] = statistics.median(per_indicator_obs)

    return json_response_ok(request, data)
Author: TheWebFoundation, Project: odb-parser, Lines: 32, Source: api.py

Example 11: addDataToPlt

def addDataToPlt(fig, ax, dates, diff, c = 'c', label="raw", isMultiple=True):
    assert len(dates) == len(diff), "Plot and data are of different length"
    label1 = "average of 3"
    label2 = "average of 7"

    med3 = [i for i in diff]
    med7 = [i for i in diff]
    for i in range(3, len(diff) - 4):
        if i > 2 and i < len(diff) - 4:
            med7[i] = stats.median(diff[i-3:i+3])
        if i > 0 and i < len(diff) - 2:
            med3[i] = stats.median(diff[i-1:i+2])
        
    marker = "o"
    if len(diff) > 200:
        marker = "."
    if not isMultiple:
        if len(diff) > 1 and stats.stdev(diff) > 0.1:
            logger.error("Why do you have a high stdev?" + str(stats.stdev(diff)))
            marker = "x"

    ax.plot_date(dates, diff, c, xdate=True, marker = marker, linestyle="", label=label)
    if isMultiple:
        ax.plot_date(dates, med3, 'b', xdate=True, marker = ".", linestyle="", label=label1)
        ax.plot_date(dates, med7, 'r', xdate=True, marker = ".", linestyle="", label=label2)
    ax.xaxis.set_major_locator(matplotlib.dates.HourLocator())
    ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter("%H:00"))
    ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator())
    ax.autoscale_view()
    fig.autofmt_xdate()
    ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
Author: nibrivia, Project: atlas, Lines: 31, Source: graph.py

Example 12: resetPass

def resetPass(customCommand,test=False):
	from random import sample as randomize
	from random import random
	from os.path import exists
	# Opens the Adj, Adv, and Noun files as arrays
	av = open(sys.path[0]+"/Adv").read().splitlines()
	aj = open(sys.path[0]+"/Adj").read().splitlines()
	nn = open(sys.path[0]+"/Noun").read().splitlines()
	# Just for fun, some statistics!
	totalCombos = len(av)*len(aj)*len(nn)
	combosFormatted = "{:,}".format(totalCombos)
	avLengths=[]
	for item in av:
		avLengths.append(len(item))
	ajLengths=[]
	for item in aj:
		ajLengths.append(len(item))
	nnLengths=[]
	for item in nn:
		nnLengths.append(len(item))
	from statistics import mean,median,mode
	print("-"*25+"\n"+
		  "Total adverbs: "+str(len(av))+"\n"+
		  "Total adjectives: "+str(len(aj))+"\n"+
		  "Total nouns: "+str(len(nn))+"\n"+
		  "Total possible combinations: "+combosFormatted+" (not factoring in numbers)\n"+
		  "Shortest possible passphrase length: "+str(min(avLengths)+min(ajLengths)+min(nnLengths))+"\n"+
		  "Longest possible passphrase length: "+str(max(avLengths)+max(ajLengths)+max(nnLengths)+5)+"\n"+
		  "Mean passphrase length: "+str(int(mean(avLengths)+mean(ajLengths)+mean(nnLengths)+4))+"\n"+
		  "Median passphrase length: "+str(int(median(avLengths)+median(ajLengths)+median(nnLengths))+4)+"\n"+
		  "Mode passphrase length: "+str(int(mode(avLengths)+mode(ajLengths)+mode(nnLengths))+4)+"\n"+
		  "-"*25)
	# Randomize the order of the arrays
	av = randomize(av,len(av))
	aj = randomize(aj,len(aj))
	nn = randomize(nn,len(nn))
	# Pick a random word from each randomized array
	newAdverb = av[int(random()*len(av))].capitalize()
	newAdjective = aj[int(random()*len(aj))].capitalize()
	newNoun = nn[int(random()*len(nn))].capitalize()
	# Possibly add a random number from 1 to 10,000
	if maybeNumber():
		from math import ceil
		number = str(ceil(random()*10000))
	else:
		number = ''
	# Assemble the passphrase
	newPassphrase = number+newAdverb+newAdjective+newNoun
	#################################################################### Needs attention
	print("The new passphrase will be: "+newPassphrase)
	print("Total entropy: ~"+str(int(entropy(newPassphrase))))
	if customCommand == ' {PASSPHRASE}':
		print("Password display command not found. Aborting.")
		exit()
	if not test:
		import RouterPasswording
		RouterPasswording.newPassphrase(newPassphrase)
	from os import system as execute
	execute(customCommand.replace("{password}",newPassphrase).replace("{passphrase}",newPassphrase))
Author: WolfgangAxel, Project: Random-Projects, Lines: 59, Source: WifiRPG.py

Example 13: stats

def stats(responses, last=100):
    def t(s):
        return ("\t" + s).ljust(20)

    def f(s):
        return "{:.2f}ms".format(s * 1000)

    values = responses
    if len(values) < 3:
        print("\tNot enough data")
        return
    if len(values) > last:
        print(t("COUNT"), len(values), "(but only using the last {})".format(last))
        values = values[-last:]
    else:
        print(t("COUNT"), len(values))

    if any([x["cache"] for x in values]):
        hits = len([x for x in values if x["cache"] == "HIT"])
        misses = len([x for x in values if x["cache"] == "MISS"])
        print(t("HIT RATIO"), "{:.1f}%".format(100 * hits / (hits + misses)))
        print(t("AVERAGE (all)"), f(statistics.mean([x["took"] for x in values])))
        print(t("MEDIAN (all)"), f(statistics.median([x["took"] for x in values])))
        try:
            print(
                t("AVERAGE (misses)"),
                f(statistics.mean([x["took"] for x in values if x["cache"] == "MISS"])),
            )
            print(
                t("MEDIAN (misses)"),
                f(
                    statistics.median(
                        [x["took"] for x in values if x["cache"] == "MISS"]
                    )
                ),
            )
            print(
                t("AVERAGE (hits)"),
                f(statistics.mean([x["took"] for x in values if x["cache"] == "HIT"])),
            )
            print(
                t("MEDIAN (hits)"),
                f(
                    statistics.median(
                        [x["took"] for x in values if x["cache"] == "HIT"]
                    )
                ),
            )
        except statistics.StatisticsError as exc:
            print(exc)
    else:
        hits = len([x for x in values if x["link"]])
        misses = len([x for x in values if not x["link"]])
        print(t("HIT RATIO"), "{:.1f}%".format(100 * hits / (hits + misses)))
        print(t("AVERAGE"), f(statistics.mean([x["took"] for x in values])))
        print(t("MEDIAN"), f(statistics.median([x["took"] for x in values])))

    with open("cdn-crawler-stats.json", "w") as f:
        json.dump(responses, f, indent=3)
Author: peterbe, Project: django-peterbecom, Lines: 59, Source: cdn-crawler.py

Example 14: indexEvolution_by_year

def indexEvolution_by_year(year):
    area_repo = AreaRepository(recreate_db=False, config=sqlite_config)
    indicator_repo = IndicatorRepository(recreate_db=False, config=sqlite_config)
    observation_repo = ObservationRepository(recreate_db=False, area_repo=area_repo, indicator_repo=indicator_repo,
                                             config=sqlite_config)
    index_indicator = indicator_repo.find_indicators_index()[0]
    observations = observation_repo.find_tree_observations(index_indicator.indicator, 'ALL', None, 'COMPONENT')
    areas = area_repo.find_countries(order="iso3")

    data = {'year': year, 'areas': OrderedDict(), 'stats': OrderedDict()}
    for area in sorted(areas, key=attrgetter('iso3')):
        for obs in sorted([obs for obs in observations if obs.area.iso3 == area.iso3],
                          key=lambda o: o.indicator.indicator):
            if area.iso3 not in data['areas']:
                data['areas'][area.iso3] = OrderedDict()
            if obs.indicator.indicator not in data['areas'][area.iso3]:
                data['areas'][area.iso3][obs.indicator.indicator] = OrderedDict()

            if obs.year == int(year):
                data['areas'][area.iso3][obs.indicator.indicator]['value'] = obs.value
                if obs.indicator.type == 'INDEX':
                    data['areas'][area.iso3][obs.indicator.indicator]['rank'] = obs.rank
                    data['areas'][area.iso3][obs.indicator.indicator]['rank_change'] = obs.rank_change

            if obs.indicator.type == 'INDEX':
                if 'score_evolution' not in data['areas'][area.iso3][obs.indicator.indicator]:
                    data['areas'][area.iso3][obs.indicator.indicator]['score_evolution'] = []
                    # data['areas'][area.iso3][obs.indicator.indicator]['value_evolution'] = []
                data['areas'][area.iso3][obs.indicator.indicator]['score_evolution'].append(
                    {'year': obs.year, 'value': obs.value})
                # data['areas'][area.iso3][obs.indicator.indicator]['value_evolution'].append(
                #     {'year': obs.year, 'value': obs.value})

    # Clean areas without data that year
    for area in list(data['areas'].keys()):
        if 'value' not in data['areas'][area]['ODB']:
            del data['areas'][area]

    for indicator_code in sorted(set([o.indicator.indicator for o in observations])):
        per_indicator_obs = [o.value for o in observations if
                             o.indicator.indicator == indicator_code and o.value is not None]
        if indicator_code not in data['stats']:
            data['stats'][indicator_code] = OrderedDict()
        data['stats'][indicator_code][':::'] = OrderedDict()
        data['stats'][indicator_code][':::']['mean'] = statistics.mean(per_indicator_obs)
        data['stats'][indicator_code][':::']['median'] = statistics.median(per_indicator_obs)
        for region in area_repo.find_regions():
            per_region_obs = [o.value for o in observations if
                              o.indicator.indicator == indicator_code and o.value is not None and o.area.iso3 in [c.iso3
                                                                                                                  for c
                                                                                                                  in
                                                                                                                  region.countries]]
            data['stats'][indicator_code][region.iso3] = OrderedDict()
            data['stats'][indicator_code][region.iso3]['mean'] = statistics.mean(per_region_obs)
            data['stats'][indicator_code][region.iso3]['median'] = statistics.median(per_region_obs)

    return json_response_ok(request, data)
Author: TheWebFoundation, Project: odb-parser, Lines: 57, Source: api.py

Example 15: run

def run(parser, args):
	if args.full_tsv:
		files = 0
		basecalled_files = 0
		stats = defaultdict(list)
		for fast5 in Fast5File.Fast5FileSet(args.files):
			files += 1
			fas = fast5.get_fastas_dict()
			if len(fas) > 0:
				basecalled_files += 1
			for category, fa in fas.items():
				if fa is not None:
					stats[category].append(len(fa.seq))
					if category == 'twodirections':
						if fast5.is_high_quality():
							stats['2D_hq'].append(len(fa.seq))

			fast5.close()

		print "files\ttotal reads\t%d" % (files)
		print "files\ttotal base-called reads\t%d" % (basecalled_files)
		for category in sorted(stats.keys()):
			sizes = stats[category]

			if len(sizes) > 0:
				print "%s\ttotal reads\t%d" % (category, len(sizes))
				print "%s\ttotal base pairs\t%d" % (category, sum(sizes))
				print "%s\tmean\t%.2f" % (category, stat.mean(sizes))
				print "%s\tmedian\t%d" % (category, stat.median(sizes))
				print "%s\tmin\t%d" % (category, min(sizes))
				print "%s\tmax\t%d" % (category, max(sizes))
				nxvalues = stat.NX(sizes, [25,50,75])
				print "%s\tN25\t%d" % (category, nxvalues[25])
				print "%s\tN50\t%d" % (category, nxvalues[50])
				print "%s\tN75\t%d" % (category, nxvalues[75])
			else:
				logger.warning("No valid sequences observed.\n")
	else:
		sizes = []
		for fast5 in Fast5File.Fast5FileSet(args.files, group=args.group):
			fas = fast5.get_fastas(args.type)
			sizes.extend([len(fa.seq) for fa in fas if fa is not None])
			fast5.close()

		if len(sizes) > 0:
			print "total reads\t%d" % (len(sizes))
			print "total base pairs\t%d" % (sum(sizes))
			print "mean\t%.2f" % (stat.mean(sizes))
			print "median\t%d" % (stat.median(sizes))
			print "min\t%d" % (min(sizes))
			print "max\t%d" % (max(sizes))
                        nxvalues = stat.NX(sizes, [25,50,75])
                        print "N25\t%d" % (nxvalues[25])
                        print "N50\t%d" % (nxvalues[50])
                        print "N75\t%d" % (nxvalues[75])
		else:
			logger.warning("No valid sequences observed.\n")
Author: EQt, Project: poretools, Lines: 57, Source: stats.py


Note: The statistics.median examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult each project's license before redistributing or reusing the code; do not republish without permission.