

Python random.sample Function Code Examples

This article collects typical usage examples of Python's random.sample function. If you are wondering what the sample function does, how to call it, or what it looks like in real code, the hand-picked examples below may help.


The sections below present 15 code examples of the sample function, ordered by popularity by default.
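
Before the project examples, here is a minimal, self-contained sketch of what random.sample does: it returns a new list of k distinct elements drawn without replacement from a population, and leaves the population itself unmodified.

import random

deck = list(range(1, 53))      # a population of 52 "cards"
hand = random.sample(deck, 5)  # 5 distinct cards, drawn without replacement
print(hand)                    # e.g. [17, 3, 48, 22, 9]
print(len(deck))               # still 52: sample() does not modify the population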

Example 1: Fitch

def Fitch(rooted_tree):
    # Fitch's small-parsimony algorithm: infer a state for each internal node
    
    # traverse Tree in post-order
    for node in rooted_tree.traverse('postorder'):
        if not node.is_leaf():
            children = node.get_children()
            intersect = (children[0].data['split']).intersection(children[1].data['split'])
            if len(intersect) == 0:
                node.data['split'] = (children[0].data['split']).union(children[1].data['split'])
            else:
                node.data['split'] = intersect
    # traverse top-down 
    
    for node in rooted_tree.traverse('levelorder'):
        if node.is_root(): # for the root 
            # if the root has 2 candidates, randomly choose 1, and get the numeric value
            node.data['split'] = (random.sample(node.data['split'],1))[0] 
        else:
            # for children node, first check the data from the ancestor
            ancestors = node.get_ancestors() # get the list of ancestor
            data = ancestors[0].data['split'] # get the data from the parent
            if data in node.data['split']: # check whether the parent's state is among this node's candidates
                node.data['split'] = data
            else:
                node.data['split'] = (random.sample(node.data['split'],1))[0]
    return rooted_tree
Developer: nguyenngochuy91, Project: Ancestral-Blocks-Reconstruction, Lines: 27, Source file: findParent_global.py
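
A note on this and several later examples: they call random.sample directly on a set (node.data['split'] here). That worked in older Pythons, but sampling from a set was deprecated in Python 3.9 and removed in 3.11, where random.sample requires a sequence. A hedged, forward-compatible sketch of picking one element from a set:

import random

candidates = {'A', 'C', 'G'}                 # made-up candidate states
# random.sample(candidates, 1)[0] raises TypeError on Python 3.11+
state = random.choice(sorted(candidates))    # convert to a sequence first
# or, without the ordering cost: random.choice(tuple(candidates))
print(state)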

Example 2: convert

def convert(snippet, phrase):
	#snippet = question, phrase = answer
	class_names = [w.capitalize() for w in random.sample(WORDS, snippet.count("%%%"))]
	other_names = random.sample(WORDS, snippet.count("***"))
	results = []
	param_names = []
	
	for i in range(0, snippet.count("@@@")):
		param_count = random.randint(1,3)
		param_names.append(', '.join(random.sample(WORDS, param_count)))
	
	for sentence in snippet, phrase:
		# copy sentence in result
		result = sentence[:]
		
		# fake class names
		for word in class_names:
			result = result.replace("%%%", word, 1)
		
		# fake other names
		for word in other_names:
			result = result.replace("***", word, 1)
		
		# fake parameter lists
		for word in param_names:
			result = result.replace("@@@", word, 1)
			
		results.append(result)
	
	return results
Developer: Fleid, Project: LearnPythonTheHardWay, Lines: 30, Source file: oop_test.py
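
A hedged usage sketch for the function above (the WORDS list and the snippet/phrase templates below are made up for illustration; in the original exercise WORDS is loaded from a word file): convert fills the %%% placeholders with capitalized class names, *** with function names, and @@@ with comma-separated parameter lists.

import random

WORDS = ['apple', 'banana', 'cherry', 'fig', 'grape', 'kiwi', 'lemon']

snippet = "class %%%(object):\n\tdef ***(self, @@@)"
phrase = "class %%% has-a function named *** that takes self and @@@ parameters."

question, answer = convert(snippet, phrase)
print(question)
print(answer)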

Example 3: convert

def convert(snippet, phrase):
    class_names = [w.capitalize() for w in
               random.sample(WORDS, snippet.count("%%%"))]
    other_names = random.sample(WORDS, snippet.count("***"))
    results = []
    param_names = []

    for i in range(0, snippet.count("@@@")):
        param_count = random.randint(1,3)
        param_names.append(', '.join(random.sample(WORDS, param_count)))

    for sentence in snippet, phrase:
        result = sentence[:]

        for word in class_names:
            result = result.replace("%%%", word, 1)

        for word in other_names:
            result = result.replace("***", word, 1)

        for word in param_names:
            result = result.replace("@@@", word, 1)

        results.append(result)

    return results
Developer: swheatley, Project: LPTHW, Lines: 26, Source file: ex41.1.py

Example 4: disturb

def disturb(g,cl):
    ng=g.copy()
    ng.remove_edges_from(ng.edges())
    for i in range(len(cl)-1): # keep the edges between clusters unchanged
        j=i+1
        while j<len(cl):
            for x in itertools.product(cl[i],cl[j]): # Cartesian product of pairs between clusters cl[i] and cl[j]
                if g.has_edge(x[0],x[1]):
                    ng.add_edge(x[0],x[1])
            j+=1
    sub=[]
    for i in range(len(cl)): # randomly rewire the edges inside each cluster
        sub=g.subgraph(cl[i]) # note: on networkx >= 2.0 subgraph() returns a read-only view; call .copy() on it before removing edges
        edges=sub.edges()
        numOfe=sub.number_of_edges()
        sub.remove_edges_from(edges)
        setE=[]
        tupleE=[]
        for k in range(numOfe): # generate numOfe random edges
            l=set(random.sample(cl[i],2)) # pick two random nodes of cl[i] as a set; sets are unordered, so checking whether this pair was already generated is easy
            while l in setE:
                l=set(random.sample(cl[i],2))
            setE.append(l)
        
        for item in setE: # convert each set back to a tuple so it can be added as an edge
            tupleE.append(tuple(item))
        ng.add_edges_from(tupleE)
    return ng
Developer: liupenggl, Project: dpr, Lines: 28, Source file: local_perturbation.py

Example 5: generateRandomInices

def generateRandomInices(r, c, p, t):
	l = []
	while len(l) < p:
		randomIndex = (random.sample(range(r),1)[0], random.sample(range(c),1)[0])
		if randomIndex not in t and randomIndex not in l:
			l += [randomIndex]
	return l
Developer: Ahmadposten, Project: No-Propopagation-Networks, Lines: 7, Source file: characteRecognition-noprop.py
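
Since each call above only needs one random integer, random.sample(range(r), 1)[0] is equivalent to random.randrange(r). A hedged sketch of the same index generation written with that shortcut (same hypothetical parameters: r rows, c columns, p indices to produce, t a collection of indices to avoid):

import random

def generate_random_indices(r, c, p, t):
    # behaviourally equivalent rewrite of generateRandomInices above
    indices = []
    while len(indices) < p:
        candidate = (random.randrange(r), random.randrange(c))
        if candidate not in t and candidate not in indices:
            indices.append(candidate)
    return indices

print(generate_random_indices(5, 5, 3, [(0, 0)]))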

Example 6: doge

def doge(phenny,input):
    text = input.groups()
    if not text[1]:
        phenny.reply('  no word       such fail             wow               bad say')
        return
    syn = get_from_thesaurus(text[1])
    if not syn:
        phenny.reply('  no word       such fail             wow               bad say')
        return
    syn = [(x.split())[0] for x in syn]
    syn = set(syn)
    n = min([random.randint(3,6), len(syn)])
    dog = random.sample(shibe,n)
    ss = random.sample(syn,n)
    out = []
    wow = 0
    for i in range(0,n):
        sp = [' ' for j in range(0,random.randint(5,20))]
        if not wow and random.randint(0,1):
            out.append(''.join(sp)+'wow')
            wow = 1
            i = i - 1  # has no effect: the for loop rebinds i on the next pass, so the inserted 'wow' consumes one of the n slots
        else:
            out.append(''.join(sp)+dog[i]+ss[i])
    phenny.reply(' '.join(out))
Developer: sirpercival, Project: eiko, Lines: 25, Source file: why.py

Example 7: split_gtf

def split_gtf(gtf, sample_size=None, out_dir=None):
    """
    split a GTF file into two equal parts, randomly selecting genes.
    sample_size will select up to sample_size genes in total
    """
    if out_dir:
        part1_fn = os.path.basename(os.path.splitext(gtf)[0]) + ".part1.gtf"
        part2_fn = os.path.basename(os.path.splitext(gtf)[0]) + ".part2.gtf"
        part1 = os.path.join(out_dir, part1_fn)
        part2 = os.path.join(out_dir, part2_fn)
        if file_exists(part1) and file_exists(part2):
            return part1, part2
    else:
        part1 = tempfile.NamedTemporaryFile(delete=False, suffix=".part1.gtf").name
        part2 = tempfile.NamedTemporaryFile(delete=False, suffix=".part2.gtf").name

    db = get_gtf_db(gtf)
    gene_ids = set([x['gene_id'][0] for x in db.all_features()])
    if not sample_size or (sample_size and sample_size > len(gene_ids)):
        sample_size = len(gene_ids)
    gene_ids = set(random.sample(gene_ids, sample_size))
    part1_ids = set(random.sample(gene_ids, sample_size // 2))  # floor division: random.sample needs an integer count on Python 3
    part2_ids = gene_ids.difference(part1_ids)
    with open(part1, "w") as part1_handle:
        for gene in part1_ids:
            for feature in db.children(gene):
                part1_handle.write(str(feature) + "\n")
    with open(part2, "w") as part2_handle:
        for gene in part2_ids:
            for feature in db.children(gene):
                part2_handle.write(str(feature) + "\n")
    return part1, part2
Developer: rndw, Project: bcbio-nextgen, Lines: 32, Source file: gtf.py
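
The core of the split, taking a random half of the gene IDs and using set difference for the rest, can be shown in isolation. A hedged sketch with made-up IDs (the real function obtains them from the GTF database returned by get_gtf_db); sorted() turns the set into a list so the call also works on Python 3.11+:

import random

gene_ids = {"GENE%03d" % i for i in range(10)}           # made-up gene IDs
half = len(gene_ids) // 2
part1_ids = set(random.sample(sorted(gene_ids), half))
part2_ids = gene_ids.difference(part1_ids)
print(sorted(part1_ids))
print(sorted(part2_ids))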

Example 8: ReWeightedRW

def ReWeightedRW(G, incomes, sample_size):
	# G is assumed to be an adjacency mapping: node -> list of neighbours
	node = random.sample(list(G), 1)[0]  # list(G) lists the nodes, so sampling also works on a dict under Python 3

	sampling = list()
	node_degrees = list()
	node_incomes = list()

	for i in range(sample_size):
		sampling.append(node)
		node_degrees.append(len(G[node]))
		node_incomes.append(incomes[node])

		# select a random neighbor of node
		node = random.sample(G.get(node), 1)[0]

	# the normal random walk. biased, without correction.
	biased_average_degrees = numpy.average(node_degrees)
	biased_average_incomes = numpy.average(node_incomes)

	# correcting the random walk sampling with inversed-node-degree prob
	normalization_constant = 0.0
	for x in node_degrees:
		normalization_constant += (1.0 / x)

	prob = list()
	for x in node_degrees:
		temp = (1.0 / x) / normalization_constant
		prob.append(temp)

	reweighted_average_degrees = sum(i*j for i, j in zip(prob,node_degrees))
	reweighted_average_incomes = sum(i*j for i, j in zip(prob,node_incomes))
	
	return [biased_average_degrees, reweighted_average_degrees, biased_average_incomes, reweighted_average_incomes]
Developer: ahma88, Project: Metropolis-Hastings-random-walk-and-re-weighted-random-walk, Lines: 33, Source file: randomwalk.py
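
Why the re-weighting works: a simple random walk on an undirected graph visits a node with probability proportional to its degree, so averaging the raw samples over-represents high-degree nodes; weighting each sample by a normalized 1/degree corrects that bias. A minimal, hypothetical usage sketch (toy_graph and toy_incomes are made-up inputs; the function also assumes import random and import numpy at module level):

toy_graph = {0: [1, 2], 1: [0, 2, 3], 2: [0, 1], 3: [1]}   # node -> neighbours
toy_incomes = {0: 30000, 1: 45000, 2: 28000, 3: 52000}

biased_deg, corrected_deg, biased_inc, corrected_inc = ReWeightedRW(toy_graph, toy_incomes, 1000)
print(corrected_deg, corrected_inc)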

Example 9: convert

def convert(snippet, phrase):
    class_names=[w.capitalize() for w in random.sample(words,snippet.count("%%%"))]
    #print class_names
    other_names=random.sample(words,snippet.count("***"))
    #print other_names
    results=[]
    param_names=[]
    
    for i in range(0,snippet.count("@@@")):
        param_count=random.randint(1,3)
        param_names.append(','.join(random.sample(words,param_count)))
        #print param_names
        
    for sentence in [snippet, phrase]:
        #print sentence
        result=sentence[:]
        #print result
    #result=[snippet,phrase]     
        for word in class_names:
            result=result.replace('%%%',word,1)
            #print word
            #print result+"a class names"
        for word in other_names :
            result=result.replace("***",word,1)
            #print word
            #print result+"a other names"
        for word in param_names:
            result=result.replace("@@@",word,1)
            #print word
            #print result+"a param names"

        results.append(result)
    #print results
    #print result
    return results
Developer: Ravichandra-C, Project: Euler, Lines: 35, Source file: word.py

Example 10: __init__

    def __init__(self, n=1000, k=10, p=0.02947368):
        self.n = n
        self.k = k
        self.p = p
        # note: written against the networkx 1.x API (set_node_attributes(G, name, value) and G.node[...]);
        # these calls changed in networkx 2.x
        self.ws = nx.watts_strogatz_graph(self.n, self.k, self.p, seed='nsll')
        nx.set_node_attributes(self.ws, 'SIR', 'S')
        self.clustering = nx.clustering(self.ws)
        self.betweenness = nx.betweenness_centrality(self.ws)
        p_r_0 = 0.001
        r_0 = int(self.n * p_r_0)
        if r_0 < 1:
            r_0 = 1
        random.seed('nsll')
        self.r = random.sample(self.ws.nodes(), r_0)

        i_0 = 4
        if i_0 < r_0:
            i_0 += 1
        random.seed('nsll')
        self.infected = random.sample(self.ws.nodes(), i_0)
        for n in self.infected:
            self.ws.node[n]['SIR'] = 'I'
        for n in self.r:
            self.ws.node[n]['SIR'] = 'R'
        self.s = self.n - len(self.infected) - len(self.r)
        print(self.r)
        print(self.infected)
Developer: nasyxx, Project: CUFE_Math_modeling_Final, Lines: 27, Source file: p03.py

Example 11: pick_one

    def pick_one(table):
        """Return one random element of a sequence or dict"""

        try:
            return table[random.sample(table.keys(), 1)[0]]
        except AttributeError:
            return random.sample(table, 1)[0]
Developer: ajs, Project: tools, Lines: 7, Source file: pathfindertreasure.py
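
A hedged usage note: table.keys() (a dict view) and plain sets stopped being accepted by random.sample in Python 3.11. A forward-compatible variant of the helper, plus example calls (the inputs are made up):

import random

def pick_one_compat(table):
    """Hypothetical Python 3.11-friendly variant of pick_one."""
    try:
        return table[random.choice(list(table.keys()))]
    except AttributeError:
        return random.choice(list(table))

print(pick_one_compat({'gold': 100, 'gems': 3}))        # a random value from a dict
print(pick_one_compat(['sword', 'shield', 'potion']))   # a random element from a list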

Example 12: createCrossValidationFiles

def createCrossValidationFiles(n):
    # Make copies of the original positive and negative review files
    copyFile('hotelPosT-train.txt', 'postrain-reviews.txt')
    copyFile('hoteNegT-train.txt', 'negtrain-reviews.txt')
    
    # Read the positive and negative reviews into two separate lists
    posReviews = readFileIntoList('postrain-reviews.txt')
    negReviews = readFileIntoList('negtrain-reviews.txt')    
    
    # Use n random reviews for the positive review test set
    # Use the remaining reviews for the positive training set
    testPosReviews = random.sample(posReviews, n)
    trainingPosReviews = [review for review in posReviews if review not in testPosReviews]
    
    # Use n random reviews for the negative review test set
    # Use the remaining reviews for the negative training set
    testNegReviews = random.sample(negReviews, n)
    trainingNegReviews = [review for review in negReviews if review not in testNegReviews]
    
    # Write the test reviews to the test set file
    writeListToFile('test-reviews.txt', testPosReviews, False)
    writeListToFile('test-reviews.txt', testNegReviews, True)
    
    # Write the training reviews to the test set file
    writeListToFile('postrain-reviews.txt', trainingPosReviews, False)
    writeListToFile('negtrain-reviews.txt', trainingNegReviews, False) 
Developer: gddmkr42171822, Project: csci5832, Lines: 26, Source file: werthman-robert-assgn2.py
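
The helper functions used above (copyFile, readFileIntoList, writeListToFile) are defined elsewhere in the assignment. Minimal sketches of what they might look like, written as assumptions rather than the author's actual implementations:

import shutil

def copyFile(src, dst):
    # copy the original review file so the training file can be rewritten safely
    shutil.copyfile(src, dst)

def readFileIntoList(path):
    # one review per line
    with open(path, encoding='utf-8') as f:
        return [line.rstrip('\n') for line in f if line.strip()]

def writeListToFile(path, reviews, append):
    # append=True adds to an existing file; otherwise the file is overwritten
    mode = 'a' if append else 'w'
    with open(path, mode, encoding='utf-8') as f:
        for review in reviews:
            f.write(review + '\n')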

Example 13: convert

def convert(snippet, phrase):
    '''
    Fill the %%%, ***, and @@@ placeholders in a snippet/phrase pair.

    :param snippet: the question template
    :param phrase: the answer (explanation) template
    :return: a list [question, answer] with the placeholders filled in
    '''
    class_names = [w.capitalize() for w in
                   random.sample(WORDS, snippet.count("%%%"))]
    other_names = random.sample(WORDS, snippet.count("***"))
    results = []
    param_names = []

    for i in range(0, snippet.count("@@@")):
        param_count = random.randint(1,3)
        param_names.append(', '.join(random.sample(WORDS, param_count)))

    for sentence in snippet, phrase:
        result = sentence[:]# result is a list. a copy of sentence

        # fake class names
        for word in class_names:
            result = result.replace("%%%", word, 1) #.replace replaces a string.

        # fake other names
        for word in other_names:
            result = result.replace("***", word, 1)

        # fake parameter lists
        for word in param_names:
            result = result.replace("@@@", word, 1)

        results.append(result)

    return results
Developer: runiat, Project: grade12, Lines: 35, Source file: oop_test.py

Example 14: change_to_Random_Dir

    def change_to_Random_Dir(self):
        self.logger.debug("Current DIR: %s" % self.client.pwd())
        level1_dirs = []
        for file_name in self.client.nlst():
            try:
                self.client.cwd('/' + file_name)
                level1_dirs.append(file_name)
                self.logger.debug("Directory [L-1]: %s" % file_name)
            except ftplib.error_perm as detail:
                self.logger.debug("It's probably not a directory [L-1]: %s : %s" % (file_name, detail))
        self.logger.debug("Number of L1-Dirs: %i" % len(level1_dirs))

        randomly_sampled_L1_dir = random.sample(level1_dirs, 1)[0]
        self.client.cwd('/' + randomly_sampled_L1_dir)
        self.logger.debug("Current Level-1 DIR selected: %s" % self.client.pwd())

        level2_dirs = []
        for file_name_l2 in self.client.nlst():
            try:
                self.client.cwd('/' + randomly_sampled_L1_dir + '/' +file_name_l2)
                level2_dirs.append(file_name_l2)
                self.logger.debug("Directory [L-2]: %s" % file_name_l2)
            except ftplib.error_perm as detail:
                self.logger.debug("It's probably not a directory [L-2]: %s : %s" % (file_name_l2, detail))
        self.logger.debug("Number of L2-Dirs: %i" % len(level2_dirs))

        rand_L2_dir = random.sample(level2_dirs, 1)[0]
        self.client.cwd('/' + randomly_sampled_L1_dir + '/' + rand_L2_dir)
        self.logger.debug("Current Level-2 DIR selected: %s" % self.client.pwd())
        return self.client.pwd() + '/'
Developer: irvinhomem, Project: LivePacketCap, Lines: 30, Source file: multiSerialFtpCap.py

Example 15: setUp

    def setUp(self):
        IMP.test.TestCase.setUp(self)
        self.numbers = random.sample(range(0, 1000), 100)
        self.keys = random.sample(range(0, 1000), 100)
        self.dictionary = dict()
        for key, val in zip(self.keys, self.numbers):
            self.dictionary[key] = val
Developer: AljGaber, Project: imp, Lines: 7, Source file: test_argminmax.py


Note: The random.sample examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code, and do not reproduce this material without permission.