

Python Random.shuffle Method Code Examples

This article collects typical usage examples of the Python random.Random.shuffle method. If you are wondering what Random.shuffle does exactly, how to call it, or what real uses of it look like, the hand-picked code examples below should help. You can also explore further usage examples of random.Random, the class this method belongs to.


Fifteen code examples of Random.shuffle are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
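Before turning to the project examples, here is a minimal, self-contained sketch (not taken from any of the projects listed below) of the pattern they all share: create an independent random.Random instance, seed it if reproducibility matters, and call shuffle to reorder a mutable sequence in place.

from random import Random

items = list(range(10))

# A seeded Random instance is independent of the global random module state.
rnd = Random(42)
rnd.shuffle(items)            # shuffles in place and returns None
print(items)

# The same seed produces the same ordering again.
items_again = list(range(10))
Random(42).shuffle(items_again)
assert items == items_again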

Example 1: run_one

# Required import: from random import Random [as alias]
# Or: from random.Random import shuffle [as alias]
 def run_one(self, data, agent1_class, classification_time_limit, learning_time_limit, num_folds = 2, seed = 1):
     '''
     Evaluates a learning agent by cross-validation on the given dataset.

     @param data: A list of (instance, classification) pairs. Assumes the class is discrete.
     @param agent1_class: The learning agent's class.
     @param classification_time_limit: The time limit for classifying a single instance.
     @param learning_time_limit: The time limit for learning.
     @param num_folds: The number of folds to use for cross-validation.
     @param seed: The random generator's seed for shuffling the data before creating the folds.
     @return: confusion1 - The agent's confusion matrix accumulated over all folds.
     '''
     classes = tuple(set(instance[1] for instance in data))
     confusion1 = ConfusionMatrix(classes)
     
     shuffled_data = data[:]
     rnd = Random(seed)
     rnd.shuffle(shuffled_data)
     
     for train, test in self._createFolds(shuffled_data, num_folds):
         agent1 = self._setupAgent(train, agent1_class, classification_time_limit, learning_time_limit)
         
         for test_instance, test_classification in test:
             classification1 = self._classify(test_instance, agent1, classification_time_limit)
             
             confusion1.update(test_classification, classification1)
     
     return confusion1
Developer ID: gzvulon, Project: IAI3-LM, Lines of code: 35, Source file: s_learning_analyzer.py

Example 2: main

# Required import: from random import Random [as alias]
# Or: from random.Random import shuffle [as alias]
def main(load_from_epoch=None):
	prefix = 'stb'
	raw_data = reader.stb_raw_data("./data/")
	train_data, valid_data, test_data, _ = raw_data

	config = get_config()
	if load_from_epoch == None:
		m = SentimentLanguageModel(config=config)
	else:
		arg_params = load_checkpoint(prefix, load_from_epoch)
		m = SentimentLanguageModel(config, arg_params)
		m.logger("load from %s %d" % (prefix, load_from_epoch))

	last_loss = 1e10
	for i in xrange(config.max_epoch):
		my_random = Random()
		my_random.seed(1)
		my_random.shuffle(train_data)

		train_lm_loss, train_senti_loss, train_acc = run_epoch(m, train_data, True, True)
		m.logger("Epoch: %d Training LM Loss: %.3f Training Senti Loss: %.3f (Accuracy: %.3f)" %
								(i + 1, train_lm_loss, train_senti_loss, train_acc))

		valid_lm_loss, valid_senti_loss, valid_acc = run_epoch(m, valid_data, False, False)
		m.logger("Epoch: %d Valid LM Loss: %.3f Valid Senti Loss: %.3f (Accuracy: %.3f)" %
								(i + 1, valid_lm_loss, valid_senti_loss, valid_acc))

		m.update_lr(valid_lm_loss)
		m.save(prefix, i)
Developer ID: chpzcc, Project: GraduateDesign, Lines of code: 31, Source file: stb.py

Example 3: test_ordered_dictionaries_preserve_keys

# Required import: from random import Random [as alias]
# Or: from random.Random import shuffle [as alias]
def test_ordered_dictionaries_preserve_keys():
    r = Random()
    keys = list(range(100))
    r.shuffle(keys)
    x = fixed_dictionaries(
        OrderedDict([(k, booleans()) for k in keys])).example()
    assert list(x.keys()) == keys
Developer ID: KrzysiekJ, Project: hypothesis-python, Lines of code: 9, Source file: test_simple_collections.py

Example 4: get_seq_order_for_epoch

# Required import: from random import Random [as alias]
# Or: from random.Random import shuffle [as alias]
 def get_seq_order_for_epoch(self, epoch, num_seqs, get_seq_len=None):
   """
   :returns the order for the given epoch.
    This is mostly a static method, except that it depends on the configured type of ordering,
    such as 'default' (= as-is), 'sorted' or 'random'. 'sorted' also uses the sequence length.
   :param int epoch: for 'random', this determines the random seed
   :type num_seqs: int
   :param get_seq_len: function (originalSeqIdx: int) -> int
   :rtype: list[int]
   """
   assert num_seqs > 0
   seq_index = list(range(num_seqs)); """ :type: list[int]. the real seq idx after sorting """
   if self.seq_ordering == 'default':
     pass  # Keep order as-is.
   elif self.seq_ordering == 'sorted':
     assert get_seq_len
     seq_index.sort(key=get_seq_len)  # sort by length
   elif self.seq_ordering.startswith('random'):
     tmp = self.seq_ordering.split(':')
     nth = int(tmp[1]) if len(tmp) > 1 else 1
     # Keep this deterministic! Use fixed seed.
     rnd_seed = ((epoch-1) / nth + 1) if epoch else 1
     rnd = Random(rnd_seed)
     rnd.shuffle(seq_index)
   else:
     assert False, "invalid batching specified: " + self.seq_ordering
   return seq_index
Developer ID: atuxhe, Project: returnn, Lines of code: 29, Source file: Dataset.py

Example 5: create_keywords_dict

# Required import: from random import Random [as alias]
# Or: from random.Random import shuffle [as alias]
def create_keywords_dict(config):
    keywords = []
    with open(keywords_path) as file:
        regex = re.compile('[^a-zA-Z]')
        for index, line in enumerate(file):
            if '#' in line:
                continue
            word = regex.sub('', line)
            if word in keywords:
                print 'word found in dict! ', word
                continue
            if len(word) <= 2:
                continue
            keywords.append(word)
    if config.shuffle_keywords_seed:
        # print keywords
        # keywords = keywords[:config.shuffle_stop]
        rand = Random(config.shuffle_keywords_seed)
        rand.shuffle(keywords)
        # print keywords
    keywords_dict = {}
    for i in range(config.x):
        keywords_dict[i] = keywords[i]
        keywords_dict[keywords[i]] = i

    print 'keywords_dict size is: ', len(keywords_dict)
    return keywords_dict
Developer ID: uriklarman, Project: TreasureHunter, Lines of code: 29, Source file: dictionaries.py

Example 6: generateGroupCicle

# Required import: from random import Random [as alias]
# Or: from random.Random import shuffle [as alias]
	def generateGroupCicle(self,groupId):
		connection = MySQLdb.connect(host=self.host,user=self.user,passwd=self.passwd,db=self.db)
		cursor = connection.cursor()

		# determine all members of the group
		cursor.execute("SELECT userId FROM participations WHERE groupId=%d"%(groupId))
		rows = cursor.fetchall()
		users = []
		for r in rows:
			users.append(r[0])
		r = Random()
		r.shuffle(users)

		tuples = []
		for a in range(0,len(users)-1):
			tuples.append("(%d,%d,%d)"%(users[a],users[a+1],groupId))
		tuples.append("(%d,%d,%d)"%(users[-1],users[0],groupId))

		print("Tuples")
		for t in tuples:
			print(t)

		query = "INSERT INTO exchanges (fromUserId, toUserId, groupId) VALUES " + ",".join(tuples)
		try:
			cursor.execute(query)
		except MySQLdb.Error, e:
			print("Erro no banco de dados: %s"%e)
			print("Tentou-se executar %s"%(query))
Developer ID: mgmillani, Project: rotalivros, Lines of code: 30, Source file: database.py

Example 7: MulticlassAveragedPerceptron

# Required import: from random import Random [as alias]
# Or: from random.Random import shuffle [as alias]
class MulticlassAveragedPerceptron(object):
    """
    Multiclass classification via the averaged perceptron. Features
    are assumed to be binary, hashable (e.g., strings), and very sparse. 
    Labels must also be hashable.
    """

    def __init__(self, default=None, seed=None):
        self.classes = {default}
        self.random = Random(seed)
        self.weights = defaultdict(partial(defaultdict, LazyWeight))
        self.time = 0
    
    def fit(self, Y, Phi, epochs, alpha=1):
        # copy data so we can mutate it in place
        data = list(zip(Y, Phi))
        for _ in xrange(epochs):
            self.random.shuffle(data)
            for (y, phi) in data:
                self.fit_one(y, phi)
        self.finalize()

    def fit_one(self, y, phi, alpha=1):
        self.classes.add(y)
        yhat = self.predict(phi)
        if y != yhat:
            self.update(y, yhat, phi, alpha)

    def update(self, y, yhat, phi, alpha=1):
        """
        Given feature vector `phi`, reward correct observation `y` and
        punish incorrect hypothesis `yhat`, assuming that `y != yhat`.
        `alpha` is the learning rate (usually 1).
        """
        for phi_i in phi:
            ptr = self.weights[phi_i]
            ptr[y].update(+alpha, self.time)
            ptr[yhat].update(-alpha, self.time)
        self.time += 1

    def predict(self, phi):
        """
        Predict the most likely class for `phi`
        """
        scores = dict.fromkeys(self.classes, 0)
        for phi_i in phi:
            for (cls, weight) in self.weights[phi_i].iteritems():
                scores[cls] += weight.get()
        (yhat, _) = max(scores.iteritems(), key=itemgetter(1))
        return yhat

    def finalize(self):
        """
        Prepare for inference by applying averaging

        TODO(kbg): also remove zero-valued weights?
        """
        for (phi_i, clsweights) in self.weights.iteritems():
            for (cls, weight) in clsweights.iteritems():
                weight.average(self.time)
Developer ID: kylebgorman, Project: kylebgorman.github.io, Lines of code: 62, Source file: perceptron_BoVhgwqU.py

Example 8: deal_out

# Required import: from random import Random [as alias]
# Or: from random.Random import shuffle [as alias]
def deal_out ():
    """Randomly deal out nodes to several hands, one per retriever.
This is set up so that each player gets a retriever node, and then
all other nodes are distributed almost-equally among each player."""
    rand = Random()
    nodes = copy.deepcopy(global_vars['node_store'])
    # ensure that each "hand" includes a retriever
    retrievers = nodes['text']['retrievers']
    rand.shuffle(retrievers)
    hands = map(lambda x:['text.retrievers.'+x],retrievers)
    # now that those have been dealt with, remove them
    del nodes['text']['retrievers']
    # now iterate through the remaining leaves and linearize them
    nodelist = []
    for first_key in nodes:
        for second_key in nodes[first_key]:
            for item in nodes[first_key][second_key]:
                nodelist.append(".".join([first_key,second_key,item]))
    # now deal these out like a pack of cards
    pos = 0
    num_hands = len(hands)
    num_nodes = len(nodelist)
    while pos < num_nodes:
        hands[divmod (pos, num_hands)[1]].append(nodelist[pos])
        pos = pos + 1
        
    return hands
Developer ID: holtzermann17, Project: FloWrTester, Lines of code: 29, Source file: flowr_web_test.py

Example 9: TestOrderedDict

# Required import: from random import Random [as alias]
# Or: from random.Random import shuffle [as alias]
class TestOrderedDict(unittest.TestCase):

    def setUp(self):
        self.rnd = Random(0xC0EDA55)
        self.seq = range(1000)
        self.rnd.shuffle(self.seq)
        self.od = OrderedDict()
        for k in self.seq:
            self.od[k] = self.rnd.random()

    def testorder(self):
        self.failUnlessEqual(list(self.od), self.seq)
        k,v = map(list, zip(*self.od.iteritems()))
        self.failUnlessEqual(k, self.seq)

    def testcp(self):
        od2 = OrderedDict(self.od)
        self.failUnlessEqual(list(od2), self.seq)
        self.failUnlessEqual(od2, self.od)

    def testdictcp(self):
        d = dict(self.od)
        self.failUnlessEqual(d, self.od)

    def testpickle(self):
        od2 = pickle.loads(pickle.dumps(self.od))
        self.failUnlessEqual(list(od2), self.seq)
        self.failUnlessEqual(od2, self.od)
Developer ID: matthagy, Project: Jamenson, Lines of code: 30, Source file: collections.py

Example 10: init_seq_order

# Required import: from random import Random [as alias]
# Or: from random.Random import shuffle [as alias]
  def init_seq_order(self, epoch=None, seq_list=None):
    assert seq_list is None, "seq_list not supported for %s" % self.__class__
    need_reinit = self.epoch is None or self.epoch != epoch
    super(CombinedDataset, self).init_seq_order(epoch=epoch, seq_list=seq_list)
    if not need_reinit:
      return False

    # We just select for which seq-idx we will use which dataset.
    # The ordering of the seqs in the datasets will not be set here
    # (do that in the config for the specific dataset).

    seqs_dataset_idx = self._canonical_seqs_dataset_idxs()
    if self.seq_ordering in ("default", "random"):  # default is random. this is different from base class!
      from random import Random
      rnd = Random(self.epoch)
      rnd.shuffle(seqs_dataset_idx)
    elif self.seq_ordering == "in-order":
      pass  # keep as-is
    elif self.seq_ordering == "reversed":
      seqs_dataset_idx = reversed(seqs_dataset_idx)
    else:
      raise Exception("seq_ordering %s not supported" % self.seq_ordering)

    self.dataset_seq_idxs = self._dataset_seq_idxs(seqs_dataset_idx)
    assert self.num_seqs == len(self.dataset_seq_idxs)

    for dataset in self.datasets.values():
      dataset.init_seq_order(epoch=epoch)
    return True
Developer ID: atuxhe, Project: returnn, Lines of code: 31, Source file: MetaDataset.py

Example 11: run

# Required import: from random import Random [as alias]
# Or: from random.Random import shuffle [as alias]
    def run(self, dataset, parsers):
        logger.info("Loading dataset")
        random = Random(1)        

        dataset = [(source, list(group)) for source, group in
                    groupby(dataset, itemgetter('source'))]

        if len(dataset) != len(set(map(itemgetter(0), dataset))):
            raise ValueError("Dataset contains non-contiguous source examples")

        # Shuffle the dataset and each group within the dataset
        random.shuffle(dataset)
        for source, group in dataset:
            random.shuffle(group)

        length = len(dataset)
        assert length > 1

        train_length = int(0.8*length)
        train_limit = self.train_limit or train_length
        train_set = dataset[:min(train_length, train_limit)]
        test_set = dataset[train_length:]

        logger.info("Training with %d items", len(train_set))

        results = {}
        for parser in parsers:
            logger.info("Training parser %r", parser)
            parser.train(train_set)
            logger.info("Evaluating on test set")
            parser_results = parser.test(test_set)
            results[repr(parser)] = parser_results
        return results
Developer ID: daoudclarke, Project: sempre-baselines, Lines of code: 35, Source file: experiment.py

Example 12: pseudo_random_combinations

# Required import: from random import Random [as alias]
# Or: from random.Random import shuffle [as alias]
def pseudo_random_combinations(values, tuple_size, result_limit=False, avoid_all_combinations=False):
    rand = Random()
    rand.seed(SEED)

    if not result_limit:
        result_limit = int(pow(len(values),tuple_size))
    else:
        result_limit = int(min(int(pow(len(values),tuple_size)), result_limit))

    if avoid_all_combinations:
        combinations = []
        combinations_set = set()
        while len(combinations_set) < result_limit:
            if len(combinations_set) %10000 == 0:
                print 'len(combinations_set): ', len(combinations_set)
            combo = random_product(values, rand=rand, repeat=tuple_size)
            if combo not in combinations_set:
                combinations.append(combo)
                combinations_set.add(combo)
        return combinations

    else:
        combinations = [x for x in itertools.product(values, repeat=tuple_size)]
        rand.shuffle(combinations)
        return combinations[:result_limit]
Developer ID: uriklarman, Project: TreasureHunter, Lines of code: 27, Source file: combinations_provider.py

Example 13: gen_passwd

# Required import: from random import Random [as alias]
# Or: from random.Random import shuffle [as alias]
def gen_passwd(pwd_len=min_allowed_pwd_len, strict_policy=0, seed=None):
    """
    Generates Paswords of pwd_len, that optinally strictly follows
    the Password Policy.

    if pwd_len < min_possible_len then policy cannot be enforced - if this happens,
    and strict_policy=1 it returns None.

    If pwd_len = min_possible_pwd_len then generate 4 unique tokens from vc, 1 token from
    vn and 1 token from vsc.

    if pwd_len > min_possible_pwd_len, then do as in previous step and then randomize the rest of the tokens.
    """

    # Basic and obvious input checking
    if strict_policy:
        if pwd_len < min_allowed_pwd_len:
            return None

    # Since we are generating passwords, we are nice to the poor
    # sods having to read and type them in, so we remove some
    # typical character ambiguities. Yes, I know, this reduces the
    # Password Variation Space. See if I care - you're not the one
    # having to read the passwords or deal with complaints about that :-)
    # So, we reduce special chars
    rvs = vsc.replace(' ', '') # remove space
    rvs = rvs.replace('|', '') # remove pipe
    # reduce valid numbers
    rvn =  vn.replace('1', '') # remove nr 1
    rvn = rvn.replace('0', '') # remove zero
    # reduce valid chars
    rvc =  vc.replace('l', '') # remove lowercase L
    rvc = rvc.replace('I', '') # remove uppercase I
    rvc = rvc.replace('O', '') # remove uppercase O

    from random import Random
    from time import time  # time() is used below for the default seed
    if seed is None:
        seed = time() * time()
    gen = Random(seed)

    if not strict_policy:
        pl = gen.sample(rvc + rvn + rvs, pwd_len)
        return ''.join(pl)

    # We have a strict enforcement policy. Try for ten iterations; if not
    # successful then return None.
    rl_len = pwd_len - (min_char_tokens + min_num_tokens + min_special_tokens)
    for i in range(1,10):
        pl = gen.sample(rvc, min_char_tokens)    + \
             gen.sample(rvn, min_num_tokens)     + \
             gen.sample(rvs, min_special_tokens) + \
             gen.sample(rvc + rvn + rvs, rl_len)
        gen.shuffle(pl)
        pwd = ''.join(pl)
        err = check_passwd(pwd)
        if err == []:
            return pwd
    return None
Developer ID: jacob-carrier, Project: code, Lines of code: 60, Source file: recipe-576561.py

Example 14: PercolationSimulation

# Required import: from random import Random [as alias]
# Or: from random.Random import shuffle [as alias]
class PercolationSimulation(object):

    def __init__(self, N, rseed=None):
        self.N = N
        # The grid is N * N, but two virtual components are added
        self.qu = WeightedQuickUnionUF(N * N + 2, debug=False)
        self.virt_top = N * N
        self.virt_bottom = N * N + 1

        # We use a hack: the WQU has two virtual nodes, one for each border
        # Every node on each border is connected to its virtual node; we then check whether the two virtual nodes are connected
        # If that is True, the system percolates
        for i in range(N):
            self.qu.union(N * N, i)  # Node N * N is the virtual top
        for i in range(N * N - N, N * N):
            self.qu.union(N * N + 1, i)  # Node N * N + 1 is the virtual bottom

        self.open = [False] * (N * N)  # Indicates whether each node is open
        self.rng = Random(rseed) if rseed else Random()

    def adyacentes(self, p):
        # Return the ids of the open nodes adjacent to p
        adyacentes = []
        izq = p - 1
        derecha = p + 1
        arriba = p - self.N
        abajo = p + self.N

        # Check the node's neighbours, verifying that they really are neighbours
        # and that they are open
        for nodo in (izq, derecha, arriba, abajo):
            if 0 < nodo < self.N * self.N and self.open[nodo]:
                adyacentes.append(nodo)
        return adyacentes

    def _percola(self):  # If both virtual nodes are connected, bingo!
        return self.qu.connected(self.virt_top, self.virt_bottom)

    def umbral(self):
        cerrados = range(self.N * self.N)  # All sites start out closed
        # Shuffle so that sites are opened in random order
        self.rng.shuffle(cerrados)

        while cerrados:
            nodo = cerrados.pop()
            self.open[nodo] = True  # Open the node
            vecinos = self.adyacentes(nodo)  # Get the adjacent open nodes

            # Create a link between the node and each adjacent node
            for vecino in vecinos:
                self.qu.union(nodo, vecino)

            if self._percola():
                break  # If the system percolates, we are done
        abiertos = float(self.N ** 2 - len(cerrados))

        # The estimated percolation threshold
        return abiertos / (self.N * self.N)
Developer ID: diegoahg, Project: Lab2_Paralela, Lines of code: 60, Source file: sec.py

Example 15: Generator

# Required import: from random import Random [as alias]
# Or: from random.Random import shuffle [as alias]
class Generator(object):
  def __init__(self):
    super().__init__()
    self.buffer = bytearray(source=ALPHABET64, encoding='UTF-8')
    self.line_feed = bytes(source='\n', encoding='UTF-8')
    self.random = Random(x=1)

  def next_word(self):
    self.random.shuffle(self.buffer)
    return self.buffer
Developer ID: bgdev, Project: sorting-battle, Lines of code: 12, Source file: sample.py


Note: The random.Random.shuffle method examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to its original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce this article without permission.