

Python itertools.groupby method code examples

This article collects typical code examples of the itertools.groupby method in Python. If you are wondering what exactly itertools.groupby does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples from the itertools module.


The following sections present 15 code examples of the itertools.groupby method, ordered by popularity by default.
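Before the examples, here is a minimal, self-contained sketch of the core pattern they all share: itertools.groupby only merges consecutive items with equal keys, so the input must already be sorted (or otherwise ordered) by the grouping key. The records and key function below are made up purely for illustration.

from itertools import groupby
from operator import itemgetter

# Made-up records, already sorted by the first field (the grouping key).
records = [
    ("fruit", "apple"),
    ("fruit", "pear"),
    ("veg", "carrot"),
    ("veg", "leek"),
]

for key, items in groupby(records, key=itemgetter(0)):
    # `items` is a lazy sub-iterator; materialize it before moving to the next group.
    print(key, [name for _, name in items])
# prints:
# fruit ['apple', 'pear']
# veg ['carrot', 'leek']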

Example 1: format_mask

# Required module: import itertools [as alias]
# Or: from itertools import groupby [as alias]
# (this snippet also uses numpy and itertools.count)
def format_mask(x):
    """
    Formats a mask into a readable string.
    Args:
        x (ndarray): an array with the mask;

    Returns:
        A readable string with the mask.
    """
    x = numpy.asanyarray(x)
    if len(x) == 0:
        return "(empty)"
    if x.dtype == bool:
        x = numpy.argwhere(x)[:, 0]
    grps = tuple(list(g) for _, g in groupby(x, lambda n, c=count(): n-next(c)))
    return ",".join("{:d}-{:d}".format(i[0], i[-1]) if len(i) > 1 else "{:d}".format(i[0]) for i in grps) 
Developer: pyscf, Project: pyscf, Lines of code: 18, Source file: common_slow.py
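The groupby call in format_mask relies on a small trick: the key function pairs each value with a running itertools.count, so consecutive integers produce the same key (value minus position) and land in the same group. A standalone sketch of the idea, with made-up indices:

from itertools import groupby, count

indices = [0, 1, 2, 5, 7, 8]  # made-up, already sorted
runs = [list(g) for _, g in groupby(indices, key=lambda n, c=count(): n - next(c))]
# runs == [[0, 1, 2], [5], [7, 8]]
print(",".join("{}-{}".format(r[0], r[-1]) if len(r) > 1 else str(r[0]) for r in runs))
# prints: 0-2,5,7-8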

Example 2: add_comp_one

# Required module: import itertools [as alias]
# Or: from itertools import groupby [as alias]
def add_comp_one(compstr):
    """
    Adds stoichiometries of 1 to compstr that don't have them
    :param compstr:  composition as a string
    :return:         composition with stoichiometries of 1 added
    """
    sample = re.sub(r"([A-Z])", r" \1", compstr).split()
    sample = ["".join(g) for _, g in groupby(str(sample), str.isalpha)]
    samp_new = ""
    for k in range(len(sample)):
        spl_samp = re.sub(r"([A-Z])", r" \1", sample[k]).split()
        for l in range(len(spl_samp)):
            if spl_samp[l][-1].isalpha() and spl_samp[l][-1] != "x":
                spl_samp[l] = spl_samp[l] + "1"
            samp_new += spl_samp[l]

    return samp_new 
Developer: materialsproject, Project: MPContribs, Lines of code: 19, Source file: views.py

Example 3: add_comp_one

# Required module: import itertools [as alias]
# Or: from itertools import groupby [as alias]
def add_comp_one(compstr):
    """
    Adds stoichiometries of 1 to compstr that don't have them
    :param compstr:  composition as a string
    :return:         composition with stoichiometries of 1 added
    """
    sample = pd.np.array(re.sub(r"([A-Z])", r" \1", compstr).split()).astype(str)
    sample = ["".join(g) for _, g in groupby(sample, str.isalpha)]
    samp_new = ""
    for k in range(len(sample)):
        spl_samp = re.sub(r"([A-Z])", r" \1", sample[k]).split()
        for l in range(len(spl_samp)):
            if spl_samp[l][-1].isalpha() and spl_samp[l][-1] != "x":
                spl_samp[l] = spl_samp[l] + "1"
            samp_new += spl_samp[l]
    return samp_new 
Developer: materialsproject, Project: MPContribs, Lines of code: 18, Source file: pre_submission.py
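The groupby(..., str.isalpha) line in both versions of add_comp_one splits a sequence into alternating alphabetic and non-alphabetic runs. The character-level variant of the same idea, on a made-up composition string, may make the intent clearer:

from itertools import groupby

formula = "Mn0.5Fe0.5Ox"  # made-up composition string
parts = ["".join(chars) for _, chars in groupby(formula, key=str.isalpha)]
# parts == ['Mn', '0.5', 'Fe', '0.5', 'Ox']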

Example 4: _spanning_iterator

# Required module: import itertools [as alias]
# Or: from itertools import groupby [as alias]
def _spanning_iterator(self):
        ''' implements basic spanning on the python side operating on Rows '''
        # TODO implement in Java and support not only Rows

        columns = set(str(c) for c in self.columns)

        def spanning_iterator(partition):
            def key_by(columns):
                for row in partition:
                    k = Row(**{c: row.__getattr__(c) for c in columns})
                    for c in columns:
                        del row[c]

                    yield (k, row)

            for g, l in groupby(key_by(columns), itemgetter(0)):
                yield g, list(_[1] for _ in l)

        return spanning_iterator 
Developer: TargetHolding, Project: pyspark-cassandra, Lines of code: 21, Source file: rdd.py

Example 5: _events_find

# Required module: import itertools [as alias]
# Or: from itertools import groupby [as alias]
def _events_find(event_channel, threshold="auto", threshold_keep="above"):
    binary = signal_binarize(event_channel, threshold=threshold)

    if threshold_keep.lower() != "above":
        binary = np.abs(binary - 1)  # Reverse if events are below

    # Initialize data
    events = {"onset": [], "duration": []}

    index = 0
    for event, group in itertools.groupby(binary):
        duration = len(list(group))
        if event == 1:
            events["onset"].append(index)
            events["duration"].append(duration)
        index += duration

    # Convert to array
    events["onset"] = np.array(events["onset"])
    events["duration"] = np.array(events["duration"])
    return events 
Developer: neuropsychology, Project: NeuroKit, Lines of code: 23, Source file: events_find.py
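With no key function, groupby collapses runs of identical values, which is exactly the run-length encoding _events_find performs on the binarized channel. A minimal sketch with a made-up binary sequence:

from itertools import groupby

binary = [0, 0, 1, 1, 1, 0, 1, 1]  # made-up binarized event channel
onsets, durations, index = [], [], 0
for value, run in groupby(binary):
    length = len(list(run))
    if value == 1:  # an "event" run
        onsets.append(index)
        durations.append(length)
    index += length
# onsets == [2, 6], durations == [3, 2]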

Example 6: apply

# Required module: import itertools [as alias]
# Or: from itertools import groupby [as alias]
def apply(self, solver, players_dict):
        optimizer = self.optimizer
        extra_positions = optimizer.players_with_same_position
        positions_combinations = set([tuple(sorted(player.positions)) for player in players_dict.keys()
                                      if len(player.positions) > 1])
        for rank, rank_positions in groupby(optimizer.settings.positions, lambda pos: pos.for_rank):
            positions = get_positions_for_optimizer(list(rank_positions), positions_combinations)
            unique_positions = optimizer.available_positions
            players_by_positions = {
                position: {variable for player, variable in players_dict.items()
                           if player.rank == rank and position in player.positions} for position in unique_positions
            }
            for position, places in positions.items():
                extra = 0
                if len(position) == 1:
                    extra = extra_positions.get(position[0], 0)
                players_with_position = set()
                for pos in position:
                    players_with_position.update(players_by_positions[pos])
                solver.add_constraint(players_with_position, None, SolverSign.GTE, places + extra) 
Developer: DimaKudosh, Project: pydfs-lineup-optimizer, Lines of code: 22, Source file: rules.py

Example 7: merge_edges

# Required module: import itertools [as alias]
# Or: from itertools import groupby [as alias]
def merge_edges(edges, snap_tolerance, join_tolerance):
    """
    Using the `snap_edges` and `join_edge_group` methods above, merge a list of edges into a more "seamless" list.
    """
    def get_group(edge):
        if edge["orientation"] == "h":
            return ("h", edge["top"])
        else:
            return ("v", edge["x0"])

    if snap_tolerance > 0:
        edges = snap_edges(edges, snap_tolerance)

    if join_tolerance > 0:
        _sorted = sorted(edges, key=get_group)
        edge_groups = itertools.groupby(_sorted, key=get_group)
        edge_gen = (join_edge_group(items, k[0], join_tolerance)
            for k, items in edge_groups)
        edges = list(itertools.chain(*edge_gen))
    return edges 
Developer: jsvine, Project: pdfplumber, Lines of code: 22, Source file: table.py

Example 8: cluster_objects

# Required module: import itertools [as alias]
# Or: from itertools import groupby [as alias]
def cluster_objects(objs, attr, tolerance):
    if isinstance(attr, (str, int)):
        attr_getter = itemgetter(attr)
    else:
        attr_getter = attr
    objs = to_list(objs)
    values = map(attr_getter, objs)
    cluster_dict = make_cluster_dict(values, tolerance)

    get_0, get_1 = itemgetter(0), itemgetter(1)

    cluster_tuples = sorted(((obj, cluster_dict.get(attr_getter(obj)))
        for obj in objs), key=get_1)

    grouped = itertools.groupby(cluster_tuples, key=get_1)

    clusters = [ list(map(get_0, v))
        for k, v in grouped ]

    return clusters 
Developer: jsvine, Project: pdfplumber, Lines of code: 22, Source file: utils.py
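Both pdfplumber helpers above (merge_edges and cluster_objects) follow the standard recipe: sort by the same key that is passed to groupby, then collect each group into a list. A small sketch with made-up (object, cluster label) pairs:

from itertools import groupby
from operator import itemgetter

pairs = [("a", 1), ("c", 0), ("b", 1), ("d", 0)]  # made-up (object, cluster label) tuples
get_label = itemgetter(1)
pairs.sort(key=get_label)  # groupby needs input sorted by the grouping key
clusters = [[obj for obj, _ in grp] for _, grp in groupby(pairs, key=get_label)]
# clusters == [['c', 'd'], ['a', 'b']]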

Example 9: group_dataset_by_category_and_instance

# Required module: import itertools [as alias]
# Or: from itertools import groupby [as alias]
def group_dataset_by_category_and_instance(self, dataset_split):
        """
        Group small NORB dataset for (category, instance) key
        
        Parameters
        ----------
        dataset_split: str
            Dataset split, can be either 'train' or 'test'

        Returns
        -------
        groups: list
            List of 25 groups of 972 elements each. All examples of each group are
            from the same category and instance
        """
        if dataset_split not in ['train', 'test']:
            raise ValueError('Dataset split "{}" not allowed.'.format(dataset_split))

        groups = []
        for key, group in groupby(iterable=sorted(self.data[dataset_split]),
                                  key=lambda x: (x.category, x.instance)):
            groups.append(list(group))

        return groups 
Developer: ndrplz, Project: small_norb, Lines of code: 26, Source file: dataset.py

Example 10: submissions

# Required module: import itertools [as alias]
# Or: from itertools import groupby [as alias]
def submissions(request: HttpRequest, course_slug: str, activity_slug: str) -> HttpResponse:
    offering = get_object_or_404(CourseOffering, slug=course_slug)
    activity = get_object_or_404(Activity, slug=activity_slug, offering=offering, group=False)
    quiz = get_object_or_404(Quiz, activity=activity)
    questions = Question.objects.filter(quiz=quiz)

    answers = QuestionAnswer.objects.filter(question__in=questions) \
        .select_related('student__person') \
        .order_by('student__person')

    students = set(a.student for a in answers)
    starts_ends = quiz.get_starts_ends(students)
    by_student = itertools.groupby(answers, key=lambda a: a.student)
    subs_late = [(member, max(a.modified_at for a in ans) - starts_ends[member][1]) for member, ans in by_student]

    context = {
        'offering': offering,
        'activity': activity,
        'quiz': quiz,
        'subs_late': subs_late,
        'timedelta_zero': datetime.timedelta(seconds=0)
    }
    return render(request, 'quizzes/submissions.html', context=context) 
Developer: sfu-fas, Project: coursys, Lines of code: 25, Source file: views.py

Example 11: _setup_download

# Required module: import itertools [as alias]
# Or: from itertools import groupby [as alias]
def _setup_download(request: HttpRequest, course_slug: str, activity_slug: str):
    offering = get_object_or_404(CourseOffering, slug=course_slug)
    activity = get_object_or_404(Activity, slug=activity_slug, offering=offering, group=False)
    quiz = get_object_or_404(Quiz, activity=activity)
    questions = Question.objects.filter(quiz=quiz)
    versions = QuestionVersion.objects.filter(question__in=questions)
    version_number_lookup = {  # version_number_lookup[question_id][version_id] == version_number
        q_id: {v.id: i+1 for i,v in enumerate(vs)}
        for q_id, vs in itertools.groupby(versions, key=lambda v: v.question_id)
    }

    answers = QuestionAnswer.objects.filter(question__in=questions) \
        .select_related('student__person', 'question_version', 'question') \
        .order_by('student__person')

    by_student = itertools.groupby(answers, key=lambda a: a.student)
    multiple_versions = len(questions) != len(versions)

    return activity, questions, version_number_lookup, by_student, multiple_versions 
Developer: sfu-fas, Project: coursys, Lines of code: 21, Source file: views.py

Example 12: courses_json

# Required module: import itertools [as alias]
# Or: from itertools import groupby [as alias]
def courses_json(request, semester):
    offerings = CourseOffering.objects.filter(semester__name=semester)\
        .exclude(component="CAN").exclude(flags=CourseOffering.flags.combined) \
        .select_related('semester').prefetch_related('meetingtime_set')
    instructors = Member.objects.filter(role='INST', offering__semester__name=semester).select_related('person')
    instr_by_offeringid = dict(
        (oid, list(instr))
        for oid, instr
        in itertools.groupby(instructors, lambda m: m.offering_id)
    )

    resp = HttpResponse(content_type="application/json")
    resp['Content-Disposition'] = 'inline; filename="' + semester + '.json"'
    crs_data = (o.export_dict(instructors=instr_by_offeringid.get(o.id, [])) for o in offerings)
    json.dump({'courses': list(crs_data)}, resp, indent=1)
    return resp 
Developer: sfu-fas, Project: coursys, Lines of code: 18, Source file: views.py
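courses_json shows another recurring idiom: turning groupby output into a key -> list dictionary (here, instructors per offering). Sketched with made-up (offering_id, name) pairs that are already sorted by id:

from itertools import groupby

members = [(10, "alice"), (10, "bob"), (11, "carol")]  # made-up rows, pre-sorted by offering id
by_offering = {oid: [name for _, name in grp]
               for oid, grp in groupby(members, key=lambda m: m[0])}
# by_offering == {10: ['alice', 'bob'], 11: ['carol']}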

Example 13: deduplicate

# Required module: import itertools [as alias]
# Or: from itertools import groupby [as alias]
def deduplicate(cls, start_date=None, end_date=None, dry_run=False):
        """
        Remove any EnrolmentHistory objects that aren't adding any new information.
        """
        all_ehs = EnrolmentHistory.objects.order_by('offering', 'date')
        if start_date:
            all_ehs = all_ehs.filter(date__gte=start_date)
        if end_date:
            all_ehs = all_ehs.filter(date__lte=end_date)

        for off_id, ehs in itertools.groupby(all_ehs, key=lambda eh: eh.offering_id):
            # iterate through EnrolmentHistory for this offering and purge any "same as yesterday" entries
            with transaction.atomic():
                current = next(ehs)
                for eh in ehs:
                    if current.is_dup(eh):
                        if not dry_run:
                            eh.delete()
                        else:
                            print('delete', eh)
                    else:
                        current = eh 
Developer: sfu-fas, Project: coursys, Lines of code: 24, Source file: models.py
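deduplicate walks each group and compares consecutive entries with is_dup. When the comparison is plain equality, collapsing consecutive duplicates can be written with groupby alone; a minimal sketch with made-up values:

from itertools import groupby

history = [1, 1, 2, 2, 2, 1, 3, 3]  # made-up consecutive readings
collapsed = [value for value, _ in groupby(history)]
# collapsed == [1, 2, 1, 3]  -- only changes are kept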

Example 14: import_joint

# Required module: import itertools [as alias]
# Or: from itertools import groupby [as alias]
def import_joint(extra_where='1=1'):
    """
    Find combined sections and set CourseOffering.config['joint_with'] appropriately.
    """
    db = SIMSConn()
    db.execute("SELECT strm, class_nbr, sctn_combined_id FROM ps_sctn_cmbnd c WHERE c.strm IN %s "
               " AND ("+extra_where+")", (import_semesters(),))

    for k,v in itertools.groupby(db, lambda d: (d[0], d[2])):
        # for each combined offering...
        strm, _ = k
        class_nbrs = [int(class_nbr) for _,class_nbr,_ in v]
        offerings = CourseOffering.objects.filter(semester__name=strm, class_nbr__in=class_nbrs)
        for offering in offerings:
            offering.set_joint_with([o.slug for o in offerings if o != offering])
            offering.save() 
Developer: sfu-fas, Project: coursys, Lines of code: 18, Source file: importer.py

Example 15: get_bad_gpa

# Required module: import itertools [as alias]
# Or: from itertools import groupby [as alias]
def get_bad_gpa(self):
        current_semester = Semester.current()
        semesters = [current_semester.name, current_semester.offset_name(-1), current_semester.offset_name(-2)]
        cmpt_acad_progs, eng_acad_progs = get_fas_programs()

        cmpt_gpas = BadGPAsQuery(query_args={
            'acad_progs': cmpt_acad_progs,
            'strms': semesters,
            'gpa': '2.4',
        })
        low_gpas = cmpt_gpas.result()
        self.artifacts.append(low_gpas)

        rows = low_gpas.rows
        rows.sort()
        groups = itertools.groupby(rows, CSGenderExplorationReport.group_bin)
        out_rows = [[prog_gpa[0], prog_gpa[1], prog_gpa[2], len(list(students))] for prog_gpa, students in groups]
        bins = Table()
        bins.append_column('ACAD_PROG_PRIMARY')
        bins.append_column('GENDER')
        bins.append_column('GPA')
        bins.append_column('COUNT')
        bins.rows = out_rows
        self.artifacts.append(bins) 
Developer: sfu-fas, Project: coursys, Lines of code: 26, Source file: cs_gender_exploration.py
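Counting group sizes, as get_bad_gpa does per (program, gender, GPA) bin, is again sort-then-group followed by len(list(group)). A tiny sketch with made-up rows:

from itertools import groupby

rows = [("CS", "F"), ("CS", "F"), ("CS", "M"), ("ENG", "F")]  # made-up (program, gender) rows
rows.sort()
counts = [(key, len(list(group))) for key, group in groupby(rows)]
# counts == [(('CS', 'F'), 2), (('CS', 'M'), 1), (('ENG', 'F'), 1)]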


Note: The itertools.groupby examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code; do not reproduce this article without permission.