当前位置: 首页>>代码示例>>Python>>正文


Python tools.first_lower函数代码示例

本文整理汇总了Python中pywikibot.tools.first_lower函数的典型用法代码示例。如果您正苦于以下问题:Python first_lower函数的具体用法?Python first_lower怎么用?Python first_lower使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。


在下文中一共展示了first_lower函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: ReplaceLink

    def ReplaceLink(self, text, oldtxt, newtxt):
        """Replace occurrences of oldtxt with newtxt inside text.

        Both strings are split on the same word boundaries
        (self.wordBreaker) and stripped of self.stripChars; the parts are
        then substituted pairwise, in both first-lower and first-upper
        form, so the capitalization at a sentence or link start survives.

        @param text: text to perform the replacement in
        @param oldtxt: text to be replaced
        @param newtxt: replacement text; must split into the same number
            of parts as oldtxt, with equal part lengths
        @return: the updated text
        @raise ValueError: when the split parts of oldtxt and newtxt do
            not match in count or in length
        """
        frmParts = [s.strip(self.stripChars)
                    for s in self.wordBreaker.split(oldtxt)]
        toParts = [s.strip(self.stripChars)
                   for s in self.wordBreaker.split(newtxt)]

        if len(frmParts) != len(toParts):
            raise ValueError("Splitting parts do not match counts")
        # zip() replaces the Python-2-only xrange() index loop, which
        # raises NameError on Python 3.
        for frm, to in zip(frmParts, toParts):
            if len(frm) != len(to):
                raise ValueError("Splitting parts do not match word length")
            if frm:
                # Replace both case variants of the first letter so the
                # original capitalization is preserved in context.
                text = text.replace(first_lower(frm), first_lower(to))
                text = text.replace(first_upper(frm), first_upper(to))
        return text
开发者ID:happy5214,项目名称:pywikibot-core,代码行数:14,代码来源:casechecker.py

示例2: treat_page_and_item

 def treat_page_and_item(self, page, item):
     """Import a first-lowered displaytitle as the item's label.

     Only acts when the page's displaytitle equals the page title with
     its first letter lowered, and the existing label (if any) matches
     the page title after stripping.
     """
     display = page.properties().get('displaytitle')
     if not display:
         return
     current_title = page.title()
     if first_lower(current_title) != display:
         return
     code = page.site.lang
     existing = item.labels.get(code)
     # A conflicting label (stripped forms differ) blocks the import.
     if existing and self.stripped(existing) != self.stripped(current_title):
         return
     item.labels[code] = first_lower(existing) if existing else display
     summary = 'importing [%s] label from displaytitle in %s' % (
         code, page.title(as_link=True, insite=item.site))
     self.user_edit_entity(item, summary=summary)
开发者ID:matejsuchanek,项目名称:pywikibot-scripts,代码行数:14,代码来源:import_displaytitle.py

示例3: translateAndCapitalizeNamespaces

    def translateAndCapitalizeNamespaces(self, text):
        """Use localized namespace names.

        Rewrite wiki links so that recognized namespace aliases are
        replaced with the site's final (preferred) localized namespace
        name.

        @param text: wikitext to work on
        @return: text with namespace prefixes normalized
        """
        # arz uses english stylish codes
        if self.site.sitename == 'wikipedia:arz':
            return text
        # wiki links aren't parsed here.
        exceptions = ['nowiki', 'comment', 'math', 'pre']

        for namespace in self.site.namespaces.values():
            if namespace == 0:
                # skip main (article) namespace
                continue
            # a clone is needed. Won't change the namespace dict
            namespaces = list(namespace)
            if namespace == 6 and self.site.family.name == 'wikipedia':
                if self.site.code in ('en', 'fr') and MediaWikiVersion(
                        self.site.version()) >= MediaWikiVersion('1.14'):
                    # do not change "Image" on en-wiki and fr-wiki
                    assert u'Image' in namespaces
                    namespaces.remove(u'Image')
                if self.site.code == 'hu':
                    # do not change "Kép" on hu-wiki
                    assert u'Kép' in namespaces
                    namespaces.remove(u'Kép')
                elif self.site.code == 'pt':
                    # use "Imagem" by default on pt-wiki (per T57242)
                    assert 'Imagem' in namespaces
                    namespaces.insert(
                        0, namespaces.pop(namespaces.index('Imagem')))
            # final namespace variant
            final_ns = namespaces.pop(0)
            if namespace in (2, 3):
                # skip localized user namespace, maybe gender is used
                namespaces = ['User' if namespace == 2 else 'User talk']
            # lowerspaced and underscored namespaces
            for i, item in enumerate(namespaces):
                item = item.replace(' ', '[ _]')
                # match the first letter in either case, e.g. "[Ff]ile"
                item = u'[%s%s]' % (item[0], item[0].lower()) + item[1:]
                namespaces[i] = item
            # also match the first-lowered form of the final name itself
            namespaces.append(first_lower(final_ns))
            if final_ns and namespaces:
                if self.site.sitename == 'wikipedia:pt' and namespace == 6:
                    # only change on these file extensions (per T57242)
                    extensions = ('png', 'gif', 'jpg', 'jpeg', 'svg', 'tiff',
                                  'tif')
                    text = textlib.replaceExcept(
                        text,
                        r'\[\[\s*({}) *:(?P<name>[^\|\]]*?\.({}))'
                        r'(?P<label>.*?)\]\]'
                        .format('|'.join(namespaces), '|'.join(extensions)),
                        r'[[{}:\g<name>\g<label>]]'.format(final_ns),
                        exceptions)
                else:
                    text = textlib.replaceExcept(
                        text,
                        r'\[\[\s*(%s) *:(?P<nameAndLabel>.*?)\]\]'
                        % '|'.join(namespaces),
                        r'[[%s:\g<nameAndLabel>]]' % final_ns,
                        exceptions)
        return text
开发者ID:Zeffar,项目名称:Elobot,代码行数:60,代码来源:cosmetic_changes.py

示例4: correctcap

def correctcap(link, text):
    """Return the title of link, uncapitalized when text links to it so.

    If text contains a wikilink to the title with its first letter
    lowered, the lowered form is returned; otherwise the capitalized one.
    """
    upper_form = link.title()
    lower_form = first_lower(upper_form)
    uses_lower = ("[[%s]]" % lower_form in text or
                  "[[%s|" % lower_form in text)
    return lower_form if uses_lower else upper_form
开发者ID:donkaban,项目名称:pywiki-bot,代码行数:9,代码来源:solve_disambiguation.py

示例5: get_missing_labels

 def get_missing_labels(self, sitelinks, dont):
     """Collect candidate labels per language code from sitelinks.

     Conflicting titles for the same language code remove that code from
     the result and add it to dont (mutated in place).
     """
     result = {}
     for dbname, sitelink in sitelinks.items():
         if '/' in sitelink and ':' not in sitelink:
             continue
         code = self.normalize_lang(dbname.partition('wik')[0])
         if not code or code in dont:
             continue
         # [[d:Topic:Uhdjlv9aae6iijuc]]
         # todo: create a lib for this
         if code == 'fr' and sitelink.startswith(
                 ('Abbaye ', 'Cathédrale ', 'Chapelle ', 'Cloître ',
                  'Couvent ', 'Monastère ', 'Église ')):
             sitelink = first_lower(sitelink)
         seen = result.get(code)
         if seen and first_lower(seen) != first_lower(sitelink):
             # conflicting titles for one language: drop it altogether
             del result[code]  # todo: better handling
             dont.add(code)
             continue
         result[code] = sitelink
     return result
开发者ID:matejsuchanek,项目名称:pywikibot-scripts,代码行数:20,代码来源:wikidata_cleanup_toolkit.py

示例6: correctcap

def correctcap(link, text):
    """
    Capitalize link.

    Return the uncapitalized title of link when text already links to it
    with a lowered first letter; otherwise return the capitalized title.
    """
    capitalized = link.title()
    uncapitalized = first_lower(capitalized)
    plain_form = "[[{0!s}]]".format(uncapitalized)
    piped_form = "[[{0!s}|".format(uncapitalized)
    if plain_form in text or piped_form in text:
        return uncapitalized
    return capitalized
开发者ID:runt18,项目名称:pywikibot-core,代码行数:13,代码来源:solve_disambiguation.py

示例7: correctcap

def correctcap(link, text):
    """Return the link capitalized/uncapitalized according to the text.

    @param link: link page
    @type link: pywikibot.Page
    @param text: the wikitext that is supposed to refer to the link
    @type text: str
    @return: uncapitalized title of the link if the text links to the link
        with an uncapitalized title, else capitalized
    @rtype: str

    """
    title = link.title()
    lowered = first_lower(title)
    if "[[%s]]" % lowered not in text and "[[%s|" % lowered not in text:
        return title
    return lowered
开发者ID:hasteur,项目名称:g13bot_tools_new,代码行数:18,代码来源:solve_disambiguation.py

示例8: translateAndCapitalizeNamespaces

    def translateAndCapitalizeNamespaces(self, text):
        """Use localized namespace names.

        Rewrite wiki links so that recognized namespace aliases are
        replaced with the site's preferred localized namespace name.

        @param text: wikitext to work on
        @return: text with namespace prefixes normalized
        """
        # arz uses english stylish codes
        if self.site.sitename == 'wikipedia:arz':
            return text
        family = self.site.family
        # wiki links aren't parsed here.
        exceptions = ['nowiki', 'comment', 'math', 'pre']

        for namespace in self.site.namespaces.values():
            if namespace.id in (0, 2, 3):
                # skip main (article) namespace
                # skip user namespace, maybe gender is used
                continue
            # a clone is needed. Won't change the namespace dict
            namespaces = list(namespace)
            # the preferred (canonical localized) form comes first
            thisNs = namespaces.pop(0)
            if namespace.id == 6 and family.name == 'wikipedia':
                if self.site.code in ('en', 'fr') and \
                   MediaWikiVersion(self.site.version()) >= MediaWikiVersion('1.14'):
                    # do not change "Image" on en-wiki and fr-wiki
                    assert u'Image' in namespaces
                    namespaces.remove(u'Image')
                if self.site.code == 'hu':
                    # do not change "Kép" on hu-wiki
                    assert u'Kép' in namespaces
                    namespaces.remove(u'Kép')
                elif self.site.code == 'pt':
                    # TODO: bug T57242
                    continue
            # lowerspaced and underscored namespaces
            for i in range(len(namespaces)):
                item = namespaces[i].replace(' ', '[ _]')
                # match the first letter in either case, e.g. "[Ff]ile"
                item = u'[%s%s]' % (item[0], item[0].lower()) + item[1:]
                namespaces[i] = item
            # also match the first-lowered form of the preferred name
            namespaces.append(first_lower(thisNs))
            if thisNs and namespaces:
                text = textlib.replaceExcept(
                    text,
                    r'\[\[\s*(%s) *:(?P<nameAndLabel>.*?)\]\]'
                    % '|'.join(namespaces),
                    r'[[%s:\g<nameAndLabel>]]' % thisNs,
                    exceptions)
        return text
开发者ID:PersianWikipedia,项目名称:pywikibot-core,代码行数:44,代码来源:cosmetic_changes.py

示例9: translateAndCapitalizeNamespaces

    def translateAndCapitalizeNamespaces(self, text):
        """Use localized namespace names.

        Rewrite wiki links so that recognized namespace aliases are
        replaced with the site's preferred localized namespace name.

        @param text: wikitext to work on
        @return: text with namespace prefixes normalized
        """
        # arz uses english stylish codes
        if self.site.sitename == "wikipedia:arz":
            return text
        family = self.site.family
        # wiki links aren't parsed here.
        exceptions = ["nowiki", "comment", "math", "pre"]

        for namespace in self.site.namespaces.values():
            if namespace.id in (0, 2, 3):
                # skip main (article) namespace
                # skip user namespace, maybe gender is used
                continue
            # a clone is needed. Won't change the namespace dict
            namespaces = list(namespace)
            # the preferred (canonical localized) form comes first
            thisNs = namespaces.pop(0)
            if namespace.id == 6 and family.name == "wikipedia":
                if self.site.code in ("en", "fr") and MediaWikiVersion(self.site.version()) >= MediaWikiVersion("1.14"):
                    # do not change "Image" on en-wiki and fr-wiki
                    assert "Image" in namespaces
                    namespaces.remove("Image")
                if self.site.code == "hu":
                    # do not change "Kép" on hu-wiki
                    assert "Kép" in namespaces
                    namespaces.remove("Kép")
                elif self.site.code == "pt":
                    # TODO: bug T57242
                    continue
            # lowerspaced and underscored namespaces
            for i in range(len(namespaces)):
                item = namespaces[i].replace(" ", "[ _]")
                # match the first letter in either case, e.g. "[Ff]ile"
                item = "[%s%s]" % (item[0], item[0].lower()) + item[1:]
                namespaces[i] = item
            # also match the first-lowered form of the preferred name
            namespaces.append(first_lower(thisNs))
            if thisNs and namespaces:
                text = textlib.replaceExcept(
                    text,
                    r"\[\[\s*(%s) *:(?P<nameAndLabel>.*?)\]\]" % "|".join(namespaces),
                    r"[[%s:\g<nameAndLabel>]]" % thisNs,
                    exceptions,
                )
        return text
开发者ID:h4ck3rm1k3,项目名称:pywikibot-core,代码行数:43,代码来源:cosmetic_changes.py

示例10: fix_languages

 def fix_languages(self, data):
     """Normalize deprecated language codes in item terms.

     For every (lang, norm) pair in self.lang_map, move labels,
     descriptions and aliases stored under ``lang`` over to ``norm``
     (when ``norm`` is truthy) and clear the entry under ``lang``.

     @param data: item data dict with 'labels', 'descriptions', 'aliases'
     @return: True when any entry was changed
     """
     ret = False
     # labels: move to the normalized code, or demote to an alias there
     for lang, norm in self.lang_map.items():
         label = data['labels'].get(lang)
         if not label:
             continue
         if norm:
             if norm in data['labels']:
                 aliases = data['aliases'].get(norm, [])
                 # NOTE(review): label is compared un-lowered against
                 # first-lowered aliases; possibly first_lower(label) was
                 # intended here -- confirm before relying on it.
                 if label not in map(first_lower, aliases):
                     aliases.append(label)
                     data['aliases'][norm] = aliases
             else:
                 data['labels'][norm] = label
         # empty string marks the stale entry for removal
         data['labels'][lang] = ''
         ret = True
     # descriptions: copy over only when the target has none yet
     for lang, norm in self.lang_map.items():
         description = data['descriptions'].get(lang)
         if description:
             if norm and norm not in data['descriptions']:
                 data['descriptions'][norm] = description
             data['descriptions'][lang] = ''
             ret = True
     # aliases: merge into the normalized code, skipping duplicates
     for lang, norm in self.lang_map.items():
         old_aliases = data['aliases'].get(lang)
         if old_aliases:
             if norm:
                 new_aliases = data['aliases'].get(norm, [])
                 # set of first-lowered existing aliases (and label)
                 already = set(map(first_lower, new_aliases))
                 if norm in data['labels']:
                     already.add(first_lower(data['labels'][norm]))
                 for alias in old_aliases:
                     # NOTE(review): alias is checked un-lowered against
                     # the first-lowered set -- confirm intent.
                     if alias not in already:
                         new_aliases.append(alias)
                         already.add(alias)
                 data['aliases'][norm] = new_aliases
             data['aliases'][lang] = []
             ret = True
     return ret
开发者ID:matejsuchanek,项目名称:pywikibot-scripts,代码行数:39,代码来源:wikidata_cleanup_toolkit.py

示例11: treat


#.........这里部分代码省略.........
                    # small chunk of text to search
                    search_text = text[m.end():m.end() + context]
                    # figure out where the link (and sentance) ends, put note
                    # there
                    end_of_word_match = re.search(r'\s', search_text)
                    if end_of_word_match:
                        position_split = end_of_word_match.start(0)
                    else:
                        position_split = 0
                    # insert dab needed template
                    text = (text[:m.end() + position_split] +
                            self.dn_template_str +
                            text[m.end() + position_split:])
                    dn = True
                    continue
                elif choice in ['u', 'U']:
                    # unlink - we remove the section if there's any
                    text = text[:m.start()] + link_text + text[m.end():]
                    unlink_counter += 1
                    continue
                else:
                    if len(choice) > 0 and choice[0] == 'r':
                        # we want to throw away the original link text
                        replaceit = link_text == page_title
                        choice = choice[1:]
                    elif include == "redirect":
                        replaceit = True
                    else:
                        replaceit = False

                    try:
                        choice = int(choice)
                    except ValueError:
                        pywikibot.output(u"Unknown option")
                        # step back to ask the user again what to do with the
                        # current link
                        curpos -= 1
                        continue
                    if choice >= len(self.alternatives) or choice < 0:
                        pywikibot.output(
                            u"Choice out of range. Please select a number "
                            u"between 0 and %i." % (len(self.alternatives) - 1))
                        # show list of possible choices
                        self.listAlternatives()
                        # step back to ask the user again what to do with the
                        # current link
                        curpos -= 1
                        continue
                    new_page_title = self.alternatives[choice]
                    repPl = pywikibot.Page(pywikibot.Link(new_page_title,
                                                          disambPage.site))
                    if (new_page_title[0].isupper() or
                            link_text[0].isupper()):
                        new_page_title = repPl.title()
                    else:
                        new_page_title = repPl.title()
                        new_page_title = first_lower(new_page_title)
                    if new_page_title not in new_targets:
                        new_targets.append(new_page_title)
                    if replaceit and trailing_chars:
                        newlink = "[[%s%s]]%s" % (new_page_title,
                                                  section,
                                                  trailing_chars)
                    elif replaceit or (new_page_title == link_text and
                                       not section):
                        newlink = "[[%s]]" % new_page_title
                    # check if we can create a link with trailing characters
                    # instead of a pipelink
                    elif (
                        (len(new_page_title) <= len(link_text)) and
                        (firstcap(link_text[:len(new_page_title)]) == firstcap(new_page_title)) and
                        (re.sub(self.trailR, '', link_text[len(new_page_title):]) == '') and
                        (not section)
                    ):
                        newlink = "[[%s]]%s" \
                                  % (link_text[:len(new_page_title)],
                                     link_text[len(new_page_title):])
                    else:
                        newlink = "[[%s%s|%s]]" \
                                  % (new_page_title, section, link_text)
                    text = text[:m.start()] + newlink + text[m.end():]
                    continue

                pywikibot.output(text[max(0, m.start() - 30):m.end() + 30])
            if text == original_text:
                pywikibot.output(u'\nNo changes have been made:\n')
            else:
                pywikibot.output(u'\nThe following changes have been made:\n')
                pywikibot.showDiff(original_text, text)
                pywikibot.output(u'')
                # save the page
                self.setSummaryMessage(disambPage, new_targets, unlink_counter,
                                       dn)
                try:
                    refPage.put_async(text, summary=self.comment)
                except pywikibot.LockedPage:
                    pywikibot.output(u'Page not saved: page is locked')
                except pywikibot.PageNotSaved as error:
                    pywikibot.output(u'Page not saved: %s' % error.args)
        return True
开发者ID:donkaban,项目名称:pywiki-bot,代码行数:101,代码来源:solve_disambiguation.py

示例12: replace_links

    def replace_links(self, text, linkedPage, targetPage):
        """Replace all source links by target.

        Scan text for wikilinks to linkedPage and rewrite them to point
        to targetPage, preserving labels, sections and link trails.

        @param text: wikitext to process
        @param linkedPage: page whose incoming links are rewritten
        @param targetPage: page the links should point to afterwards
        @return: the updated wikitext
        """
        mysite = pywikibot.Site()
        linktrail = mysite.linktrail()

        # make a backup of the original text so we can show the changes later
        linkR = re.compile(r'\[\[(?P<title>[^\]\|#]*)(?P<section>#[^\]\|]*)?'
                           r'(\|(?P<label>[^\]]*))?\]\](?P<linktrail>' + linktrail + ')')
        curpos = 0
        # This loop will run until we have finished the current page
        while True:
            m = linkR.search(text, pos=curpos)
            if not m:
                break
            # Make sure that next time around we will not find this same hit.
            curpos = m.start() + 1
            # ignore interwiki links and links to sections of the same page
            if m.group('title').strip() == '' or \
               mysite.isInterwikiLink(m.group('title')):
                continue
            else:
                actualLinkPage = pywikibot.Page(targetPage.site, m.group('title'))
                # Check whether the link found is to page.
                if actualLinkPage != linkedPage:
                    continue

            # The link looks like this:
            # [[page_title|link_text]]trailing_chars
            page_title = m.group('title')
            link_text = m.group('label')

            if not link_text:
                # or like this: [[page_title]]trailing_chars
                link_text = page_title
            if m.group('section') is None:
                section = ''
            else:
                section = m.group('section')
            if section and targetPage.section():
                pywikibot.warning(
                    'Source section {0} and target section {1} found. '
                    'Skipping.'.format(section, targetPage))
                continue
            trailing_chars = m.group('linktrail')
            if trailing_chars:
                link_text += trailing_chars

            # remove preleading ":"
            if link_text[0] == ':':
                link_text = link_text[1:]
            # keep the capitalization style of the existing link text
            if link_text[0].isupper() or link_text[0].isdigit():
                new_page_title = targetPage.title()
            else:
                new_page_title = first_lower(targetPage.title())

            # remove preleading ":"
            if new_page_title[0] == ':':
                new_page_title = new_page_title[1:]

            if (new_page_title == link_text and not section):
                newlink = "[[%s]]" % new_page_title
            # check if we can create a link with trailing characters instead of a
            # pipelink
            elif (len(new_page_title) <= len(link_text) and
                  firstcap(link_text[:len(new_page_title)]) ==
                  firstcap(new_page_title) and
                  re.sub(re.compile(linktrail), '',
                         link_text[len(new_page_title):]) == '' and
                  not section):
                newlink = "[[%s]]%s" % (link_text[:len(new_page_title)],
                                        link_text[len(new_page_title):])
            else:
                newlink = "[[%s%s|%s]]" % (new_page_title, section, link_text)
            text = text[:m.start()] + newlink + text[m.end():]
            continue
        return text
开发者ID:AbdealiJK,项目名称:pywikibot-core,代码行数:76,代码来源:fixing_redirects.py

示例13: handleOneLink

        def handleOneLink(match):
            titleWithSection = match.group('titleWithSection')
            label = match.group('label')
            trailingChars = match.group('linktrail')
            newline = match.group('newline')

            if not self.site.isInterwikiLink(titleWithSection):
                # The link looks like this:
                # [[page_title|link_text]]trailing_chars
                # We only work on namespace 0 because pipes and linktrails work
                # differently for images and categories.
                page = pywikibot.Page(pywikibot.Link(titleWithSection,
                                                     self.site))
                try:
                    namespace = page.namespace()
                except pywikibot.InvalidTitle:
                    return match.group()
                if namespace == 0:
                    # Replace underlines by spaces, also multiple underlines
                    titleWithSection = re.sub('_+', ' ', titleWithSection)
                    # Remove double spaces
                    titleWithSection = re.sub('  +', ' ', titleWithSection)
                    # Remove unnecessary leading spaces from title,
                    # but remember if we did this because we eventually want
                    # to re-add it outside of the link later.
                    titleLength = len(titleWithSection)
                    titleWithSection = titleWithSection.lstrip()
                    hadLeadingSpaces = (len(titleWithSection) != titleLength)
                    hadTrailingSpaces = False
                    # Remove unnecessary trailing spaces from title,
                    # but remember if we did this because it may affect
                    # the linktrail and because we eventually want to
                    # re-add it outside of the link later.
                    if not trailingChars:
                        titleLength = len(titleWithSection)
                        titleWithSection = titleWithSection.rstrip()
                        hadTrailingSpaces = (len(titleWithSection) !=
                                             titleLength)

                    # Convert URL-encoded characters to unicode
                    from pywikibot.page import url2unicode
                    titleWithSection = url2unicode(titleWithSection,
                                                   encodings=self.site)

                    if titleWithSection == '':
                        # just skip empty links.
                        return match.group()

                    # Remove unnecessary initial and final spaces from label.
                    # Please note that some editors prefer spaces around pipes.
                    # (See [[en:Wikipedia:Semi-bots]]). We remove them anyway.
                    if label is not None:
                        # Remove unnecessary leading spaces from label,
                        # but remember if we did this because we want
                        # to re-add it outside of the link later.
                        labelLength = len(label)
                        label = label.lstrip()
                        hadLeadingSpaces = (len(label) != labelLength)
                        # Remove unnecessary trailing spaces from label,
                        # but remember if we did this because it affects
                        # the linktrail.
                        if not trailingChars:
                            labelLength = len(label)
                            label = label.rstrip()
                            hadTrailingSpaces = (len(label) != labelLength)
                    else:
                        label = titleWithSection
                    if trailingChars:
                        label += trailingChars

                    if titleWithSection == label or \
                       first_lower(titleWithSection) == label:
                        newLink = "[[%s]]" % label
                    # Check if we can create a link with trailing characters
                    # instead of a pipelink
                    elif (len(titleWithSection) <= len(label) and
                          label[:len(titleWithSection)] == titleWithSection and
                          re.sub(trailR, '',
                                 label[len(titleWithSection):]) == ''):
                        newLink = "[[%s]]%s" % (label[:len(titleWithSection)],
                                                label[len(titleWithSection):])
                    else:
                        # Try to capitalize the first letter of the title.
                        # Not useful for languages that don't capitalize nouns.
                        # TODO: Add a configuration variable for each site,
                        # which determines if the link target is written in
                        # uppercase
                        if self.site.sitename == 'wikipedia:de':
                            titleWithSection = first_upper(titleWithSection)
                        newLink = "[[%s|%s]]" % (titleWithSection, label)
                    # re-add spaces that were pulled out of the link.
                    # Examples:
                    #   text[[ title ]]text        -> text [[title]] text
                    #   text[[ title | name ]]text -> text [[title|name]] text
                    #   text[[ title |name]]text   -> text[[title|name]]text
                    #   text[[title| name]]text    -> text [[title|name]]text
                    if hadLeadingSpaces and not newline:
                        newLink = ' ' + newLink
                    if hadTrailingSpaces:
                        newLink = newLink + ' '
#.........这里部分代码省略.........
开发者ID:PersianWikipedia,项目名称:pywikibot-core,代码行数:101,代码来源:cosmetic_changes.py

示例14: in

    genFactory = pagegenerators.GeneratorFactory(site=site)
    for ns in (0, 14, 100):
        if family != 'wikisource' and ns == 100: # fixme: cswikiquote
            continue
        if family == 'wikisource' and ns == 0:
            continue
        genFactory.handleArg('-ns:%i' % ns)
    genFactory.handleArg('-unconnectedpages')
    generator = genFactory.getCombinedGenerator(preload=True)

    for page in generator:
        if page.namespace() != 14 and page.isDisambig():
            continue

        for template, fields in textlib.extract_templates_and_params(page.text):
            if first_lower(template) not in tp_map[project].keys():
                continue

            params = tp_map[project][first_lower(template)]
            for key in fields.keys():
                if key not in params.keys():
                    continue

                title = fields[key].strip()
                if not title:
                    continue

                target_lang = lang
                target_family = family
                if isinstance(params[key], dict):
                    if params[key].get('namespaces', []) and page.namespace() not in params[key]['namespaces']:
开发者ID:matejsuchanek,项目名称:pywikibot-scripts,代码行数:31,代码来源:connect.py

示例15: treat

def treat(text, linkedPage, targetPage):
    """Based on the method of the same name in solve_disambiguation.py.

    Replace wikilinks to linkedPage in text with links to targetPage,
    preserving labels, sections and link trails where possible.

    @param text: wikitext to process
    @param linkedPage: page that is currently linked
    @param targetPage: page that should be linked instead
    @return: the updated wikitext
    """
    mysite = pywikibot.Site()
    linktrail = mysite.linktrail()

    # make a backup of the original text so we can show the changes later
    linkR = re.compile(
        r"\[\[(?P<title>[^\]\|#]*)(?P<section>#[^\]\|]*)?" r"(\|(?P<label>[^\]]*))?\]\](?P<linktrail>" + linktrail + ")"
    )
    curpos = 0
    # This loop will run until we have finished the current page
    while True:
        m = linkR.search(text, pos=curpos)
        if not m:
            break
        # Make sure that next time around we will not find this same hit.
        curpos = m.start() + 1
        # ignore interwiki links and links to sections of the same page
        if m.group("title").strip() == "" or mysite.isInterwikiLink(m.group("title")):
            continue
        else:
            actualLinkPage = pywikibot.Page(targetPage.site, m.group("title"))
            # Check whether the link found is to page.
            if actualLinkPage != linkedPage:
                continue

        # NOTE(review): choice is fixed to "y", so the unlink ("uU") and
        # replace ("rR") branches below can never trigger -- presumably a
        # deliberate simplification of the interactive original; confirm.
        choice = "y"

        # The link looks like this:
        # [[page_title|link_text]]trailing_chars
        page_title = m.group("title")
        link_text = m.group("label")

        if not link_text:
            # or like this: [[page_title]]trailing_chars
            link_text = page_title
        if m.group("section") is None:
            section = ""
        else:
            section = m.group("section")
        trailing_chars = m.group("linktrail")
        if trailing_chars:
            link_text += trailing_chars

        if choice in "uU":
            # unlink - we remove the section if there's any
            text = text[: m.start()] + link_text + text[m.end() :]
            continue
        replaceit = choice in "rR"

        # remove preleading ":"
        if link_text[0] == ":":
            link_text = link_text[1:]
        # keep the capitalization style of the existing link text
        if link_text[0].isupper():
            new_page_title = targetPage.title()
        else:
            new_page_title = first_lower(targetPage.title())

        # remove preleading ":"
        if new_page_title[0] == ":":
            new_page_title = new_page_title[1:]

        if replaceit and trailing_chars:
            newlink = "[[%s%s]]%s" % (new_page_title, section, trailing_chars)
        elif replaceit or (new_page_title == link_text and not section):
            newlink = "[[%s]]" % new_page_title
        # check if we can create a link with trailing characters instead of a
        # pipelink
        elif (
            len(new_page_title) <= len(link_text)
            and firstcap(link_text[: len(new_page_title)]) == firstcap(new_page_title)
            and re.sub(re.compile(linktrail), "", link_text[len(new_page_title) :]) == ""
            and not section
        ):
            newlink = "[[%s]]%s" % (link_text[: len(new_page_title)], link_text[len(new_page_title) :])
        else:
            newlink = "[[%s%s|%s]]" % (new_page_title, section, link_text)
        text = text[: m.start()] + newlink + text[m.end() :]
        continue
    return text
开发者ID:hasteur,项目名称:pywikibot_scripts,代码行数:80,代码来源:fixing_redirects.py


注:本文中的pywikibot.tools.first_lower函数示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。