

Python pywikibot.getSite Function Code Examples

This article collects typical usage examples of the pywikibot.getSite function in Python. If you are wondering what getSite does, how to call it, or what real-world uses of getSite look like, the hand-picked code examples below may help.


Fifteen code examples of getSite are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
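
Before the collected examples, a minimal sketch of the typical call pattern may help orient the reader. It assumes the old compat-era API, in which getSite() returns a Site object for a language code and family name (current Pywikibot releases deprecate it in favour of pywikibot.Site()); the page title used here is only an illustration.

import pywikibot

# With no arguments, getSite() returns the default site configured in
# user-config.py.
site = pywikibot.getSite()

# An explicit language code and family name select another wiki, e.g. the
# Malagasy Wiktionary used by several of the examples below.
mg_wikt = pywikibot.getSite('mg', 'wiktionary')

# The returned Site object is then passed to Page, Category, etc.
page = pywikibot.Page(mg_wikt, u'rano')  # hypothetical page title
print page.title()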

Example 1: main

def main():
    # Excerpt from a compat-era pywikipedia script: `wikipedia` is the old
    # framework module, and `traite`/`tupleur` are helpers defined elsewhere
    # in the same file.
    startpage = "Anarana iombonana amin'ny teny malagasy"
    pages = pagegenerators.CategorizedPageGenerator(
        catlib.Category(pywikibot.getSite('mg', 'wiktionary'), startpage))
    for page in pages:
        pagename = page.title()
        try:
            t_p = page.get()
        except wikipedia.NoPage:
            print '  Tsy misy pejy.'  # "No page."
            t_p = ''
        except wikipedia.IsRedirectPage:
            print '  Pejy fihodinana.'  # "Redirect page."
            continue
        except wikipedia.Error:
            print '  Hadisoana.'  # "Error."
            continue
        f_pages = traite(pagename)   # returns a tuple [1s, 2s, 3s, 1pi, 1pp, 2pp, 3pp] of page titles
        c_pages = tupleur(pagename)  # returns a tuple [1s, 2s, 3s, 1pi, 1pp, 2pp, 3pp] of page contents
        cont = 0
        b = 0
        while cont <= 6:
            try:
                wikipedia.output(wikipedia.Page(wikipedia.getSite('mg', 'wiktionary'), f_pages[cont]).get())
                b += 1
                print b
                cont += 6
            except wikipedia.NoPage:
                try:
                    wikipedia.Page(wikipedia.getSite('mg', 'wiktionary'), f_pages[cont]).put(c_pages[cont])
                    cont = cont + 1
                except UnicodeDecodeError:
                    cont = cont + 1
                    continue
            if cont >= 6:
                break
Developer: Webysther, Project: botjagwar, Lines: 35, Source: malagasy_declinaison.py

Example 2: __iter__

 def __iter__(self):
     """Yield page objects until the entire XML dump has been read."""
     from pywikibot import xmlreader
     mysite = pywikibot.getSite()
     dump = xmlreader.XmlDump(self.xmlfilename)
     # regular expression to find the original template.
     # {{vfd}} does the same thing as {{Vfd}}, so both will be found.
     # The old syntax, {{msg:vfd}}, will also be found.
     # TODO: check site.nocapitalize()
     templatePatterns = []
     for template in self.templates:
         templatePattern = template.titleWithoutNamespace()
         if not pywikibot.getSite().nocapitalize:
             templatePattern = '[%s%s]%s' % (templatePattern[0].upper(),
                                             templatePattern[0].lower(),
                                             templatePattern[1:])
         templatePattern = re.sub(' ', '[_ ]', templatePattern)
         templatePatterns.append(templatePattern)
     templateRegex = re.compile(
         r'\{\{ *([mM][sS][gG]:)?(?:%s) *(?P<parameters>\|[^}]+|) *}}'
                                % '|'.join(templatePatterns))
     for entry in dump.parse():
         if templateRegex.search(entry.text):
             page = pywikibot.Page(mysite, entry.title)
             yield page
Developer: reza1615, Project: pywikipedia-rewrite, Lines: 25, Source: template.py

Example 3: addCats

	def addCats(self):
		text = u"""
[[Categorie:Filme românești]]
[[Categorie:Filme în limba română]]
"""
		if self._year:
			text += u"[[Categorie:Filme din %d]]\n" % self._year
		if self._director:
			directors = self._director.split(",")
			for director in directors:
				cat = u"Categorie:Filme regizate de %s" % director.strip()
				cat = pywikibot.Category(pywikibot.getSite(), cat)
				if cat.exists():
					text += u"[[Categorie:Filme regizate de %s]]\n" % director.strip()
		for t in self._types:
			cat = u"Filme de %s" % t.lower()
			catp = None
			if cat in categories:
				catp = pywikibot.Category(pywikibot.getSite(), categories[cat])
			if not catp or not catp.exists():
				catp = pywikibot.Category(pywikibot.getSite(), cat)

			if catp.exists():
				for p in catp.templatesWithParams():
					if p[0].title() == "Format:Redirect categorie":
						break
				else:
					text += u"[[%s]]\n" % catp.title()
		self._text += text
Developer: rowiki, Project: wikiro, Lines: 29, Source: filme_ro.py

Example 4: getTranslatedStringForUser

	def getTranslatedStringForUser(self):
		"""
		Gets the local namespace name for User pages. e.g. Bruker on no.

		Uses pywikibot.

		API method:
			https://no.wikipedia.org/w/api.php?action=query&meta=siteinfo
				 &siprop=namespaces&format=json
		"""
		try:
			logging.info("Fetching User Namespace Name")
			format_language = self.language
			if '_' in format_language:
				wikiSite = pywikibot.getSite(format_language.replace('_','-'))
			else:
				wikiSite = pywikibot.getSite(self.language)
			#print wikiSite
			r = pywikibot.data.api.Request(
				site=wikiSite, action="query", meta="siteinfo")
			r['siprop'] = u'namespaces'
			data = r.submit()
			if self.language == 'pt':
				localized_user = data['query']['namespaces']['2']['*']
				return localized_user.split('(')[0]
			else:
				return data['query']['namespaces']['2']['*']
		except pywikibot.exceptions.NoSuchSite, e:
			logging.error(e)		
Developer: uduwage, Project: Multilingual-Wikipedian-Research, Lines: 29, Source: CollectUsersWithTemplates.py

Example 5: __init__

 def __init__(self, pageToUnlink, namespaces, always):
     self.pageToUnlink = pageToUnlink
     gen = pagegenerators.ReferringPageGenerator(pageToUnlink)
     if namespaces != []:
         gen = pagegenerators.NamespaceFilterPageGenerator(gen, namespaces)
     self.generator = pagegenerators.PreloadingGenerator(gen)
     linktrail = pywikibot.getSite().linktrail()
     # The regular expression which finds links. Results consist of four
     # groups:
     #
     # group title is the target page title, that is, everything
     # before | or ].
     #
     # group section is the page section.
     # It'll include the # to make life easier for us.
     #
     # group label is the alternative link title, that's everything
     # between | and ].
     #
     # group linktrail is the link trail, that's letters after ]] which are
     # part of the word.
     # note that the definition of 'letter' varies from language to language.
     self.linkR = re.compile(r'\[\[(?P<title>[^\]\|#]*)(?P<section>#[^\]\|]*)?(\|(?P<label>[^\]]*))?\]\](?P<linktrail>%s)'
                             % linktrail)
     self.always = always
     self.done = False
     self.comment = i18n.twtranslate(pywikibot.getSite(), 'unlink-unlinking',
                                     self.pageToUnlink.title())
Developer: blueprintmrk, Project: pywikibot-core, Lines: 28, Source: unlink.py

Example 6: __init__

 def __init__(self, page, filename, summary, dry, always):
     self.page = pywikibot.Page( pywikibot.getSite(), page )
     self.filename = filename
     self.summary = summary
     if not self.summary:
         self.summary = pywikibot.translate(pywikibot.getSite(), self.msg)
     pywikibot.setAction( self.summary )
Developer: masao, Project: savemlak, Lines: 7, Source: put.py

Example 7: save_translation_from_bridge_language

 def save_translation_from_bridge_language(self, infos):
     summary = "Dikan-teny avy amin'ny dikan-teny avy amin'i %(olang)s.wiktionary"%infos
     wikipage = self.output.wikipage(infos)
     try: 
         mg_Page = wikipedia.Page(wikipedia.getSite('mg','wiktionary'), infos['entry'])
     except UnicodeDecodeError: 
         mg_Page = wikipedia.Page(wikipedia.getSite('mg','wiktionary'), infos['entry'].decode('utf8'))
         
     try:
         if mg_Page.exists():
             pagecontent = mg_Page.get()
             if pagecontent.find('{{=%s=}}'%infos['lang'])!=-1:
                 if verbose: print "Efa misy ilay teny iditra."
                 self.output.db(infos)
                 return
             else:
                 wikipage += pagecontent
                 summary= u"+"+summary 
     except wikipedia.exceptions.IsRedirectPage:                
         infos['entry'] = mg_Page.getRedirectTarget().title()
         # Follow the redirect and retry with its target as the entry.
         self.save_translation_from_bridge_language(infos)
         return
     
     except wikipedia.exceptions.InvalidTitle:
         if verbose: print "lohateny tsy mety ho lohatenim-pejy"
         return
     
     except Exception:
         return
     
 
     if verbose: 
         wikipedia.output("\n \03{red}%(entry)s\03{default} : %(lang)s "%infos)
         wikipedia.output("\03{white}%s\03{default}"%wikipage)
     mg_Page.put_async(wikipage, summary)
Developer: Webysther, Project: botjagwar, Lines: 35, Source: dikantenyvaovao+-+Copie.py

Example 8: MakeAppendix

def MakeAppendix(mot):
    verb = mot.title()
    form = """{{subst:-e-mat-vo|%s}}
[[sokajy:Volapoky/Matoanteny|%s]]"""%(verb[:-2], verb[0])
    
    wikipedia.Page(wikipedia.getSite('mg','wiktionary'), "Wiktionary:Raki-bolana volapoky/matoanteny/%s"%verb).put("#FIHODINANA [[Rakibolana:volapoky/matoanteny/%s]]"%verb)
    wikipedia.Page(wikipedia.getSite('mg','wiktionary'), "Rakibolana:volapoky/matoanteny/%s"%verb).put(form,'Matoanteny %s' %verb)
Developer: Webysther, Project: botjagwar, Lines: 7, Source: voverb.py

Example 9: main

def main():
    featured = False
    gen = None

    # This factory is responsible for processing command line arguments
    # that are also used by other scripts and that determine on which pages
    # to work on.
    genFactory = pagegenerators.GeneratorFactory()

    for arg in pywikibot.handleArgs():
        if arg == '-featured':
            featured = True
        else:
            genFactory.handleArg(arg)

    mysite = pywikibot.getSite()
    if mysite.sitename() == 'wikipedia:nl':
        pywikibot.output(
            u'\03{lightred}There is consensus on the Dutch Wikipedia that bots should not be used to fix redirects.\03{default}')
        sys.exit()

    if featured:
        featuredList = i18n.translate(mysite, featured_articles)
        ref = pywikibot.Page(pywikibot.getSite(), featuredList)
        gen = pagegenerators.ReferringPageGenerator(ref)
        gen = pagegenerators.NamespaceFilterPageGenerator(gen, [0])
    if not gen:
        gen = genFactory.getCombinedGenerator()
    if gen:
        for page in pagegenerators.PreloadingGenerator(gen):
            workon(page)
    else:
        pywikibot.showHelp('fixing_redirects')
Developer: octobertech, Project: pywikibot-core, Lines: 33, Source: fixing_redirects.py

Example 10: __init__

    def __init__(self, generator, dry, always):
        """
        Constructor. Parameters:
            * generator - The page generator that determines on which pages
                          to work on.
            * dry       - If True, doesn't do any real changes, but only shows
                          what would have been changed.
            * always    - If True, don't prompt for each redirect page.
        """
        self.generator = generator
        self.dry = dry
        self.always = always
        self.lang = pywikibot.getSite().lang
        
        # Set the edit summary message
        self.summary = pywikibot.translate(pywikibot.getSite(), self.msg)
        self.templates = pywikibot.translate(pywikibot.getSite(), self.taxoboxTemplates)
        self.templateParameters = pywikibot.translate(pywikibot.getSite(), self.sciNameParameters)

        # Initialize the cache
        try:
            self.cache = pickle.load(file(self.cacheFilename, 'rb'))
        except:
            self.cache = {}
        if not self.lang in self.cache:
            self.cache[self.lang] = {}
Developer: silvonen, Project: pywikipedia-fi, Lines: 26, Source: sciname.py

Example 11: checkWait

def checkWait():
        newlist = ""  # blank variable for later
        site = pywikibot.getSite()
        pagename = localconfig.waitlist
        page = pywikibot.Page(site, pagename)
        waiters = page.get()
        waiters = waiters.replace("}}", "")
        waiters = waiters.replace("*{{User|", "")
        waiters = waiters.split("\n")
        for waiter in waiters:
                if waiter == "":continue  # Non-existant user
                if checkRegisterTime(waiter, 7, False):continue
                if checkBlocked(waiter):continue  # If user is blocked, skip putting them back on the list.
                if getEditCount(waiter) == True:  # If edited, send them to UAA
                        checkUser(waiter, False, False)
                        continue
                if waiter in newlist:continue  # If user already in the list, in case duplicates run over
                # Continue if none of the other checks have issues with the conditions for staying on the waitlist
                newlist = newlist + "\n*{{User|" + waiter + "}}"
                # print "\n*{{User|" + waiter + "}}"
        summary = localconfig.editsumwait
        site = pywikibot.getSite()
        pagename = localconfig.waitlist
        page = pywikibot.Page(site, pagename)
        pagetxt = page.get()
        newlist = newlist.replace("\n*{{User|}}", "")
        page.put(newlist, comment=summary)
Developer: dqwiki, Project: UAA, Lines: 27, Source: globalfunc.py

Example 12: main

def main():
    index = None
    djvu = None
    pages = None
    # Ask for confirmation before each change (set by the -ask option below).
    ask = False
    overwrite = 'ask'

    # Parse command line arguments
    for arg in pywikibot.handleArgs():
        if arg.startswith("-ask"):
            ask = True
        elif arg.startswith("-overwrite:"):
            overwrite = arg[11:12]
            if overwrite != 'y' and overwrite != 'n':
                pywikibot.output(
                    u"Unknown argument %s; will ask before overwriting" % arg)
                overwrite = 'ask'
        elif arg.startswith("-djvu:"):
            djvu = arg[6:]
        elif arg.startswith("-index:"):
            index = arg[7:]
        elif arg.startswith("-pages:"):
            pages = arg[7:]
        else:
            pywikibot.output(u"Unknown argument %s" % arg)

    # Check the djvu file exists
    if djvu:
        os.stat(djvu)

        if not index:
            import os.path
            index = os.path.basename(djvu)

    if djvu and index:
        site = pywikibot.getSite()
        index_page = pywikibot.Page(site, index)

        if site.family.name != 'wikisource':
            raise pywikibot.PageNotFound(
                u"Found family '%s'; Wikisource required." % site.family.name)

        if not index_page.exists() and index_page.namespace() == 0:
            index_namespace = site.mediawiki_message(
                'Proofreadpage index namespace')

            index_page = pywikibot.Page(pywikibot.getSite(),
                                        u"%s:%s" % (index_namespace, index))
        if not index_page.exists():
            raise pywikibot.NoPage(u"Page '%s' does not exist" % index)
        pywikibot.output(u"uploading text from %s to %s"
                         % (djvu, index_page.title(asLink=True)))
        bot = DjVuTextBot(djvu, index, pages, ask, overwrite)
        if not bot.has_text():
            raise ValueError("No text layer in djvu file")
        bot.run()
    else:
        pywikibot.showHelp()
Developer: Rodehi, Project: GFROS, Lines: 59, Source: djvutext.py

Example 13: main

def main():
    #page generator
    gen = None
    # This temporary array is used to read the page title if one single
    # page to work on is specified by the arguments.
    pageTitle = []
    # Which namespaces should be processed?
    # default to [] which means all namespaces will be processed
    namespaces = []
    # Never ask before changing a page
    always = False
    # This factory is responsible for processing command line arguments
    # that are also used by other scripts and that determine on which pages
    # to work on.
    genFactory = pagegenerators.GeneratorFactory()

    for arg in pywikibot.handleArgs():
        if arg.startswith('-xml'):
            if len(arg) == 4:
                xmlFilename = i18n.input('pywikibot-enter-xml-filename')
            else:
                xmlFilename = arg[5:]
            gen = XmlDumpNoReferencesPageGenerator(xmlFilename)
        elif arg.startswith('-namespace:'):
            try:
                namespaces.append(int(arg[11:]))
            except ValueError:
                namespaces.append(arg[11:])
        elif arg == '-always':
            always = True
        else:
            if not genFactory.handleArg(arg):
                pageTitle.append(arg)

    if pageTitle:
        page = pywikibot.Page(pywikibot.getSite(), ' '.join(pageTitle))
        gen = iter([page])
    if not gen:
        gen = genFactory.getCombinedGenerator()
    if not gen:
        site = pywikibot.getSite()
        try:
            cat = maintenance_category[site.family.name][site.lang]
        except:
            pass
        else:
            if not namespaces:
                namespaces = [0]
            cat = pywikibot.Category(site, "%s:%s" % (
                site.category_namespace(), cat))
            gen = pagegenerators.CategorizedPageGenerator(cat)
    if not gen:
        pywikibot.showHelp('noreferences')
    else:
        if namespaces:
            gen = pagegenerators.NamespaceFilterPageGenerator(gen, namespaces)
        preloadingGen = pagegenerators.PreloadingGenerator(gen)
        bot = NoReferencesBot(preloadingGen, always)
        bot.run()
Developer: bjonesin, Project: pywikibot-core, Lines: 59, Source: noreferences.py

Example 14: __init__

 def __init__(self, myscraper, testing=False):
     self.myscraper = myscraper
     self.testing = testing
     if testing:
         self.destination_site = pywikibot.getSite("test", "test")
     else:
         self.destination_site = pywikibot.getSite("commons", "commons")
     print self.destination_site
Developer: gameguy43, Project: usable_image_scraper, Lines: 8, Source: wikiuploader.py

Example 15: __init__

 def __init__(self, page, filename, summary, overwrite):
     self.page = pywikibot.Page( pywikibot.getSite(), page )
     self.filename = filename
     self.summary = summary
     self.overwrite = overwrite
     if not self.summary:
         self.summary = pywikibot.translate(pywikibot.getSite(), self.msg)
     pywikibot.setAction( self.summary )
Developer: masao, Project: savemlak, Lines: 8, Source: createpage.py


Note: The pywikibot.getSite examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.