This article collects typical usage examples of the Python function pywikibot.removeDisabledParts. If you have been wondering what exactly removeDisabledParts does, how to call it, or where to find real-world examples, the curated code samples below should help.
Fifteen code examples of removeDisabledParts are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
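Before the examples, a minimal sketch of the basic call. This assumes the compat-era pywikibot API, where removeDisabledParts is exposed at module level (in current releases the equivalent lives in pywikibot.textlib); the exact default tag set may vary by version.

import pywikibot

text = 'kept <!-- stripped comment --> kept <nowiki>{{stripped}}</nowiki>'
# By default, removeDisabledParts() strips HTML comments and the contents
# of disabled tags such as <nowiki>, <pre> and <source>.
cleaned = pywikibot.removeDisabledParts(text)
# A tags= list restricts the removal to specific kinds of disabled parts,
# as several of the examples below do:
no_comments = pywikibot.removeDisabledParts(text, tags=['comments'])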
Example 1: weblinksIn
def weblinksIn(text, withoutBracketed=False, onlyBracketed=False):
    text = pywikibot.removeDisabledParts(text)
    # MediaWiki parses templates before parsing external links. Thus, there
    # might be a | or a } directly after a URL which does not belong to
    # the URL itself.
    # First, remove the curly braces of inner templates:
    nestedTemplateR = re.compile(r'{{([^}]*?){{(.*?)}}(.*?)}}')
    while nestedTemplateR.search(text):
        text = nestedTemplateR.sub(r'{{\1 \2 \3}}', text)
    # Then blow up the templates with spaces so that the | and }} will not
    # be regarded as part of the link.
    templateWithParamsR = re.compile(r'{{([^}]*?[^ ])\|([^ ][^}]*?)}}',
                                     re.DOTALL)
    while templateWithParamsR.search(text):
        text = templateWithParamsR.sub(r'{{ \1 | \2 }}', text)
    linkR = pywikibot.compileLinkR(withoutBracketed, onlyBracketed)
    # Remove HTML comments in URLs as well as URLs in HTML comments.
    # Also remove text inside nowiki links etc.
    text = pywikibot.removeDisabledParts(text)
    for m in linkR.finditer(text):
        if m.group('url'):
            yield m.group('url')
        else:
            yield m.group('urlb')
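Since weblinksIn is a generator, a hedged usage sketch (sample is illustrative text, and the exact match boundaries depend on pywikibot.compileLinkR's URL pattern):

sample = ('External [http://example.org docs] and a template '
          '{{cite web|url=http://example.com/page|title=X}}.')
for url in weblinksIn(sample):
    print url
# Expected, roughly: http://example.org then http://example.com/page;
# the template padding above keeps the trailing | and }} out of the URL.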
Example 2: get
def get(self, force=False, fetch_text=True, cache=True, *args):
    # Realistically no one even wants the property info, and datatype is
    # its own function.
    # Cache controls only saving as cache, not fetching from it
    if fetch_text:
        return_this = super(pywikibot.PropertyPage, self).get(force, *args)  # Do it cuz
    else:
        return_this = {}
    # Check that we don't already have it stored
    if not force and hasattr(self, '_constraints'):
        return return_this
    talk = self.toggleTalkPage()
    if not talk.exists():
        text = ''
    else:
        g = mc.get(self.md5())
        if g is not None:
            self._constraints = ast.literal_eval(g)
            return return_this
        else:
            text = talk.get()
    code = mwparserfromhell.parse(text)
    d = {}
    for temp in code.filter_templates(recursive=False):
        if temp.name.lower().startswith('constraint:'):
            nm = temp.name.lower()[11:]
            nm = normalize(nm)
            if nm == 'format':
                value = unicode(temp.get('pattern').value)
                d[nm] = pywikibot.removeDisabledParts(value, tags=['nowiki'])
            elif nm in ['target', 'item']:
                d[nm] = {'property': unicode(temp.get('property').value),
                         }
                if temp.has_param('item'):
                    d[nm]['item'] = unicode(temp.get('item').value)
            elif nm == 'oneof':
                values = unicode(temp.get('values').value)
                values = pywikibot.removeDisabledParts(values, tags=['comments'])
                values = values.replace('{{Q|', '').replace('{{q|', '').replace('}}', '')
                values = values.split(', ')
                d[nm] = list()
                for v in values:
                    d[nm].append('Q' + v)
            elif nm == 'reciprocal':
                d[nm] = unicode(temp.get('property').value)
            else:
                d[nm] = ''  # Just set a key like the API does
    self._constraints = d
    if cache:
        mc.set(self.md5(), self._constraints, expiry)
    return return_this
Example 3: do_page
def do_page(article):
    pg = pywikibot.Page(site, article)
    if not pg.exists():
        return
    while pg.isRedirectPage():
        pg = pg.getRedirectTarget()
    if pg.namespace() != 2:
        print 'Skipping %s.' % pg.title()
        return
    text = pg.get()
    text = pywikibot.removeDisabledParts(text)
    print '--------%s---------' % pg.title()
    print text[:150]
    print '-------------------'
    x = raw_input('What should the title be? ')
    if x == 's':
        print 'Skipping.'
        return
    elif x == 'o':
        webbrowser.open('http://enwp.org/%s' % pg.title())
        return
    new_title = 'Wikipedia talk:Articles for creation/' + x.strip()
    reason = 'Preferred location for [[WP:AFC|AfC]] submissions'
    new_pg = pywikibot.Page(site, new_title)
    if new_pg.exists():
        print '%s already exists, will add a (2) to the end.' % new_pg.title()
        new_title += ' (2)'
    print 'Moving to %s' % new_title
    pg.move(new_title, reason)
Example 4: __iter__
def __iter__(self):
    import xmlreader
    dump = xmlreader.XmlDump(self.xmlFilename)
    for entry in dump.parse():
        text = pywikibot.removeDisabledParts(entry.text)
        if self.refR.search(text) and not self.referencesR.search(text):
            yield pywikibot.Page(pywikibot.Site(), entry.title)
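For context, a hedged sketch of the scaffolding such an __iter__ appears to belong to (pywikibot's compat noreferences.py has a similar XmlDumpNoReferencesPageGenerator; the regexes below are simplified stand-ins, not the bot's real patterns):

import re

class XmlDumpNoReferencesPageGenerator(object):
    # Simplified scaffolding; only __iter__ is shown in the example above.
    def __init__(self, xmlFilename):
        self.xmlFilename = xmlFilename
        self.refR = re.compile('</?ref', re.IGNORECASE)              # stand-in
        self.referencesR = re.compile('<references', re.IGNORECASE)  # stand-in

Iterating an instance then yields only pages that contain <ref> tags but no <references /> section, with commented-out or <nowiki>'d tags already discarded.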
Example 5: normalize_usk
def normalize_usk(thingy):
    thingy = pywikibot.removeDisabledParts(thingy)
    thingy = thingy.strip()
    if thingy.isdigit():
        if int(thingy) in USK:
            item = pywikibot.ItemPage(repo, USK[int(thingy)])
            return item
Example 6: determineClass
def determineClass(self, code, page):
    if page.toggleTalkPage().isRedirectPage():
        return 'redirect'
    if page.namespace() == 101:
        return 'portal'
    elif page.namespace() == 15:
        return 'category'
    elif page.namespace() == 11:
        return 'template'
    if self.level == 'simple':
        return None
    found = list()
    stub = False
    code = mwparserfromhell.parse(pywikibot.removeDisabledParts(unicode(code)))  # wtf
    for template in code.filter_templates(recursive=True):
        if template.has_param('class'):
            found.append(template.get('class').value.strip())
        if (template.name.lower() in self.stub_templates) and (not stub):
            stub = True
    # check for auto=stub
    if not found:
        if stub:
            return 'stub'
        return None
    if (self.level == 'conservative') and (len(found) == 1):
        if stub:
            return 'stub'
        return None
    if found.count(found[0]) == len(found):  # verifies that all values are equal
        return found[0]
    if self.level in ['inherit', 'conservative']:
        if stub:
            return 'stub'
        return None
    # can only be 'liberal'
    d = {}
    for value in found:
        value = value.lower().strip()
        if not d.has_key(value):
            d[value] = 1
        else:
            d[value] += 1
    # top = d.keys()[d.values().index(max(d.values()))]
    sorted_d = sorted(d.iteritems(), key=operator.itemgetter(1), reverse=True)
    top = sorted_d[0][1]
    top_value = sorted_d[0][0]
    key = 1
    print sorted_d
    if len(sorted_d) == 1:
        return top_value
    while top == sorted_d[key][1]:
        if self.valueClass(top_value) <= self.valueClass(sorted_d[key][0]):
            top_value = sorted_d[key][0]
        key += 1
        if key >= len(sorted_d):  # stay within the bounds of sorted_d
            break
    return top_value
Example 7: lacksReferences
def lacksReferences(self, text):
    """
    Checks whether or not the page is lacking a references tag.
    """
    oldTextCleaned = pywikibot.removeDisabledParts(text)
    if self.referencesR.search(oldTextCleaned) or \
       self.referencesTagR.search(oldTextCleaned):
        if self.verbose:
            pywikibot.output(u'No changes necessary: references tag found.')
        return False
    elif self.referencesTemplates:
        templateR = u'{{(' + u'|'.join(self.referencesTemplates) + ')'
        if re.search(templateR, oldTextCleaned, re.IGNORECASE | re.UNICODE):
            if self.verbose:
                pywikibot.output(
                    u'No changes necessary: references template found.')
            return False
    if not self.refR.search(oldTextCleaned):
        if self.verbose:
            pywikibot.output(u'No changes necessary: no ref tags found.')
        return False
    else:
        if self.verbose:
            pywikibot.output(u'Found ref without references.')
        return True
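What the cleaning step buys here can be shown standalone; the two patterns below are simplified stand-ins for the bot's refR/referencesR attributes, not its real regexes:

import re
import pywikibot

refR = re.compile('<ref', re.IGNORECASE)                 # simplified stand-in
referencesR = re.compile('<references', re.IGNORECASE)   # simplified stand-in

text = u'A claim.<ref>source</ref>\n<!-- <references /> -->'
cleaned = pywikibot.removeDisabledParts(text)
print refR.search(cleaned) is not None         # True: a live ref tag remains
print referencesR.search(cleaned) is not None  # False: only a commented-out one
# Without the cleaning, the commented-out <references /> would wrongly
# satisfy the check and the page would be skipped.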
Example 8: parseInstructions
def parseInstructions(page):
    """
    Parses the index template for all of the parameters
    """
    text = page.get()
    # print u'Parsing instructions for [[%s]].' % page.title()
    key = text.find('{{User:HBC Archive Indexerbot/OptIn')
    data = text[key:].split('}}')[0][36:]  # kinda scared about hardcoding so much
    # remove any comments (apparently users do this)
    cleaned = pywikibot.removeDisabledParts(data)
    info = {}
    info['mask'] = []
    info['talkpage'] = page.title()
    for param in cleaned.split('|'):
        param = clean(param)
        if param.startswith('target='):
            target = clean(param[7:])
            if target.startswith('/'):
                target = page.title() + target
            elif target.startswith('./'):
                target = page.title() + target[1:]
            info['target'] = target
        elif param.startswith('mask='):
            mask = clean(param[5:])
            if mask.startswith('/'):
                mask = page.title() + mask
            elif mask.startswith('./'):
                mask = page.title() + mask[1:]
            info['mask'].append(mask)
        elif param.startswith('indexhere='):
            value = param[10:]
            if clean(value.lower()) == 'yes':
                info['indexhere'] = True
            else:
                info['indexhere'] = False
        elif param.startswith('template='):
            info['template'] = clean(param[9:].replace('\n', ''))
        elif param.startswith('leading_zeros='):
            try:
                info['leading_zeros'] = int(clean(param[14:]))
            except ValueError:
                pass
        elif param.startswith('first_archive='):
            info['first_archive'] = clean(param[14:])
    # set default values if not already set
    for key in info.keys():
        if type(info[key]) == type(u''):
            if info[key].isspace() or (not info[key]):
                del info[key]
    if not info.has_key('leading_zeros'):
        info['leading_zeros'] = 0
    if not info.has_key('indexhere'):
        info['indexhere'] = False
    if not info.has_key('template'):
        info['template'] = 'User:HBC Archive Indexerbot/default template'
    if info['template'] == 'template location':
        info['template'] = 'User:HBC Archive Indexerbot/default template'
    return info
Example 9: procesPage
def procesPage(self, page):
    """
    Process a single page
    """
    item = pywikibot.DataPage(page)
    pywikibot.output('Processing %s' % page)
    if not item.exists():
        pywikibot.output('%s doesn\'t have a wikidata item :(' % page)
        # TODO FIXME: We should provide an option to create the page
    else:
        pagetext = page.get()
        pagetext = pywikibot.removeDisabledParts(pagetext)
        templates = pywikibot.extract_templates_and_params(pagetext)
        for (template, fielddict) in templates:
            # We found the template we were looking for
            if template.replace(u'_', u' ') == self.templateTitle:
                for field, value in fielddict.items():
                    # This field contains something useful for us
                    if field in self.fields:
                        # Check if the property isn't already set
                        claim = self.fields[field]
                        if claim in item.get().get('claims'):
                            pywikibot.output(
                                u'A claim for %s already exists. Skipping'
                                % (claim,))
                            # TODO FIXME: This is a very crude way of dupe
                            # checking
                        else:
                            # Try to extract a valid page
                            match = re.search(
                                re.compile(
                                    r'\[\[(?P<title>[^\]|[#<>{}]*)(\|.*?)?\]\]'),
                                value)
                            if match:
                                try:
                                    link = match.group(1)
                                    linkedPage = pywikibot.Page(self.site,
                                                                link)
                                    if linkedPage.isRedirectPage():
                                        linkedPage = linkedPage.getRedirectTarget()
                                    linkedItem = pywikibot.DataPage(linkedPage)
                                    pywikibot.output('Adding %s --> %s'
                                                     % (claim,
                                                        linkedItem.getID()))
                                    if self.setSource(self.site().language()):
                                        item.editclaim(
                                            str(claim),
                                            linkedItem.getID(),
                                            refs={self.setSource(
                                                self.site().language())})
                                    else:
                                        item.editclaim(str(claim),
                                                       linkedItem.getID())
                                except pywikibot.NoPage:
                                    pywikibot.output(
                                        "[[%s]] doesn't exist so I can't link to it"
                                        % linkedItem.title())
Example 10: normalize_pegi
def normalize_pegi(thingy):
    # BECAUSE PEOPLE DO WEIRD THINGS!
    thingy = pywikibot.removeDisabledParts(thingy)
    thingy = thingy.replace('+', '')
    thingy = thingy.strip()
    if thingy.isdigit():
        if int(thingy) in PEGI:
            item = pywikibot.ItemPage(repo, PEGI[int(thingy)])
            return item
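A hedged usage sketch. PEGI here is a hypothetical rating-to-item mapping and repo a Wikidata repository handle; neither is shown in the snippet, and the item ID below is illustrative:

import pywikibot

site = pywikibot.Site('en', 'wikipedia')
repo = site.data_repository()
PEGI = {18: 'Q14915518'}  # hypothetical mapping: rating -> Wikidata item ID

item = normalize_pegi('18+ <!-- from the infobox -->')
# The HTML comment is stripped, the '+' dropped, and the mapping consulted;
# any value that doesn't reduce to a known digit falls through to None.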
Example 11: do_page
def do_page(self, page):
    print page.title(asLink=True)
    if page.namespace() != 6:
        return
    text = page.get()
    text, gen_fix_summary = self.AWBGenFixes.do_page(text)
    code = mwparserfromhell.parse(text)
    tag = False
    log = '* '
    summary = 'Bot: Updating license tag(s) with image has rationale=yes'
    for template in code.filter_templates(recursive=True):
        name = pywikibot.removeDisabledParts(template.name.lower()).strip()
        print name
        # print self.NFURs
        # time.sleep(5)
        if name in self.NFURs:
            print name
            tag = True
    if tag:
        for template in code.filter_templates(recursive=True):
            name = pywikibot.removeDisabledParts(template.name.lower()).strip()
            if name in self.licenses:
                template.add('image has rationale', 'yes')
                log += '[[:%s]]: Adding <code>|image has rationale=yes</code>' % page.title()
    else:
        print 'Skipping ' + page.title(asLink=True)
        return
    if gen_fix_summary:
        summary += ', also dating ' + gen_fix_summary
    puttext = unicode(code).lstrip('\n')
    pywikibot.showDiff(text, puttext)
    self.output(log)
    self.check_page()
    try:
        page.put(puttext, summary, nocreate=True)
    except pywikibot.exceptions.PageNotSaved:
        pass
    except pywikibot.exceptions.LockedPage:
        pass
Example 12: rem
def rem(text):
    # delete table -- TODO: ^\{\{ or ^[\*\:\#]*\{\{
    text = lre.rmsym(r"\{\|", r"\|\}", text)
    # delete template
    text = lre.rmsym(r"\{\{", r"\}\}", text)
    text = subst.process(text)
    text = pywikibot.removeDisabledParts(text)
    text = pywikibot.removeLanguageLinks(text)
    text = pywikibot.removeCategoryLinks(text)
    text = pywikibot.removeHTMLParts(text)
    return text
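Note that lre.rmsym and subst.process are project-local helpers, not pywikibot APIs. The pywikibot half of the pipeline can be sketched on its own, mirroring the calls above; removeHTMLParts keeps a few tags (such as <nowiki>) by default, so treat this as an approximation of plain text, not a guarantee:

import pywikibot

def strip_wiki_markup(text):
    # Roughly the second half of rem(): drop disabled parts, interlanguage
    # links, category links, and most HTML markup, in that order.
    text = pywikibot.removeDisabledParts(text)
    text = pywikibot.removeLanguageLinks(text)
    text = pywikibot.removeCategoryLinks(text)
    text = pywikibot.removeHTMLParts(text)
    return text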
Example 13: do_page
def do_page(self, text, fixes=True, date=True):
    if fixes:
        text = self.all_fixes(text)
    code = mwparserfromhell.parse(text)
    summary = {}
    for temp in code.filter_templates(recursive=True):
        name = pywikibot.removeDisabledParts(temp.name.lower()).strip()
        if name in self.redirects.keys():
            new_name = self.redirects[name]
            if new_name.lower() != name:  # prevents capitalizing the first letter needlessly
                temp.name = new_name
        if (temp.name.lower() in self.date_these) and date:
            if not temp.has_param('date'):
                temp.add('date', datetime.datetime.today().strftime('%B %Y'))
                if temp.name.lower() in summary.keys():
                    summary[temp.name.lower()] += 1
                else:
                    summary[temp.name.lower()] = 1
    msg = ', '.join('{{%s}} (%s)' % (item, summary[item]) for item in summary.keys())
    return unicode(code), msg
Example 14: standardizePageFooter
def standardizePageFooter(self, text):
    """
    Makes sure that interwiki links, categories and star templates are
    put in the correct position and order. This combines the old
    standardizeInterwiki and standardizeCategories methods.
    The page footer contains the following sections, in this order:
    1. categories
    2. ## TODO: templates beyond categories ##
    3. additional information depending on local site policy
    4. stars templates for featured and good articles
    5. interwiki links
    """
    starsList = [
        u'bueno',
        u'bom interwiki',
        u'cyswllt[ _]erthygl[ _]ddethol', u'dolen[ _]ed',
        u'destacado', u'destaca[tu]',
        u'enllaç[ _]ad',
        u'enllaz[ _]ad',
        u'leam[ _]vdc',
        u'legătură[ _]a[bcf]',
        u'liamm[ _]pub',
        u'lien[ _]adq',
        u'lien[ _]ba',
        u'liên[ _]kết[ _]bài[ _]chất[ _]lượng[ _]tốt',
        u'liên[ _]kết[ _]chọn[ _]lọc',
        u'ligam[ _]adq',
        u'ligoelstara',
        u'ligoleginda',
        u'link[ _][afgu]a', u'link[ _]adq', u'link[ _]f[lm]', u'link[ _]km',
        u'link[ _]sm', u'linkfa',
        u'na[ _]lotura',
        u'nasc[ _]ar',
        u'tengill[ _][úg]g',
        u'ua',
        u'yüm yg',
        u'רא',
        u'وصلة مقالة جيدة',
        u'وصلة مقالة مختارة',
    ]
    categories = None
    interwikiLinks = None
    allstars = []
    # The PyWikipediaBot is no longer allowed to touch categories on the
    # German Wikipedia. See
    # http://de.wikipedia.org/wiki/Hilfe_Diskussion:Personendaten/Archiv/1#Position_der_Personendaten_am_.22Artikelende.22
    # ignoring nn-wiki because of the comment line above the iw section
    if not self.template and '{{Personendaten' not in text and \
       '{{SORTIERUNG' not in text and '{{DEFAULTSORT' not in text and \
       self.site.lang not in ('et', 'it', 'bg', 'ru'):
        categories = pywikibot.getCategoryLinks(text, site=self.site)
    if not self.talkpage:  # and pywikibot.calledModuleName() != 'interwiki':
        subpage = False
        if self.template:
            loc = None
            try:
                tmpl, loc = moved_links[self.site.lang]
                del tmpl
            except KeyError:
                pass
            if loc is not None and loc in self.title:
                subpage = True
        interwikiLinks = pywikibot.getLanguageLinks(
            text, insite=self.site, template_subpage=subpage)
        # Removing the interwiki
        text = pywikibot.removeLanguageLinks(text, site=self.site)
    # Removing the stars' issue
    starstext = pywikibot.removeDisabledParts(text)
    for star in starsList:
        regex = re.compile('(\{\{(?:template:|)%s\|.*?\}\}[\s]*)'
                           % star, re.I)
        found = regex.findall(starstext)
        if found != []:
            text = regex.sub('', text)
            allstars += found
    # Adding categories
    if categories:
        ## Sorting categories in alphabetic order; beta test only on the
        ## Persian Wikipedia. TODO: fix bug for sorting
        # if self.site.language() == 'fa':
        #     categories.sort()
        ## Taking main cats to top
        # for name in categories:
        #     if re.search(u"(.+?)\|(.{,1}?)", name.title()) or name.title() == name.title().split(":")[0] + title:
        #         categories.remove(name)
        #         categories.insert(0, name)
        text = pywikibot.replaceCategoryLinks(text, categories,
                                              site=self.site)
    # Adding stars templates
    if allstars:
        text = text.strip() + self.site.family.interwiki_text_separator
        allstars.sort()
        for element in allstars:
            text += '%s%s' % (element.strip(), config.line_separator)
            pywikibot.log(u'%s' % element.strip())
#......... part of the code omitted here .........
Example 15: run
def run(self):
    """ Run the Bot """
    try:
        deadLinks = codecs.open(listof404pages, 'r', 'latin_1').read()
    except IOError:
        pywikibot.output(
            'You need to download '
            'http://www.twoevils.org/files/wikipedia/404-links.txt.gz '
            'and to ungzip it in the same directory')
        raise
    socket.setdefaulttimeout(30)
    editedpages = 0
    for page in self.generator:
        try:
            # Load the page's text from the wiki
            new_text = page.get()
            if not page.canBeEdited():
                pywikibot.output(u"You can't edit page %s"
                                 % page.title(asLink=True))
                continue
        except pywikibot.NoPage:
            pywikibot.output(u'Page %s not found' % page.title(asLink=True))
            continue
        except pywikibot.IsRedirectPage:
            pywikibot.output(u'Page %s is a redirect'
                             % page.title(asLink=True))
            continue
        # for each link to change
        for match in linksInRef.finditer(
                pywikibot.removeDisabledParts(page.get())):
            link = match.group(u'url')
            # debugging purpose
            # print link
            if u'jstor.org' in link:
                # TODO: Clean URL blacklist
                continue
            ref = RefLink(link, match.group('name'))
            f = None
            try:
                socket.setdefaulttimeout(20)
                try:
                    f = urllib2.urlopen(ref.url.decode("utf8"))
                except UnicodeError:
                    ref.url = urllib2.quote(ref.url.encode("utf8"), "://")
                    f = urllib2.urlopen(ref.url)
                # Try to get Content-Type from server
                headers = f.info()
                contentType = headers.getheader('Content-Type')
                if contentType and not self.MIME.search(contentType):
                    if ref.link.lower().endswith('.pdf') and \
                       not self.getOption('ignorepdf'):
                        # If file has a PDF suffix
                        self.getPDFTitle(ref, f)
                    else:
                        pywikibot.output(
                            u'\03{lightyellow}WARNING\03{default} : '
                            u'media : %s ' % ref.link)
                    if ref.title:
                        if not re.match(
                                u'(?i) *microsoft (word|excel|visio)',
                                ref.title):
                            ref.transform(ispdf=True)
                            repl = ref.refTitle()
                        else:
                            pywikibot.output(
                                u'\03{lightyellow}WARNING\03{default} : '
                                u'PDF title blacklisted : %s ' % ref.title)
                            repl = ref.refLink()
                    else:
                        repl = ref.refLink()
                    new_text = new_text.replace(match.group(), repl)
                    continue
                # Get the real url where we end (http redirects !)
                redir = f.geturl()
                if redir != ref.link and \
                   domain.findall(redir) == domain.findall(link):
                    if soft404.search(redir) and \
                       not soft404.search(ref.link):
                        pywikibot.output(
                            u'\03{lightyellow}WARNING\03{default} : '
                            u'Redirect 404 : %s ' % ref.link)
                        continue
                    if dirIndex.match(redir) and \
                       not dirIndex.match(ref.link):
                        pywikibot.output(
                            u'\03{lightyellow}WARNING\03{default} : '
                            u'Redirect to root : %s ' % ref.link)
                        continue
                # uncompress if necessary
                if headers.get('Content-Encoding') in ('gzip', 'x-gzip'):
                    # XXX: small issue here: the whole page is downloaded
                    # through f.read(). It might fetch big files/pages.
                    # However, truncating an encoded gzipped stream is not
                    # an option, for unzipping will fail.
                    compressed = StringIO.StringIO(f.read())
                    f = gzip.GzipFile(fileobj=compressed)
#......... part of the code omitted here .........