本文整理匯總了Python中r2.models.wiki.ImagesByWikiPage類的典型用法代碼示例。如果您正苦於以下問題:Python ImagesByWikiPage類的具體用法?Python ImagesByWikiPage怎麽用?Python ImagesByWikiPage使用的例子?那麽, 這裏精選的類代碼示例或許可以為您提供幫助。
在下文中一共展示了ImagesByWikiPage類的6個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: valid_url
def valid_url(prop, value, report, generate_https_urls):
    """Validate a url() value found in a stylesheet property.

    Only the custom image syntax (``%%name%%``) is accepted here.  A
    recognized label is rewritten in place to the image's stored URL
    (converted to https when ``generate_https_urls`` is true); anything
    else appends a ValidationError to ``report``.
    """
    try:
        url = value.getStringValue()
    except IndexError:
        g.log.error("Problem validating [%r]" % value)
        raise

    match = custom_img_urls.match(url)
    if not match:
        # not the %%label%% form at all -> only custom images are allowed
        report.append(ValidationError(msgs["custom_images_only"], value))
        return

    label = match.group(1)
    # this relies on localcache to not be doing a lot of lookups
    images = ImagesByWikiPage.get_images(c.site, "config/stylesheet")
    if label not in images:
        # unknown image label -> error
        report.append(ValidationError(
            msgs["broken_url"] % dict(brokenurl=value.cssText), value))
        return

    if generate_https_urls:
        resolved = g.media_provider.convert_to_https(images[label])
    else:
        resolved = images[label]
    value._setCssText("url(%s)" % resolved)
示例2: images
def images(self):
    """Return the stylesheet images for the current site as a list of
    dicts, each carrying the image's name, its %%name%% CSS reference,
    and its hosted URL."""
    stored = ImagesByWikiPage.get_images(c.site, "config/stylesheet")
    return [{'name': label,
             'link': 'url(%%%%%s%%%%)' % label,
             'url': img_url}
            for label, img_url in stored.iteritems()]
示例3: valid_url
def valid_url(prop, value, report, generate_https_urls, enforce_custom_images_only):
    """
    checks url(...) arguments in CSS, ensuring that the contents are
    officially sanctioned. Sanctioned urls include:
    * anything in /static/
    * image labels %%..%% for images uploaded on /about/stylesheet
    * urls with domains in g.allowed_css_linked_domains
    """
    try:
        url = value.getStringValue()
    except IndexError:
        g.log.error("Problem validating [%r]" % value)
        raise

    # local urls are allowed
    if local_urls.match(url):
        if enforce_custom_images_only:
            report.append(ValidationError(msgs["custom_images_only"], value))
        return

    # unescape repeatedly until a fixed point, so multiply-encoded
    # payloads can't hide from the checks below
    previous = None
    while url != previous:
        previous, url = url, filters.url_unescape(url)

    custom_match = custom_img_urls.match(url)

    if "../" in url:
        # disallow path trickery
        report.append(ValidationError(
            msgs["broken_url"] % dict(brokenurl=value.cssText), value))
    # custom urls are allowed, but need to be transformed into a real path
    elif custom_match:
        label = custom_match.group(1)
        # this relies on localcache to not be doing a lot of lookups
        images = ImagesByWikiPage.get_images(c.site, "config/stylesheet")
        if label in images:
            if generate_https_urls:
                resolved = s3_direct_https(images[label])
            else:
                resolved = images[label]
            value._setCssText("url(%s)" % resolved)
        else:
            # unknown image label -> error
            report.append(ValidationError(
                msgs["broken_url"] % dict(brokenurl=value.cssText), value))
    else:
        if enforce_custom_images_only:
            report.append(ValidationError(msgs["custom_images_only"], value))
            return
        try:
            parsed = urlparse(url)
            valid_scheme = parsed.scheme and parsed.scheme in valid_url_schemes
            valid_domain = parsed.netloc in g.allowed_css_linked_domains
        except ValueError:
            # note: parsed being falsy short-circuits the check below, so
            # valid_scheme/valid_domain being unbound here is harmless
            parsed = False
        # allowed domains are ok
        if not (parsed and valid_scheme and valid_domain):
            report.append(ValidationError(
                msgs["broken_url"] % dict(brokenurl=value.cssText), value))
示例4: wikimarkdown
def wikimarkdown(text, include_toc=True, target=None):
    """Render wiki markdown to HTML.

    Image labels (%%name%%) in img tags are swapped for their stored
    URLs (protocol-relative); unknown labels are stripped.  In compact
    render style, reddit links are rewritten through add_sr.  When
    ``include_toc`` is set, a table of contents is prepended.
    """
    from r2.lib.template_helpers import make_url_protocol_relative
    # this hard codes the stylesheet page for now, but should be parameterized
    # in the future to allow per-page images.
    from r2.models.wiki import ImagesByWikiPage
    from r2.lib.utils import UrlParser
    from r2.lib.template_helpers import add_sr

    page_images = ImagesByWikiPage.get_images(c.site, "config/stylesheet")

    def img_swap(tag):
        # map the tag's %%label%% src to its stored URL, or drop the tag
        label = tag.get('src')
        label = custom_img_url.search(label)
        label = label and label.group(1)
        if label and label in page_images:
            src = page_images[label]
            tag['src'] = make_url_protocol_relative(src)
        else:
            tag.extract()

    nofollow = True
    text = snudown.markdown(_force_utf8(text), nofollow, target,
                            renderer=snudown.RENDERER_WIKI)

    # TODO: We should test how much of a load this adds to the app
    soup = BeautifulSoup(text.decode('utf-8'))
    for image in soup.findAll('img'):
        img_swap(image)

    def add_ext_to_link(link):
        url = UrlParser(link.get('href'))
        if url.is_reddit_url():
            link['href'] = add_sr(link.get('href'), sr_path=False)

    if c.render_style == 'compact':
        for anchor in soup.findAll('a'):
            add_ext_to_link(anchor)

    if include_toc:
        tocdiv = generate_table_of_contents(soup, prefix="wiki")
        if tocdiv:
            soup.insert(0, tocdiv)

    text = str(soup)
    return SC_OFF + WIKI_MD_START + text + WIKI_MD_END + SC_ON
示例5: wikimarkdown
def wikimarkdown(text, include_toc=True, target=None):
    """Render wiki markdown to HTML.

    Image labels (%%name%%) in img tags are swapped for their stored
    URLs (https when the request is secure); unknown labels are
    stripped.  When ``include_toc`` is set, a table of contents is
    prepended to the output.
    """
    from r2.lib.template_helpers import media_https_if_secure
    # this hard codes the stylesheet page for now, but should be parameterized
    # in the future to allow per-page images.
    from r2.models.wiki import ImagesByWikiPage

    page_images = ImagesByWikiPage.get_images(c.site, "config/stylesheet")

    def img_swap(tag):
        # map the tag's %%label%% src to its stored URL, or drop the tag
        label = tag.get('src')
        label = custom_img_url.search(label)
        label = label and label.group(1)
        if label and label in page_images:
            src = page_images[label]
            tag['src'] = media_https_if_secure(src)
        else:
            tag.extract()

    nofollow = True
    text = snudown.markdown(_force_utf8(text), nofollow, target,
                            renderer=snudown.RENDERER_WIKI)

    # TODO: We should test how much of a load this adds to the app
    soup = BeautifulSoup(text.decode('utf-8'))
    for image in soup.findAll('img'):
        img_swap(image)

    if include_toc:
        tocdiv = generate_table_of_contents(soup, prefix="wiki")
        if tocdiv:
            soup.insert(0, tocdiv)

    text = str(soup)
    return SC_OFF + WIKI_MD_START + text + WIKI_MD_END + SC_ON
示例6: images
def images(self):
    """Return the stylesheet images for the current site as a list of
    dicts with each image's name, %%name%% CSS reference, and URL."""
    stored = ImagesByWikiPage.get_images(c.site, "config/stylesheet")
    return [{"name": label,
             "link": "url(%%%%%s%%%%)" % label,
             "url": img_url}
            for label, img_url in stored.iteritems()]