当前位置: 首页>>代码示例>>Python>>正文


Python wiki.ImagesByWikiPage类代码示例

本文整理汇总了Python中r2.models.wiki.ImagesByWikiPage的典型用法代码示例。如果您正苦于以下问题:Python ImagesByWikiPage类的具体用法?Python ImagesByWikiPage怎么用?Python ImagesByWikiPage使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


在下文中一共展示了ImagesByWikiPage类的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: valid_url

def valid_url(prop, value, report, generate_https_urls):
    """Validate a url() value appearing in a stylesheet.

    Only the custom image syntax (%%example%%) is accepted; a matching
    label is rewritten in place to the real image URL (optionally forced
    to https).  Anything else is reported as a validation error.

    """
    try:
        url = value.getStringValue()
    except IndexError:
        g.log.error("Problem validating [%r]" % value)
        raise

    match = custom_img_urls.match(url)
    if not match:
        # not in the %%label%% form at all
        report.append(ValidationError(msgs["custom_images_only"], value))
        return

    label = match.group(1)

    # this relies on localcache to not be doing a lot of lookups
    known_images = ImagesByWikiPage.get_images(c.site, "config/stylesheet")

    if label not in known_images:
        # unknown image label -> error
        report.append(ValidationError(msgs["broken_url"] % dict(brokenurl=value.cssText), value))
        return

    resolved = known_images[label]
    if generate_https_urls:
        resolved = g.media_provider.convert_to_https(resolved)
    value._setCssText("url(%s)" % resolved)
开发者ID:tolgaek,项目名称:reddit,代码行数:31,代码来源:cssfilter.py

示例2: images

 def images(self):
     """Return the stylesheet images as dicts with name, css link, and url."""
     stored = ImagesByWikiPage.get_images(c.site, "config/stylesheet")
     return [{'name': name,
              'link': 'url(%%%%%s%%%%)' % name,
              'url': url}
             for name, url in stored.iteritems()]
开发者ID:Dakta,项目名称:reddit,代码行数:8,代码来源:jsontemplates.py

示例3: valid_url

def valid_url(prop, value, report, generate_https_urls, enforce_custom_images_only):
    """
    checks url(...) arguments in CSS, ensuring that the contents are
    officially sanctioned.  Sanctioned urls include:
     * anything in /static/
     * image labels %%..%% for images uploaded on /about/stylesheet
     * urls with domains in g.allowed_css_linked_domains

    Validation failures are appended to `report`; successfully resolved
    %%label%% urls are rewritten in place on `value`.
    """
    try:
        url = value.getStringValue()
    except IndexError:
        g.log.error("Problem validating [%r]" % value)
        raise

    # match the %%label%% form once up front (it was previously matched a
    # second time just to extract the group)
    custom_img_match = custom_img_urls.match(url)

    # local urls are allowed
    if local_urls.match(url):
        if enforce_custom_images_only:
            report.append(ValidationError(msgs["custom_images_only"], value))
            return

        # unescape repeatedly until a fixed point so that multiply-encoded
        # sequences can't hide anything
        t_url = None
        while url != t_url:
            t_url, url = url, filters.url_unescape(url)
        # disallow path trickery
        if "../" in url:
            report.append(ValidationError(msgs["broken_url"] % dict(brokenurl=value.cssText), value))
    # custom urls are allowed, but need to be transformed into a real path
    elif custom_img_match:
        name = custom_img_match.group(1)

        # this relies on localcache to not be doing a lot of lookups
        images = ImagesByWikiPage.get_images(c.site, "config/stylesheet")

        if name in images:
            if not generate_https_urls:
                url = images[name]
            else:
                url = s3_direct_https(images[name])
            value._setCssText("url(%s)" % url)
        else:
            # unknown image label -> error
            report.append(ValidationError(msgs["broken_url"] % dict(brokenurl=value.cssText), value))
    else:
        if enforce_custom_images_only:
            report.append(ValidationError(msgs["custom_images_only"], value))
            return

        try:
            u = urlparse(url)
            valid_scheme = u.scheme and u.scheme in valid_url_schemes
            valid_domain = u.netloc in g.allowed_css_linked_domains
        except ValueError:
            u = False

        # allowed domains are ok; when urlparse raised, `u` is falsy and
        # short-circuiting keeps the unbound valid_* names from being read
        if not (u and valid_scheme and valid_domain):
            report.append(ValidationError(msgs["broken_url"] % dict(brokenurl=value.cssText), value))
开发者ID:andre-d,项目名称:reddit,代码行数:56,代码来源:cssfilter.py

示例4: wikimarkdown

def wikimarkdown(text, include_toc=True, target=None):
    """Render wiki markdown to sanitized HTML.

    text -- the raw markdown source (will be forced to utf-8)
    include_toc -- when True, prepend a generated table of contents
    target -- link target passed through to the snudown renderer

    Custom %%label%% image references are resolved against the subreddit's
    stylesheet images; unknown labels are stripped from the output.
    """
    from r2.lib.template_helpers import make_url_protocol_relative

    # this hard codes the stylesheet page for now, but should be parameterized
    # in the future to allow per-page images.
    from r2.models.wiki import ImagesByWikiPage
    from r2.lib.utils import UrlParser
    from r2.lib.template_helpers import add_sr
    page_images = ImagesByWikiPage.get_images(c.site, "config/stylesheet")

    def img_swap(tag):
        # swap a %%label%% src for the uploaded image's protocol-relative
        # URL; remove the tag entirely when the label is unknown
        name = tag.get('src')
        name = custom_img_url.search(name)
        name = name and name.group(1)
        if name and name in page_images:
            url = page_images[name]
            url = make_url_protocol_relative(url)
            tag['src'] = url
        else:
            tag.extract()

    nofollow = True

    text = snudown.markdown(_force_utf8(text), nofollow, target,
                            renderer=snudown.RENDERER_WIKI)

    # TODO: We should test how much of a load this adds to the app
    soup = BeautifulSoup(text.decode('utf-8'))

    # plain loop instead of a side-effect-only list comprehension
    for image in soup.findAll('img'):
        img_swap(image)

    def add_ext_to_link(link):
        # rewrite on-site links so they carry the subreddit/extension info
        url = UrlParser(link.get('href'))
        if url.is_reddit_url():
            link['href'] = add_sr(link.get('href'), sr_path=False)

    if c.render_style == 'compact':
        for link in soup.findAll('a'):
            add_ext_to_link(link)

    if include_toc:
        tocdiv = generate_table_of_contents(soup, prefix="wiki")
        if tocdiv:
            soup.insert(0, tocdiv)

    text = str(soup)

    return SC_OFF + WIKI_MD_START + text + WIKI_MD_END + SC_ON
开发者ID:pra85,项目名称:reddit,代码行数:50,代码来源:filters.py

示例5: wikimarkdown

def wikimarkdown(text, include_toc=True, target=None):
    """Render wiki markdown to sanitized HTML.

    text -- the raw markdown source (will be forced to utf-8)
    include_toc -- when True, prepend a generated table of contents
    target -- link target passed through to the snudown renderer

    Custom %%label%% image references are resolved against the subreddit's
    stylesheet images; unknown labels are stripped from the output.
    """
    from r2.lib.template_helpers import media_https_if_secure

    # this hard codes the stylesheet page for now, but should be parameterized
    # in the future to allow per-page images.
    from r2.models.wiki import ImagesByWikiPage
    page_images = ImagesByWikiPage.get_images(c.site, "config/stylesheet")

    def img_swap(tag):
        # swap a %%label%% src for the uploaded image's URL (https when the
        # request is secure); remove the tag entirely when the label is unknown
        name = tag.get('src')
        name = custom_img_url.search(name)
        name = name and name.group(1)
        if name and name in page_images:
            url = page_images[name]
            url = media_https_if_secure(url)
            tag['src'] = url
        else:
            tag.extract()

    nofollow = True

    text = snudown.markdown(_force_utf8(text), nofollow, target,
                            renderer=snudown.RENDERER_WIKI)

    # TODO: We should test how much of a load this adds to the app
    soup = BeautifulSoup(text.decode('utf-8'))

    # plain loop instead of a side-effect-only list comprehension
    for image in soup.findAll('img'):
        img_swap(image)

    if include_toc:
        tocdiv = generate_table_of_contents(soup, prefix="wiki")
        if tocdiv:
            soup.insert(0, tocdiv)

    text = str(soup)

    return SC_OFF + WIKI_MD_START + text + WIKI_MD_END + SC_ON
开发者ID:Acceto,项目名称:reddit,代码行数:39,代码来源:filters.py

示例6: images

 def images(self):
     """Return the stylesheet images as dicts with name, css link, and url."""
     stored = ImagesByWikiPage.get_images(c.site, "config/stylesheet")
     result = []
     for img_name, img_url in stored.iteritems():
         entry = {"name": img_name,
                  "link": "url(%%%%%s%%%%)" % img_name,
                  "url": img_url}
         result.append(entry)
     return result
开发者ID:JingyanZ,项目名称:reddit,代码行数:6,代码来源:jsontemplates.py


注:本文中的r2.models.wiki.ImagesByWikiPage类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。