This article collects typical usage examples of Python's snudown.markdown function. If you are unsure what snudown.markdown does or how to call it, the selected examples below should help.
The following presents 15 code examples of the markdown function, ordered by popularity by default.
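Before the project-specific examples, here is a minimal self-contained sketch of calling snudown.markdown directly. It assumes only that the snudown package is installed; the renderer and enable_toc keywords used in some examples below exist only in newer snudown releases.

import snudown

# snudown consumes UTF-8 encoded bytes and returns an HTML fragment.
html = snudown.markdown('hello **world**')
# html == '<p>hello <strong>world</strong></p>\n'

# nofollow and target are the second and third positional arguments seen
# throughout the examples below: nofollow=True adds rel="nofollow" to
# rendered links, and target sets their target attribute.
html = snudown.markdown('[a link](http://example.com)', True, '_top')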
Example 1: wikimarkdown
def wikimarkdown(text, include_toc=True, target=None):
    from r2.lib.cssfilter import legacy_s3_url

    def img_swap(tag):
        name = tag.get('src')
        name = custom_img_url.search(name)
        name = name and name.group(1)
        if name and c.site.images.has_key(name):
            url = c.site.images[name]
            url = legacy_s3_url(url, c.site)
            tag['src'] = url
        else:
            tag.extract()

    nofollow = True
    text = snudown.markdown(_force_utf8(text), nofollow, target, g.domain,
                            renderer=snudown.RENDERER_WIKI)

    # TODO: We should test how much of a load this adds to the app
    soup = BeautifulSoup(text.decode('utf-8'))
    images = soup.findAll('img')
    if images:
        [img_swap(image) for image in images]

    if include_toc:
        tocdiv = generate_table_of_contents(soup, prefix="wiki")
        if tocdiv:
            soup.insert(0, tocdiv)

    text = str(soup)
    return SC_OFF + WIKI_MD_START + text + WIKI_MD_END + SC_ON
Example 2: wikimarkdown
def wikimarkdown(text):
    from r2.lib.cssfilter import legacy_s3_url

    def img_swap(tag):
        name = tag.get('src')
        name = custom_img_url.search(name)
        name = name and name.group(1)
        if name and c.site.images.has_key(name):
            url = c.site.images[name]
            url = legacy_s3_url(url, c.site)
            tag['src'] = url
        else:
            tag.extract()

    nofollow = True
    target = None
    text = snudown.markdown(_force_utf8(text), nofollow, target,
                            renderer=snudown.RENDERER_WIKI, enable_toc=True)

    # TODO: We should test how much of a load this adds to the app
    soup = BeautifulSoup(text)
    images = soup.findAll('img')
    if images:
        [img_swap(image) for image in images]

    text = str(soup)
    return SC_OFF + WIKI_MD_START + text + WIKI_MD_END + SC_ON
Example 3: normalize_markdown_text
def normalize_markdown_text(parser, source):
    rendered = markdown(unicode(source).encode('utf-8'))
    html_body = ' '.join(rendered.splitlines())
    soup = BeautifulSoup(html_body)
    text = ' '.join(soup.findAll(text=True))
    text = parser.unescape(text)
    return unicode(' '.join(text.splitlines()).replace(',', ' ')).encode('utf-8')
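A hypothetical call to the helper above, assuming parser is any object with an unescape() method (such as Python 2's HTMLParser.HTMLParser) and markdown is snudown.markdown:

from HTMLParser import HTMLParser

normalize_markdown_text(HTMLParser(), u'Some **bold**, comma-separated text')
# returns a UTF-8 byte string with tags, newlines and commas stripped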
Example 4: safemarkdown
def safemarkdown(text, nofollow=False, target=None, lang=None, wrap=True):
    from r2.lib.c_markdown import c_markdown
    from r2.lib.py_markdown import py_markdown

    if c.user.pref_no_profanity:
        text = profanity_filter(text)

    if not text:
        return None

    if c.cname and not target:
        target = "_top"

    if lang is None:
        lang = g.markdown_backend

    if lang == "snudown":
        text = snudown.markdown(_force_utf8(text), nofollow, target)
    elif lang == "c":
        text = c_markdown(text, nofollow, target)
    elif lang == "py":
        text = py_markdown(text, nofollow, target)
    else:
        raise ValueError("weird lang [%s]" % lang)

    if wrap:
        return SC_OFF + MD_START + text + MD_END + SC_ON
    else:
        return text
Example 5: safemarkdown
def safemarkdown(text, nofollow=False, wrap=True, **kwargs):
    from r2.lib.utils import generate_affiliate_link, domain

    if not text:
        return None

    target = kwargs.get("target", None)
    text = snudown.markdown(_force_utf8(text), nofollow, target)

    to_affiliate = kwargs.get("affiliate", False)
    if to_affiliate:
        soup = BeautifulSoup(text.decode('utf-8'))
        links = soup.findAll('a')
        update_text = False

        def detect_affiliate(markdown_link):
            return (domain(markdown_link.get('href'))
                    in g.merchant_affiliate_domains)

        for link in filter(detect_affiliate, links):
            update_text = True
            link['class'] = 'affiliate'
            link['data-href-url'] = link.get('href')
            link['data-affiliate-url'] = generate_affiliate_link(
                link.get('href')
            )

        if update_text:
            text = str(soup)

    if wrap:
        return SC_OFF + MD_START + text + MD_END + SC_ON
    else:
        return SC_OFF + text + SC_ON
Example 6: hello
def hello():
    messages = rds.zrevrangebyscore('goygoy', '+inf', '-inf')
    msgs = []
    for i in messages:
        msg = json.loads(i)
        msgs.append(dict(
            msg=_force_unicode(snudown.markdown(_force_utf8(msg['msg']))),
            username='anonim',
        ))
    return render_template('index.html', messages=msgs)
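The view above appears to come from a small Flask app backed by Redis. A sketch of the assumed scaffolding follows; the helper implementations are assumptions, not part of the original:

import json

import redis
import snudown
from flask import Flask, render_template

app = Flask(__name__)
rds = redis.StrictRedis()

def _force_utf8(text):
    # assumed helper: snudown wants UTF-8 encoded bytes
    return text.encode('utf-8') if isinstance(text, unicode) else text

def _force_unicode(text):
    # assumed helper: templates want unicode back
    return text.decode('utf-8') if isinstance(text, str) else text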
Example 7: strip_markdown
def strip_markdown(text):
    """Extract text from a markdown string."""
    html = markdown(text.encode('utf-8'))
    soup = BeautifulSoup(
        html,
        "html.parser",
        from_encoding='utf8',
    )
    return "".join(soup.findAll(text=True))
Example 8: extract_urls_from_markdown
def extract_urls_from_markdown(md):
    "Extract URLs that will be hot links from a piece of raw Markdown."
    html = snudown.markdown(_force_utf8(md))
    links = SoupStrainer("a")

    for link in BeautifulSoup(html, parseOnlyThese=links):
        url = link.get('href')
        if url:
            yield url
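A hypothetical call to the generator above (_force_utf8 is assumed to UTF-8 encode its argument):

list(extract_urls_from_markdown(u'see [the docs](http://example.com/docs)'))
# -> ['http://example.com/docs']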
Example 9: safemarkdown
def safemarkdown(text, nofollow=False, wrap=True, **kwargs):
    if not text:
        return None

    target = kwargs.get("target", None)
    text = snudown.markdown(_force_utf8(text), nofollow, target)

    if wrap:
        return SC_OFF + MD_START + text + MD_END + SC_ON
    else:
        return SC_OFF + text + SC_ON
Example 10: runTest
def runTest(self):
    output = snudown.markdown(self.input)

    for i, (a, b) in enumerate(zip(repr(self.expected_output), repr(output))):
        if a != b:
            io = StringIO.StringIO()
            print >> io, "TEST FAILED:"
            print >> io, "       input: %s" % repr(self.input)
            print >> io, "    expected: %s" % repr(self.expected_output)
            print >> io, "      actual: %s" % repr(output)
            print >> io, "              %s" % (" " * i + "^")
            self.fail(io.getvalue())
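For context, a minimal sketch of how such a case might be wired up, with a plain assertEqual in place of the character-by-character diff above; the fixture strings are assumptions, not taken from the real suite:

import unittest

import snudown

class SnudownCase(unittest.TestCase):
    # hypothetical fixture pair; the real suite defines many such cases
    input = 'hello **world**'
    expected_output = '<p>hello <strong>world</strong></p>\n'

    def runTest(self):
        self.assertEqual(snudown.markdown(self.input), self.expected_output)

if __name__ == '__main__':
    unittest.main()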
Example 11: safemarkdown
def safemarkdown(text, nofollow=False, target=None, wrap=True):
    if not text:
        return None

    if c.cname and not target:
        target = "_top"

    text = snudown.markdown(_force_utf8(text), nofollow, target)

    if wrap:
        return SC_OFF + MD_START + text + MD_END + SC_ON
    else:
        return text
Example 12: wikimarkdown
def wikimarkdown(text, include_toc=True, target=None):
    from r2.lib.template_helpers import make_url_protocol_relative

    # this hard codes the stylesheet page for now, but should be parameterized
    # in the future to allow per-page images.
    from r2.models.wiki import ImagesByWikiPage
    from r2.lib.utils import UrlParser
    from r2.lib.template_helpers import add_sr
    page_images = ImagesByWikiPage.get_images(c.site, "config/stylesheet")

    def img_swap(tag):
        name = tag.get('src')
        name = custom_img_url.search(name)
        name = name and name.group(1)
        if name and name in page_images:
            url = page_images[name]
            url = make_url_protocol_relative(url)
            tag['src'] = url
        else:
            tag.extract()

    nofollow = True
    text = snudown.markdown(_force_utf8(text), nofollow, target,
                            renderer=snudown.RENDERER_WIKI)

    # TODO: We should test how much of a load this adds to the app
    soup = BeautifulSoup(text.decode('utf-8'))
    images = soup.findAll('img')
    if images:
        [img_swap(image) for image in images]

    def add_ext_to_link(link):
        url = UrlParser(link.get('href'))
        if url.is_reddit_url():
            link['href'] = add_sr(link.get('href'), sr_path=False)

    if c.render_style == 'compact':
        links = soup.findAll('a')
        [add_ext_to_link(a) for a in links]

    if include_toc:
        tocdiv = generate_table_of_contents(soup, prefix="wiki")
        if tocdiv:
            soup.insert(0, tocdiv)

    text = str(soup)
    return SC_OFF + WIKI_MD_START + text + WIKI_MD_END + SC_ON
Example 13: safemarkdown
def safemarkdown(text, nofollow=False, wrap=True, **kwargs):
    if not text:
        return None

    # this lets us skip the c.cname lookup (which is apparently quite
    # slow) if target was explicitly passed to this function.
    target = kwargs.get("target", None)
    if "target" not in kwargs and c.cname:
        target = "_top"

    text = snudown.markdown(_force_utf8(text), nofollow, target)

    if wrap:
        return SC_OFF + MD_START + text + MD_END + SC_ON
    else:
        return SC_OFF + text + SC_ON
Example 14: runTest
def runTest(self):
    output = snudown.markdown(self.input)

    for i, (a, b) in enumerate(zip(repr(self.expected_output),
                                   repr(output))):
        if a != b:
            try:
                # Python 2: the StringIO module
                io = StringIO.StringIO()
            except (NameError, AttributeError):
                # Python 3: io.StringIO imported as StringIO
                io = StringIO()
            print("TEST FAILED:", file=io)
            print("       input: %s" % repr(self.input), file=io)
            print("    expected: %s" % repr(self.expected_output), file=io)
            print("      actual: %s" % repr(output), file=io)
            print("              %s" % (' ' * i + '^'), file=io)
            self.fail(io.getvalue())
Example 15: wikimarkdown
def wikimarkdown(text, include_toc=True, target=None):
    from r2.lib.cssfilter import legacy_s3_url

    nofollow = True
    text = snudown.markdown(_force_utf8(text), nofollow, target, g.domain)

    # TODO: We should test how much of a load this adds to the app
    soup = BeautifulSoup(text.decode('utf-8'))

    if include_toc:
        tocdiv = generate_table_of_contents(soup, prefix="wiki")
        if tocdiv:
            soup.insert(0, tocdiv)

    text = str(soup)
    return SC_OFF + WIKI_MD_START + text + WIKI_MD_END + SC_ON