本文整理汇总了Python中r2.lib.utils.UrlParser.update_query方法的典型用法代码示例。如果您正苦于以下问题:Python UrlParser.update_query方法的具体用法?Python UrlParser.update_query怎么用?Python UrlParser.update_query使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类r2.lib.utils.UrlParser
的用法示例。
在下文中一共展示了UrlParser.update_query方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: resize_image
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import update_query [as 别名]
def resize_image(self, image, width=None, censor_nsfw=False, max_ratio=None):
    """Return an imgix URL serving *image*, resized and optionally censored."""
    parsed = UrlParser(image['url'])
    parsed.hostname = g.imgix_domain
    # Serve over HTTPS: it works fine on HTTP pages and avoids
    # insecure-content warnings on HTTPS pages.
    parsed.scheme = 'https'

    if max_ratio:
        parsed.update_query(fit='crop')
        # http://www.imgix.com/docs/reference/size#param-crop
        parsed.update_query(crop='faces,entropy')
        parsed.update_query(arh=max_ratio)

    if width:
        if width > image['width']:
            raise NotLargeEnough()
        # http://www.imgix.com/docs/reference/size#param-w
        parsed.update_query(w=width)

    if censor_nsfw:
        # Pixellation compresses better than a Gaussian blur and we are not
        # trying to make the censoring irreversible for privacy reasons;
        # px=20 is simply "what looks about right".
        # http://www.imgix.com/docs/reference/stylize#param-px
        parsed.update_query(px=20)

    if g.imgix_signing:
        parsed = self._sign_url(parsed, g.secrets['imgix_signing_token'])

    return parsed.unparse()
示例2: POST_options
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import update_query [as 别名]
def POST_options(self, all_langs, pref_lang, **kw):
    """Normalize the submitted preference form, save it, and redirect
    back to the prefs page."""
    # Temporary: pref_clickgadget will eventually become an integer pref.
    kw['pref_clickgadget'] = 5 if kw['pref_clickgadget'] else 0

    if c.user.pref_show_promote is None:
        kw['pref_show_promote'] = None
    elif not kw.get('pref_show_promote'):
        kw['pref_show_promote'] = False

    if not (kw.get("pref_over_18") and c.user.pref_over_18):
        kw['pref_no_profanity'] = True
    if kw.get("pref_no_profanity") or c.user.pref_no_profanity:
        kw['pref_label_nsfw'] = True

    avatar = kw.get("avatar_img")
    if avatar:
        kw["pref_avatar_img"] = avatar

    # Accounts without gold get every gold-only option forced on.
    if not c.user.gold:
        gold_prefs = ('pref_show_adbox',
                      'pref_show_sponsors',
                      'pref_show_sponsorships',
                      'pref_highlight_new_comments',
                      'pref_monitor_mentions')
        for name in gold_prefs:
            kw[name] = True

    self.set_options(all_langs, pref_lang, **kw)
    dest = UrlParser(c.site.path + "prefs")
    dest.update_query(done='true')
    if c.cname:
        dest.put_in_frame()
    return self.redirect(dest.unparse())
示例3: POST_options
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import update_query [as 别名]
def POST_options(self, all_langs, pref_lang, **kw):
    """Normalize submitted preferences, persist them, and redirect to
    the prefs page."""
    # Temporary: pref_clickgadget will eventually become an integer pref.
    kw['pref_clickgadget'] = 5 if kw['pref_clickgadget'] else 0

    if c.user.pref_show_promote is None:
        kw['pref_show_promote'] = None
    elif not kw.get('pref_show_promote'):
        kw['pref_show_promote'] = False

    if not (kw.get("pref_over_18") and c.user.pref_over_18):
        kw['pref_no_profanity'] = True
    if kw.get("pref_no_profanity") or c.user.pref_no_profanity:
        kw['pref_label_nsfw'] = True

    # Accounts without gold have the gold-only display options forced on.
    if not c.user.gold:
        kw['pref_show_adbox'] = True
        kw['pref_show_sponsors'] = True
        kw['pref_show_sponsorships'] = True

    self.set_options(all_langs, pref_lang, **kw)
    dest = UrlParser(c.site.path + "prefs")
    dest.update_query(done='true')
    if c.cname:
        dest.put_in_frame()
    return self.redirect(dest.unparse())
示例4: _update_redirect_uri
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import update_query [as 别名]
def _update_redirect_uri(base_redirect_uri, params, as_fragment=False):
    """Merge *params* into *base_redirect_uri* and return the new URL.

    When *as_fragment* is true the params become the URL fragment;
    otherwise they are folded into the query string.
    """
    url = UrlParser(base_redirect_uri)
    if as_fragment:
        url.fragment = urlencode(params)
    else:
        url.update_query(**params)
    return url.unparse()
示例5: POST_options
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import update_query [as 别名]
def POST_options(self, all_langs, pref_lang, **kw):
    """Save the submitted preferences, then redirect to the prefs page."""
    self.set_options(all_langs, pref_lang, **kw)
    dest = UrlParser(c.site.path + "prefs")
    dest.update_query(done='true')
    if c.cname:
        dest.put_in_frame()
    return self.redirect(dest.unparse())
示例6: url_for_title
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import update_query [as 别名]
def url_for_title(self, title):
    """Use the MediaWiki API to resolve *title* to its full wiki URL.

    Returns None when *title* is None or the API lookup fails.  The
    resolved URL is cached for 12 hours.
    """
    if title is None:
        return None
    from pylons import g
    cache_key = ('wiki_url_%s' % title).encode('ascii', 'ignore')
    # FIX: read from the same cache the result is written to below.
    # Previously this read g.cache while the value was stored in
    # g.permacache, so the cached entry was never found.
    wiki_url = g.permacache.get(cache_key)
    if wiki_url is None:
        # http://www.mediawiki.org/wiki/API:Query_-_Properties#info_.2F_in
        api = UrlParser(g.wiki_api_url)
        api.update_query(
            action = 'query',
            titles= title,
            prop = 'info',
            format = 'yaml',
            inprop = 'url'
        )
        try:
            response = urlopen(api.unparse()).read()
            parsed_response = yaml.load(response, Loader=yaml.CLoader)
            page = parsed_response['query']['pages'][0]
            # FIX: extract 'fullurl' inside the try block; previously a
            # missing key made page.get(...) return None and .strip()
            # raised AttributeError outside the handler.
            wiki_url = page['fullurl'].strip()
        except Exception:
            # FIX: narrowed from a bare except so KeyboardInterrupt and
            # SystemExit are no longer swallowed; lookup is best-effort.
            return None
        # Things are created every couple of days so 12 hours seems
        # to be a reasonable cache time
        g.permacache.set(cache_key, wiki_url, time=3600 * 12)
    return wiki_url
示例7: make_feedurl
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import update_query [as 别名]
def make_feedurl(user, path, ext="rss"):
    """Return *path* rewritten as a private feed URL for *user*.

    Adds the user's name and a per-user feed hash to the query string
    and forces the *ext* extension.  Falls back to the unmodified path
    if anything goes wrong (best-effort behavior preserved).
    """
    try:
        u = UrlParser(path)
        u.update_query(user=user.name, feed=make_feedhash(user, path))
        u.set_extension(ext)
        return u.unparse()
    except Exception:
        # FIX: narrowed from a bare except so KeyboardInterrupt and
        # SystemExit are not swallowed; still returns the original path
        # on any ordinary failure.
        return path
示例8: make_anchored_permalink
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import update_query [as 别名]
def make_anchored_permalink(self, link=None, sr=None, context=1, anchor=None):
    """Build a permalink carrying a ?context= parameter and a #fragment.

    The fragment defaults to this thing's id36 when *anchor* is falsy.
    """
    if link:
        base = self.make_permalink(link, sr)
    else:
        base = self.make_permalink_slow()
    permalink = UrlParser(base)
    permalink.update_query(context=context)
    permalink.fragment = anchor or self._id36
    return permalink.unparse()
示例9: test_sign_url
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import update_query [as 别名]
def test_sign_url(self):
    """The signature is identical whether w=100 is baked into the URL
    or added afterwards via update_query."""
    expected = ('http://examples.imgix.net/frog.jpg'
                '?w=100&s=cd3bdf071108af73b15c21bdcee5e49c')

    # Width already present in the original URL.
    url = UrlParser('http://examples.imgix.net/frog.jpg?w=100')
    self.assertEqual(self.provider._sign_url(url, 'abcdef').unparse(),
                     expected)

    # Width added through update_query.
    url = UrlParser('http://examples.imgix.net/frog.jpg')
    url.update_query(w=100)
    self.assertEqual(self.provider._sign_url(url, 'abcdef').unparse(),
                     expected)
示例10: POST_options
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import update_query [as 别名]
def POST_options(self, all_langs, **prefs):
    """Validate and persist submitted preferences, then redirect back
    to the prefs page (400 on validation errors)."""
    filter_prefs(prefs, c.user)
    if c.errors.errors:
        return abort(BadRequestError(errors.INVALID_PREF))
    set_prefs(c.user, prefs)
    c.user._commit()
    dest = UrlParser(c.site.path + "prefs")
    dest.update_query(done='true')
    if c.cname:
        dest.put_in_frame()
    return self.redirect(dest.unparse())
示例11: POST_options
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import update_query [as 别名]
def POST_options(self, all_langs, pref_lang, **kw):
    """Save preferences and bounce back to the prefs page."""
    # Temporary: pref_clickgadget will eventually become an integer pref.
    kw['pref_clickgadget'] = 5 if kw['pref_clickgadget'] else 0
    self.set_options(all_langs, pref_lang, **kw)
    target = UrlParser(c.site.path + "prefs")
    target.update_query(done='true')
    if c.cname:
        target.put_in_frame()
    return self.redirect(target.unparse())
示例12: __init__
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import update_query [as 别名]
def __init__(self, original_path, subreddit, sub_domain):
    """Prepare the title and framed-target URL for a cname frame page."""
    Wrapped.__init__(self, original_path=original_path)
    # Without all three pieces there is nothing sensible to frame.
    if not (sub_domain and subreddit and original_path):
        self.title = ""
        self.frame_target = None
        return

    self.title = "%s - %s" % (subreddit.title, sub_domain)
    framed = UrlParser(subreddit.path + original_path)
    framed.hostname = get_domain(cname=False, subreddit=False)
    # Carry the request's query parameters through to the framed URL.
    framed.update_query(**request.get.copy())
    framed.put_in_frame()
    self.frame_target = framed.unparse()
示例13: test_same_url
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import update_query [as 别名]
def test_same_url(self):
    """URLs compare equal regardless of query-param order, and a
    UrlParser assembled field by field equals its parsed counterpart."""
    first = UrlParser('http://example.com:8000/a;b?foo=bar&bar=baz#spam')
    reordered = UrlParser('http://example.com:8000/a;b?bar=baz&foo=bar#spam')
    self.assertEquals(first, reordered)

    built = UrlParser('')
    built.scheme = 'http'
    built.hostname = 'example.com'
    built.port = 8000
    built.path = '/a'
    built.params = 'b'
    built.update_query(foo='bar', bar='baz')
    built.fragment = 'spam'
    self.assertEquals(first, built)
示例14: GET_link_id_redirect
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import update_query [as 别名]
def GET_link_id_redirect(self, link):
    """Permanently redirect to a link's canonical permalink, carrying
    any incoming query parameters along."""
    if not link:
        abort(404)
    elif not link.subreddit_slow.can_view(c.user):
        # Don't leak the post's subreddit/title through the redirect URL.
        abort(403)
    else:
        redirect_url = link.make_permalink_slow(force_domain=True)
        passthrough = dict(request.GET)
        if passthrough:
            parsed = UrlParser(redirect_url)
            parsed.update_query(**passthrough)
            redirect_url = parsed.unparse()
    return self.redirect(redirect_url, code=301)
示例15: _oembed_post
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import update_query [as 别名]
def _oembed_post(thing, **embed_options):
    """Build an oEmbed "rich" response dict for embedding a post."""
    subreddit = thing.subreddit_slow
    # Posts in private subreddits, or whose comments the viewer cannot
    # see, must not be embeddable.
    if (not can_view_link_comments(thing) or
            subreddit.type in Subreddit.private_types):
        raise ForbiddenError(errors.POST_NOT_ACCESSIBLE)

    live_data_attr = ''
    if embed_options.get('live'):
        created = datetime.now(g.tz).isoformat()
        live_data_attr = 'data-card-created="{}"'.format(created)

    script = ''
    if not embed_options.get('omitscript', False):
        script = format_html(SCRIPT_TEMPLATE,
                             embedly_script=EMBEDLY_SCRIPT,
                             )

    permalink = UrlParser(thing.make_permalink_slow(force_domain=True))
    permalink.update_query(ref='share', ref_source='embed')

    author_name = ""
    if not thing._deleted:
        author = thing.author_slow
        if author._deleted:
            author_name = _("[account deleted]")
        else:
            author_name = author.name

    html = format_html(POST_EMBED_TEMPLATE,
                       live_data_attr=live_data_attr,
                       link_url=permalink.unparse(),
                       title=websafe(thing.title),
                       subreddit_url=make_url_https(subreddit.path),
                       subreddit_name=subreddit.name,
                       script=script,
                       )

    return dict(_OEMBED_BASE,
                type="rich",
                title=thing.title,
                author_name=author_name,
                html=html,
                )