本文整理汇总了Python中r2.lib.utils.UrlParser.hostname方法的典型用法代码示例。如果您正苦于以下问题:Python UrlParser.hostname方法的具体用法?Python UrlParser.hostname怎么用?Python UrlParser.hostname使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类r2.lib.utils.UrlParser
的用法示例。
在下文中一共展示了UrlParser.hostname方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: purge_url
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import hostname [as 别名]
def purge_url(self, url):
    """Ask imgix to purge its cached copy of the image at `url`.

    Reference: http://www.imgix.com/docs/tutorials/purging-images

    Note that as mentioned in the imgix docs, in order to remove
    an image, this function should be used *after* already
    removing the image from our source, or imgix will just re-fetch
    and replace the image with a new copy even after purging.
    """
    parsed = UrlParser(url)
    # Swap the serving domain for the matching purge domain.
    if parsed.hostname == g.imgix_domain:
        parsed.hostname = g.imgix_purge_domain
    elif parsed.hostname == g.imgix_gif_domain:
        parsed.hostname = g.imgix_gif_purge_domain
    requests.post(
        "https://api.imgix.com/v2/image/purger",
        auth=(g.secrets["imgix_api_key"], ""),
        data={"url": parsed.unparse()},
    )
示例2: test_url_mutation
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import hostname [as 别名]
def test_url_mutation(self):
    """Changing the hostname should flip is_reddit_url() accordingly."""
    # Swapping in the reddit domain makes the url a reddit url.
    parsed = UrlParser("http://example.com/")
    parsed.hostname = g.domain
    self.assertTrue(parsed.is_reddit_url())
    # Swapping it out again makes it a non-reddit url.
    parsed = UrlParser("http://%s/" % g.domain)
    parsed.hostname = "example.com"
    self.assertFalse(parsed.is_reddit_url())
示例3: resize_image
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import hostname [as 别名]
def resize_image(self, image, width=None, file_type=None, censor_nsfw=False,
                 max_ratio=None):
    """Build an imgix URL serving a transformed version of `image`.

    image: dict with at least 'url' and 'width' entries.
    width: target width in pixels; must not exceed the source width.
    file_type: output format ('gif', 'jpg', 'png', or 'mp4').
    censor_nsfw: if True, blur and pixellate the image.
    max_ratio: maximum aspect ratio; the image is cropped to fit.

    Raises NotLargeEnough if `width` exceeds the source image's width.
    """
    url = UrlParser(image['url'])
    # .gif sources served as mp4 (or with no explicit type) go through
    # the gif-specific imgix source.
    is_gif = url.path.endswith('.gif') and (file_type == 'mp4' or not file_type)
    url.hostname = g.imgix_gif_domain if is_gif else g.imgix_domain

    # Let's encourage HTTPS; it's cool, works just fine on HTTP pages, and
    # will prevent insecure content warnings on HTTPS pages.
    url.scheme = 'https'

    # g.s3_media_direct affects how preview image urls are stored
    #   True:  http://{s3_media_domain}/mybucket/helloworld.jpg
    #   False: http://mybucket/helloworld.jpg
    # If it's True, we'll need to strip the bucket out of the path
    if g.s3_media_direct:
        segments = url.path.split('/')
        del segments[1]
        url.path = '/'.join(segments)

    if max_ratio:
        url.update_query(fit='crop')
        # http://www.imgix.com/docs/reference/size#param-crop
        url.update_query(crop='faces,entropy')
        url.update_query(arh=max_ratio)

    if width:
        if width > image['width']:
            raise NotLargeEnough()
        # http://www.imgix.com/docs/reference/size#param-w
        url.update_query(w=width)

    if file_type and file_type in ('gif', 'jpg', 'png', 'mp4'):
        url.update_query(fm=file_type)

    # We need to disable fragmented mp4s for proper playback in Firefox
    if file_type == 'mp4':
        url.update_query(**{'mp4-fragmented': 'false'})

    if censor_nsfw:
        # Do an initial blur to make sure we're getting rid of icky
        # details.
        # http://www.imgix.com/docs/reference/stylize#param-blur
        url.update_query(blur=600)
        # And then add pixellation to help the image compress well.
        # http://www.imgix.com/docs/reference/stylize#param-px
        url.update_query(px=32)

    if g.imgix_signing:
        token_name = ('imgix_gif_signing_token' if is_gif
                      else 'imgix_signing_token')
        url = self._sign_url(url, g.secrets[token_name])

    return url.unparse()
示例4: add_sr
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import hostname [as 别名]
def add_sr(
        path, sr_path=True, nocname=False, force_hostname=False,
        retain_extension=True, force_https=False):
    """
    Given a path (which may be a full-fledged url or a relative path),
    parses the path and updates it to include the subreddit path
    according to the rules set by its arguments:

      * sr_path: if a cname is not used for the domain, updates the
        path to include c.site.path.

      * nocname: when updating the hostname, overrides the value of
        c.cname to set the hostname to g.domain. The default behavior
        is to set the hostname consistent with c.cname.

      * force_hostname: if True, force the url's hostname to be updated
        even if it is already set in the path, and subject to the
        c.cname/nocname combination. If false, the path will still
        have its domain updated if no hostname is specified in the url.

      * retain_extension: if True, sets the extension according to
        c.render_style.

      * force_https: force the URL scheme to https

    For caching purposes: note that this function uses:
    c.cname, c.render_style, c.site.name
    """
    # anchors and javascript links are passed through untouched
    if path.startswith(('#', 'javascript:')):
        return path

    parsed = UrlParser(path)
    if sr_path and (nocname or not c.cname):
        parsed.path_add_subreddit(c.site)

    if force_hostname or not parsed.hostname:
        if c.secure:
            parsed.hostname = request.host
        else:
            parsed.hostname = get_domain(cname=(c.cname and not nocname),
                                         subreddit=False)

    if force_https or (c.secure and parsed.is_reddit_url()):
        parsed.scheme = "https"

    if retain_extension:
        extension = {'mobile': 'mobile', 'compact': 'compact'}.get(c.render_style)
        if extension:
            parsed.set_extension(extension)

    return parsed.unparse()
示例5: resize_image
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import hostname [as 别名]
def resize_image(self, image, width=None, censor_nsfw=False, max_ratio=None):
    """Build an imgix URL serving a resized version of `image`.

    Raises NotLargeEnough if `width` exceeds the source image's width.
    """
    url = UrlParser(image['url'])
    url.hostname = g.imgix_domain

    # Let's encourage HTTPS; it's cool, works just fine on HTTP pages, and
    # will prevent insecure content warnings on HTTPS pages.
    url.scheme = 'https'

    if max_ratio:
        url.update_query(fit='crop')
        # http://www.imgix.com/docs/reference/size#param-crop
        url.update_query(crop='faces,entropy')
        url.update_query(arh=max_ratio)

    if width:
        if width > image['width']:
            raise NotLargeEnough()
        # http://www.imgix.com/docs/reference/size#param-w
        url.update_query(w=width)

    if censor_nsfw:
        # Do an initial blur to make sure we're getting rid of icky
        # details.
        # http://www.imgix.com/docs/reference/stylize#param-blur
        url.update_query(blur=600)
        # And then add pixellation to help the image compress well.
        # http://www.imgix.com/docs/reference/stylize#param-px
        url.update_query(px=32)

    if g.imgix_signing:
        url = self._sign_url(url, g.secrets['imgix_signing_token'])

    return url.unparse()
示例6: resize_image
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import hostname [as 别名]
def resize_image(self, image, width=None, censor_nsfw=False, max_ratio=None):
    """Build an imgix URL serving a resized version of `image`.

    Raises NotLargeEnough if `width` exceeds the source image's width.
    """
    url = UrlParser(image['url'])
    url.hostname = g.imgix_domain

    # Let's encourage HTTPS; it's cool, works just fine on HTTP pages, and
    # will prevent insecure content warnings on HTTPS pages.
    url.scheme = 'https'

    if max_ratio:
        url.update_query(fit='crop')
        # http://www.imgix.com/docs/reference/size#param-crop
        url.update_query(crop='faces,entropy')
        url.update_query(arh=max_ratio)

    if width:
        if width > image['width']:
            raise NotLargeEnough()
        # http://www.imgix.com/docs/reference/size#param-w
        url.update_query(w=width)

    if censor_nsfw:
        # Since we aren't concerned with inhibiting a user's ability to
        # reverse the censoring for privacy reasons, pixellation is better
        # than a Gaussian blur because it compresses well. The specific
        # value is just "what looks about right".
        # http://www.imgix.com/docs/reference/stylize#param-px
        url.update_query(px=20)

    if g.imgix_signing:
        url = self._sign_url(url, g.secrets['imgix_signing_token'])

    return url.unparse()
示例7: add_sr
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import hostname [as 别名]
def add_sr(path, sr_path=True, nocname=False, force_hostname=False):
    """
    Given a path (which may be a full-fledged url or a relative path),
    parses the path and updates it to include the subreddit path
    according to the rules set by its arguments:

      * force_hostname: if True, force the url's hostname to be updated
        even if it is already set in the path, and subject to the
        c.cname/nocname combination. If false, the path will still
        have its domain updated if no hostname is specified in the url.

      * nocname: when updating the hostname, overrides the value of
        c.cname to set the hostname to g.domain. The default behavior
        is to set the hostname consistent with c.cname.

      * sr_path: if a cname is not used for the domain, updates the
        path to include c.site.path.
    """
    u = UrlParser(path)
    if sr_path and (nocname or not c.cname):
        u.path_add_subreddit(c.site)
    if not u.hostname or force_hostname:
        # only fill in the hostname when missing (or explicitly forced)
        u.hostname = get_domain(cname=(c.cname and not nocname),
                                subreddit=False)
    if c.render_style == 'mobile':
        u.set_extension('mobile')
    return u.unparse()
示例8: POST_request_promo
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import hostname [as 别名]
def POST_request_promo(self, srnames):
    """Serve a single promoted link for the requested subreddit names."""
    if not srnames:
        return

    # request multiple ads in case some are hidden by the builder due
    # to the user's hides/preferences
    response = adzerk_request(srnames.split('+'))
    if not response:
        g.stats.simple_event('adzerk.request.no_promo')
        return

    res_by_campaign = dict((r.campaign, r) for r in response)
    tuples = [promote.PromoTuple(r.link, 1., r.campaign) for r in response]
    builder = CampaignBuilder(
        tuples,
        wrap=default_thing_wrapper(),
        keep_fn=promote.promo_keep_fn,
        num=1,
        skip=True,
    )
    listing = LinkListing(builder, nextprev=False).listing()

    if not listing.things:
        g.stats.simple_event('adzerk.request.skip_promo')
        return

    g.stats.simple_event('adzerk.request.valid_promo')
    thing = listing.things[0]
    ad = res_by_campaign[thing.campaign]

    # route the impression pixel through our own tracking domain
    pixel = UrlParser(ad.imp_pixel)
    pixel.hostname = "pixel.redditmedia.com"
    thing.adserver_imp_pixel = pixel.unparse()
    thing.adserver_click_url = ad.click_url
    thing.num = ""
    return spaceCompress(thing.render())
示例9: _key_from_url
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import hostname [as 别名]
def _key_from_url(cls, url):
    """Normalize `url` into a utf-8 cache key via UrlParser.base_url.

    For domains in g.case_sensitive_domains only the hostname is
    lowercased; everywhere else the whole url is lowercased.
    """
    if utils.domain(url) in g.case_sensitive_domains:
        # lowercase only the hostname; the path's case is significant
        parsed = UrlParser(url)
        parsed.hostname = parsed.hostname.lower()
        normalized = parsed.unparse()
    else:
        normalized = url.lower()
    return _force_utf8(UrlParser.base_url(normalized))
示例10: redirect_to_host
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import hostname [as 别名]
def redirect_to_host(hostname, path=None):
    """Redirect (307) the current request to `path` on `hostname`.

    `path` defaults to the current request's path. A 307 (not 302) is
    used so the client retains the original request method.
    """
    if path is None:
        path = request.path
    u = UrlParser(path)
    u.hostname = hostname
    # 307 redirect so request method is retained
    abort(307, location=u.unparse())
示例11: POST_request_promo
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import hostname [as 别名]
def POST_request_promo(self, srnames, is_mobile_web, platform, loid, is_refresh):
    """Serve a promoted link (or raw adserver markup) for `srnames`."""
    self.OPTIONS_request_promo()

    if not srnames:
        return

    # backwards compat: infer the platform from the legacy flag
    if platform is None:
        platform = "mobile_web" if is_mobile_web else "desktop"

    # request multiple ads in case some are hidden by the builder due
    # to the user's hides/preferences
    response = adzerk_request(srnames.split('+'), self.get_uid(loid),
                              platform=platform)
    if not response:
        g.stats.simple_event('adzerk.request.no_promo')
        return

    # for adservers, adzerk returns markup so we pass it to the client
    if isinstance(response, AdserverResponse):
        g.stats.simple_event('adzerk.request.adserver')
        return responsive(response.body)

    res_by_campaign = dict((r.campaign, r) for r in response)
    adserver_click_urls = dict((r.campaign, r.click_url) for r in response)
    tuples = [promote.PromoTuple(r.link, 1., r.campaign) for r in response]
    builder = CampaignBuilder(
        tuples,
        wrap=default_thing_wrapper(),
        keep_fn=promote.promo_keep_fn,
        num=1,
        skip=True,
    )
    listing = LinkListing(builder, nextprev=False).listing()
    promote.add_trackers(listing.things, c.site,
                         adserver_click_urls=adserver_click_urls)
    promote.update_served(listing.things)

    if not listing.things:
        g.stats.simple_event('adzerk.request.skip_promo')
        return

    g.stats.simple_event('adzerk.request.valid_promo')
    if is_refresh:
        g.stats.simple_event('adzerk.request.auto_refresh')

    thing = listing.things[0]
    ad = res_by_campaign[thing.campaign]

    # route the impression pixel through our own tracking domain
    pixel = UrlParser(ad.imp_pixel)
    pixel.hostname = "pixel.redditmedia.com"
    thing.adserver_imp_pixel = pixel.unparse()
    thing.adserver_upvote_pixel = ad.upvote_pixel
    thing.adserver_downvote_pixel = ad.downvote_pixel
    thing.adserver_click_url = ad.click_url
    thing.num = ""
    return responsive(thing.render(), space_compress=True)
示例12: __init__
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import hostname [as 别名]
def __init__(self, original_path, subreddit, sub_domain):
    """Wrapped page framing a subreddit path under its own sub-domain."""
    Wrapped.__init__(self, original_path=original_path)

    # without all three pieces there is nothing to frame
    if not (sub_domain and subreddit and original_path):
        self.title = ""
        self.frame_target = None
        return

    self.title = "%s - %s" % (subreddit.title, sub_domain)
    target = UrlParser(subreddit.path + original_path)
    target.hostname = get_domain(cname=False, subreddit=False)
    target.update_query(**request.get.copy())
    target.put_in_frame()
    self.frame_target = target.unparse()
示例13: test_same_url
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import hostname [as 别名]
def test_same_url(self):
    """UrlParser equality should ignore query-parameter order."""
    left = UrlParser('http://example.com:8000/a;b?foo=bar&bar=baz#spam')
    right = UrlParser('http://example.com:8000/a;b?bar=baz&foo=bar#spam')
    self.assertEquals(left, right)

    # a url assembled piece by piece should equal the parsed one
    built = UrlParser('')
    built.scheme = 'http'
    built.hostname = 'example.com'
    built.port = 8000
    built.path = '/a'
    built.params = 'b'
    built.update_query(foo='bar', bar='baz')
    built.fragment = 'spam'
    self.assertEquals(left, built)
示例14: test_replace_subreddit
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import hostname [as 别名]
def test_replace_subreddit(self):
    """canonicalize_subreddit_path should normalize subreddit casing."""
    test_cases = [
        # (input path, subreddit.user_path, expected canonical path)
        ('/r/VIDEOS/', '/r/videos/', '/r/videos/'),
        ('/r/VIDEOS/new/', '/r/videos/', '/r/videos/new/'),
        ('/r/VIDEOS/new/#cats', '/r/videos/', '/r/videos/new/#cats'),
        ('/user/dave/m/cats/', '', '/user/dave/m/cats/'),
    ]
    for test_url, user_path, canonical_url in test_cases:
        subreddit = mock.create_autospec(BaseSite, spec_set=True)
        subreddit.user_path = user_path
        url = UrlParser(test_url).canonicalize_subreddit_path(subreddit)
        url.hostname = 'reddit'
        # (leftover debug print removed; the assertion message already
        # identifies the failing case)
        self.assertTrue(
            url.is_canonically_equivalent(canonical_url),
            '{0} is not equivalent to {1}'.format(
                url, UrlParser(canonical_url)),
        )
示例15: add_sr
# 需要导入模块: from r2.lib.utils import UrlParser [as 别名]
# 或者: from r2.lib.utils.UrlParser import hostname [as 别名]
def add_sr(path, sr_path=True, nocname=False, force_hostname=False):
    """
    Given a path (which may be a full-fledged url or a relative path),
    parses the path and updates it to include the subreddit path
    according to the rules set by its arguments:

      * force_hostname: if True, force the url's hostname to be updated
        even if it is already set in the path, and subject to the
        c.cname/nocname combination. If false, the path will still
        have its domain updated if no hostname is specified in the url.

      * nocname: when updating the hostname, overrides the value of
        c.cname to set the hostname to g.domain. The default behavior
        is to set the hostname consistent with c.cname.

      * sr_path: if a cname is not used for the domain, updates the
        path to include c.site.path.

    For caching purposes: note that this function uses:
    c.cname, c.render_style, c.site.name
    """
    # don't do anything if it is just an anchor or a javascript link
    if path.startswith(("#", "javascript:")):
        return path

    u = UrlParser(path)
    if sr_path and (nocname or not c.cname):
        u.path_add_subreddit(c.site)
    if not u.hostname or force_hostname:
        u.hostname = get_domain(cname=(c.cname and not nocname), subreddit=False)
    if c.render_style == "mobile":
        u.set_extension("mobile")
    elif c.render_style == "compact":
        u.set_extension("compact")
    return u.unparse()