This page collects typical usage examples of the Python function r2.lib.utils.domain. If you have been wondering what the domain function does and how to use it, the curated code samples below should help.
Fifteen code examples of the domain function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python samples.
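Every example below leans on the same helper: r2.lib.utils.domain takes a URL and returns just its host, which reddit uses for display, whitelisting, and scraper dispatch. The examples never show its definition, so here is a rough stand-in inferred from how they use it; the name domain_approx and the exact normalization rules (strip scheme, port, and a leading "www.", then lowercase) are assumptions, not the real implementation:

from urlparse import urlparse  # Python 2; use urllib.parse on Python 3

def domain_approx(url):
    """Rough stand-in for r2.lib.utils.domain: return the URL's
    hostname, lowercased, with the port and any leading "www." removed."""
    hostname = urlparse(url).hostname or ""  # .hostname already drops the port
    if hostname.startswith("www."):
        hostname = hostname[len("www."):]
    return hostname.lower()

assert domain_approx("http://www.Example.com:8080/a?b=c") == "example.com"

Later sketches on this page reuse domain_approx.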
Example 1: get_context_data
def get_context_data(self, request, context):
    """Extract common data from the current request and context.

    This is generally done explicitly in `__init__`, but is done by hand
    for votes before the request context is lost by the queuing.

    request, context: should be pylons.request and pylons.c respectively.
    """
    data = {}

    if context.user_is_loggedin:
        data["user_id"] = context.user._id
        data["user_name"] = context.user.name
    else:
        loid = request.cookies.get("loid", None)
        if loid:
            data["loid"] = loid

    oauth2_client = getattr(context, "oauth2_client", None)
    if oauth2_client:
        data["oauth2_client_id"] = oauth2_client._id

    data["geoip_country"] = get_request_location(request, context)
    data["domain"] = request.host
    data["user_agent"] = request.user_agent

    http_referrer = request.headers.get("Referer", None)
    if http_referrer:
        data["referrer_url"] = http_referrer
        data["referrer_domain"] = domain(http_referrer)

    return data
Example 2: for_url
def for_url(cls, embedly_services, url):
    url_domain = domain(url)
    domain_embedly_regex = embedly_services.get(url_domain, None)

    if domain_embedly_regex and re.match(domain_embedly_regex, url):
        return _EmbedlyScraper(url)

    return _ThumbnailOnlyScraper(url)
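A hypothetical call to illustrate the dispatch above. The excerpt only shows the classmethod body, so the registry contents and the demo function below are made up; embedly_services maps a bare domain to a regex of URLs Embedly can handle for it:

import re

embedly_services = {
    "youtube.com": r"https?://(www\.)?youtube\.com/watch\?.*",  # illustrative
}

def for_url_demo(embedly_services, url):
    # mirrors for_url, using domain_approx from the sketch at the top
    url_domain = domain_approx(url)
    pattern = embedly_services.get(url_domain)
    if pattern and re.match(pattern, url):
        return "embedly"        # would be _EmbedlyScraper(url)
    return "thumbnail-only"     # would be _ThumbnailOnlyScraper(url)

assert for_url_demo(embedly_services, "https://www.youtube.com/watch?v=x") == "embedly"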
Example 3: valid_url
def valid_url(prop, value, report):
    """
    checks url(...) arguments in CSS, ensuring that the contents are
    officially sanctioned.  Sanctioned urls include:
     * anything in /static/
     * image labels %%..%% for images uploaded on /about/stylesheet
     * urls with domains in g.allowed_css_linked_domains
    """
    url = value.getStringValue()
    # local urls are allowed
    if local_urls.match(url):
        pass
    # custom urls are allowed, but need to be transformed into a real path
    elif custom_img_urls.match(url):
        name = custom_img_urls.match(url).group(1)
        # the label -> image number lookup is stored on the subreddit
        if c.site.images.has_key(name):
            num = c.site.images[name]
            # g.s3_thumb_bucket presumably carries its own leading "/",
            # which completes the "http://" prefix below
            value._setCssText("url(http:/%s%s_%d.png?v=%s)"
                              % (g.s3_thumb_bucket, c.site._fullname, num,
                                 randstr(36)))
        else:
            # unknown image label -> error
            report.append(ValidationError(msgs['broken_url']
                                          % dict(brokenurl=value.cssText),
                                          value))
    # allowed domains are ok
    elif domain(url) in g.allowed_css_linked_domains:
        pass
    else:
        report.append(ValidationError(msgs['broken_url']
                                      % dict(brokenurl=value.cssText),
                                      value))
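The validator relies on two module-level regexes that this excerpt doesn't define. Plausible shapes, inferred from how they're used above (assumptions, not the definitions from r2/lib/cssfilter.py):

import re

# stylesheet-local resources, e.g. url(/static/kill.png)
local_urls = re.compile(r"\A/static/[a-z./-]+\Z")
# uploaded-image labels, e.g. url(%%mylabel%%); group(1) is the label
custom_img_urls = re.compile(r"\A%%([a-zA-Z0-9\-]+)%%\Z")

assert custom_img_urls.match("%%mylabel%%").group(1) == "mylabel"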
Example 4: submit_rss_links
def submit_rss_links(srname, rss, user, titlefield='title', linkfield='link'):
    # Skip the API and submit the way we would if we were really doing it.
    # This avoids messing around with cookies and so forth...
    feed = fetch_feed(rss)
    if feed is None:
        return
    ac = Account._byID(user)
    sr = Subsciteit._by_name(srname)
    ip = '0.0.0.0'
    niceify = False
    if domain(rss) == "arxiv.org":
        niceify = dict(find=r"\(arXiv:.*?\)", replace="")
    # Let's randomize, why not...
    random.shuffle(feed.entries)
    for article in feed.entries:
        # This can take all night if it has to; we don't want to hammer
        # the server into oblivion
        sleep(1)
        kw = fetch_article(article, titlefield=titlefield,
                           linkfield=linkfield, niceify=niceify)
        if kw is None:
            continue
        l = Link._submit(kw['title'], kw['link'], ac, sr, ip, spam=False)
        l._commit()
        l.set_url_cache()
        # We don't really need auto-submitted links to be voted on...
        queries.queue_vote(ac, l, True, ip, cheater=False)
        queries.new_link(l)
        changed(l)
        print "Submitted %s" % article[titlefield]
        sleep(.1)
    return
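A hypothetical invocation (the feed URL, subreddit name, and account id below are made up):

# Submit new arXiv math papers to the "math" sub as account id 42.
# Entries titled "Some paper (arXiv:1234.5678)" get that suffix
# stripped by the niceify rule above.
submit_rss_links("math", "http://arxiv.org/rss/math", user=42)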
Example 5: _make_media_object
def _make_media_object(self, oembed):
    if oembed.get("type") in ("video", "rich"):
        return {
            "type": domain(self.url),
            "oembed": oembed,
        }
    return None
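For a video link the returned media object simply pairs the link's source domain with the raw oEmbed payload (values below are illustrative):

oembed = {"type": "video", "html": "<iframe ...></iframe>"}
# on a youtube.com link, _make_media_object(oembed) would return:
# {"type": "youtube.com",
#  "oembed": {"type": "video", "html": "<iframe ...></iframe>"}}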
Example 6: scrape
def scrape(self):
    params = urllib.urlencode({
        "url": self.url,
        "format": "json",
        "maxwidth": 600,
        "key": g.embedly_api_key,
    })
    content = requests.get(self.EMBEDLY_API_URL + "?" + params).content
    oembed = json.loads(content, object_hook=self._utf8_encode)

    if not oembed:
        return None, None

    if oembed.get("type") == "photo":
        thumbnail_url = oembed.get("url")
    else:
        thumbnail_url = oembed.get("thumbnail_url")
    thumbnail = _make_thumbnail_from_url(thumbnail_url, referer=self.url)

    embed = {}
    if oembed.get("type") in ("video", "rich"):
        embed = {
            "type": domain(self.url),
            "oembed": oembed,
        }

    return thumbnail, embed
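The object_hook used above, self._utf8_encode, is not part of this excerpt. A minimal sketch of what such a hook typically looks like in a Python 2 codebase (an assumption, not the method's actual source):

def _utf8_encode(self, d):
    """Encode unicode keys and values of a decoded JSON object to UTF-8
    byte strings; json.loads calls this hook for every decoded object."""
    result = {}
    for key, value in d.iteritems():
        if isinstance(key, unicode):
            key = key.encode("utf-8")
        if isinstance(value, unicode):
            value = value.encode("utf-8")
        result[key] = value
    return result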
Example 7: add_props
def add_props(cls, user, wrapped):
    from r2.lib.count import incr_counts
    from r2.lib.media import thumbnail_url
    from r2.lib.utils import timeago

    saved = Link._saved(user, wrapped) if user else {}
    hidden = Link._hidden(user, wrapped) if user else {}
    #clicked = Link._clicked(user, wrapped) if user else {}
    clicked = {}

    for item in wrapped:
        show_media = (c.user.pref_media == 'on' or
                      (item.promoted and item.has_thumbnail
                       and c.user.pref_media != 'off') or
                      (c.user.pref_media == 'subreddit' and
                       item.subreddit.show_media))

        if not show_media:
            item.thumbnail = ""
        elif item.has_thumbnail:
            item.thumbnail = thumbnail_url(item)
        else:
            item.thumbnail = g.default_thumb

        item.score = max(0, item.score)

        item.domain = (domain(item.url) if not item.is_self
                       else 'self.' + item.subreddit.name)

        if not hasattr(item, 'top_link'):
            item.top_link = False

        item.urlprefix = ''
        item.saved = bool(saved.get((user, item, 'save')))
        item.hidden = bool(hidden.get((user, item, 'hide')))
        item.clicked = bool(clicked.get((user, item, 'click')))
        item.num = None
        item.score_fmt = Score.number_only
        item.permalink = item.make_permalink(item.subreddit)
        if item.is_self:
            item.url = item.make_permalink(item.subreddit,
                                           force_domain=True)

        if c.user_is_admin:
            item.hide_score = False
        elif item.promoted:
            item.hide_score = True
        elif c.user == item.author:
            item.hide_score = False
        elif item._date > timeago("2 hours"):
            item.hide_score = True
        else:
            item.hide_score = False

        if c.user_is_loggedin and item.author._id == c.user._id:
            item.nofollow = False
        elif item.score <= 1 or item._spam or item.author._spam:
            item.nofollow = True
        else:
            item.nofollow = False

    if c.user_is_loggedin:
        incr_counts(wrapped)
Example 8: _key_from_url
def _key_from_url(cls, url):
    if utils.domain(url) not in g.case_sensitive_domains:
        keyurl = _force_utf8(UrlParser.base_url(url.lower()))
    else:
        # convert only the hostname to lowercase
        up = UrlParser(url)
        up.hostname = up.hostname.lower()
        keyurl = _force_utf8(UrlParser.base_url(up.unparse()))
    return keyurl
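A standalone sketch of the keying rule, using plain string operations and domain_approx from the top of this page; the real method also applies UrlParser.base_url normalization, which is omitted here:

def key_from_url_demo(url, case_sensitive_domains=()):
    if domain_approx(url) not in case_sensitive_domains:
        return url.lower()
    # lowercase only the scheme and hostname; keep the path's case
    head, _, tail = url.partition("//")
    host, slash, path = tail.partition("/")
    return head.lower() + "//" + host.lower() + slash + path

assert key_from_url_demo("http://Example.com/A") == "http://example.com/a"
assert key_from_url_demo("http://Example.com/A",
                         ("example.com",)) == "http://example.com/A"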
Example 9: validate_link
def validate_link(url, whitelist=False):
    if url:
        url = sanitize_url(url)
        if url:
            if whitelist and domain(url) not in DOMAIN_WHITELIST:
                print "Domain %s not in whitelist." % domain(url)
                return False
            try:
                lbu = LinksByUrl._byID(LinksByUrl._key_from_url(url))
            except tdb_cassandra.NotFound:
                return url
            link_id36s = lbu._values()
            links = Link._byID36(link_id36s, data=True, return_dict=False)
            links = [l for l in links if not l._deleted]
            if len(links) == 0:
                return url
            print "Link %s exists..." % url
    return False
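Putting the branches together (illustrative behavior, with a made-up whitelist):

DOMAIN_WHITELIST = ("arxiv.org",)

# validate_link("http://example.com/x", whitelist=True)
#   -> False: example.com is not whitelisted (a warning is printed)
# validate_link("http://arxiv.org/abs/1234.5678", whitelist=True)
#   -> the sanitized URL, provided no non-deleted Link already has it;
#      otherwise False ("Link ... exists...")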
Example 10: GET_s
def GET_s(self, rest):
    """/s/http://..., show a given URL with the toolbar.  If it's
    submitted, redirect to /tb/$id36"""
    force_html()
    path = demangle_url(request.fullpath)

    if not path:
        # it was malformed
        self.abort404()

    # if the domain is shame-banned, bail out.
    if is_shamed_domain(path)[0]:
        self.abort404()

    listing = hot_links_by_url_listing(path, sr=c.site, num=1)
    link = listing.things[0] if listing.things else None

    if c.cname and not c.authorized_cname:
        # In this case, we make some bad guesses caused by the
        # cname frame on unauthorised cnames.
        # 1. User types http://foo.com/http://myurl?cheese=brie
        #    (where foo.com is an unauthorised cname)
        # 2. We generate a frame that points to
        #    http://www.reddit.com/r/foo/http://myurl?cnameframe=0.12345&cheese=brie
        # 3. Because we accept everything after the /r/foo/, and
        #    we've now parsed, modified, and reconstituted that
        #    URL to add cnameframe, we really can't make any good
        #    assumptions about what we've done to a potentially
        #    already broken URL, and we can't assume that we've
        #    rebuilt it in the way that it was originally
        #    submitted (if it was).
        # We could try to work around this with more guesses (by
        # having demangle_url try to remove that param, hoping
        # that it's not already a malformed URL, and that we
        # haven't re-ordered the GET params, removed
        # double-slashes, etc), but for now we'll just refuse to
        # do this operation.
        return self.abort404()

    if link:
        # we were able to find it, so let's send them to the
        # link-id-based URL so that their URL is reusable
        return self.redirect(add_sr("/tb/" + link._id36))

    title = utils.domain(path)
    res = Frame(
        title=title,
        url=match_current_reddit_subdomain(path),
    )

    # we don't want clients to think that this URL is actually a
    # valid URL for search-indexing or the like
    request.environ['usable_error_content'] = spaceCompress(res.render())
    abort(404)
Example 11: quarantine_event
def quarantine_event(self, event_type, subreddit,
                     request=None, context=None):
    """Create a 'quarantine' event for event-collector.

    event_type: quarantine_interstitial_view, quarantine_opt_in,
        quarantine_opt_out, quarantine_interstitial_dismiss
    subreddit: The quarantined subreddit
    request, context: Should be pylons.request & pylons.c respectively;
        used to build the base Event
    """
    event = EventV2(
        topic="quarantine",
        event_type=event_type,
        request=request,
        context=context,
    )

    if context:
        if context.user_is_loggedin:
            event.add("verified_email", context.user.email_verified)
        else:
            event.add("verified_email", False)

    event.add("sr_id", subreddit._id)
    event.add("sr_name", subreddit.name)

    # Due to the redirect, the request object being sent isn't the
    # original, so referrer and action data is missing for certain events
    if request and (event_type == "quarantine_interstitial_view" or
                    event_type == "quarantine_opt_out"):
        request_vars = request.environ["pylons.routes_dict"]
        event.add("sr_action", request_vars.get("action", None))

        # The thing_id the user is trying to view is a comment
        if request.environ["pylons.routes_dict"].get("comment", None):
            thing_id36 = request_vars.get("comment", None)
        # The thing_id is a link
        else:
            thing_id36 = request_vars.get("article", None)

        if thing_id36:
            event.add("thing_id", int(thing_id36, 36))

        referrer_url = request.headers.get('Referer', None)
        if referrer_url:
            event.add("referrer_url", referrer_url)
            event.add("referrer_domain", domain(referrer_url))

    self.save_event(event)
Example 12: make_scraper
def make_scraper(url):
    domain = utils.domain(url)
    scraper = Scraper
    for suffix, cls in scrapers.iteritems():
        if domain.endswith(suffix):
            scraper = cls
            break

    # sometimes youtube scrapers masquerade as google scrapers
    if scraper == GootubeScraper:
        youtube_url = youtube_in_google(url)
        if youtube_url:
            return make_scraper(youtube_url)
    return scraper(url)
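The module-level scrapers registry maps hostname suffixes to Scraper subclasses. A self-contained demo of the same suffix dispatch (registry contents and names are made up; domain_approx is the sketch from the top of this page):

scrapers_demo = {"video.google.com": "GootubeScraper",
                 "youtube.com": "YoutubeScraper"}

def pick_scraper_name(url):
    url_domain = domain_approx(url)
    for suffix, name in scrapers_demo.iteritems():
        if url_domain.endswith(suffix):
            return name
    return "Scraper"  # the default, as in make_scraper above

assert pick_scraper_name("http://www.youtube.com/watch?v=x") == "YoutubeScraper"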
Example 13: on_crawlable_domain
def on_crawlable_domain(self):
    # This ensures we don't have the port included.
    requested_domain = utils.domain(request.host)

    # If someone CNAMEs myspammysite.com to reddit.com or something, we
    # don't want search engines to index that.
    if not utils.is_subdomain(requested_domain, g.domain):
        return False

    # Only allow the canonical desktop site and mobile subdomains, since
    # we have canonicalization set up appropriately for them.
    # Note: in development, DomainMiddleware needs to be temporarily
    # modified to not skip assignment of reddit-domain-extension on
    # localhost for this to work properly.
    return (requested_domain == g.domain or
            request.environ.get('reddit-domain-extension') in
            ('mobile', 'compact'))
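utils.is_subdomain isn't shown in this excerpt; a minimal sketch consistent with the check above, assuming a domain counts as a subdomain of itself:

def is_subdomain(subdomain, base):
    """True if subdomain equals base or ends with "." + base."""
    if not subdomain:
        return False
    return subdomain == base or subdomain.endswith("." + base)

assert is_subdomain("np.reddit.com", "reddit.com")
assert not is_subdomain("myspammysite.com", "reddit.com")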
Example 14: get_context_data
def get_context_data(self, request, context):
    """Extract common data from the current request and context.

    This is generally done explicitly in `__init__`, but is done by hand
    for votes before the request context is lost by the queuing.

    request, context: should be pylons.request and pylons.c respectively.
    """
    data = {}

    if context.user_is_loggedin:
        data["user_id"] = context.user._id
        data["user_name"] = context.user.name
    else:
        if context.loid:
            data.update(context.loid.to_dict())

    oauth2_client = getattr(context, "oauth2_client", None)
    if oauth2_client:
        data["oauth2_client_id"] = oauth2_client._id
        data["oauth2_client_name"] = oauth2_client.name
        data["oauth2_client_app_type"] = oauth2_client.app_type

    data["geoip_country"] = get_request_location(request, context)
    data["domain"] = request.host
    data["user_agent"] = request.user_agent
    data["user_agent_parsed"] = parse_agent(request.user_agent)

    http_referrer = request.headers.get("Referer", None)
    if http_referrer:
        data["referrer_url"] = http_referrer
        data["referrer_domain"] = domain(http_referrer)

    hooks.get_hook("eventcollector.context_data").call(
        data=data,
        user=context.user,
        request=request,
        context=context,
    )

    return data
Example 15: add_props
def add_props(cls, user, wrapped):
    from r2.lib.count import incr_counts

    saved = Link._saved(user, wrapped) if user else {}
    hidden = Link._hidden(user, wrapped) if user else {}
    #clicked = Link._clicked(user, wrapped) if user else {}
    clicked = {}

    for item in wrapped:
        item.score = max(0, item.score)
        item.domain = (domain(item.url) if not item.is_self
                       else 'self.' + item.subreddit.name)

        item.top_link = False
        item.urlprefix = ''
        item.saved = bool(saved.get((user, item, 'save')))
        item.hidden = bool(hidden.get((user, item, 'hide')))
        item.clicked = bool(clicked.get((user, item, 'click')))
        item.num = None
        item.score_fmt = Score.number_only

    if c.user_is_loggedin:
        incr_counts(wrapped)
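The item.domain rule shared by both add_props variants is easy to isolate. A standalone sketch (display_domain is a made-up name; domain_approx is the sketch from the top of this page):

def display_domain(url, is_self, sr_name):
    # self posts display as "self.<subreddit>"; external links
    # display their source domain
    if is_self:
        return "self." + sr_name
    return domain_approx(url)

assert display_domain("http://www.example.com/a", False, "python") == "example.com"
assert display_domain(None, True, "python") == "self.python"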