

Python utils.UrlParser Class Code Examples

This article collects typical usage examples of the Python class r2.lib.utils.UrlParser (from the open-source reddit codebase). If you are wondering what the UrlParser class does or how to use it, the curated examples below should help.


The 15 UrlParser code examples below are ordered by popularity.
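
To set the stage, here is a minimal round-trip sketch of the class, using only the attributes and methods that appear in the examples below (it assumes a reddit/r2 checkout on Python 2; the example URL is made up):

    from r2.lib.utils import UrlParser

    u = UrlParser('http://www.example.com/r/pics?foo=1')
    u.hostname = 'pixel.redditmedia.com'  # hostnames can be reassigned in place
    u.update_query(bar=2)                 # merge extra query parameters
    print u.unparse()                     # serialize back to a URL string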

Example 1: POST_request_promo

    def POST_request_promo(self, srnames):
        if not srnames:
            return

        srnames = srnames.split('+')

        # request multiple ads in case some are hidden by the builder due
        # to the user's hides/preferences
        response = adzerk_request(srnames)

        if not response:
            g.stats.simple_event('adzerk.request.no_promo')
            return

        res_by_campaign = {r.campaign: r for r in response}
        tuples = [promote.PromoTuple(r.link, 1., r.campaign) for r in response]
        builder = CampaignBuilder(tuples, wrap=default_thing_wrapper(),
                                  keep_fn=promote.promo_keep_fn,
                                  num=1,
                                  skip=True)
        listing = LinkListing(builder, nextprev=False).listing()
        if listing.things:
            g.stats.simple_event('adzerk.request.valid_promo')
            w = listing.things[0]
            r = res_by_campaign[w.campaign]

            up = UrlParser(r.imp_pixel)
            up.hostname = "pixel.redditmedia.com"
            w.adserver_imp_pixel = up.unparse()
            w.adserver_click_url = r.click_url
            w.num = ""
            return spaceCompress(w.render())
        else:
            g.stats.simple_event('adzerk.request.skip_promo')
Author: JordanMilne | Project: reddit-plugin-adzerk | Lines: 34 | Source: adzerkpromote.py

Example 2: process_message

    def process_message(msgs, chan):
        """Update get_domain_links(), the Links by domain precomputed query.

        get_domain_links() is a CachedResult which is stored in permacache. To
        update these objects we need to do a read-modify-write which requires
        obtaining a lock. Sharding these updates by domain allows us to run
        multiple consumers (but ideally just one per shard) to avoid lock
        contention.

        """

        from r2.lib.db.queries import add_queries, get_domain_links

        link_names = {msg.body for msg in msgs}
        links = Link._by_fullname(link_names, return_dict=False)
        print 'Processing %r' % (links,)

        links_by_domain = defaultdict(list)
        for link in links:
            parsed = UrlParser(link.url)

            # update the listings for all permutations of the link's domain
            for domain in parsed.domain_permutations():
                links_by_domain[domain].append(link)

        for d, links in links_by_domain.iteritems():
            with g.stats.get_timer("link_vote_processor.domain_queries"):
                add_queries(
                    queries=[
                        get_domain_links(d, sort, "all") for sort in SORTS],
                    insert_items=links,
                )
Author: 13steinj | Project: reddit | Lines: 32 | Source: voting.py
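
As context for the fan-out above, a hedged sketch (the URL is hypothetical, and the exact set returned by domain_permutations() depends on the r2 implementation):

    from r2.lib.utils import UrlParser

    parsed = UrlParser('http://blog.example.com/post/1')
    for domain in parsed.domain_permutations():
        # typically the full host plus its parent domains,
        # e.g. 'blog.example.com', 'example.com'
        print domain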

Example 3: GET_framebuster

    def GET_framebuster(self, what = None, blah = None):
        """
        renders the contents of the iframe which, on a cname, checks
        if the user is currently logged into reddit.
        
        if this page is hit from the primary domain, redirects to the
        cnamed domain version of the site.  If the user is logged in,
        this cnamed version will drop a boolean session cookie on that
        domain so that subsequent page reloads will be caught in
        middleware and a frame will be inserted around the content.

        If the user is not logged in, previous session cookies will be
        emptied so that subsequent refreshes will not be rendered in
        that pesky frame.
        """
        if not c.site.domain:
            return ""
        elif c.cname:
            return FrameBuster(login = (what == "login")).render()
        else:
            path = "/framebuster/"
            if c.user_is_loggedin:
                path += "login/"
            u = UrlParser(path + str(random.random()))
            u.mk_cname(require_frame = False, subreddit = c.site,
                       port = request.port)
            return self.redirect(u.unparse())
        # unreachable fallback (every branch above already returns):
        # the user is not logged in or there is no cname.
        return FrameBuster(login = False).render()
Author: JediWatchman | Project: reddit | Lines: 29 | Source: front.py

Example 4: purge_url

    def purge_url(self, url):
        """Purge an image (by url) from imgix.

        Reference: http://www.imgix.com/docs/tutorials/purging-images

        Note that as mentioned in the imgix docs, in order to remove
        an image, this function should be used *after* already
        removing the image from our source, or imgix will just re-fetch
        and replace the image with a new copy even after purging.
        """

        p = UrlParser(url)

        if p.hostname == g.imgix_domain:
            p.hostname = g.imgix_purge_domain
        elif p.hostname == g.imgix_gif_domain:
            p.hostname = g.imgix_gif_purge_domain

        url = p.unparse()

        requests.post(
            "https://api.imgix.com/v2/image/purger",
            auth=(g.secrets["imgix_api_key"], ""),
            data={"url": url},
        )
Author: zeantsoi | Project: reddit | Lines: 25 | Source: imgix.py
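
A usage sketch (the provider instance and image URL are hypothetical). Per the docstring, delete the image from our own storage first, then purge, or imgix will simply re-fetch it:

    image_url = 'https://%s/some-image.jpg' % g.imgix_domain
    # ... remove the image from our source storage here ...
    provider.purge_url(image_url)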

Example 5: url_for_title

  def url_for_title(self, title):
      """Uses the MediaWiki API to get the URL for a wiki page
      with the given title"""
      if title is None:
          return None

      from pylons import g
      cache_key = ('wiki_url_%s' % title).encode('ascii', 'ignore')
      wiki_url = g.cache.get(cache_key)
      if wiki_url is None:
          # http://www.mediawiki.org/wiki/API:Query_-_Properties#info_.2F_in
          api = UrlParser(g.wiki_api_url)
          api.update_query(
              action='query',
              titles=title,
              prop='info',
              format='yaml',
              inprop='url',
          )

          try:
              response = urlopen(api.unparse()).read()
              parsed_response = yaml.load(response, Loader=yaml.CLoader)
              page = parsed_response['query']['pages'][0]
          except:
              return None

          wiki_url = page.get('fullurl').strip()

          # Things are created every couple of days so 12 hours seems
          # to be a reasonable cache time
          g.permacache.set(cache_key, wiki_url, time=3600 * 12)

      return wiki_url
Author: Kenneth-Chen | Project: lesswrong | Lines: 34 | Source: wiki.py

Example 6: _update_redirect_uri

def _update_redirect_uri(base_redirect_uri, params, as_fragment=False):
    parsed = UrlParser(base_redirect_uri)
    if as_fragment:
        parsed.fragment = urlencode(params)
    else:
        parsed.update_query(**params)
    return parsed.unparse()
Author: AHAMED750 | Project: reddit | Lines: 7 | Source: oauth2.py
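
Both modes in one sketch (parameter values are hypothetical): query parameters suit the OAuth2 authorization-code flow, fragment parameters the implicit flow:

    _update_redirect_uri('https://app.example.com/cb', {'code': 'abc'})
    # -> 'https://app.example.com/cb?code=abc'

    _update_redirect_uri('https://app.example.com/cb', {'token': 'abc'},
                         as_fragment=True)
    # -> 'https://app.example.com/cb#token=abc'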

Example 7: format_output_url

    def format_output_url(cls, url, **kw):
        """
        Helper method used during redirect to ensure that the redirect
        url (assisted by frame busting code or javascript) will point
        to the correct domain and not have any extra dangling get
        parameters.  The extensions are also made to match and the
        resulting url is utf8 encoded.

        Note: for development purposes, this also checks that the port
        matches the request port.
        """
        preserve_extension = kw.pop("preserve_extension", True)
        u = UrlParser(url)

        if u.is_reddit_url():
            # make sure to pass the port along if not 80
            if not kw.has_key('port'):
                kw['port'] = request.port

            # make sure the extensions agree with the current page
            if preserve_extension and c.extension:
                u.set_extension(c.extension)

        # unparse and encode it in utf8
        rv = _force_unicode(u.unparse()).encode('utf8')
        if "\n" in rv or "\r" in rv:
            abort(400)
        return rv
Author: VishnuTadimeti | Project: reddit | Lines: 28 | Source: base.py
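
One detail worth calling out: the final newline check rejects redirect targets that could smuggle CRLF into a Location header (HTTP response splitting). An illustration of what it refuses, not code from base.py (the raw string stands in for attacker-controlled input):

    from pylons.controllers.util import abort  # as in the controller above

    rv = 'http://example.com/\r\nSet-Cookie: x=1'
    if "\n" in rv or "\r" in rv:
        abort(400)  # refuse to emit the header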

Example 8: allowed_media_preview_url

def allowed_media_preview_url(url):
    p = UrlParser(url)
    if p.has_static_image_extension():
        return True
    for allowed_domain in g.media_preview_domain_whitelist:
        if is_subdomain(p.hostname, allowed_domain):
            return True
    return False
Author: AHAMED750 | Project: reddit | Lines: 8 | Source: media.py
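
A hedged illustration (URLs are made up; results depend on the contents of g.media_preview_domain_whitelist):

    allowed_media_preview_url('http://cdn.example.com/cat.png')
    # True, assuming .png counts as a static image extension
    allowed_media_preview_url('http://other.example.net/page')
    # True only if the hostname is a (sub)domain of a whitelisted entry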

Example 9: GET_framebuster

    def GET_framebuster(self):
        if c.site.domain and c.user_is_loggedin:
            u = UrlParser(c.site.path + "/frame")
            u.put_in_frame()
            c.cname = True
            return self.redirect(u.unparse())

        return "fail"
Author: JoshuaDavid | Project: lesswrong-1 | Lines: 8 | Source: front.py

Example 10: _key_from_url

 def _key_from_url(cls, url):
     if utils.domain(url) not in g.case_sensitive_domains:
         keyurl = _force_utf8(UrlParser.base_url(url.lower()))
     else:
         # Convert only hostname to lowercase
         up = UrlParser(url)
         up.hostname = up.hostname.lower()
         keyurl = _force_utf8(UrlParser.base_url(up.unparse()))
     return keyurl
Author: phektus | Project: dabuzz | Lines: 9 | Source: link.py
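
The effect, sketched with hypothetical domains: most URLs are keyed fully lowercased, while for a case-sensitive domain (say a URL shortener where /AbC and /abc are different resources) only the hostname is normalized:

    Link._key_from_url('http://Example.COM/Page')  # keyed from 'http://example.com/page'
    Link._key_from_url('http://Bit.LY/AbC')        # keyed from 'http://bit.ly/AbC',
                                                   # assuming 'bit.ly' is listed in
                                                   # g.case_sensitive_domains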

Example 11: redirect_to_host

def redirect_to_host(hostname, path=None):
    """Redirect (302) to the specified path and host."""
    if path is None:
        path = request.path

    u = UrlParser(path)
    u.hostname = hostname

    # 307 redirect so request method is retained
    abort(307, location=u.unparse())
Author: 13steinj | Project: reddit-plugin-betamode | Lines: 10 | Source: betamode.py
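
Usage sketch (hostnames hypothetical). Because path defaults to request.path, a bare call replays the current request against another host:

    redirect_to_host('beta.example.com')             # same path, new host
    redirect_to_host('www.example.com', '/welcome')  # explicit path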

Example 12: make_scraper

def make_scraper(url):
    parsed = UrlParser(url)

    if parsed.is_reddit_url():
        if parsed.path.startswith("/live/"):
            try:
                event_id = parsed.path.split("/")[2]
            except IndexError:
                return
            else:
                return _LiveUpdateScraper(event_id)
Author: reddit | Project: reddit-plugin-liveupdate | Lines: 11 | Source: scraper.py
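
A sketch of the dispatch (the event id is hypothetical, and is_reddit_url() is assumed to recognize the host under the r2 config): anything that is not a reddit /live/ URL falls through and the function returns None:

    make_scraper('https://www.reddit.com/live/abc123')
    # -> _LiveUpdateScraper('abc123')
    make_scraper('https://example.com/other')
    # -> None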

Example 13: POST_request_promo

    def POST_request_promo(self, srnames, is_mobile_web, platform, loid, is_refresh):
        self.OPTIONS_request_promo()

        if not srnames:
            return

        # backwards compat
        if platform is None:
            platform = "mobile_web" if is_mobile_web else "desktop"

        srnames = srnames.split('+')

        # request multiple ads in case some are hidden by the builder due
        # to the user's hides/preferences
        response = adzerk_request(srnames, self.get_uid(loid),
                                  platform=platform)

        if not response:
            g.stats.simple_event('adzerk.request.no_promo')
            return

        # for adservers, adzerk returns markup so we pass it to the client
        if isinstance(response, AdserverResponse):
            g.stats.simple_event('adzerk.request.adserver')
            return responsive(response.body)

        res_by_campaign = {r.campaign: r for r in response}
        adserver_click_urls = {r.campaign: r.click_url for r in response}
        tuples = [promote.PromoTuple(r.link, 1., r.campaign) for r in response]
        builder = CampaignBuilder(tuples, wrap=default_thing_wrapper(),
                                  keep_fn=promote.promo_keep_fn,
                                  num=1,
                                  skip=True)
        listing = LinkListing(builder, nextprev=False).listing()
        promote.add_trackers(listing.things, c.site, adserver_click_urls=adserver_click_urls)
        promote.update_served(listing.things)
        if listing.things:
            g.stats.simple_event('adzerk.request.valid_promo')
            if is_refresh:
                g.stats.simple_event('adzerk.request.auto_refresh')

            w = listing.things[0]
            r = res_by_campaign[w.campaign]

            up = UrlParser(r.imp_pixel)
            up.hostname = "pixel.redditmedia.com"
            w.adserver_imp_pixel = up.unparse()
            w.adserver_upvote_pixel = r.upvote_pixel
            w.adserver_downvote_pixel = r.downvote_pixel
            w.adserver_click_url = r.click_url
            w.num = ""
            return responsive(w.render(), space_compress=True)
        else:
            g.stats.simple_event('adzerk.request.skip_promo')
Author: nramadas | Project: reddit-plugin-adzerk | Lines: 54 | Source: adzerkpromote.py

Example 14: test_sign_url

    def test_sign_url(self):
        u = UrlParser('http://examples.imgix.net/frog.jpg?w=100')
        signed_url = self.provider._sign_url(u, 'abcdef')
        self.assertEqual(signed_url.unparse(),
                'http://examples.imgix.net/frog.jpg?w=100&s=cd3bdf071108af73b15c21bdcee5e49c')

        u = UrlParser('http://examples.imgix.net/frog.jpg')
        u.update_query(w=100)
        signed_url = self.provider._sign_url(u, 'abcdef')
        self.assertEqual(signed_url.unparse(),
                'http://examples.imgix.net/frog.jpg?w=100&s=cd3bdf071108af73b15c21bdcee5e49c')
Author: ActivateServices | Project: reddit | Lines: 11 | Source: imgix_test.py
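
The digests in the test come from imgix's documented URL-signing scheme: an MD5 over the signing token, the path, and the query string, appended as the trailing s parameter. A standalone approximation of that scheme (an assumption about _sign_url's internals, not the provider's actual code):

    import hashlib

    def sign_imgix_url(path, query, token):
        # md5(token + path + '?' + query), hex-encoded, becomes the `s` param
        sig = hashlib.md5(token + path + '?' + query).hexdigest()
        return path + '?' + query + '&s=' + sig

    print sign_imgix_url('/frog.jpg', 'w=100', 'abcdef')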

Example 15: add_to_domain_query_q

def add_to_domain_query_q(link):
    parsed = UrlParser(link.url)
    if not parsed.domain_permutations():
        # no valid domains found
        return

    if g.shard_domain_query_queues:
        domain_shard = hash(parsed.hostname) % 10
        queue_name = "domain_query_%s_q" % domain_shard
    else:
        queue_name = "domain_query_q"
    amqp.add_item(queue_name, link._fullname)
Author: 13steinj | Project: reddit | Lines: 12 | Source: voting.py
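
The shard fan-out, traced by hand (domain hypothetical). This leans on Python 2's str hash() being stable across processes, which holds as long as hash randomization is not enabled:

    domain_shard = hash('example.com') % 10           # some shard in 0..9
    queue_name = 'domain_query_%s_q' % domain_shard   # e.g. 'domain_query_3_q'
    # running one consumer per such queue avoids lock contention
    # on the per-domain CachedResults (see Example 2)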


Note: The r2.lib.utils.UrlParser class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were contributed to their respective open-source projects by the original authors, who retain copyright; consult each project's license before using or redistributing the code. Please do not repost without permission.