当前位置: 首页>>代码示例>>Python>>正文


Python parse.urljoin方法代码示例

本文整理汇总了Python中six.moves.urllib.parse.urljoin方法的典型用法代码示例。如果您正苦于以下问题:Python parse.urljoin方法的具体用法?Python parse.urljoin怎么用?Python parse.urljoin使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在six.moves.urllib.parse的用法示例。


在下文中一共展示了parse.urljoin方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: _extract_links

# 需要导入模块: from six.moves.urllib import parse [as 别名]
# 或者: from six.moves.urllib.parse import urljoin [as 别名]
def _extract_links(self, selector, response_url, response_encoding, base_url):
    """Collect absolute links from *selector*.

    Same contract as the stock scrapy extractor, except the decode step
    uses errors='ignore' so undecodable bytes are dropped instead of
    raising.
    """
    results = []
    # hacky way to get the underlying lxml parsed document
    for element, attribute, value in self._iter_links(selector.root):
        # pseudo lxml.html.HtmlElement.make_links_absolute(base_url)
        try:
            value = urljoin(base_url, value)
        except ValueError:
            continue  # skipping bogus links
        candidate = self.process_attr(value)
        if candidate is None:
            continue
        # 'ignore' silently drops bytes that don't decode in this encoding
        candidate = to_native_str(candidate, encoding=response_encoding,
                                  errors='ignore')
        # re-join to fix relative links produced by process_value
        candidate = urljoin(response_url, candidate)
        results.append(Link(candidate,
                            _collect_string_content(element) or u'',
                            nofollow=rel_has_nofollow(element.get('rel'))))
    return self._deduplicate_if_needed(results)
开发者ID:istresearch,项目名称:scrapy-cluster,代码行数:27,代码来源:lxmlhtml.py

示例2: _get_oauth_token

# 需要导入模块: from six.moves.urllib import parse [as 别名]
# 或者: from six.moves.urllib.parse import urljoin [as 别名]
def _get_oauth_token(self):
    """
    Fetch a Monzo access token via the OAuth2 'authorization code' grant.

    Official docs:
        https://monzo.com/docs/#acquire-an-access-token

    :returns: OAuth 2 access token
    :rtype: dict
    """
    session = OAuth2Session(
        client_id=self._client_id,
        redirect_uri=config.REDIRECT_URI,
    )
    # Exchange the stored authorization code for an access token.
    return session.fetch_token(
        token_url=urljoin(self.api_url, '/oauth2/token'),
        code=self._auth_code,
        client_secret=self._client_secret,
    )
开发者ID:pawelad,项目名称:pymonzo,代码行数:26,代码来源:monzo_api.py

示例3: test_class_get_oauth_token_method

# 需要导入模块: from six.moves.urllib import parse [as 别名]
# 或者: from six.moves.urllib.parse import urljoin [as 别名]
def test_class_get_oauth_token_method(self, mocker, mocked_monzo):
    """Test class `_get_oauth_token` method"""
    fetch_token_mock = mocker.MagicMock()
    session_cls_mock = mocker.patch('pymonzo.monzo_api.OAuth2Session')
    session_cls_mock.return_value.fetch_token = fetch_token_mock

    result = mocked_monzo._get_oauth_token()

    # The token must come straight from OAuth2Session.fetch_token.
    assert result == fetch_token_mock.return_value

    session_cls_mock.assert_called_once_with(
        client_id=mocked_monzo._client_id,
        redirect_uri=config.REDIRECT_URI,
    )
    fetch_token_mock.assert_called_once_with(
        token_url=urljoin(mocked_monzo.api_url, '/oauth2/token'),
        code=mocked_monzo._auth_code,
        client_secret=mocked_monzo._client_secret,
    )
开发者ID:pawelad,项目名称:pymonzo,代码行数:21,代码来源:test_monzo_api.py

示例4: _construct_url

# 需要导入模块: from six.moves.urllib import parse [as 别名]
# 或者: from six.moves.urllib.parse import urljoin [as 别名]
def _construct_url(self, relative_path, query_params=None, extattrs=None):
    """Join *relative_path* onto ``self.wapi_url`` and append a query string.

    :param relative_path: WAPI object path; must be relative and non-empty.
    :param query_params: plain query parameters, urlencoded into the query.
    :param extattrs: extensible attributes, encoded as ``*name=value`` pairs.
    :raises ValueError: if *relative_path* is empty or starts with '/'.
    """
    query_params = {} if query_params is None else query_params
    extattrs = {} if extattrs is None else extattrs

    if not relative_path or relative_path.startswith('/'):
        raise ValueError('Path in request must be relative.')

    query = '?' if (query_params or extattrs) else ''

    if extattrs:
        # Extensible attributes use the WAPI '*name=value' convention.
        pieces = []
        for name, spec in extattrs.items():
            LOG.debug("key: %s, value: %s", name, spec)
            pieces.append('*' + name + '=' + spec['value'])
        query += '&'.join(pieces)

    if query_params:
        if len(query) > 1:
            query += '&'
        query += parse.urlencode(query_params)

    return parse.urljoin(self.wapi_url, parse.quote(relative_path)) + query
开发者ID:openstack,项目名称:designate,代码行数:27,代码来源:connector.py

示例5: place_new_order

# 需要导入模块: from six.moves.urllib import parse [as 别名]
# 或者: from six.moves.urllib.parse import urljoin [as 别名]
def place_new_order(self, stock, price, qty, direction, order_type):
    """Place an order for a stock.

    https://starfighter.readme.io/docs/place-new-order
    """
    path = 'venues/{venue}/stocks/{stock}/orders'.format(
        venue=self.venue,
        stock=stock,
    )
    payload = {
      "stock": stock,
      "price": price,
      "venue": self.venue,
      "account": self.account,
      "qty": qty,
      "direction": direction,
      "orderType": order_type,
    }
    # POST the JSON order body to the venue's orders endpoint.
    response = self.session.post(urljoin(self.base_url, path), json=payload)
    return response.json()
开发者ID:striglia,项目名称:stockfighter,代码行数:23,代码来源:stockfighter.py

示例6: clean_url

# 需要导入模块: from six.moves.urllib import parse [as 别名]
# 或者: from six.moves.urllib.parse import urljoin [as 别名]
def clean_url(url, base_url=None):
    """Add base netloc and path to internal URLs and remove www, fragments.

    :param url: URL to clean; may be relative when *base_url* is given.
    :param base_url: base URL used to absolutise netloc-less internal URLs.
    :returns: URL with fragment, leading 'www.' and trailing punctuation removed.
    """
    parsed_url = urlparse(url)

    # Strip the fragment.  The old code split on the fragment *text*
    # (url.split(fragment)[0]), which corrupted URLs whose path also
    # contained that text (e.g. 'http://e/abc#a' -> 'http://ex'); cutting
    # at the '#' separator is always safe.
    if parsed_url.fragment:
        url = url.split('#')[0]

    # Identify internal URLs and fix their format
    netloc = parsed_url.netloc
    if base_url is not None and not netloc:
        parsed_base = urlparse(base_url)
        split_base = "{url.scheme}://{url.netloc}{url.path}/".format(url=parsed_base)
        url = urljoin(split_base, url)
        netloc = urlparse(url).netloc

    if "www." in netloc:
        url = url.replace(netloc, netloc.replace("www.", ""))
    return url.rstrip(string.punctuation)
开发者ID:huntrar,项目名称:scrape,代码行数:21,代码来源:utils.py

示例7: is_online

# 需要导入模块: from six.moves.urllib import parse [as 别名]
# 或者: from six.moves.urllib.parse import urljoin [as 别名]
def is_online(self, id):
    """Returns whether a product is online

    Parameters
    ----------
    id : string
        UUID of the product, e.g. 'a8dd0cfd-613e-45ce-868c-d79177b916ed'

    Returns
    -------
    bool
        True if online, False if in LTA

    """
    # Check https://scihub.copernicus.eu/userguide/ODataAPI#Products_entity
    # for more information on the OData 'Online' property queried here.
    product_url = urljoin(
        self.api_url, "odata/v1/Products('{}')/Online/$value".format(id))
    response = self.session.get(
        product_url, auth=self.session.auth, timeout=self.timeout)
    _check_scihub_response(response)
    return response.json()
开发者ID:sentinelsat,项目名称:sentinelsat,代码行数:22,代码来源:sentinel.py

示例8: __init__

# 需要导入模块: from six.moves.urllib import parse [as 别名]
# 或者: from six.moves.urllib.parse import urljoin [as 别名]
def __init__(self, destination_params, transport):
    """Remember *transport* and build the normalised remote Pulsar URL."""
    self.transport = transport
    url = destination_params.get("url")
    assert url is not None, "Failed to determine url for Pulsar client."
    # Default to plain HTTP when no scheme was supplied.
    if not url.startswith("http"):
        url = "http://%s" % url
    manager = destination_params.get("manager", None)
    if manager:
        if "/managers/" in url:
            log.warning("Ignoring manager tag '%s', Pulsar client URL already contains a \"/managers/\" path." % manager)
        else:
            url = urljoin(url, "managers/%s" % manager)
    # Ensure a trailing slash on the stored base URL.
    if not url.endswith("/"):
        url = "%s/" % url
    self.remote_host = url
    self.private_token = destination_params.get("private_token", None)
开发者ID:galaxyproject,项目名称:pulsar,代码行数:18,代码来源:interface.py

示例9: prepare_request

# 需要导入模块: from six.moves.urllib import parse [as 别名]
# 或者: from six.moves.urllib.parse import urljoin [as 别名]
def prepare_request(self, request):
        """Screen request arguments and absolutise relative URLs, then delegate
        to the parent class's prepare_request.

        Raises ValidationError when an illegal character (matched by
        ENV['ILLEGAL_CHARACTERS_PATTERN']) appears in the params/data of a
        request aimed at this session's host.
        """
        parsed = parse.urlparse(request.url)
        # Illegal-character check, applied only to requests without a netloc
        # or whose netloc matches this session's host.
        if ENV['REQUEST_ARGUMENTS_CHECK'] and (not parsed.netloc or parsed.netloc == parse.urlparse(self.host).netloc):
            for k, v in reduce(lambda x, y: x + list(y.items()), (request.params, request.data), []):
                pattern = ENV['ILLEGAL_CHARACTERS_PATTERN']
                result = pattern.search(str(k)) or pattern.search(str(v))
                if result:
                    msg = ''.join(['参数中出现非法字符: ', result.group()])
                    raise ValidationError(msg)
        if not parsed.netloc:
            # requests parses the url during prepare, so it must be replaced
            # with the full address beforehand
            # (see requests.models.PreparedRequest#prepare_url)
            request.url = parse.urljoin(self.host, request.url)

        return super(BaseSession, self).prepare_request(request)
开发者ID:elonzh,项目名称:hfut,代码行数:18,代码来源:session.py

示例10: get_upload_channels

# 需要导入模块: from six.moves.urllib import parse [as 别名]
# 或者: from six.moves.urllib.parse import urljoin [as 别名]
def get_upload_channels(upload_config_dir, subdir, channels=None):
    """Collect channel names/URLs from the upload configs in *upload_config_dir*.

    Historical note: the idea was to feed every output channel back in as an
    input; that was dropped in favor of setting channels in condarc in the
    docker image.  NOTE: appends into the caller's *channels* list when one
    is provided (original behavior, deliberately preserved).
    """
    channels = channels or []
    for cfg in load_yaml_config_dir(upload_config_dir):
        if 'token' in cfg:
            channels.append(cfg['user'])
        elif 'server' in cfg:
            destination = cfg['destination_path'].format(subdir=subdir)
            channels.append(parse.urljoin('http://' + cfg['server'], destination))
        else:
            channels.append(cfg['channel'])
    return channels
开发者ID:conda,项目名称:conda-concourse-ci,代码行数:19,代码来源:uploads.py

示例11: process_response

# 需要导入模块: from six.moves.urllib import parse [as 别名]
# 或者: from six.moves.urllib.parse import urljoin [as 别名]
def process_response(self, request, response, spider):
    """Follow HTTP 3xx redirects unless the request or spider opts out."""
    meta = request.meta
    # Respect the various per-request/per-spider opt-outs.
    if (meta.get('dont_redirect', False)
            or response.status in getattr(spider, 'handle_httpstatus_list', [])
            or response.status in meta.get('handle_httpstatus_list', [])
            or meta.get('handle_httpstatus_all', False)):
        return response

    if (response.status not in (301, 302, 303, 307, 308)
            or 'Location' not in response.headers):
        return response

    target = urljoin(request.url, safe_url_string(response.headers['location']))

    # 301/307/308 — and any HEAD request — keep the original method/body.
    if response.status in (301, 307, 308) or request.method == 'HEAD':
        return self._redirect(request.replace(url=target),
                              request, spider, response.status)

    # 302/303 on a non-HEAD request redirects with a GET.
    return self._redirect(self._redirect_request_using_get(request, target),
                          request, spider, response.status)
开发者ID:wistbean,项目名称:learn_python3_spider,代码行数:23,代码来源:redirect.py

示例12: _extract_links

# 需要导入模块: from six.moves.urllib import parse [as 别名]
# 或者: from six.moves.urllib.parse import urljoin [as 别名]
def _extract_links(self, response_text, response_url, response_encoding, base_url=None):
    """Build Link objects for every anchor the module regex finds in the body."""
    if base_url is None:
        base_url = get_base_url(response_text, response_url, response_encoding)

    def _text(raw):
        # Strip tags and escape sequences from the anchor text.
        return replace_escape_chars(remove_tags(raw.decode(response_encoding))).strip()

    def _url(raw):
        # Absolutise against base_url; a bogus link yields ''.
        try:
            return urljoin(base_url,
                           replace_entities(clean_link(raw.decode(response_encoding))))
        except ValueError:
            return ''

    return [Link(_url(u).encode(response_encoding), _text(t))
            for u, _, t in linkre.findall(response_text)]
开发者ID:wistbean,项目名称:learn_python3_spider,代码行数:21,代码来源:regex.py

示例13: _extract_links

# 需要导入模块: from six.moves.urllib import parse [as 别名]
# 或者: from six.moves.urllib.parse import urljoin [as 别名]
def _extract_links(self, selector, response_url, response_encoding, base_url):
    """Return deduplicated Link objects for every link element in *selector*."""
    found = []
    # hacky way to get the underlying lxml parsed document
    for element, attribute, value in self._iter_links(selector.root):
        # pseudo lxml.html.HtmlElement.make_links_absolute(base_url)
        try:
            if self.strip:
                value = strip_html5_whitespace(value)
            value = urljoin(base_url, value)
        except ValueError:
            continue  # skipping bogus links
        link_url = self.process_attr(value)
        if link_url is None:
            continue
        link_url = to_native_str(link_url, encoding=response_encoding)
        # re-join to fix relative links produced by process_value
        link_url = urljoin(response_url, link_url)
        found.append(Link(link_url,
                          _collect_string_content(element) or u'',
                          nofollow=rel_has_nofollow(element.get('rel'))))
    return self._deduplicate_if_needed(found)
开发者ID:wistbean,项目名称:learn_python3_spider,代码行数:24,代码来源:lxmlhtml.py

示例14: _extract_links

# 需要导入模块: from six.moves.urllib import parse [as 别名]
# 或者: from six.moves.urllib.parse import urljoin [as 别名]
def _extract_links(self, response_text, response_url, response_encoding, base_url=None):
    """Parse *response_text* and return its links, absolutised and re-encoded."""
    self.reset()
    self.feed(response_text)
    self.close()

    if base_url is None:
        # Honour a <base> tag seen during parsing, if any.
        base_url = urljoin(response_url, self.base_url) if self.base_url else response_url

    extracted = []
    for link in self.links:
        if isinstance(link.url, six.text_type):
            link.url = link.url.encode(response_encoding)
        try:
            link.url = urljoin(base_url, link.url)
        except ValueError:
            continue  # skip bogus links
        link.url = safe_url_string(link.url, response_encoding)
        link.text = to_unicode(link.text, response_encoding, errors='replace').strip()
        extracted.append(link)

    return extracted
开发者ID:wistbean,项目名称:learn_python3_spider,代码行数:23,代码来源:sgml.py

示例15: _extract_links

# 需要导入模块: from six.moves.urllib import parse [as 别名]
# 或者: from six.moves.urllib.parse import urljoin [as 别名]
def _extract_links(self, response_text, response_url, response_encoding):
    """Feed the parser and return its links (optionally unique), absolutised."""
    self.reset()
    self.feed(response_text)
    self.close()

    candidates = self.links
    if self.unique:
        candidates = unique_list(candidates, key=lambda lnk: lnk.url)

    # Honour a <base> tag seen during parsing, if any.
    base_url = urljoin(response_url, self.base_url) if self.base_url else response_url

    out = []
    for link in candidates:
        if isinstance(link.url, six.text_type):
            link.url = link.url.encode(response_encoding)
        try:
            link.url = urljoin(base_url, link.url)
        except ValueError:
            continue  # skip bogus links
        link.url = safe_url_string(link.url, response_encoding)
        link.text = link.text.decode(response_encoding)
        out.append(link)

    return out
开发者ID:wistbean,项目名称:learn_python3_spider,代码行数:23,代码来源:htmlparser.py


注:本文中的six.moves.urllib.parse.urljoin方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。