當前位置: 首頁>>代碼示例>>Python>>正文


Python moves.urllib方法代碼示例

本文整理匯總了Python中six.moves.urllib方法的典型用法代碼示例。如果您正苦於以下問題:Python moves.urllib方法的具體用法?Python moves.urllib怎麽用?Python moves.urllib使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在six.moves的用法示例。


在下文中一共展示了moves.urllib方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Python代碼示例。

示例1: _insert_username_and_password_to_repo_url

# 需要導入模塊: from six import moves [as 別名]
# 或者: from six.moves import urllib [as 別名]
def _insert_username_and_password_to_repo_url(url, username, password):
    """Insert the username and the password to the Git repo url, to make a
    component of the git clone command. This method can only be called when
    repo_url is an https url.

    Args:
        url (str): Git repo url where the token should be inserted into.
        username (str): Username to be inserted.
        password (str): Password to be inserted.

    Returns:
        str: the component needed for the git clone command.
    """
    password = urllib.parse.quote_plus(password)
    # urllib parses ' ' as '+', but what we need is '%20' here
    password = password.replace("+", "%20")
    index = len("https://")
    return url[:index] + username + ":" + password + "@" + url[index:] 
開發者ID:aws,項目名稱:sagemaker-python-sdk,代碼行數:20,代碼來源:git_utils.py

示例2: from_urls

# 需要導入模塊: from six import moves [as 別名]
# 或者: from six.moves import urllib [as 別名]
def from_urls(urls, timeout=None):
    """
    Crawls articles from the urls and extracts relevant information.
    :param urls:
    :param timeout: in seconds, if None, the urllib default is used
    :return: A dict containing given URLs as keys, and extracted information as corresponding values.
    """
    download_date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    # Nothing to crawl: return an empty result mapping.
    if not urls:
        return {}

    # Single url: fetch it directly, honoring the caller-supplied timeout.
    if len(urls) == 1:
        single_url = urls[0]
        html = SimpleCrawler.fetch_url(single_url, timeout=timeout)
        return {single_url: NewsPlease.from_html(html, single_url, download_date)}

    # Multiple urls: batch-fetch, then extract each page.
    fetched = SimpleCrawler.fetch_urls(urls)
    return {
        page_url: NewsPlease.from_html(page_html, page_url, download_date)
        for page_url, page_html in fetched.items()
    }
開發者ID:fhamborg,項目名稱:news-please,代碼行數:26,代碼來源:__init__.py

示例3: extract_db_type_from_uri

# 需要導入模塊: from six import moves [as 別名]
# 或者: from six.moves import urllib [as 別名]
def extract_db_type_from_uri(db_uri):
    """
    Parse the specified DB URI to extract the database type. Confirm the database type is
    supported. If a driver is specified, confirm it passes a plausible regex.
    """
    scheme = urllib.parse.urlparse(db_uri).scheme
    # A scheme may be "dbtype" or "dbtype+driver"; more than one '+' is invalid.
    parts = scheme.split('+')

    if len(parts) > 2:
        raise MlflowException(
            "Invalid database URI: '%s'. %s" % (db_uri, _INVALID_DB_URI_MSG),
            INVALID_PARAMETER_VALUE)

    db_type = parts[0]
    _validate_db_type_string(db_type)
    return db_type
開發者ID:mlflow,項目名稱:mlflow,代碼行數:21,代碼來源:uri.py

示例4: _download_artifact_from_uri

# 需要導入模塊: from six import moves [as 別名]
# 或者: from six.moves import urllib [as 別名]
def _download_artifact_from_uri(artifact_uri, output_path=None):
    """
    :param artifact_uri: The *absolute* URI of the artifact to download.
    :param output_path: The local filesystem path to which to download the artifact. If unspecified,
                        a local output path will be created.
    """
    parsed = urllib.parse.urlparse(artifact_uri)
    scheme_prefix = ""
    if parsed.scheme and not parsed.path.startswith("/"):
        # relative path is a special case, urllib does not reconstruct it properly
        scheme_prefix = parsed.scheme + ":"
        parsed = parsed._replace(scheme="")

    # For models:/ URIs, it doesn't make sense to initialize a ModelsArtifactRepository with only
    # the model name portion of the URI, then call download_artifacts with the version info.
    if ModelsArtifactRepository.is_models_uri(artifact_uri):
        root_uri, artifact_path = artifact_uri, ""
    else:
        # Split the URI into its repository root and the artifact's relative path.
        artifact_path = posixpath.basename(parsed.path)
        parsed = parsed._replace(path=posixpath.dirname(parsed.path))
        root_uri = scheme_prefix + urllib.parse.urlunparse(parsed)

    repository = get_artifact_repository(artifact_uri=root_uri)
    return repository.download_artifacts(artifact_path=artifact_path, dst_path=output_path)
開發者ID:mlflow,項目名稱:mlflow,代碼行數:27,代碼來源:artifact_utils.py

示例5: turn_on_internet

# 需要導入模塊: from six import moves [as 別名]
# 或者: from six.moves import urllib [as 別名]
def turn_on_internet(verbose=False):
    """
    Restore internet access.  Not used, but kept in case it is needed.

    Reinstalls the saved urllib opener and reassigns the saved socket
    functions, undoing the module-level patching that disabled network
    access. Returns the ``socket`` module on success; returns ``None``
    (implicitly) if internet access was never turned off.
    """

    # Module-level flag and the opener saved when access was disabled.
    global INTERNET_OFF
    global _orig_opener

    # Nothing to restore if access was never disabled.
    if not INTERNET_OFF:
        return

    INTERNET_OFF = False

    if verbose:
        print("Internet access enabled")

    # Put back the opener that was in effect before access was disabled.
    urllib.request.install_opener(_orig_opener)

    # NOTE(review): socket_create_connection / socket_bind / socket_connect are
    # presumably the original functions captured at import time elsewhere in
    # this module — confirm they are saved before any patching occurs.
    socket.create_connection = socket_create_connection
    socket.socket.bind = socket_bind
    socket.socket.connect = socket_connect
    return socket
開發者ID:alvarobartt,項目名稱:twitter-stock-recommendation,代碼行數:24,代碼來源:disable_internet.py

示例6: mdp_schema

# 需要導入模塊: from six import moves [as 別名]
# 或者: from six.moves import urllib [as 別名]
def mdp_schema():
    """Build an SBESchema fixture from the downloaded MDP schema file.

    Prefers pre-generated message classes when available and falls back to
    parsing the downloaded XML schema. The temporary schema file is removed
    before returning.
    """
    schema_filename = tempfile.NamedTemporaryFile().name
    urllib.request.urlretrieve(schema_url, schema_filename)
    urllib.request.urlcleanup()  # work around a bug in urllib under python 2.7 (https://stackoverflow.com/a/44734254)
    schema = SBESchema(include_message_size_header=True, use_description_as_message_name=True)
    try:
        from sbedecoder.generated import __messages__ as generated_messages
        schema.load(generated_messages)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; only real failures should trigger the parse fallback.
        schema.parse(schema_filename)
    os.remove(schema_filename)
    return schema
開發者ID:tfgm,項目名稱:sbedecoder,代碼行數:14,代碼來源:test_sbe_parser.py

示例7: secdef

# 需要導入模塊: from six import moves [as 別名]
# 或者: from six.moves import urllib [as 別名]
def secdef():
    """Download the security-definition file and return a loaded SecDef fixture."""
    local_path = tempfile.NamedTemporaryFile().name
    urllib.request.urlretrieve(secdef_url, local_path)
    # work around a bug in urllib under python 2.7 (https://stackoverflow.com/a/44734254)
    urllib.request.urlcleanup()
    loaded = SecDef()
    loaded.load(local_path)
    os.remove(local_path)
    return loaded  # provide the fixture value
開發者ID:tfgm,項目名稱:sbedecoder,代碼行數:10,代碼來源:test_secdef.py

示例8: parse_url

# 需要導入模塊: from six import moves [as 別名]
# 或者: from six.moves import urllib [as 別名]
def parse_url(url):
    """alias for urllib parse

    Args:
        url (str): url to parse
    """
    parsed = six.moves.urllib.parse.urlparse(url)
    return parsed
開發者ID:Bolton-and-Menk-GIS,項目名稱:restapi,代碼行數:9,代碼來源:rest_utils.py

示例9: from_html

# 需要導入模塊: from six import moves [as 別名]
# 或者: from six.moves import urllib [as 別名]
def from_html(html, url=None, download_date=None):
    """
    Extracts relevant information from an HTML page given as a string. This function does not invoke scrapy but only
    uses the article extractor. If you have the original URL make sure to provide it as this helps NewsPlease
    to extract the publishing date and title.
    :param html:
    :param url:
    :return:
    """
    extractor = article_extractor.Extractor(
        ['newspaper_extractor', 'readability_extractor', 'date_extractor', 'lang_detect_extractor'])

    if not url:
        url = ''

    empty_title = ''.encode()

    # if an url was given, we can use that as the filename
    item = NewscrawlerItem()
    item['spider_response'] = DotMap()
    item['spider_response'].body = html
    item['url'] = url
    item['source_domain'] = urllib.parse.urlparse(url).hostname.encode() if url != '' else ''.encode()
    item['html_title'] = empty_title
    item['rss_title'] = empty_title
    item['local_path'] = None
    item['filename'] = urllib.parse.quote_plus(url) + '.json'
    item['download_date'] = download_date
    item['modified_date'] = None
    item = extractor.extract(item)

    relevant_info = ExtractedInformationStorage.extract_relevant_info(item)
    return ExtractedInformationStorage.convert_to_class(relevant_info)
開發者ID:fhamborg,項目名稱:news-please,代碼行數:37,代碼來源:__init__.py

示例10: from_url

# 需要導入模塊: from six import moves [as 別名]
# 或者: from six.moves import urllib [as 別名]
def from_url(url, timeout=None):
    """
    Crawls the article from the url and extracts relevant information.
    :param url:
    :param timeout: in seconds, if None, the urllib default is used
    :return: A NewsArticle object containing all the information of the article. Else, None.
    :rtype: NewsArticle, None
    """
    articles = NewsPlease.from_urls([url], timeout=timeout)
    # dict.get yields None when the crawl produced no entry for this url.
    return articles.get(url)
開發者ID:fhamborg,項目名稱:news-please,代碼行數:15,代碼來源:__init__.py

示例11: is_local_uri

# 需要導入模塊: from six import moves [as 別名]
# 或者: from six.moves import urllib [as 別名]
def is_local_uri(uri):
    """Returns true if this is a local file path (/foo or file:/foo)."""
    parsed_scheme = urllib.parse.urlparse(uri).scheme
    return uri != 'databricks' and parsed_scheme in ('', 'file')
開發者ID:mlflow,項目名稱:mlflow,代碼行數:6,代碼來源:uri.py

示例12: is_http_uri

# 需要導入模塊: from six import moves [as 別名]
# 或者: from six.moves import urllib [as 別名]
def is_http_uri(uri):
    """Return True when the URI scheme is http or https."""
    return urllib.parse.urlparse(uri).scheme in ('http', 'https')
開發者ID:mlflow,項目名稱:mlflow,代碼行數:5,代碼來源:uri.py

示例13: is_databricks_uri

# 需要導入模塊: from six import moves [as 別名]
# 或者: from six.moves import urllib [as 別名]
def is_databricks_uri(uri):
    """Databricks URIs look like 'databricks' (default profile) or 'databricks://profile'"""
    if uri == 'databricks':
        return True
    return urllib.parse.urlparse(uri).scheme == 'databricks'
開發者ID:mlflow,項目名稱:mlflow,代碼行數:6,代碼來源:uri.py

示例14: get_db_profile_from_uri

# 需要導入模塊: from six import moves [as 別名]
# 或者: from six.moves import urllib [as 別名]
def get_db_profile_from_uri(uri):
    """
    Get the Databricks profile specified by the tracking URI (if any), otherwise
    returns None.
    """
    parsed_uri = urllib.parse.urlparse(uri)
    if parsed_uri.scheme == "databricks":
        return parsed_uri.netloc
    return None 
開發者ID:mlflow,項目名稱:mlflow,代碼行數:11,代碼來源:uri.py

示例15: extract_and_normalize_path

# 需要導入模塊: from six import moves [as 別名]
# 或者: from six.moves import urllib [as 別名]
def extract_and_normalize_path(uri):
    """Return the URI's path component, normalized and without leading slashes."""
    raw_path = urllib.parse.urlparse(uri).path
    return posixpath.normpath(raw_path).lstrip("/")
開發者ID:mlflow,項目名稱:mlflow,代碼行數:6,代碼來源:uri.py


注:本文中的six.moves.urllib方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。