本文整理汇总了Python中pip.download.PipSession类的典型用法代码示例。如果您正苦于以下问题:Python PipSession类的具体用法?Python PipSession怎么用?Python PipSession使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了PipSession类的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _build_session
def _build_session(self, options, retries=None, timeout=None):
    """Create a configured PipSession from parsed command-line options.

    Explicit ``retries``/``timeout`` arguments take precedence over the
    corresponding values carried in ``options``.
    """
    if options.cache_dir:
        http_cache = normalize_path(os.path.join(options.cache_dir, "http"))
    else:
        http_cache = None
    session = PipSession(
        cache=http_cache,
        retries=options.retries if retries is None else retries,
        insecure_hosts=options.trusted_hosts,
    )
    # Custom CA bundle supplied by the user.
    if options.cert:
        session.verify = options.cert
    # SSL client certificate.
    if options.client_cert:
        session.cert = options.client_cert
    # Timeout: the explicit argument wins over the option value.
    if options.timeout or timeout:
        session.timeout = options.timeout if timeout is None else timeout
    # Route both schemes through the configured proxy.
    if options.proxy:
        proxy = options.proxy
        session.proxies = {"http": proxy, "https": proxy}
    # Interactive auth prompting is disabled by --no-input.
    session.auth.prompting = not options.no_input
    return session
示例2: _build_session
def _build_session(options, retries=None, timeout=None):
    """Build a PipSession from a mapping of options (dict-style access).

    Explicit ``retries``/``timeout`` arguments override the values in
    ``options``.
    """
    cache_dir = options.get('cache_dir')
    session = PipSession(
        cache=(
            normalize_path(os.path.join(cache_dir, 'http'))
            if cache_dir else None
        ),
        retries=options.get('retries') if retries is None else retries,
        insecure_hosts=options.get('trusted_hosts'),
    )
    # Custom CA bundle supplied by the user.
    if options.get('cert'):
        session.verify = options.get('cert')
    # SSL client certificate.
    if options.get('client_cert'):
        session.cert = options.get('client_cert')
    # Timeout: the explicit argument wins over the option value.
    if options.get('timeout') or timeout:
        session.timeout = (
            options.get('timeout') if timeout is None else timeout
        )
    # Route both schemes through the configured proxy.
    proxy = options.get('proxy')
    if proxy:
        session.proxies = {'http': proxy, 'https': proxy}
    # Interactive auth prompting is disabled by a truthy 'no_input'.
    session.auth.prompting = not options.get('no_input')
    return session
示例3: _build_session
def _build_session(self, options):
    """Minimal PipSession factory: retries, TLS, timeout, proxy, auth."""
    session = PipSession(retries=options.retries)
    # Custom CA bundle supplied by the user.
    if options.cert:
        session.verify = options.cert
    # SSL client certificate.
    if options.client_cert:
        session.cert = options.client_cert
    # Request timeout, if configured.
    if options.timeout:
        session.timeout = options.timeout
    # Same proxy for both schemes.
    if options.proxy:
        session.proxies = dict.fromkeys(("http", "https"), options.proxy)
    # Interactive auth prompting is disabled by --no-input.
    session.auth.prompting = not options.no_input
    return session
示例4: _get_content_type
def _get_content_type(url, session=None):
    """Get the Content-Type of the given url, using a HEAD request.

    Returns an empty string for schemes that are not web-fetchable
    (anything other than http/https/ftp/ftps).
    """
    if session is None:
        session = PipSession()
    scheme = urlparse.urlsplit(url)[0]
    if scheme not in ('http', 'https', 'ftp', 'ftps'):
        # Non-web schemes have no Content-Type to report.
        # FIXME: some warning or something? assertion error?
        return ''
    response = session.head(url, allow_redirects=True)
    response.raise_for_status()
    return response.headers.get("Content-Type", "")
示例5: get_versions
def get_versions(package):
    """Return every version of *package* listed on the PyPI simple index.

    Builds a throw-away PipSession and PackageFinder, crawls the
    package's simple-index page(s), and collects the version strings
    found there.  Requires network access.
    """
    host = "https://pypi.python.org/simple/"
    url = urlparse.urljoin(host, package) + '/'
    session = PipSession()
    session.timeout = 15
    # BUG FIX: the attribute was misspelled "prmpting", which silently
    # created a useless attribute instead of enabling auth prompting.
    session.auth.prompting = True
    pf = PackageFinder(
        find_links=[],
        # BUG FIX: index_urls expects a list of URLs; a bare string would
        # be iterated character by character.
        index_urls=[host],
        use_wheel=True,
        allow_external=[],
        allow_unverified=[],
        allow_all_external=False,
        allow_all_prereleases=False,
        process_dependency_links=False,
        session=session,
    )
    location = [Link(url, trusted=True)]
    req = InstallRequirement.from_line(package, None)
    versions = []
    for page in pf._get_pages(location, req):
        versions.extend(
            version for _, _, version in pf._package_versions(page.links, package)
        )
    return versions
示例6: _build_session
def _build_session(self, options):
    """Create a PipSession honoring cache, TLS, timeout and proxy options."""
    session = PipSession(
        cache=normalize_path(os.path.join(options.cache_dir, "http")),
        retries=options.retries,
    )
    # TLS verification: a user-supplied CA bundle wins; otherwise
    # --no-check-certificate disables verification entirely.
    if options.cert:
        session.verify = options.cert
    elif options.no_check_certificate:
        session.verify = False
    # SSL client certificate.
    if options.client_cert:
        session.cert = options.client_cert
    # Request timeout, if configured.
    if options.timeout:
        session.timeout = options.timeout
    # Same proxy for both schemes.
    if options.proxy:
        session.proxies = {scheme: options.proxy for scheme in ("http", "https")}
    # Interactive auth prompting is disabled by --no-input.
    session.auth.prompting = not options.no_input
    return session
示例7: get_page
def get_page(cls, link, req, cache=None, skip_archives=True, session=None):
    """Fetch and return the HTML page behind *link*, or None.

    Returns None for VCS URLs, likely archives, pages whose Content-Type
    is not text/html, and on request failures (which are reported through
    ``cls._handle_fail``).  Successful results are memoized in *cache*
    when one is supplied.
    """
    if session is None:
        session = PipSession()
    url = link.url
    url = url.split('#', 1)[0]
    # BUG FIX: cache defaults to None but was dereferenced here without a
    # guard, unlike every other cache access in this function.
    if cache is not None and cache.too_many_failures(url):
        return None
    # Check for VCS schemes that do not support lookup as web pages.
    from pip.vcs import VcsSupport
    for scheme in VcsSupport.schemes:
        if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
            logger.debug(
                'Cannot look at %(scheme)s URL %(link)s' % locals()
            )
            return None
    # Serve a previously fetched page straight from the cache.
    if cache is not None:
        inst = cache.get_page(url)
        if inst is not None:
            return inst
    try:
        if skip_archives:
            if cache is not None:
                if cache.is_archive(url):
                    return None
            filename = link.filename
            # Cheap filename heuristic first; confirm with a HEAD request
            # before giving up on URLs that merely look like archives.
            for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']:
                if filename.endswith(bad_ext):
                    content_type = cls._get_content_type(
                        url, session=session,
                    )
                    if content_type.lower().startswith('text/html'):
                        break
                    else:
                        logger.debug(
                            'Skipping page %s because of Content-Type: '
                            '%s' % (link, content_type)
                        )
                        if cache is not None:
                            cache.set_is_archive(url)
                        return None
        logger.debug('Getting page %s' % url)
        # Tack index.html onto file:// URLs that point to directories
        (scheme, netloc, path, params, query, fragment) = \
            urlparse.urlparse(url)
        if scheme == 'file' and os.path.isdir(url2pathname(path)):
            # add trailing slash if not present so urljoin doesn't trim
            # final segment
            if not url.endswith('/'):
                url += '/'
            url = urlparse.urljoin(url, 'index.html')
            logger.debug(' file: URL is directory, getting %s' % url)
        resp = session.get(url, headers={"Accept": "text/html"})
        resp.raise_for_status()
        # The check for archives above only works if the url ends with
        # something that looks like an archive. However that is not a
        # requirement of an url. Unless we issue a HEAD request on every
        # url we cannot know ahead of time for sure if something is HTML
        # or not. However we can check after we've downloaded it.
        content_type = resp.headers.get('Content-Type', 'unknown')
        if not content_type.lower().startswith("text/html"):
            logger.debug(
                'Skipping page %s because of Content-Type: %s' %
                (link, content_type)
            )
            if cache is not None:
                cache.set_is_archive(url)
            return None
        inst = cls(resp.text, resp.url, resp.headers, trusted=link.trusted)
    except requests.HTTPError as exc:
        # 404s are common enough to log at a quieter level.
        level = 2 if exc.response.status_code == 404 else 1
        cls._handle_fail(req, link, exc, url, cache=cache, level=level)
    except requests.ConnectionError as exc:
        cls._handle_fail(
            req, link, "connection error: %s" % exc, url,
            cache=cache,
        )
    except requests.Timeout:
        cls._handle_fail(req, link, "timed out", url, cache=cache)
    except SSLError as exc:
        reason = ("There was a problem confirming the ssl certificate: "
                  "%s" % exc)
        cls._handle_fail(
            req, link, reason, url,
            cache=cache,
            level=2,
            meth=logger.notify,
        )
    else:
        if cache is not None:
            cache.add_page([url, resp.url], inst)
        return inst
示例8: get_page
def get_page(cls, link, req, cache=None, skip_archives=True, session=None):
    """Fetch and return the HTML page behind *link*, or None.

    Returns None for VCS URLs, likely archives, non-HTML responses, and
    on request failures (reported through ``cls._handle_fail``).  Results
    are memoized in *cache* when one is supplied.
    """
    if session is None:
        session = PipSession()
    url = link.url
    url = url.split("#", 1)[0]
    # BUG FIX: cache defaults to None but was dereferenced here without a
    # guard, unlike every other cache access in this function.
    if cache is not None and cache.too_many_failures(url):
        return None
    # Check for VCS schemes that do not support lookup as web pages.
    from pip.vcs import VcsSupport
    for scheme in VcsSupport.schemes:
        if url.lower().startswith(scheme) and url[len(scheme)] in "+:":
            logger.debug("Cannot look at %(scheme)s URL %(link)s" % locals())
            return None
    # Serve a previously fetched page straight from the cache.
    if cache is not None:
        inst = cache.get_page(url)
        if inst is not None:
            return inst
    try:
        if skip_archives:
            if cache is not None:
                if cache.is_archive(url):
                    return None
            filename = link.filename
            # Cheap filename heuristic first; confirm with a HEAD request
            # before giving up on URLs that merely look like archives.
            for bad_ext in [".tar", ".tar.gz", ".tar.bz2", ".tgz", ".zip"]:
                if filename.endswith(bad_ext):
                    content_type = cls._get_content_type(url, session=session)
                    if content_type.lower().startswith("text/html"):
                        break
                    else:
                        logger.debug("Skipping page %s because of Content-Type: %s" % (link, content_type))
                        if cache is not None:
                            cache.set_is_archive(url)
                        return None
        logger.debug("Getting page %s" % url)
        # Tack index.html onto file:// URLs that point to directories
        (scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
        if scheme == "file" and os.path.isdir(url2pathname(path)):
            # add trailing slash if not present so urljoin doesn't trim final segment
            if not url.endswith("/"):
                url += "/"
            url = urlparse.urljoin(url, "index.html")
            logger.debug(" file: URL is directory, getting %s" % url)
        resp = session.get(url)
        resp.raise_for_status()
        # The check for archives above only works if the url ends with
        # something that looks like an archive. However that is not a
        # requirement. For instance http://sourceforge.net/projects/docutils/files/docutils/0.8.1/docutils-0.8.1.tar.gz/download
        # redirects to http://superb-dca3.dl.sourceforge.net/project/docutils/docutils/0.8.1/docutils-0.8.1.tar.gz
        # Unless we issue a HEAD request on every url we cannot know
        # ahead of time for sure if something is HTML or not. However we
        # can check after we've downloaded it.
        content_type = resp.headers.get("Content-Type", "unknown")
        if not content_type.lower().startswith("text/html"):
            logger.debug("Skipping page %s because of Content-Type: %s" % (link, content_type))
            if cache is not None:
                cache.set_is_archive(url)
            return None
        inst = cls(resp.text, resp.url, resp.headers, trusted=link.trusted)
    except requests.HTTPError as exc:
        level = 2 if exc.response.status_code == 404 else 1
        cls._handle_fail(req, link, exc, url, cache=cache, level=level)
    except requests.ConnectionError as exc:
        # CONSISTENCY FIX: the sibling implementation handles connection
        # errors via _handle_fail; here they previously propagated and
        # crashed the caller.
        cls._handle_fail(req, link, "connection error: %s" % exc, url, cache=cache)
    except requests.Timeout:
        cls._handle_fail(req, link, "timed out", url, cache=cache)
    except SSLError as exc:
        reason = "There was a problem confirming the ssl certificate: " "%s" % exc
        cls._handle_fail(req, link, reason, url, cache=cache, level=2, meth=logger.notify)
    else:
        if cache is not None:
            cache.add_page([url, resp.url], inst)
        return inst