本文整理汇总了Python中pex.link.Link类的典型用法代码示例。如果您正苦于以下问题:Python Link类的具体用法?Python Link怎么用?Python Link使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Link类的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_requests_context
def test_requests_context():
    """Exercise RequestsContext.read/fetch against remote and local links."""
    ctx = RequestsContext(verify=False)

    # Remote read returns the raw payload bytes.
    with make_url(BLOB, make_md5(BLOB)) as url:
        assert ctx.read(Link.wrap(url)) == BLOB

    # Remote fetch materializes the payload on disk.
    with make_url(BLOB, make_md5(BLOB)) as url:
        fetched_path = ctx.fetch(Link.wrap(url))
        with open(fetched_path, 'rb') as handle:
            assert handle.read() == BLOB

    # Local (filesystem) links go through the same read API.
    with named_temporary_file() as tmp:
        tmp.write(b'goop')
        tmp.flush()
        assert ctx.read(Link.wrap(tmp.name)) == b'goop'
示例2: test_urllib_context_utf8_encoding
def test_urllib_context_utf8_encoding():
    """content() on a remote-looking link decodes bytes with DEFAULT_ENCODING."""
    payload = b'this is a decoded utf8 string'

    with named_temporary_file() as tmp:
        tmp.write(payload)
        tmp.flush()
        file_link = Link.wrap(tmp.name)

        # Redirect every open() to the local file so UrllibContext treats an
        # http URL as remote without ever touching the network.
        class MockUrllibContext(UrllibContext):
            def open(self, link):
                return super(MockUrllibContext, self).open(file_link)

        expected = payload.decode(UrllibContext.DEFAULT_ENCODING)
        assert MockUrllibContext().content(Link.wrap('http://www.google.com')) == expected
示例3: crawl
def crawl(self, link_or_links, follow_links=False):
    """Crawl the given link(s), memoizing results.

    :param link_or_links: A single link/url or an iterable of them.
    :param follow_links: Whether to recurse into what the crawl discovers.
    :returns: The crawl result, served from Crawler._CRAWL_CACHE when available.
    """
    wrapped_links = list(Link.wrap_iterable(link_or_links))
    key = self._make_cache_key(wrapped_links, follow_links)

    # Crawling is memoized in a global Memoizer (Crawler._CRAWL_CACHE);
    # return early on a hit, otherwise crawl and populate the cache.
    cached = self._CRAWL_CACHE.get(key)
    if cached is not None:
        return cached

    fresh = self._crawl(wrapped_links, follow_links)
    self._CRAWL_CACHE.store(key, fresh)
    return fresh
示例4: test_requests_context_retries_connect_timeout
def test_requests_context_retries_connect_timeout():
    """A single connect timeout is retried and the read still succeeds."""
    pool_cls = requests.packages.urllib3.connectionpool.HTTPConnectionPool
    with mock.patch.object(pool_cls, '_make_request') as make_request:
        url, make_request.side_effect = timeout_side_effect()
        payload = RequestsContext(verify=False).read(Link.wrap(url))
        assert payload == BLOB
示例5: test_requests_context_retries_connect_timeout_retries_exhausted
def test_requests_context_retries_connect_timeout_retries_exhausted():
    """More timeouts than max_retries allows surfaces as Context.Error."""
    pool_cls = requests.packages.urllib3.connectionpool.HTTPConnectionPool
    with mock.patch.object(pool_cls, '_make_request') as make_request:
        # Three timeouts against a budget of two retries must fail.
        url, make_request.side_effect = timeout_side_effect(num_timeouts=3)
        ctx = RequestsContext(verify=False, max_retries=2)
        with pytest.raises(Context.Error):
            ctx.read(Link.wrap(url))
示例6: test_requests_context_retries_connect_timeout_retries_exhausted
def test_requests_context_retries_connect_timeout_retries_exhausted():
    """The retry budget can also be exhausted when set via PEX_HTTP_RETRIES."""
    pool_cls = requests.packages.urllib3.connectionpool.HTTPConnectionPool
    with mock.patch.object(pool_cls, '_make_request') as make_request:
        # Three timeouts against an env-configured budget of two retries.
        url, make_request.side_effect = timeout_side_effect(num_timeouts=3)
        ctx = RequestsContext(
            verify=False,
            env=Variables(environ={'PEX_HTTP_RETRIES': '2'}),
        )
        with pytest.raises(Context.Error):
            ctx.read(Link.wrap(url))
示例7: test_crawler_local
def test_crawler_local():
    """Crawler separates files from sub-dirs, and recurses when follow_links is set."""
    filenames = ('a.txt', 'b.txt', 'c.txt')
    with temporary_dir() as root:
        # Lay out root/{a,b,c}.txt plus root/dir{1,2}/{a,b,c}.txt (all empty).
        for name in filenames:
            open(os.path.join(root, name), 'w').close()
        for index in (1, 2):
            subdir = os.path.join(root, 'dir%d' % index)
            os.mkdir(subdir)
            for name in filenames:
                open(os.path.join(subdir, name), 'w').close()

        # Non-recursive crawl splits into (file links, directory rels).
        links, rels = Crawler.crawl_local(Link.wrap(root))
        assert set(links) == set(Link.wrap(os.path.join(root, name)) for name in filenames)
        assert set(rels) == set(
            Link.wrap(os.path.join(root, 'dir%d' % index)) for index in (1, 2))

        # Recursive crawl must find every file, regardless of caching pass
        # or thread count.
        expected = set()
        for prefix in ('', 'dir1', 'dir2'):
            for name in filenames:
                expected.add(Link.wrap(os.path.join(root, prefix, name)))
        for _caching in (False, True):
            for thread_count in (1, 2, 3):
                crawled = Crawler(threads=thread_count).crawl([root], follow_links=True)
                assert set(crawled) == expected
示例8: test_link_wrapping
def test_link_wrapping():
    """Link.wrap/wrap_iterable accept strings and Links, rejecting other types."""
    https_url = "https://www.google.com"

    assert Link.wrap(https_url).url == https_url
    # Wrapping an existing Link is idempotent.
    assert Link.wrap(Link.wrap(https_url)).url == https_url

    # Non-string/non-Link inputs are rejected by both entry points.
    with pytest.raises(ValueError):
        Link.wrap(1234)
    with pytest.raises(ValueError):
        Link.wrap_iterable(1234)

    # A bare string wraps into a one-element collection.
    singleton = Link.wrap_iterable(https_url)
    assert len(singleton) == 1
    assert singleton[0].url == https_url

    # Mixed string/Link iterables are normalized into Links.
    mixed = Link.wrap_iterable([https_url, Link("http://www.google.com")])
    assert set(mixed) == set([Link("http://www.google.com"), Link(https_url)])
示例9: test_link_wrapping
def test_link_wrapping():
    """wrap() normalizes strings and Links; invalid inputs raise ValueError."""
    secure = 'https://www.google.com'
    insecure = 'http://www.google.com'

    # Both a raw string and an already-wrapped Link yield the same url.
    for candidate in (secure, Link.wrap(secure)):
        assert Link.wrap(candidate).url == secure

    # Integers are not wrappable, alone or as an "iterable".
    for wrapper in (Link.wrap, Link.wrap_iterable):
        with pytest.raises(ValueError):
            wrapper(1234)

    # A lone string becomes a single-Link collection.
    wrapped = Link.wrap_iterable(secure)
    assert len(wrapped) == 1
    assert wrapped[0].url == secure

    # Heterogeneous inputs all come back as Links.
    wrapped = Link.wrap_iterable([secure, Link(insecure)])
    assert set(wrapped) == set([Link(insecure), Link(secure)])
示例10: from_href
def from_href(cls, href, **kw):
    """Convert from a url to Package.

    :param href: The url to parse
    :type href: string
    :returns: A Package object if a valid concrete implementation exists, otherwise None.
    """
    # Serve repeat lookups from the class-level href cache.
    cached = cls._HREF_TO_PACKAGE_CACHE.get(href)
    if cached is not None:
        return cached

    wrapped = Link.wrap(href)
    result = None
    # First registered package type that accepts the url wins.
    for candidate_type in cls._REGISTRY:
        try:
            result = candidate_type(wrapped.url, **kw)
        except candidate_type.InvalidPackage:
            continue
        break

    # Only successful parses are cached; unparseable hrefs return None uncached.
    if result is not None:
        cls._HREF_TO_PACKAGE_CACHE.store(href, result)
    return result
示例11: test_stream_filelike_without_md5
def test_stream_filelike_without_md5():
    """Without a checksum, StreamFilelike streams the payload through as-is."""
    with make_url(BLOB) as url:
        response = requests.get(url)
        stream = StreamFilelike(response, Link.wrap(url))
        assert stream.read() == BLOB
示例12: test_stream_filelike_with_incorrect_md5
def test_stream_filelike_with_incorrect_md5():
    """A checksum mismatch must be reported as Context.Error on read."""
    bogus_md5 = 'f' * 32  # Well-formed digest that cannot match BLOB.
    with make_url(BLOB, bogus_md5) as url:
        response = requests.get(url)
        stream = StreamFilelike(response, Link.wrap(url))
        with pytest.raises(Context.Error):
            stream.read()
示例13: test_link_join
def test_link_join():
    """join() resolves absolute, fragment, and relative refs against the base url."""
    base = Link('https://www.google.com/bar/')
    cases = [
        ('/foo', 'https://www.google.com/foo'),        # absolute path replaces
        ('#foo', 'https://www.google.com/bar/#foo'),   # fragment appends
        ('foo', 'https://www.google.com/bar/foo'),     # relative resolves in dir
    ]
    for ref, expected in cases:
        assert base.join(ref).url == expected
示例14: test_link_join
def test_link_join():
    """Joining behaves like relative-reference resolution against the base."""
    base_link = Link("https://www.google.com/bar/")

    # An absolute path discards the base path entirely.
    joined = base_link.join("/foo")
    assert joined.url == "https://www.google.com/foo"

    # A fragment-only reference stays on the current document.
    joined = base_link.join("#foo")
    assert joined.url == "https://www.google.com/bar/#foo"

    # A relative path resolves within the trailing directory.
    joined = base_link.join("foo")
    assert joined.url == "https://www.google.com/bar/foo"