This page collects typical usage examples of the Python function mozdownload.utils.urljoin. If you are unsure what urljoin does, or how and where to call it, the curated examples below should help.
The sections that follow show 15 code examples of urljoin, drawn from open-source projects and ordered by popularity by default.
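Before the examples, one point of orientation: mozdownload.utils.urljoin is not the standard library's urlparse.urljoin. It accepts an arbitrary number of URL fragments and joins them with single slashes. The helper below is a minimal sketch of that behaviour, written for illustration; it is an assumption about the implementation, not the project's actual source.

def urljoin(*fragments):
    """Join URL fragments with single slashes (illustrative sketch)."""
    # Drop trailing slashes on all fragments except the last one so that
    # doubled separators never appear in the joined URL.
    parts = [fragment.rstrip('/') for fragment in fragments[:-1]]
    parts.append(fragments[-1])
    return '/'.join(parts)

print(urljoin('http://example.com/', 'nightly', 'latest-mozilla-central/'))
# http://example.com/nightly/latest-mozilla-central/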
Example 1: get_latest_build_date
def get_latest_build_date(self):
    """Return date of latest available nightly build."""
    if self.application not in ('fennec',):
        url = urljoin(self.base_url, 'nightly', 'latest-%s/' % self.branch)
    else:
        url = urljoin(self.base_url, 'nightly', 'latest-%s-%s/' %
                      (self.branch, self.platform))

    self.logger.info('Retrieving the build status file from %s' % url)

    parser = self._create_directory_parser(url)
    parser.entries = parser.filter(r'.*%s\.txt' % self.platform_regex)
    if not parser.entries:
        message = 'Status file for %s build cannot be found' % \
            self.platform_regex
        raise errors.NotFoundError(message, url)

    # Read the status file for the platform, retrieve the build id,
    # and convert it to a date
    headers = {'Cache-Control': 'max-age=0'}
    r = self.session.get(url + parser.entries[-1], headers=headers)
    try:
        r.raise_for_status()
        return datetime.strptime(r.text.split('\n')[0], '%Y%m%d%H%M%S')
    finally:
        r.close()
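The status file read above holds a build id on its first line, for example 20170201030204 (an illustrative value, not real data); datetime.strptime with the '%Y%m%d%H%M%S' format converts it into a datetime object:

from datetime import datetime

build_id = '20170201030204'  # illustrative build id
print(datetime.strptime(build_id, '%Y%m%d%H%M%S'))
# 2017-02-01 03:02:04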
Example 2: test_download
def test_download(self):
    """Test download method"""
    filename = 'download_test.txt'

    # standard download
    test_url = urljoin(self.wdir, 'download_test.txt')
    scraper = mozdownload.DirectScraper(url=test_url,
                                        directory=self.temp_dir,
                                        version=None)
    scraper.download()
    self.assertTrue(os.path.isfile(os.path.join(self.temp_dir,
                                                filename)))

    # Compare original and downloaded file via md5 hash
    md5_original = create_md5(os.path.join(mhttpd.HERE, mhttpd.WDIR, filename))
    md5_downloaded = create_md5(os.path.join(self.temp_dir, filename))
    self.assertEqual(md5_original, md5_downloaded)

    # RequestException
    test_url1 = urljoin(self.wdir, 'does_not_exist.html')
    scraper1 = mozdownload.DirectScraper(url=test_url1,
                                         directory=self.temp_dir,
                                         version=None)
    self.assertRaises(requests.exceptions.RequestException,
                      scraper1.download)

    # Covering retry_attempts
    test_url2 = urljoin(self.wdir, 'does_not_exist.html')
    scraper2 = mozdownload.DirectScraper(url=test_url2,
                                         directory=self.temp_dir,
                                         version=None,
                                         retry_attempts=3,
                                         retry_delay=1.0)
    self.assertRaises(requests.exceptions.RequestException,
                      scraper2.download)
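create_md5 is a helper from the project's test suite; only its name and return value are visible here. A plausible sketch, assuming it returns the hex MD5 digest of a file (this is an assumption, not the suite's actual code):

import hashlib

def create_md5(path):
    """Return the hex MD5 digest of a file (illustrative sketch)."""
    # Hash the file in chunks so large downloads do not exhaust memory.
    md5 = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(8192), b''):
            md5.update(chunk)
    return md5.hexdigest()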
Example 3: test_names_with_spaces
def test_names_with_spaces(httpd):
    parser = DirectoryParser(urljoin(httpd.get_url(), 'directoryparser', 'some spaces/'))
    parser.entries.sort()

    # Get the contents of the folder - dirs and files
    folder_path = urljoin(httpd.router.doc_root, 'directoryparser', 'some spaces')
    contents = os.listdir(folder_path)
    contents.sort()
    assert parser.entries == contents
Example 4: test_names_with_spaces
def test_names_with_spaces(self):
    parser = DirectoryParser(urljoin(self.wdir, 'directoryparser', 'some spaces/'))

    # Get the contents of the folder - dirs and files
    folder_path = urljoin(mhttpd.HERE, mhttpd.WDIR, 'directoryparser',
                          'some spaces')
    contents = os.listdir(folder_path)
    contents.sort()
    self.assertEqual(parser.entries, contents)
Example 5: path_regex
def path_regex(self):
    """Return the regex for the path to the build folder."""
    try:
        path = '%s/' % urljoin(self.monthly_build_list_regex,
                               self.builds[self.build_index])
        if self.application in APPLICATIONS_MULTI_LOCALE \
                and self.locale != 'multi':
            path = '%s/' % urljoin(path, self.locale)
        return path
    except Exception:
        folder = urljoin(self.base_url, self.monthly_build_list_regex)
        raise errors.NotFoundError('Specified sub folder cannot be found',
                                   folder)
Example 6: test_implementation_error
def test_implementation_error(httpd, tmpdir, attr):
    """Test that unimplemented scraper attributes raise NotImplementedError."""
    filename = 'download_test.txt'
    test_url = urljoin(httpd.get_url(), filename)
    scraper = DirectScraper(url=test_url, destination=str(tmpdir))
    with pytest.raises(errors.NotImplementedError):
        getattr(scraper, attr)
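The test is parametrised over attr and expects each unimplemented attribute to raise mozdownload's errors.NotImplementedError. The base-class pattern it exercises presumably looks like the sketch below; the property name and the exception's constructor arguments are assumptions for illustration:

class Scraper(object):
    @property
    def binary_regex(self):
        # Subclasses are expected to override this property.
        raise errors.NotImplementedError('binary_regex not implemented')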
Example 7: test_latest_build
def test_latest_build(httpd, tmpdir, args, filename, url):
    """Testing various download scenarios for latest release candidate builds"""
    scraper = ReleaseCandidateScraper(destination=str(tmpdir), base_url=httpd.get_url(), **args)
    expected_filename = os.path.join(str(tmpdir), filename)
    assert scraper.filename == expected_filename
    assert urllib.unquote(scraper.url) == urljoin(httpd.get_url(), url)
Example 8: test_destination_multiple_dir
def test_destination_multiple_dir(httpd, tmpdir):
    """Ensure that multiple non-existing directories are created."""
    filename = 'download_test.txt'
    test_url = urljoin(httpd.get_url(), filename)
    destination = os.path.join(str(tmpdir), 'tmp1', 'tmp2', filename)
    scraper = mozdownload.DirectScraper(url=test_url, destination=destination)
    assert scraper.destination == destination
Example 9: get_build_info
def get_build_info(self):
    """Define additional build information."""
    ReleaseScraper.get_build_info(self)

    # Internally we access builds via index
    url = urljoin(self.base_url, self.candidate_build_list_regex)
    self.logger.info('Retrieving list of candidate builds from %s' % url)

    parser = self._create_directory_parser(url)
    if not parser.entries:
        message = 'Folder for specific candidate builds at %s has not ' \
                  'been found' % url
        raise errors.NotFoundError(message, url)

    self.show_matching_builds(parser.entries)
    self.builds = parser.entries
    self.build_index = len(parser.entries) - 1

    if self.build_number and \
            ('build%s' % self.build_number) in self.builds:
        self.builds = ['build%s' % self.build_number]
        self.build_index = 0
        self.logger.info('Selected build: build%s' % self.build_number)
    else:
        self.logger.info('Selected build: build%d' %
                         (self.build_index + 1))
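The selection logic above defaults build_index to the newest entry and narrows the list to a single entry when a matching build number is given. The same logic in isolation, with hypothetical data:

builds = ['build1', 'build2', 'build3']
build_number = 2
build_index = len(builds) - 1  # default to the newest build (index 2)

if build_number and ('build%s' % build_number) in builds:
    builds = ['build%s' % build_number]
    build_index = 0

print(builds, build_index)  # ['build2'] 0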
Example 10: test_scraper
def test_scraper(httpd, tmpdir, args, filename, url):
    """Testing various download scenarios for TinderboxScraper"""
    scraper = TinderboxScraper(destination=str(tmpdir), base_url=httpd.get_url(), **args)
    expected_filename = os.path.join(str(tmpdir), filename)
    assert scraper.filename == expected_filename
    assert urllib.unquote(scraper.url) == urljoin(httpd.get_url(), url)
Example 11: test_init
def test_init(httpd):
    """Testing the basic functionality of the DirectoryParser Class"""
    # DirectoryParser returns output
    parser = DirectoryParser(httpd.get_url())
    # relies on the presence of other files in the directory
    # Checks if DirectoryParser lists the server entries
    assert parser.entries != [], "parser.entries were not listed"

    # path_regex to mozdownload -t release -p win32 -v latest
    testpath = urljoin(httpd.get_url(), 'directoryparser/')
    parser = DirectoryParser(testpath)
    parser.entries.sort()
    testdir = os.listdir(urljoin(httpd.router.doc_root, 'directoryparser'))
    testdir.sort()
    assert parser.entries == testdir
Example 12: test_init
def test_init(self):
    """Testing the basic functionality of the DirectoryParser Class"""
    # DirectoryParser returns output
    parser = DirectoryParser(self.wdir)
    # relies on the presence of other files in the directory
    # Checks if DirectoryParser lists the server entries
    self.assertNotEqual(parser.entries, [], "parser.entries were not listed")

    # path_regex to mozdownload -t release -p win32 -v latest
    testpath = urljoin(self.wdir, 'directoryparser/')
    parser1 = DirectoryParser(testpath)
    parser1.entries.sort()
    testdir = os.listdir(urljoin(mhttpd.HERE, 'data', 'directoryparser'))
    testdir.sort()
    self.assertEqual(parser1.entries, testdir)
Example 13: test_candidate_scraper
def test_candidate_scraper(self):
    for test in tests_candidate_scraper:
        scraper = mozdownload.ReleaseCandidateScraper(destination=self.temp_dir,
                                                      logger=self.logger,
                                                      **test['args'])
        if test.get('url'):
            self.assertEqual(urllib.unquote(scraper.url),
                             urljoin(BASE_URL, test['url']))
Example 14: test_release_scraper
def test_release_scraper(self):
    for test in tests_release_scraper:
        scraper = mozdownload.ReleaseScraper(destination=self.temp_dir,
                                             log_level='ERROR',
                                             **test['args'])
        if test.get('url'):
            self.assertEqual(urllib.unquote(scraper.url),
                             urljoin(BASE_URL, test['url']))
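tests_release_scraper (like tests_candidate_scraper in the previous example) is a list of dicts that drives the loop: each entry supplies scraper keyword arguments and, optionally, an expected URL suffix. A hypothetical entry, purely to show the shape (the keys 'args' and 'url' come from the snippet; the values are invented):

tests_release_scraper = [
    {'args': {'platform': 'win32', 'version': 'latest'},
     'url': 'firefox/releases/latest/win32/en-US/'},  # illustrative value
]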
Example 15: test_retry_attempts
def test_retry_attempts(httpd, tmpdir):
    test_url = urljoin(httpd.get_url(), 'does_not_exist.html')
    scraper = mozdownload.DirectScraper(url=test_url,
                                        destination=str(tmpdir),
                                        retry_attempts=3,
                                        retry_delay=0.1)
    with pytest.raises(requests.exceptions.RequestException):
        scraper.download()
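retry_attempts and retry_delay control how often a failing request is re-issued before the exception finally escapes to the caller, which is what pytest.raises asserts here. A schematic of such a retry loop (an assumption about the behaviour, not mozdownload's actual source):

import time
import requests

def download_with_retries(url, retry_attempts=3, retry_delay=0.1):
    """Retry a GET until it succeeds or attempts are exhausted (sketch)."""
    # One initial try plus up to `retry_attempts` retries.
    for attempt in range(retry_attempts + 1):
        try:
            response = requests.get(url)
            response.raise_for_status()
            return response.content
        except requests.exceptions.RequestException:
            if attempt == retry_attempts:
                raise  # retries exhausted; let the caller handle it
            time.sleep(retry_delay)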