This article collects typical usage examples of Python's urllib.urlretrieve function. If you are unsure what urlretrieve does, how to call it, or what real-world usage looks like, the curated examples below should help.
The following presents 15 code examples of the urlretrieve function, taken from open-source projects and ordered by popularity.
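Before the project examples, here is a minimal, self-contained sketch of the call that all of the snippets below revolve around. Like the examples, it targets legacy Python 2 (in Python 3 the same function lives under urllib.request.urlretrieve); the URL and output file name are placeholders, not taken from any of the projects.

import urllib

# urlretrieve(url[, filename[, reporthook[, data]]]) downloads `url` to a local
# file and returns a (filename, headers) tuple.
url = "http://example.com/index.html"          # placeholder URL
local_path, headers = urllib.urlretrieve(url, "example.html")
print "Saved to %s" % local_path
urllib.urlcleanup()  # clean up any temporary files urlretrieve may have created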
Example 1: DownloadUpdate
def DownloadUpdate(self, file):
    self.log('Downloading: %s' % file)
    dirfile = os.path.join(self.UpdateTempDir, file)
    dirname, filename = os.path.split(dirfile)
    if not os.path.isdir(dirname):
        try:
            os.makedirs(dirname)
        except:
            self.log('Error creating directory: ' + dirname)
    url = self.SVNPathAddress + urllib.quote(file)
    try:
        if re.findall(".xbt", url):
            self.totalsize = int(re.findall("File length: ([0-9]*)", urllib2.urlopen(url + "?view=log").read())[0])
            urllib.urlretrieve(url.decode("utf-8"), dirfile.decode("utf-8"))
        else:
            urllib.urlretrieve(url.decode("utf-8"), dirfile.decode("utf-8"))
        self.DownloadedFiles.append(urllib.unquote(url))
        return 1
    except:
        try:
            time.sleep(2)
            if re.findall(".xbt", url):
                self.totalsize = int(re.findall("File length: ([0-9]*)", urllib2.urlopen(url + "?view=log").read())[0])
                urllib.urlretrieve(url.decode("utf-8"), dirfile.decode("utf-8"))
            else:
                urllib.urlretrieve(url.decode("utf-8"), dirfile.decode("utf-8"))
            urllib.urlretrieve(url.decode("utf-8"), dirfile.decode("utf-8"))
            self.DownloadedFiles.append(urllib.unquote(url))
            return 1
        except:
            self.log("Download failed: %s" % url)
            self.DownloadFailedFiles.append(urllib.unquote(url))
            return 0
Example 2: getAndUncompress
def getAndUncompress(libraries):
    for libname, url, inNewDirectory in libraries:
        print '_' * 80
        print '--', libname
        parts = url.split('/')
        filename = [p for p in parts if len(getKnowExtensions(p))]
        #if len(filename) == 0:
        #    print '-'*40
        #    print 'No filename with a recognized extension in "'+libname+'" url="'+url+'"'
        #    print '-'*40
        #    continue
        filename = filename[0]
        print url, ' -> ', filename
        ext = getKnowExtensions(filename)[0]
        current_file = filename
        try:
            if os.path.isfile(filename):  # skip the download if the file is already there
                print 'Already downloaded: ', filename
            else:
                urllib.urlretrieve(url, filename, dlProgress)
            dirname = filename[:-len(ext) - 1]
            #if os.path.isdir(libname) or os.path.islink(libname):  # if not already uncompressed
            if os.path.isdir(filename[:-len(ext) - 1]):
                print 'Already uncompressed: ', dirname
            else:
                uncompress(filename, ext, inNewDirectory, libname)
        except Exception, e:
            print 'uncompress error (', str(e), ')'
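The dlProgress callback passed as the third argument above is not part of the snippet. urlretrieve calls any such reporthook with (block_count, block_size, total_size), so a minimal progress hook could look like the sketch below; the name and output format are illustrative, not the project's actual implementation.

import sys

def dlProgress(count, block_size, total_size):
    # reporthook for urllib.urlretrieve: called once when the connection is
    # established and then once per block; total_size may be -1 if unknown.
    if total_size > 0:
        percent = min(100, int(count * block_size * 100 / total_size))
        sys.stdout.write("\rDownload progress: %d%%" % percent)
        sys.stdout.flush()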
Example 3: cacheSong
def cacheSong(self, songNumber):
    info = self.songinfo[songNumber]
    if not info["caching"]:
        print "Caching song %s" % info['title']
        info["caching"] = True
        urllib.urlretrieve(str(info['url']), os.path.join(CachePath, "%s.mp3" % info['title']))
        info["localpath"] = os.path.join(CachePath, "%s.mp3" % info['title'])
Example 4: downloadFile
def downloadFile(url, dest):
    try:
        urllib.urlretrieve(url, dest)
    except Exception, e:
        dialog = xbmcgui.Dialog()
        main.ErrorReport(e)
        dialog.ok("Mash Up", "Report the error below at " + main.supportsite, str(e), "We will try our best to help you")
Example 5: test_CalibrationDifference1
def test_CalibrationDifference1(self):
    """Ideally you should have several levels of tests. At the lowest level,
    tests should exercise the functionality of the logic with different inputs
    (both valid and invalid). At higher levels your tests should emulate the
    way the user would interact with your code and confirm that it still works
    the way you intended.
    One of the most important features of the tests is that they should alert other
    developers when their changes will have an impact on the behavior of your
    module. For example, if a developer removes a feature that you depend on,
    your test should break so they know that the feature is needed.
    """
    self.delayDisplay("Starting the test")
    #
    # first, get some data
    #
    import urllib
    downloads = (
        ('http://slicer.kitware.com/midas3/download?items=5767', 'FA.nrrd', slicer.util.loadVolume),
    )
    for url, name, loader in downloads:
        filePath = slicer.app.temporaryPath + '/' + name
        if not os.path.exists(filePath) or os.stat(filePath).st_size == 0:
            print('Requesting download %s from %s...\n' % (name, url))
            urllib.urlretrieve(url, filePath)
        if loader:
            print('Loading %s...\n' % (name,))
            loader(filePath)
    self.delayDisplay('Finished with download and loading\n')
    volumeNode = slicer.util.getNode(pattern="FA")
    logic = CalibrationDifferenceLogic()
    self.assertTrue(logic.hasImageData(volumeNode))
    self.delayDisplay('Test passed!')
Example 6: showInfo
def showInfo(self):
    if self.check == "true" and self.menulist:
        m_title = self["menulist"].getCurrent()[0][0]
        m_url = self["menulist"].getCurrent()[0][1]
        if m_url:
            #m_url = re.findall('(.*?)\.', m_url)
            #extra_imdb_convert = "._V1_SX320.jpg"
            #m_url = "http://ia.media-imdb.com/images/%s%s" % (m_url[0], extra_imdb_convert)
            print "EMC iMDB: Download Poster - %s" % m_url
            urllib._urlopener = AppURLopener()
            urllib.urlretrieve(m_url, self.path)
            urllib.urlcleanup()
            if os.path.exists(self.path):
                self.poster_resize(self.path, m_title)
                #ptr = LoadPixmap(self.path)
                #if ptr is None:
                #    ptr = LoadPixmap("/usr/lib/enigma2/python/Plugins/Extensions/EnhancedMovieCenter/img/no_poster.png")
                #    print "EMC iMDB: Load default NO Poster."
                #if ptr is not None:
                #    self["poster"].instance.setPixmap(ptr)
                #    print "EMC iMDB: Load Poster - %s" % m_title
            else:
                print "EMC iMDB: No url found for - %s" % m_title
        else:
            print "EMC iMDB: No url found for - %s" % m_title
Example 7: genericRetrieve
def genericRetrieve(self, url, root, name):
    '''Fetch the gzipped tarfile indicated by url and expand it into root
    - All the logic for removing old versions, updating etc. must move'''
    # get the tarball file name from the URL
    filename = os.path.basename(urlparse.urlparse(url)[2])
    localFile = os.path.join(root, '_d_' + filename)
    ext = os.path.splitext(localFile)[1]
    if ext not in ['.bz2', '.tbz', '.gz', '.tgz', '.zip', '.ZIP']:
        raise RuntimeError('Unknown compression type in URL: ' + url)
    self.logPrint('Downloading ' + url + ' to ' + localFile)
    if os.path.exists(localFile):
        os.unlink(localFile)
    try:
        urllib.urlretrieve(url, localFile)
    except Exception, e:
        failureMessage = '''\
Unable to download package %s from: %s
* If URL specified manually - perhaps there is a typo?
* If your network is disconnected - please reconnect and rerun ./configure
* Or perhaps you have a firewall blocking the download
* Alternatively, you can download the above URL manually, to /yourselectedlocation/%s
  and use the configure option:
  --download-%s=/yourselectedlocation/%s
''' % (name, url, filename, name.lower(), filename)
        raise RuntimeError(failureMessage)
Example 8: imagetitan_save_image
def imagetitan_save_image(self, imggrp, imgmiddle, imgname):
    # generate just the filename of the image to be locally saved
    savefile = join(self.basedir, imgname)
    # generate the url of the image
    download_url = 'http://' + imggrp + '.imagetitan.com/' + imggrp + imgmiddle + imgname
    # finally save the image in the desired directory
    urlretrieve(download_url, savefile)
Example 9: get_amalgamation
def get_amalgamation():
    """Download the SQLite amalgamation if it isn't there already."""
    if os.path.exists(AMALGAMATION_ROOT):
        return
    os.mkdir(AMALGAMATION_ROOT)
    print "Downloading amalgamation."
    # XXX upload the amalgamation file to a somewhat more
    # official place
    amalgamation_url = ("http://futeisha.org/sqlcipher/"
                        "amalgamation-sqlcipher-2.1.0.zip")
    # and download it
    print 'amalgamation url: %s' % (amalgamation_url,)
    urllib.urlretrieve(amalgamation_url, "tmp.zip")
    zf = zipfile.ZipFile("tmp.zip")
    files = ["sqlite3.c", "sqlite3.h"]
    directory = zf.namelist()[0]
    for fn in files:
        print "Extracting", fn
        outf = open(AMALGAMATION_ROOT + os.sep + fn, "wb")
        outf.write(zf.read(directory + fn))
        outf.close()
    zf.close()
    os.unlink("tmp.zip")
Example 10: download_images
def download_images(img_urls, dest_dir):
    """Given the urls already in the correct order, downloads
    each image into the given directory.
    Gives the images local filenames img0, img1, and so on.
    Creates an index.html in the directory
    with an img tag to show each local image file.
    Creates the directory if necessary.
    """
    # Verify the directory exists. If not, create it.
    print 'Verifying Directory: ' + dest_dir
    if not os.path.exists(dest_dir):
        print 'Creating Directory: ' + dest_dir
        os.makedirs(dest_dir)
    # Download image files with an incremented name and create the IMG html lines
    f = open(dest_dir + '/index.html', 'a')
    img_html = []
    img_count = len(img_urls)
    i = 0
    for img in img_urls:
        i = i + 1
        print 'Retrieving File ' + str(i) + ' of ' + str(img_count) + '.'
        urllib.urlretrieve(img, dest_dir + '/img' + str(i))
        img_html.append('<img src="img' + str(i) + '">')
    # Create index.html file
    print 'Writing index.html'
    f.write('<verbatim>')
    f.write('<html>')
    f.write('<body>')
    f.write(''.join(img_html))
    f.write('</body>')
    f.write('</html>')
    f.close()
Example 11: update
def update(params):
    # Download the ZIP
    xbmc.output("[updater.py] update")
    xbmc.output("[updater.py] cwd=" + os.getcwd())
    remotefilename = REMOTE_FILE + params.get("version") + ".zip"
    localfilename = LOCAL_FILE + params.get("version") + ".zip"
    xbmc.output("[updater.py] remotefilename=%s" % remotefilename)
    xbmc.output("[updater.py] localfilename=%s" % localfilename)
    xbmc.output("[updater.py] downloading file...")
    inicio = time.clock()
    urllib.urlretrieve(remotefilename, localfilename)
    fin = time.clock()
    xbmc.output("[updater.py] Downloaded in %d seconds " % (fin - inicio + 1))
    # Unzip it
    xbmc.output("[updater.py] unzipping file...")
    import ziptools
    unzipper = ziptools.ziptools()
    destpathname = DESTINATION_FOLDER
    xbmc.output("[updater.py] destpathname=%s" % destpathname)
    unzipper.extract(localfilename, destpathname)
    # Delete the downloaded zip
    xbmc.output("[updater.py] deleting file...")
    os.remove(localfilename)
Example 12: download_images
def download_images(img_urls, dest_dir):
    """Given the urls already in the correct order, downloads
    each image into the given directory.
    Gives the images local filenames img0, img1, and so on.
    Creates an index.html in the directory
    with an img tag to show each local image file.
    Creates the directory if necessary.
    """
    # prepare output directory - if it is not there already, create it
    if not os.path.exists(dest_dir):
        os.mkdir(dest_dir)
    i = 0
    imgstr = ''
    for slice in img_urls:
        # use a simple naming scheme for new image files and make sure they end up in the given directory
        outfile = 'img' + str(i) + '.png'
        imgstr += '<img src="' + outfile + '">'
        outfile = os.path.join(dest_dir, outfile)
        print "Fetching " + outfile + ' ' + os.path.basename(slice)
        urllib.urlretrieve('http://code.google.com/' + slice, outfile)
        i += 1
    print "All image strips downloaded"
    f = open(os.path.join(dest_dir, 'index.html'), 'w')
    f.write('<html><body>')
    f.write(imgstr)
    f.write('</body></html>')
    f.close()
Example 13: searchcode
def searchcode(url, regex):
    code = urlopen(url).read()
    result = ""
    try:
        regexresults = re.search(regex, str(code))
        result = str(regexresults.group(0))
        if result is not None:
            if args.url is True:
                logger.info(" " + str(url))
            if args.verbose is True:
                logger.info(" [+] Found the following results")
                logger.info(" " + str(result))
            if args.write_file:
                if result == "":
                    pass
                else:
                    f = open(args.write_file, "a")
                    f.write(str(result + "\n"))
                    f.close()
            if args.directory:
                filename = args.directory + "/" + url.replace("/", "-")
                if not os.path.exists(args.directory):
                    os.makedirs(args.directory)
                logger.info(" [+] Downloading " + filename)
                urlretrieve(url, filename)
                fp = open(filename, "wb")
                fp.write(code)
                fp.close()
        else:
            pass
    except:
        pass
Example 14: reload_indexp
def reload_indexp(working_dir, cycle):
    def execute_file(filename):
        contents = " ".join([line for line in open(filename, 'r') if line[0:2] != '--'])
        statements = contents.split(';')[:-1]  # split on semi-colon; the last element will be trailing whitespace
        for statement in statements:
            log.info("Executing %s" % statement)
            c.execute(statement)
    try:
        working_dir = os.path.expanduser(working_dir)
        if not os.path.isdir(working_dir):
            os.makedirs(working_dir)
        log = set_up_logger('indexp_importer', working_dir, 'IndExp Importer Fail')
        local_file_path = os.path.join(working_dir, LOCAL_FILE)
        log.info("downloading %s to %s..." % (DOWNLOAD_URL.format(cycle), local_file_path))
        urllib.urlretrieve(DOWNLOAD_URL.format(cycle), local_file_path)
        log.info("uploading to table %s..." % TABLE_NAME)
        c = connection.cursor()
        c.execute("insert into fec_indexp_out_of_date_cycles (cycle) values ({})".format(cycle))
        c.execute("DELETE FROM %s" % TABLE_NAME)
        c.copy_expert("COPY %s (candidate_id, candidate_name, spender_id, spender_name, election_type, candidate_state, candidate_district, candidate_office, candidate_party, amount, date, aggregate_amount, support_oppose, purpose, payee, filing_number, amendment, transaction_id, image_number, received_date, prev_file_num) FROM STDIN CSV HEADER" % TABLE_NAME, open(local_file_path, 'r'))
        c.execute("update {} set cycle = {}".format(TABLE_NAME, cycle))
        execute_file(SQL_POSTLOAD_FILE)
        c.execute("delete from fec_indexp_out_of_date_cycles")
        log.info("Import Succeeded.")
    except Exception as e:
        log.error(e)
        raise
Example 15: download_file_no_requests
def download_file_no_requests(url, path, overwrite=False):
    """
    This function ...
    :param url:
    :param path:
    :param overwrite:
    :return:
    """
    # Get the name of the file
    filename = fs.name(url)
    # Determine the local path to the file
    filepath = fs.join(path, filename) if fs.is_directory(path) else path
    # Check filepath
    if fs.is_file(filepath):
        if overwrite: fs.remove_file(filepath)
        else: raise IOError("File is already present: " + filepath)
    # Debugging
    log.debug("Downloading '" + filename + "' to '" + path + "' ...")
    log.debug("URL: " + url)
    # Download
    urllib.urlretrieve(url, filepath)
    # Return the file path
    return filepath