本文整理汇总了Python中wget.download函数的典型用法代码示例。如果您正苦于以下问题:Python download函数的具体用法?Python download怎么用?Python download使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了download函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __make_icon_osx
def __make_icon_osx():
    """Create a desktop launcher script and fetch the LISA icon on macOS.

    Writes an executable ``~/Desktop/lisa`` shell script (if missing) that
    puts common conda install locations on PATH and starts ``lisa``, then
    best-effort downloads the application icon into ``~/lisa_data/.lisa/``.
    """
    lisa_shortcut = op.expanduser("~/Desktop/lisa")
    if not os.path.exists(lisa_shortcut):
        with open(lisa_shortcut, 'w') as outfile:
            outfile.write(
                "\
#!/bin/bash\n\
export PATH=$HOME/miniconda2/bin:$HOME/anaconda2/bin:$HOME/miniconda/bin:$HOME/anaconda/bin:$PATH\n\
lisa"
            )
        # rwx for user, wx+x for group, r-x for others.
        # Fix: the original OR-ed stat.S_IXOTH twice and never set S_IROTH,
        # so "others" could execute the script but not read it.
        os.chmod(lisa_shortcut,
                 stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH |
                 stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH |
                 stat.S_IWUSR | stat.S_IWGRP
                 )
    import wget
    lisa_icon_path = op.expanduser("~/lisa_data/.lisa/LISA256.icns")
    if not os.path.exists(lisa_icon_path):
        try:
            wget.download(
                "https://raw.githubusercontent.com/mjirik/lisa/master/applications/LISA256.icns",
                out=lisa_icon_path
            )
        except Exception:
            # Icon is cosmetic; log and continue rather than abort setup.
            # (was a bare ``except:`` which also swallowed KeyboardInterrupt)
            logger.warning('logo download failed')
示例2: download_file
def download_file(filename, destination):
    """Fetch *filename* from its remote URL into *destination* via `wget`,
    creating the destination directory first when necessary."""
    target_dir = os.path.split(destination)[0]
    if not os.path.isdir(target_dir):
        os.makedirs(target_dir)
    wget.download(get_remote_url(filename), out=destination)
示例3: updateFiles
def updateFiles(self):
    """Refresh ``self.data.params.files`` with the two GFS wind-grid files
    bracketing the launch time, downloading any file that is missing.

    NOTE(review): Python 2 code (print statements, integer division
    semantics on ``/``) -- the hour/offset math relies on floor division.
    """
    print "Update Files"
    # Clean out file array
    self.data.params.files = []
    # Always assume that the most up to date runtime is not yet available
    runtime = ((self.current_time.hour-6) / 6) * 6 # Get the Model Runtime
    # Clamp to the 00z run when the subtraction went negative (early hours).
    if runtime < 0:
        runtime = 0
    launch_time_offset = self.launch_time - self.current_time
    # For now, if the prediction take place in the past... don't
    if launch_time_offset < timedelta(0):
        launch_time_offset = timedelta(0)
    # Round the offset down to the nearest 3-hour forecast step.
    # NOTE(review): ``.seconds`` ignores the ``.days`` component of the
    # timedelta, so launches more than 24h out collapse -- confirm intended.
    prediction_offset = (launch_time_offset.seconds / 3600 / 3) * 3
    ### NOTE THIS ISN'T DONE!
    # First file: forecast at prediction_offset hours.
    self.data.params.files.append("./wind/49-43-290-294-%04d%02d%02d%02d-gfs.t%02dz.mastergrb2f%02d" % (self.current_time.year, self.current_time.month, self.current_time.day, prediction_offset, runtime, prediction_offset))
    if not os.path.isfile("./wind/49-43-290-294-%04d%02d%02d%02d-gfs.t%02dz.mastergrb2f%02d" % (self.current_time.year, self.current_time.month, self.current_time.day, prediction_offset, runtime, prediction_offset)):
        # NOTE(review): this URL uses launch_time for the directory date
        # while the second download below uses current_time -- verify which
        # is correct; they differ when the launch crosses midnight.
        download_url = "http://nomads.ncep.noaa.gov/cgi-bin/filter_gfs_hd.pl?file=gfs.t%02dz.mastergrb2f%02d&leftlon=290&rightlon=294&toplat=49&bottomlat=43&dir=%%2Fgfs.%04d%02d%02d%02d%%2Fmaster" % (runtime, prediction_offset, self.launch_time.year, self.launch_time.month, self.launch_time.day, runtime)
        print download_url
        print (runtime, prediction_offset, self.current_time.year, self.current_time.month, self.current_time.day, runtime)
        # wget saves into the CWD; move the result into ./wind with the
        # canonical name used above.
        file = wget.download(download_url)
        shutil.move(file, './wind/49-43-290-294-%04d%02d%02d%02d-%s' % (self.current_time.year, self.current_time.month, self.current_time.day, prediction_offset, file))
    # Second file: the next 3-hour forecast step, for interpolation.
    self.data.params.files.append("./wind/49-43-290-294-%04d%02d%02d%02d-gfs.t%02dz.mastergrb2f%02d" % (self.current_time.year, self.current_time.month, self.current_time.day, prediction_offset+3, runtime, prediction_offset+3))
    if not os.path.isfile("./wind/49-43-290-294-%04d%02d%02d%02d-gfs.t%02dz.mastergrb2f%02d" % (self.current_time.year, self.current_time.month, self.current_time.day, prediction_offset+3, runtime, prediction_offset+3)):
        download_url = "http://nomads.ncep.noaa.gov/cgi-bin/filter_gfs_hd.pl?file=gfs.t%02dz.mastergrb2f%02d&leftlon=290&rightlon=294&toplat=49&bottomlat=43&dir=%%2Fgfs.%04d%02d%02d%02d%%2Fmaster" % (runtime, prediction_offset+3, self.current_time.year, self.current_time.month, self.current_time.day, runtime)
        file = wget.download(download_url)
        shutil.move(file, './wind/49-43-290-294-%04d%02d%02d%02d-%s' % (self.current_time.year, self.current_time.month, self.current_time.day, prediction_offset+3, file))
示例4: main
def main(argv=None):
    """Build the simple-wiki serialized corpus, then derive TF-IDF and LSI
    models plus a similarity index from it.

    All file locations come from module-level constants (WIKIFILE, MMFILE,
    DICTFILE, ...).
    """
    if argv is None:
        argv = sys.argv
    print('Creating simple wiki serialized corpus')
    # Download the raw dump only when it is not already on disk.
    if not os.path.isfile(WIKIFILE):
        wget.download(WIKIURL)
    wiki = WikiCorpus(WIKIFILE, lemmatize=False)
    # Map running article index -> (article URL, title).
    url_template = 'https://simple.wikipedia.org/wiki/?curid={}'
    article_dict = {
        idx: (url_template.format(text[0]), text[1])
        for idx, text in enumerate(wiki.get_texts(meta=True))
    }
    with open(ARTICLEDICT, 'w') as f:
        json.dump(article_dict, f)
    wiki.dictionary.filter_extremes(no_below=20, no_above=0.1,
                                    keep_n=DEFAULT_DICT_SIZE)
    MmCorpus.serialize(MMFILE, wiki, progress_cnt=10000, )
    wiki.dictionary.save_as_text(DICTFILE)
    print('Simple wiki serialized corpus created')
    # Now run LSI on the TF-IDF-transformed corpus.
    dictionary = Dictionary.load_from_text(DICTFILE)
    bow_corpus = MmCorpus(MMFILE)
    tfidf = TfidfModel(bow_corpus, id2word=dictionary, normalize=True)
    tfidf.save(TDIFMODEL)
    MmCorpus.serialize(TDIFFILE, tfidf[bow_corpus], progress_cnt=10000)
    tfidf_corpus = MmCorpus(TDIFFILE)
    lsi = LsiModel(tfidf_corpus, id2word=dictionary, num_topics=300)
    index = similarities.MatrixSimilarity(lsi[tfidf_corpus])
    index.save(SIMMATRIX)
    lsi.save(LSIMODEL)
    print("LSI model and index created")
示例5: download_gif
def download_gif(self, term, slide_num):
    """Return the path of a GIF for *term*, preferring the local cache.

    With more than 3 cached GIF ids for *term*, a random cached file is
    reused.  Otherwise a new GIF is fetched via the Giphy ``translate``
    API, recorded in the per-term cache index (persisted to
    GIFs/hashes.json), and its local path is returned.  Returns None when
    anything in the download path fails.
    """
    # If we have more than 3 local gifs, use one of those
    if term in self.gifs and len(self.gifs[term]) > 3:
        return os.path.join("GIFs", "%s.gif" % random.choice(self.gifs[term]))
    try:
        # Download the gif
        #img = translate(term, app_key=self.GIPHY_API_KEY)
        img = translate(term)
        image_path = os.path.join(self.resources_dir, "%d.gif" % slide_num)
        wget.download(img.media_url, image_path)
        # Record the gif id in the per-term cache index and keep a copy
        # under its id so it can be reused later.
        cached_ids = self.gifs.setdefault(term, [])
        if img.id not in cached_ids:
            cached_ids.append(img.id)
            shutil.copy(image_path, os.path.join("GIFs", "%s.gif" % img.id))
        with open(os.path.join("GIFs", "hashes.json"), "w") as f:
            json.dump(self.gifs, f, indent=2)
        return image_path
    except Exception:
        # Best-effort: any failure (network, API, disk) yields no gif.
        # (was a bare ``except:`` which also swallowed KeyboardInterrupt)
        return None
示例6: _parse_page_urls_and_make_url_list
def _parse_page_urls_and_make_url_list(url_list, credentials, downloaddir, file_extns_of_intrest):
    """Download each index page in *url_list*, parse its anchor tags with
    BeautifulSoup, and queue files with interesting extensions for download.

    When *credentials* is given it is embedded into the page URL.  A failure
    on one page is logged and the remaining pages are still processed.
    """
    for url in url_list:
        # Embed user:password into the URL when credentials were supplied.
        if credentials is not None:
            page_url = _convert_url_to_url_with_password(url, credentials)
        else:
            page_url = url
        logger.info("downloading " + page_url)
        try:
            # remove any previously existing temp file, this is needed because if a file exists then
            # wget does some name mangling to create a file with a different name and then that would
            # need to be passed to BS4 and then ultimately that file would also be deleted, so just delete
            # before hand.
            if os.path.exists(TEMP_DOWNLOAD_FILE):
                os.remove(TEMP_DOWNLOAD_FILE)
            wget.download(page_url, TEMP_DOWNLOAD_FILE, bar=_download_progress_bar)
            soup = BeautifulSoup(open(TEMP_DOWNLOAD_FILE))
            links = soup.findAll(ANCHOR_TAG)
            _make_list_of_download_candidates(page_url, links, downloaddir, file_extns_of_intrest)
        except Exception as e:
            # Keep going: one bad page must not abort the whole crawl.
            # (``except Exception, e`` -> ``as e``: works on Py2.6+ and Py3.)
            logger.error("Exception: " + str(e))
示例7: download_if_needed
def download_if_needed(url, filename):
    """ Downloads the data from a given URL, if not already present in the directory, or displays any of the following:
    1. The file already exists
    2. URL does not exist
    3. Server is not responding
    """
    if os.path.exists(filename):
        return filename + ' already exists'
    try:
        r = urlopen(url)
    except URLError as e:
        r = e
    # A URLError with no HTTP status (connection refused, DNS failure, ...)
    # has no ``code`` attribute; treat that as a non-responding server
    # instead of crashing with AttributeError as the original did.
    code = getattr(r, 'code', None)
    if code is not None and code < 400:
        wget.download(url)
        return 'downloading'
    elif code is not None and 400 <= code < 500:
        return 'Url does not exist'
    else:
        return 'Server is not responding'
示例8: doTask
def doTask(self, tstamp):
    """Download the image for *tstamp* into the configured pics directory.

    Skips the download when the destination file already exists (returns
    None in that case); otherwise returns the parsed timestamp so it
    propagates downstream.
    """
    tstamp = coils.string2time(tstamp)
    fname = coils.time2fname(tstamp) + '.jpg'
    dest_dir = os.path.join(self._config['pics_dir'], coils.time2dir(tstamp))
    dest_fname = os.path.join(
        dest_dir,
        fname,
    )
    if os.path.exists(dest_fname):
        print('Skipping {}'.format(dest_fname))
        return
    try:
        os.makedirs(dest_dir)
    except os.error:
        # Directory may already exist; that is fine.
        pass
    url = '{}/pics/{}.jpg'.format(
        self._url,
        coils.time2fname(tstamp, full=True),
    )
    print(url)
    # wget writes into the CWD, so chdir to the destination -- and always
    # restore afterwards (the original left the process in dest_dir when
    # the download raised).
    saved = os.getcwd()
    os.chdir(dest_dir)
    try:
        wget.download(url, bar=None)
    finally:
        os.chdir(saved)
    # Propagate timestamp downstream.
    return tstamp
示例9: update
def update():
    """Download and apply the update archive.

    Removes the old config/mods/jarmods trees (best-effort), extracts
    update.zip over the install directory, and swaps in the new svn
    manifest.  NOTE(review): ``dir`` is a module-level path that shadows
    the builtin, and the backslash literals assume Windows separators.
    """
    print("Downloading Update")
    wget.download('<zip>', 'update.zip')
    # Best-effort cleanup: a missing tree is not an error.  Catch OSError
    # only -- the original bare excepts also swallowed KeyboardInterrupt.
    for subdir in ('\config', '\mods', '\jarmods'):
        try:
            shutil.rmtree(dir + subdir)
        except OSError:
            print("Continuing")
    with zipfile.ZipFile('update.zip') as myzip:
        # ``with`` closes the archive; the original's explicit close()
        # inside the with-block was redundant.
        myzip.extractall(dir)
    os.remove('svn.txt')
    os.remove('update.zip')
    os.rename('svnnew.txt', 'svn.txt')
    print("Update Complete")
示例10: foo
def foo():
    """Read "a,b" lines from the CSV named by argv[1] and download the
    resume PDF for each pair as <b>_<a>.pdf from the hard-coded site."""
    # ``with`` guarantees the input file is closed (the original leaked
    # the handle opened via plain open()).
    with open(sys.argv[1], 'r') as fin:
        for line in fin:
            a, b = line.strip().rstrip('\n').split(',')
            c = b.strip('"') + '_' + a.strip('"') + '.pdf'
            makeurl = 'http://www.tpcuiet.com/resume_upload/cannot_find_it_haha/{}'.format(c)
            wget.download(makeurl)
示例11: download
def download(url):
"""Copy the contents of a file from a given URL
to a local file.
"""
wf = urllib2.urlopen(url)
html=wf.read()
# print html
flist=[]
mhdf = re.findall('\"M.*\.hdf\"', html)
mhdfs =[f for f in mhdf if 'h08v04' in f or 'h08v05' in f or 'h09v04' in f]
# print mhdfs
for line in mhdfs:
# print 'a line', line.replace('\"', '')
fileUrl=url+line.replace('\"', '')
print fileUrl
wget.download(fileUrl)
xhdf = re.findall('\"M.*\.hdf.xml\"', html)
xhdfs =[f for f in xhdf if 'h08v04' in f or 'h08v05' in f or 'h09v04' in f]
for line in xhdfs:
# print 'a line', line.replace('\"', '')
xfileUrl=url+line.replace('\"', '')
print xfileUrl
wget.download(xfileUrl)
示例12: download_files
def download_files(answer, download_list):
if answer == 'y' or answer == 'yes':
for item in download_list:
print item
wget.download(download_list[item])
else:
print 'Thank you and have a really great day!'
示例13: download_img
def download_img(url):
    """Download every image of a gallery page into a directory named after
    the gallery title; skips galleries already downloaded.

    NOTE(review): Python 2 code (print statement, xrange).  The parsing
    assumes a specific page layout (count in a span styled ``#DB0909``,
    title in ``#htilte``, images under ``#hgallery``) -- confirm against
    the target site.
    """
    text = requests.get(url).text
    soup = bs(text, "lxml")
    # total -- strip the last 3 characters of the count span's text to keep
    # only the number (assumes a fixed-width suffix; verify on the page).
    total = soup.find('span', {'style': 'color: #DB0909'}).text
    total = total[: -3]
    total = int(total)
    # title
    title = soup.find('h1', {'id': 'htilte'}).text
    # Derive a per-image URL template from the first image's src, which
    # ends in /0.jpg; subsequent images are /001.jpg, /002.jpg, ...
    url_pattern = soup.find('ul', {'id': 'hgallery'})
    url_pattern = url_pattern.img.get('src').replace('/0.jpg', '/{:03d}.jpg')
    print title
    # A directory named after the title marks an already-downloaded gallery.
    if os.path.exists(title):
        return
    os.mkdir(title)
    for i in xrange(total):
        file_url = url_pattern.format(i)
        file_name = "{:03d}.jpg".format(i)
        output_file = os.path.join(title, file_name)
        if i == 0:
            # The first image is served as /0.jpg rather than /000.jpg.
            file_url = file_url.replace("000", "0")
        wget.download(file_url, out=output_file)
示例14: create_lisa_data_dir_tree
def create_lisa_data_dir_tree(oseg=None):
    """Create the ``~/lisa_data/.lisa`` directory tree and download the app
    icon if it is missing.

    :param oseg: optional segmentation object; when given, its server-sync
        directories under ``oseg.output_datapath`` are created as well.
    """
    odp = op.expanduser('~/lisa_data/.lisa/')
    if not op.exists(odp):
        os.makedirs(odp)
    import wget
    # NOTE(review): ``path(".lisa/...")`` resolves relative to the CWD while
    # the tree above is created under ~/lisa_data/.lisa/ -- confirm these
    # are meant to be the same location.
    lisa_icon_path = path(".lisa/LISA256.png")
    if not op.exists(lisa_icon_path):
        try:
            wget.download(
                "https://raw.githubusercontent.com/mjirik/lisa/master/lisa/icons/LISA256.png",
                out=lisa_icon_path)
        except Exception:
            # Icon download is best-effort; log with traceback and move on.
            # (was a bare ``except:`` which also swallowed SystemExit etc.)
            import traceback
            logger.warning('logo download failed')
            logger.warning(traceback.format_exc())
    if oseg is not None:
        # used for server sync
        oseg._output_datapath_from_server = op.join(oseg.output_datapath, 'sync', oseg.sftp_username, "from_server/")
        # used for server sync
        oseg._output_datapath_to_server = op.join(oseg.output_datapath, 'sync', oseg.sftp_username, "to_server/")
        # Ensure the base output dir and both sync dirs exist (the original
        # repeated this stanza three times).
        for sync_dir in (oseg.output_datapath,
                         oseg._output_datapath_from_server,
                         oseg._output_datapath_to_server):
            if not op.exists(sync_dir):
                os.makedirs(sync_dir)
示例15: gdb
def gdb():
try:
gdb = dict()
pre1 = "http://sourceware.org/gdb/current/onlinedocs/"
pre2 = "https://sourceware.org/gdb/talks/esc-west-1999/"
gdb[1] = pre1 + "gdb.pdf.gz"
gdb[2] = pre2 + "paper.pdf"
gdb[3] = pre2 + "slides.pdf"
print stringa
print "GDB Documentation"
print "GDB User Manual"
filename = wget.download(gdb[1])
print "\nThe Heisenberg Debugging Technology"
print "Slides/Paper/Enter(for both)"
decision = raw_input()
if decision == "Paper":
filename = wget.download(gdb[2])
elif decision == "Slides":
filename = wget.download(gdb[3])
else:
for key in range(2,4):
# print key
filename = wget.download(gdb[key])
print "\nCompleted\n"
except:
print "\n Did something else happen ? \n"