本文整理汇总了Python中utils.download函数的典型用法代码示例。如果您正苦于以下问题:Python download函数的具体用法?Python download怎么用?Python download使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了download函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: fetch_house_committee_meetings
def fetch_house_committee_meetings(committees, options):
# Load any existing meetings file so we can recycle any GUIDs.
existing_meetings = []
output_file = output_for("house")
if os.path.exists(output_file):
existing_meetings = json.load(open(output_file))
opts = dict(options)
opts["binary"] = True
opts["force"] = True
meetings = []
seen_meetings = set()
# Scrape the committee listing page for a list of committees with scrapable events.
committee_html = utils.download(
"http://docs.house.gov/Committee/Committees.aspx", "committee_schedule/house_overview.html", options
)
for cmte in re.findall(r'<option value="(....)">', committee_html):
if cmte not in committees:
logging.error("Invalid committee code: " + cmte)
continue
# Download the feed for this committee.
html = utils.download(
"http://docs.house.gov/Committee/RSS.ashx?Code=%s" % cmte, "committee_schedule/house_%s.xml" % cmte, opts
)
# It's not really valid?
html = html.replace(
" ", " "
) # who likes nbsp's? convert to spaces. but otherwise, entity is not recognized.
# print html
# Parse and loop through the meetings listed in the committee feed.
dom = lxml.etree.fromstring(html)
# original start to loop
for mtg in dom.xpath("channel/item"):
eventurl = unicode(mtg.xpath("string(link)"))
event_id = re.search(r"EventID=(\d+)$", eventurl).group(1)
pubDate = datetime.datetime.fromtimestamp(mktime(parsedate(mtg.xpath("string(pubDate)"))))
# skip old records of meetings, some of which just give error pages
if pubDate < (datetime.datetime.now() - datetime.timedelta(days=60)):
continue
# Events can appear in multiple committee feeds if it is a joint meeting.
if event_id in seen_meetings:
logging.info("Duplicated multi-committee event: " + event_id)
continue
seen_meetings.add(event_id)
# this loads the xml from the page and sends the xml to parse_house_committee_meeting
load_xml_from_page(eventurl, options, existing_meetings, committees, event_id, meetings)
# if bad zipfile
if load_xml_from_page == False:
continue
print "[house] Found %i meetings." % len(meetings)
return meetings
示例2: extract_content
def extract_content(self):
    """Visit every class (skipping the first, ONLINE CLASS) and download
    each content file it links to via the what's-new content list."""
    for klass in self.extract_classes()[1:]:  # Exclude ONLINE CLASS
        folder = remove_accents(klass['class'])
        create_folder(folder)
        print('Extracting Class: {0}'.format(klass['class']))
        self.browser.get('https://unipac-bomdespacho.blackboard.com{0}'.format(klass['href']))
        # Open the content list for this class.
        self.browser.find_element_by_id('header::0-whatsNewView::CO').click()
        container = self.browser.find_element_by_id('block::0-whatsNewView::CO')
        links = container.find_elements_by_css_selector(
            "a[onclick*='nautilus_utils.actionSelected']"
        )
        for idx in range(len(links)):
            try:
                # Re-query the links each round: navigating back staled the
                # previous element references.
                container = self.browser.find_element_by_id('block::0-whatsNewView::CO')
                links = container.find_elements_by_css_selector(
                    "a[onclick*='nautilus_utils.actionSelected']"
                )
                links[idx].click()
                self.check_visibility(By.CLASS_NAME, "individualContent-link")
                file_link = self.browser.find_element_by_class_name('individualContent-link').get_attribute('href')
                download(self.browser.get_cookies(), file_link, folder)
                self.browser.back()
                self.check_visibility(By.ID, "block::0-whatsNewView::CO")
            except TimeoutException:
                print("Error in: {0} - {1}".format(klass['class'], klass['href']))
示例3: main
def main():
    # The image being optimized is a Variable rather than a placeholder:
    # training updates it directly so it comes to resemble both the content
    # image and the style image.
    with tf.variable_scope('input'):
        canvas = tf.Variable(np.zeros([1, IMAGE_HEIGHT, IMAGE_WIDTH, 3]), dtype=tf.float32)

    utils.download(VGG_DOWNLOAD_LINK, VGG_MODEL, EXPECTED_BYTES)
    model = vgg_model.load_vgg(VGG_MODEL, canvas)
    model['global_step'] = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')

    # Mean-center both reference images, as the VGG network expects.
    content_image = utils.get_resized_image(CONTENT_IMAGE, IMAGE_HEIGHT, IMAGE_WIDTH) - MEAN_PIXELS
    style_image = utils.get_resized_image(STYLE_IMAGE, IMAGE_HEIGHT, IMAGE_WIDTH) - MEAN_PIXELS

    losses = _create_losses(model, canvas, content_image, style_image)
    model['content_loss'], model['style_loss'], model['total_loss'] = losses

    # Optimizer: Adam on the total loss, advancing the global step.
    model['optimizer'] = tf.train.AdamOptimizer(LR).minimize(
        model['total_loss'], global_step=model['global_step'])

    model['summary_op'] = _create_summary(model)

    initial_image = utils.generate_noise_image(content_image, IMAGE_HEIGHT, IMAGE_WIDTH, NOISE_RATIO)
    train(model, canvas, initial_image)
示例4: multiple_engine
def multiple_engine(self, song, lrc_path, artist, title):
    """Try several lyric engines in turn and return lrc_path on success.

    Each engine's first hit is downloaded only when the
    "lyrics/auto_download" config flag is set; a downloaded file that
    fails validation is deleted again.  Returns None when no engine
    yields a valid lyric file or an error occurs.
    """
    try:
        ret = False
        result = TTPlayer().request(artist, title)
        if result:
            if config.getboolean("lyrics", "auto_download"):
                ret = utils.download(result[0][2], lrc_path)
                if ret and self.vaild_lrc(lrc_path):
                    return lrc_path
                else:
                    os.unlink(lrc_path)
        duomi_result = DUOMI().request(artist, title)
        if duomi_result:
            if config.getboolean("lyrics", "auto_download"):
                ret = utils.download(duomi_result[0][2], lrc_path, "gbk")
                if ret and self.vaild_lrc(lrc_path):
                    return lrc_path
                else:
                    os.unlink(lrc_path)
        soso_result = SOSO().request(artist, title)
        if soso_result:
            if config.getboolean("lyrics", "auto_download"):
                ret = utils.download(soso_result[0][2], lrc_path, "gb18030")
                if ret and self.vaild_lrc(lrc_path):
                    return lrc_path
                else:
                    os.unlink(lrc_path)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt.  Engine/IO failures still yield None.
        return None
示例5: populate_events
def populate_events(ap_args):
    """Main function to populate the database with archive events.

    :Parameters:
        - ap_args : dict : Information related to archive(s).
    """
    FLOG.debug(ap_args)
    if __name__ != '__main__':
        ap_args = check_if_params(ap_args)
    check_arg_for_none_value(ap_args)
    CLOG.info('DB Populate args :- ' + str(ap_args))
    arch_path = ap_args.get('temp')
    del_arch_path = ap_args.get('delete_temp')
    if not arch_path:
        arch_path = gisc_msgs.TEMP_LOCATION
    # BUG FIX: the original used `is`/`is not` for these string comparisons,
    # which only happened to work via CPython string interning; use equality.
    arg = [k for k in ap_args if k != 'temp'][0]
    if arg == 'url':
        empty_directory(arch_path)
        download(ap_args[arg], arch_path)
    elif arg == 'arch_date':
        empty_directory(arch_path)
        download(get_url_from_date(ap_args[arg]), arch_path)
    elif arg == 'src_dir':
        arch_path = ap_args[arg]
    elif arg == 'zip':
        extract_zip(ap_args[arg], arch_path)
    elif arg == 'files':
        empty_directory(arch_path)
        # BUG FIX: `map(lambda ...)` was used for side effects; under
        # Python 3 map is lazy and the files would never be handled.
        for archive_file in ap_args[arg]:
            handle_valid_invalid_files(archive_file, arch_path)
    populate(arch_path)
    if arg != 'src_dir' and del_arch_path:
        empty_directory(arch_path, False)
示例6: define_words
def define_words(a_list):
    """
    :param a_list: a list of words
    :return: a list of tab separated information about each word
        - word, type of word
    """
    a_result = []
    with open("words_wiki_500.txt", "w") as out_file:
        for word in a_list:
            # Download the word's page, extract the word type via regexp,
            # compose the result line, write it out and record it.
            clean_word = word.strip()
            logger.debug("word: %s" % clean_word)
            utils.download(get_page(clean_word), get_file_name(clean_word), logger)
            word_type = utils.get_info(
                get_file_name(clean_word),
                'span class="headword">.*?%s.*?span class="pos".*?>(.*?)<' % clean_word,
                logger,
            )
            out_line = "%s\t%s\n" % (clean_word, word_type)
            logger.debug(out_line)
            out_file.write(out_line)
            # BUG FIX: a_result was declared and documented as the return
            # value but never populated or returned.
            a_result.append(out_line)
    # The `with` statement closes the file; the explicit close() was redundant.
    return a_result
示例7: get
def get(gist_id, requested_file, destination_dir, facade):
    """ Download a gist file.

    Gists can have several files. This method searches for and downloads
    a single file from a gist.

    If the 'requested_file' is not informed, then it won't raise an error
    only if the gist have just a single file.

    :param gist_id: identifier of the gist to download
    :param requested_file: name of the Gist file to download
    :param destination_dir: destination directory after the download
    :param facade: instance of the object that actually perform the request
    """

    def _fetch(gistfile):
        # Download one gist file and build the matching success result.
        download(gistfile.raw_url, destination_dir,
                 gistfile.filename, gistfile.size)
        return build_result(True, literals.DOWNLOAD_OK, gistfile.filename)

    # Get the gist information
    response = facade.request_gist(gist_id)
    if not response.ok:
        # Handle GitHub response error
        return build_result(False, literals.DOWNLOAD_ERROR,
                            get_json(response)['message'])

    # Gist file found. Parse it into a 'model.Gist' class.
    gist_obj = model.Gist(get_json(response))
    list_names = [gistfile.filename for gistfile in gist_obj.files]

    if len(gist_obj.files) == 1 and not requested_file:
        # Download the only file in the gist
        return _fetch(gist_obj.files[0])

    if not requested_file:
        # Gist has more than one file and no filename was specified. Error
        return build_result(False, literals.DOWNLOAD_MORE_FILES,
                            ", ".join(list_names))

    gistfile = gist_obj.getFile(requested_file)
    if gistfile:
        # Gist file found. Download it.
        return _fetch(gistfile)

    # Requested file not found in Gist
    return build_result(False, literals.FILE_NOT_FOUND,
                        ", ".join(list_names))
示例8: __init__
def __init__(self, input_img):
    """Load the pre-trained VGG weights and keep the image to process."""
    # Fetch the weight file if it is not present locally.
    utils.download(VGG_DOWNLOAD_LINK, VGG_FILENAME, EXPECTED_BYTES)
    # Load the MATLAB weight file and keep only its layer data.
    mat = scipy.io.loadmat(VGG_FILENAME)
    self.vgg_layers = mat["layers"]
    self.input_img = input_img
    # VGG mean-centers its inputs, so record the per-channel RGB means
    # (shaped for broadcasting over a batch of images).
    self.mean_pixels = np.array([123.68, 116.779, 103.939]).reshape((1, 1, 1, 3))
示例9: main
def main(url=ZIP_CODE_URL):
    """Download the zip-code archive from *url*, unzip it, and write the
    address data from the first extracted file.

    Prints a diagnostic message when the download or extraction fails.
    """
    path = utils.DATASET_HOME + ADDRESS_ZIP
    utils.download(url, path)
    files = utils.unzip(path)
    # `files and len(files) > 0` was redundant: an empty (or None) result
    # is already falsy.
    if files:
        write_address(files[0])
    else:
        print("failed to download or unzip the file. please see at {0}.".format(utils.DATASET_HOME))
示例10: install
def install(self, args=None):
    """Install PythonTidy: download it, move it into place, make it
    executable, and patch it to wrap at 120 columns instead of 80."""
    # Fetch the PythonTidy script into the tmp area.
    download('http://www.lacusveris.com/PythonTidy/PythonTidy-1.16.python', 'pythontidy.py')
    # Create the final install directory and move the script there.
    mkdir(finalTidyDir)
    shutil.move(join(alePath('tmp'), 'pythontidy.py'), finalTidyPath)
    # Make the installed script executable.
    os.system('chmod +x %s' % finalTidyPath)
    logging.info('Patching tidy to wrap at 120 columns instead of 80 ...')
    # Apply the bundled patch that raises the wrap column from 80 to 120.
    os.system('patch %s %s' % (finalTidyPath, join(alePath('recipes_all/tidy/'), 'tidy80col.patch')))
示例11: _download
def _download(self, src, dst_path):
    """Download *src* into *dst_path*, skipping files already present."""
    target = os.path.join(dst_path, os.path.basename(src))
    if os.path.isfile(target):
        # Already cached locally; nothing to do.
        logging.info('"%s" already exists, download skipped', target)
        return
    utils.make_dirs(dst_path)
    utils.download(src, target)
示例12: download_report
def download_report(report):
    """Download one inspector-general report and return its relative path."""
    report_path = "%s/%s/%s/report.%s" % (report['inspector'], report['year'], report['report_id'], report['file_type'])
    # PDFs must be fetched in binary mode.
    is_binary = (report['file_type'] == 'pdf')
    destination = "%s/%s" % (utils.data_dir(), report_path)
    utils.download(report['url'], destination, {'binary': is_binary})
    return report_path
示例13: do_snapshot_download
def do_snapshot_download(self, args):
    '''Download a SNAPSHOT'''
    snapshot_name = args.snapshot
    body = self.client.snapshot_download(snapshot=snapshot_name)
    entries = utils.loads(body)
    # Discard the trailing element first; an empty remainder means the
    # snapshot does not exist.
    entries.pop()
    if not entries:
        print('Snapshot %s does not exist.' % snapshot_name)
        return -1
    utils.download(entries.pop())
示例14: multiple_engine
def multiple_engine(self, song, lrc_path, artist, title):
try:
ret = False
ting_result = poster.query_lrc_info(artist, title)
if ting_result:
urls = [item[2] for item in ting_result]
for url in urls:
ret = utils.download(url, lrc_path)
if ret:
return lrc_path
result = TTPlayer().request(artist, title)
if result:
urls = [item[2] for item in result]
for url in urls:
ret = utils.download(url, lrc_path)
if ret and self.vaild_lrc(lrc_path):
return lrc_path
ttpod_result = TTPod().request_data(artist, title)
if ttpod_result:
with open(lrc_path, 'wb') as fp:
fp.write(ttpod_result)
return lrc_path
duomi_result = DUOMI().request(artist, title)
if duomi_result:
urls = [item[2] for item in duomi_result]
for url in urls:
ret = utils.download(url, lrc_path, "gbk")
if ret and self.vaild_lrc(lrc_path):
return lrc_path
soso_result = SOSO().request(artist, title)
if soso_result:
urls = [item[2] for item in soso_result]
for url in urls:
ret = utils.download(url, lrc_path, "gb18030")
if ret and self.vaild_lrc(lrc_path):
return lrc_path
try:
os.unlink(lrc_path)
except:
pass
return None
except Exception, e:
print e
return None
示例15: download_to_cache
def download_to_cache(meta):
    """Ensure the source archive described by *meta* is in SRC_CACHE.

    Downloads the file if it is not cached yet, verifies its MD5 when one
    is given, and returns the cached path.  Raises on an MD5 mismatch.
    """
    if not isdir(SRC_CACHE):
        os.makedirs(SRC_CACHE)
    cache_path = join(SRC_CACHE, meta['fn'])
    expected_md5 = meta.get('md5')
    if not isfile(cache_path):
        download(meta['url'], cache_path, expected_md5)
    # Verify even a pre-existing cached file against the expected digest.
    if expected_md5 and md5_file(cache_path) != expected_md5:
        raise Exception("MD5 mismatch: %r" % meta)
    return cache_path