This page collects typical usage examples of the Download class from the Python module Download. If you have been wondering what the Download class does, how to use it, or what real-world code built on it looks like, the curated examples here may help.
15 code examples of the Download class are shown below, sorted by popularity by default.
Example 1: doRequest
def doRequest(self):
    d = Download(self.Url)
    if d.doRequest():
        return 1
    self.recs = d.getSOURCE()
    return 0
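The Download class itself is not reproduced on this page, and the examples are drawn from different projects, so the constructor varies by call site: Example 8 passes a user agent as the second argument, Examples 9 and 10 pass a cookie and a proxy, and Examples 3, 4, and 6 subclass an unrelated IRC-bot Download base class. For the common doRequest()/getSOURCE() call sites, a minimal sketch might look like the following; the urllib2 transport, the timeout, and every name beyond doRequest and getSOURCE are assumptions, not the original implementation:

import urllib2

class Download:
    """Minimal sketch (assumed, not the original).

    doRequest() returns 0 on success and 1 on failure, matching the
    'if d.doRequest(): <error path>' convention used throughout;
    getSOURCE() returns the fetched body.
    """
    def __init__(self, url, userAgent=None):
        self.url = url
        self.userAgent = userAgent
        self.source = None

    def doRequest(self):
        headers = {'User-Agent': self.userAgent} if self.userAgent else {}
        try:
            req = urllib2.Request(self.url, headers=headers)
            self.source = urllib2.urlopen(req, timeout=30).read()
            return 0  # success
        except Exception:
            return 1  # failure

    def getSOURCE(self):
        return self.source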
Example 2: dl
def dl(self):
    """
    Downloads the highest-quality picture available.
    Returns False if something goes wrong.
    """
    if self.orig_url == "":
        if self.hq_url == "":
            down = Download(self.lq_url, self.config.get_image_folder())
            if down.perform():
                return True
        else:
            down = Download(self.hq_url, self.config.get_image_folder())
            if down.perform():
                return True
    else:
        down = Download(self.orig_url, as_var=True)
        if down.perform():
            result = down.get_result()
            soup = BeautifulSoup(result.getvalue())
            download_link = soup.find("a", text="this link")
            orig_url = self.dl_url_base + download_link["href"]
            time.sleep(120)
            down = Download(orig_url, self.config.get_image_folder())
            if down.perform():
                self.file_name = down.get_output_name()
                return True
    return False
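Examples 2, 11, and 15 use a second Download variant with a different interface: perform() returns True on success, get_result() hands back an in-memory buffer whose getvalue() feeds BeautifulSoup, and get_output_name() reports the path written when a target folder is given. A rough sketch consistent with those call sites follows; every detail of it is assumed, not taken from the original code:

import os
import urllib2
from StringIO import StringIO

class Download:
    """Sketch of the perform()/get_result() variant (assumed)."""
    def __init__(self, url, folder=None, as_var=False):
        self.url = url
        self.folder = folder      # write to disk when given
        self.as_var = as_var      # keep the body in memory when True
        self.buf = None
        self.output_name = None

    def perform(self):
        try:
            body = urllib2.urlopen(self.url, timeout=30).read()
        except Exception:
            return False
        if self.as_var:
            self.buf = StringIO(body)
        else:
            name = self.url.rstrip('/').split('/')[-1] or 'index.html'
            self.output_name = os.path.join(self.folder, name)
            with open(self.output_name, 'wb') as f:
                f.write(body)
        return True

    def get_result(self):
        return self.buf

    def get_output_name(self):
        return self.output_name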
Example 3: __init__
def __init__(self, phenny):
    Download.__init__(self)
    self.re_title = re.compile('ustream\.vars\.channelTitle\=\"(.*?)\"\;ustream\.vars\.')
    self.re_channel = re.compile('ustream\.vars\.channelId\=(\d*?)\;ustream\.vars\.')
    self.apikey = phenny.config.ustreamdevapikey
    self.urltype = None
    self.h = {}
Example 4: __init__
def __init__(self, phenny):
    Download.__init__(self)
    self.re_id = re.compile("^[a-zA-Z0-9\_\-]{11}$")
    self.re_fragment = re.compile("^t\=((\d+)h)?((\d+)m)?((\d+)s)?$")
    self.urltype = VidType.YOUTUBE
    self.h = {}
    self.gdatahost = 'gdata.youtube.com'
    self.developer_key = phenny.config.youtubedevapikey
    self.gdataxml = None
Example 5: isGoogleSearch
def isGoogleSearch(schema, ip):
    d = Download(schema + '://' + ip)
    if d.doRequest():
        return False
    if Utility.containsGoogle(d.getSOURCE()):
        return True
    return False
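For completeness, a hypothetical invocation; the schema and IP below are placeholders, not values from the original project:

# 203.0.113.7 is a documentation-range placeholder IP
if isGoogleSearch('http', '203.0.113.7'):
    print 'this host serves a Google search page'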
Example 6: __init__
def __init__(self):
    Download.__init__(self)
    self.re_live_id = re.compile('stream_id \= (\d+?)\;')
    self.re_recorded_id = re.compile('full_program_clipid \= (\d+?)\;')
    self.re_live_title = re.compile('stream_title \= "(.+?)"\;')
    self.re_recorded_title = re.compile('full_program_title \= "(.+?)"\;')
    self.type = None
    self.urltype = None
    self.h = {}
Example 7: run
def run(self):
    url = self.BASE_URL + self.SeasonId + self.BASE_URL_PART_3 + str(self.PageNumber) + self.BASE_URL_PART_5
    d = Download(url)
    if d.doRequest():
        # fail
        print 'ERROR: ' + self.SeasonId + '-' + str(self.PageNumber)
    else:
        utfstr2file(d.getSOURCE(), './data/' + self.SeasonId + '-' + str(self.PageNumber) + '.raw')
    return url
Example 8: requestHtml
def requestHtml(self):
    url = self.BaseUrl + self.ISBN
    # print url, self.User_Agent
    d = Download(url, self.User_Agent)
    if d.doRequest():
        return 1
    self.HTML = d.getSOURCE()
    return 0
Example 9: request
def request(self):
    baseUrl = "http://shaishufang.com/index.php/site/main/uid/"
    postFix = "/friend/false/category//status//type//page/"
    url = baseUrl + self.UID + postFix + str(self.Page)
    d = Download(url, self.Cookie, self.Proxy)
    if d.doRequest():
        return False
    self.HTML = d.getSOURCE()
    return True
Example 10: request
def request(self):
    baseUrl = 'http://shaishufang.com/index.php/site/detail/uid/'
    postFix = '/status//category/none/friend/false'
    url = baseUrl + self.UID + '/ubid/' + self.BID + postFix
    d = Download(url, self.Cookie, self.Proxy)
    if d.doRequest():
        return False
    self.HTML = d.getSOURCE()
    return True
Example 11: file_exists
def file_exists(self, file_path):
    hash_local = self.hash_file(file_path)
    download = Download(
        ("https://commons.wikimedia.org/w/api.php?action=query&list"
         "=allimages&format=json&aisha1=") + hash_local, as_var=True)
    if download.perform():
        content = download.get_result().getvalue()
        json_data = json.loads(content)
        if len(json_data["query"]["allimages"]) > 0:
            return True
        else:
            return False
    return False  # the API request itself failed
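hash_file is referenced above but not shown. Since the allimages query filters by SHA-1 via the aisha1 parameter of the MediaWiki API, the helper is presumably a plain SHA-1 digest of the file contents; the following is an assumed sketch, not the original:

import hashlib

def hash_file(self, file_path):
    # assumed helper: hex-encoded SHA-1 of the file contents, matching
    # the aisha1= filter of the MediaWiki allimages API
    sha1 = hashlib.sha1()
    with open(file_path, 'rb') as f:
        for chunk in iter(lambda: f.read(8192), ''):
            sha1.update(chunk)
    return sha1.hexdigest()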
Example 12: run
def run(self):
    while True:
        print 'INFO: ........................................ START'
        stats = self.dbm.getStats()
        print 'INFO: deadLinks-', stats[0], ' unvisitedLinks-', stats[1], ' visitedLinks-', stats[2]
        # get a URL from unvisitedLinks
        url = self.dbm.retrieveUnvisitedLink()
        if url == False:
            print 'DEBUG: DONE -- retrieveUnvisitedLink return False'
            break
        print 'DEBUG: Processing ', url
        if not self.urlFilter.isPlainText(url):
            print 'DEBUG: NotPlainTextURL ', url
            continue
        if not self.domainFilter.isInDomain(url):
            print 'DEBUG: NOT IN DOMAIN ', url
            continue
        # request the URL
        d = Download(url)
        if d.doRequest() == 1:
            if not self.dbm.createDeadLink(url):
                print 'DEBUG: deadLinks already contain ', url
            else:
                print 'DEBUG: Add To deadLinks ', url
        else:
            if self.dbm.createVisitedLink(url):
                print 'DEBUG: Add To visitedLinks ', url
            else:
                print 'DEBUG: Failed Add To visitedLinks ', url
            # extract URLs from the source
            u = URLExtractor(d.getSOURCE(), url)
            tmpUrls = u.getUrls()
            if tmpUrls:
                for url in tmpUrls:
                    if self.dbm.isInDeadLink(url):
                        continue
                    elif self.dbm.isInVisitedLink(url):
                        continue
                    elif self.dbm.isInUnvisitedLink(url):
                        continue
                    else:
                        print 'DEBUG: Add To unvisitedLink ', url
                        self.dbm.createUnvisitedLink(url)
        print 'INFO: ........................................ END'
Example 13: run
def run(self, processName='MainProcess'):
    for isbn in self.ISBNS:
        url = 'http://www.amazon.cn/s/ref=nb_sb_noss?field-keywords=' + isbn
        d = Download(url)
        if d.doRequest():
            print 'ERROR[' + processName + ']: ', isbn, 'NERR'
            appendstr2file(isbn, './NERR.txt')
            continue
        asin = ASINParser(d.getSOURCE())
        if asin.getAsin():
            print 'INFO[' + processName + ']: ', isbn, asin.getAsin()
            appendstr2file(isbn + ',' + asin.getAsin(), './OK.txt')
        else:
            print 'WARN[' + processName + ']: ', isbn, 'NOER'
            appendstr2file(isbn, './NOER.txt')
Example 14: worker
def worker(appids, isbns, appidsCycle):
    # appidsCycle = cycle(appids)
    for isbn in isbns:
        url = 'http://' + appidsCycle.next() + '.appspot.com/url?url=' + 'http://book.douban.com/isbn/' + str(isbn)
        # print 'DEBUG: ', url
        d = Download(url)
        if d.doRequest():
            print isbn, 'network error'
            continue
        j = json.loads(d.getSOURCE())
        print isbn, j['status_code']
    return
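The commented-out line suggests appidsCycle is an itertools.cycle over App Engine app IDs used as rotating fetch proxies. A hypothetical invocation; the app IDs and ISBNs below are placeholders, not values from the original project:

from itertools import cycle

appids = ['proxy-app-1', 'proxy-app-2']      # placeholder app IDs
isbns = ['9787111128069', '9787544258975']   # placeholder ISBNs
worker(appids, isbns, cycle(appids))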
Example 15: find_urls
def find_urls(self):
    """
    Finds the download URLs at the available qualities and saves them.
    """
    down = Download(self.url, as_var=True)
    if down.perform():
        result = down.get_result()
        soup = BeautifulSoup(result.getvalue())
        download_links = soup.find_all("a", {"class": "DownloadLink"})
        if download_links:
            self.lq_url = download_links[0]["href"]
            self.hq_url = download_links[1]["href"]
        raw_link = soup.find(
            text="Other options available:").find_next("script").text
        m = re.search(r"href=..(.*\.\b[a-zA-Z0-9]+\b)", raw_link)
        if m:
            self.orig_url = self.url_base + "/" + m.group(1)