本文整理汇总了Python中module.network.RequestFactory.getURL函数的典型用法代码示例。如果您正苦于以下问题:Python getURL函数的具体用法?Python getURL怎么用?Python getURL使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了getURL函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: notify
def notify(self,
           event,
           msg="",
           key=None):
    """Send a notifymyandroid.com push notification for *event*.

    :param event: short event name shown in the notification
    :param msg:   longer description text
    :param key:   NMA API key; defaults to the 'apikey' config value.

    Fix: the original default ``key=self.getConfig('apikey')`` is invalid --
    ``self`` is not in scope when the ``def`` statement is evaluated. Use a
    None sentinel and resolve the config value at call time instead.
    """
    if key is None:
        key = self.getConfig('apikey')
    if not key:
        return
    # Suppress notifications while a client is attached, unless configured otherwise.
    if self.core.isClientConnected() and not self.getConfig('ignoreclient'):
        return
    elapsed_time = time.time() - self.last_notify
    # Rate limiting: honour the minimum gap and the per-minute quota.
    if elapsed_time < self.getConf("sendtimewait"):
        return
    if elapsed_time > 60:
        self.notifications = 0
    elif self.notifications >= self.getConf("sendpermin"):
        return
    getURL("http://www.notifymyandroid.com/publicapi/notify",
           get={'apikey' : key,
                'application': "pyLoad",
                'event' : event,
                'description': msg})
    self.last_notify = time.time()
    self.notifications += 1
示例2: periodical
def periodical(self):
    """Scrape hd-area.org category pages and push notifications for found items.

    Fix: the original abused conditional expressions purely for their side
    effect (``notifyPushover(...) if len(...) > 0 else True``); rewritten as
    plain ``if`` statements with identical behaviour.
    """
    self.items_to_queue = []
    self.items_to_collector = []
    for site in ("top-rls", "movies", "Old_Stuff"):
        address = "http://hd-area.org/index.php?s=" + site
        req_page = getURL(address)
        soup = BeautifulSoup(req_page)
        self.get_title(soup)
    # NOTE(review): kept `== True` — assumes the config getter may return
    # non-bool truthy values; confirm before relaxing to a plain truth test.
    if self.get_config("cinedubs") == True:
        address = "http://hd-area.org/index.php?s=Cinedubs"
        req_page = getURL(address)
        soup = BeautifulSoup(req_page)
        self.get_title(soup)
    # An API key shorter than 3 characters is treated as "not configured".
    if len(self.get_config("pushoverapi")) > 2:
        if self.items_to_queue:
            notifyPushover(self.get_config("pushoverapi"), self.items_to_queue, "QUEUE")
        if self.items_to_collector:
            notifyPushover(self.get_config("pushoverapi"), self.items_to_collector, "COLLECTOR")
    if len(self.get_config("pushbulletapi")) > 2:
        if self.items_to_queue:
            notifyPushbullet(self.get_config("pushbulletapi"), self.items_to_queue, "QUEUE")
        if self.items_to_collector:
            notifyPushbullet(self.get_config("pushbulletapi"), self.items_to_collector, "COLLECTOR")
示例3: getInfo
def getInfo(urls):
    """Yield (name, size, status, url) info for Filefactory URLs.

    Direct links (a redirect whose Location does not point back to a
    Filefactory page) are reported without fetching/parsing the page.

    Fix: ``re.match(pattern, string)`` had its arguments swapped -- the
    original passed the Location URL as the *pattern* and the plugin URL
    pattern as the *string*, so the direct-link check never worked as
    intended.
    """
    for url in urls:
        h = getURL(url, just_header=True)
        m = re.search(r'Location: (.+)\r\n', h)
        if m and not re.match(FilefactoryCom.__pattern__, m.group(1)):  #: It's a direct link! Skipping
            yield (url, 0, 3, url)
        else:  #: It's a standard html page
            yield parseFileInfo(FilefactoryCom, url, getURL(url))
示例4: getInfo
def getInfo(urls):
    """Yield a (name, size, status, url) tuple for each Cloudzer URL.

    A redirect to the cloudzer.net 404 page marks the file as offline
    (status 1); otherwise the page is fetched and parsed normally.
    """
    for url in urls:
        header = getURL(url, just_header=True)
        gone = 'Location: http://cloudzer.net/404' in header
        if gone:
            yield (url, 0, 1, url)
        else:
            yield parseFileInfo(CloudzerNet, url, getURL(url, decode=True))
示例5: processCaptcha
def processCaptcha(self, task):
    """Upload the captcha image to the solver API and poll for the answer.

    :param task: captcha task holding the image path (``task.captchaFile``);
                 the solved text and the job ticket are stored on it.
    :return: False on a failed upload, otherwise None (the answer is
             delivered via ``task.setResult``).
    """
    result = None
    # Read the captcha image and transmit it base64-encoded.
    with open(task.captchaFile, "rb") as f:
        data = f.read()
    data = b64encode(data)
    self.logDebug("%s : %s" % (task.captchaFile, data))
    # Positional captchas need click coordinates; signalled via "mouse".
    if task.isPositional():
        mouse = 1
    else:
        mouse = 0
    response = getURL(
        self.API_URL,
        post={
            "apikey": self.getConfig("passkey"),
            "prio": self.getConfig("prio"),
            "confirm": self.getConfig("confirm"),
            "captchaperhour": self.getConfig("captchaperhour"),
            "maxtimeout": self.getConfig("timeout"),
            "pyload": "1",
            "source": "pyload",
            "base64": "1",
            "mouse": mouse,
            "file-upload-01": data,
            "action": "usercaptchaupload",
        },
    )
    # A purely numeric response is the ID of the newly created captcha job.
    if response.isdigit():
        self.logInfo(_("NewCaptchaID from upload: %s : %s" % (response, task.captchaFile)))
        # Poll the API (up to 99 attempts, 3 s apart) until an answer arrives.
        for i in range(1, 100, 1):
            response2 = getURL(
                self.API_URL,
                get={
                    "apikey": self.getConfig("passkey"),
                    "id": response,
                    "pyload": "1",
                    "source": "pyload",
                    "action": "usercaptchacorrectdata",
                },
            )
            # Empty string means "not solved yet"; anything else is the answer.
            if response2 != "":
                break
            time.sleep(3)
        result = response2
        # Keep the job ID so the captcha can be confirmed/refunded later.
        task.data["ticket"] = response
        self.logInfo("result %s : %s" % (response, result))
        task.setResult(result)
    else:
        self.logError("Bad upload: %s" % response)
        return False
示例6: respond
def respond(ticket, value):
conf = join(expanduser("~"), "ct.conf")
f = open(conf, "rb")
try:
getURL("http://captchatrader.com/api/respond",
post={"is_correct": value,
"username": f.readline().strip(),
"password": f.readline().strip(),
"ticket": ticket})
except Exception, e :
print "CT Exception:", e
log(DEBUG, str(e))
示例7: getInfo
def getInfo(urls):
    """Yield a (name, size, status, url) tuple per Cloudzer URL.

    Offline files (redirect to the 404 page) yield status 1; otherwise the
    hoster's plain-text ``/status`` endpoint (two lines: name, size) is
    queried and status 2 is reported.
    """
    for url in urls:
        header = getURL(url, just_header=True)
        if 'Location: http://cloudzer.net/404' in header:
            yield (url, 0, 1, url)
            continue
        # Append the status endpoint without doubling the slash.
        status_url = url + ('status' if url.endswith('/') else '/status')
        name, size = getURL(status_url).splitlines()
        yield (name, parseFileSize(size), 2, url)
示例8: getHoster
def getHoster(self):
# If no accounts are available there will be no hosters available
if not self.account or not self.account.canUse():
print "ReloadCc: No accounts available"
return []
# Get account data
(user, data) = self.account.selectAccount()
# Get supported hosters list from reload.cc using the json API v1
query_params = dict(
via='pyload',
v=1,
get_supported='true',
get_traffic='true',
user=user
)
try:
query_params.update(dict(hash=self.account.infos[user]['pwdhash']))
except Exception:
query_params.update(dict(pwd=data['password']))
answer = getURL("http://api.reload.cc/login", get=query_params)
data = json_loads(answer)
# If account is not valid thera are no hosters available
if data['status'] != "ok":
print "ReloadCc: Status is not ok: %s" % data['status']
return []
# Extract hosters from json file
return data['msg']['supportedHosters']
示例9: getRtUpdate
def getRtUpdate(self):
    """Return the cached rtUpdate JavaScript, refreshing it when stale.

    The script is re-downloaded from the jdownloader update server when the
    plugin version changed or the cached copy is older than one day
    (86400000 ms), then patched for non-rhino JS engines and cached in
    plugin storage. Returns None/empty when no cached copy exists and the
    refresh conditions are not met.
    """
    rtUpdate = self.getStorage("rtUpdate")
    if not rtUpdate:
        # Refresh only on version change or when the cache is >24h old.
        if (
            self.getStorage("version") != self.__version__
            or int(self.getStorage("timestamp", 0)) + 86400000 < timestamp()
        ):
            # that's right, we are even using jdownloader updates
            rtUpdate = getURL("http://update0.jdownloader.org/pluginstuff/tbupdate.js")
            # Payload is encrypted; only the second line carries the script.
            rtUpdate = self.decrypt(rtUpdate.splitlines()[1])
            # but we still need to fix the syntax to work with other engines than rhino
            # Rewrite rhino-only "for each(var x in [...])" loops into plain
            # index loops over a temporary array.
            rtUpdate = re.sub(
                r"for each\(var (\w+) in(\[[^\]]+\])\)\{",
                r"zza=\2;for(var zzi=0;zzi<zza.length;zzi++){\1=zza[zzi];",
                rtUpdate,
            )
            # Ensure loop variables are declared with `var`.
            rtUpdate = re.sub(r"for\((\w+)=", r"for(var \1=", rtUpdate)
            self.logDebug("rtUpdate")
            self.setStorage("rtUpdate", rtUpdate)
            self.setStorage("timestamp", timestamp())
            self.setStorage("version", self.__version__)
        else:
            self.logError("Unable to download, wait for update...")
            self.tempOffline()
    return rtUpdate
示例10: getInfo
def getInfo(urls):
    """Resolve name and online status for Dailymotion video URLs.

    :param urls: iterable of URLs matching ``DailymotionCom.__pattern__``
    :return: list of (name, size, status, url) tuples; size is unknown (0).

    Fixes: the local variable ``id`` shadowed the builtin; and
    ``info["access_error"]`` raised KeyError when the API omitted the key --
    ``.get()`` is used so a missing value counts as "no error".
    """
    result = []  #: [ .. (name, size, status, url) .. ]
    regex = re.compile(DailymotionCom.__pattern__)
    apiurl = "https://api.dailymotion.com/video/"
    request = {"fields": "access_error,status,title"}
    for url in urls:
        video_id = regex.search(url).group("ID")
        page = getURL(apiurl + video_id, get=request)
        info = json_loads(page)
        # Fall back to the URL itself when the API returned no title.
        if "title" in info:
            name = info["title"] + ".mp4"
        else:
            name = url
        if "error" in info or info.get("access_error"):
            status = "offline"
        else:
            status = info["status"]
            if status in ("ready", "published"):
                status = "online"
            elif status in ("waiting", "processing"):
                status = "temp. offline"
            else:
                status = "offline"
        result.append((name, 0, statusMap[status], url))
    return result
示例11: getInfo
def getInfo(urls):
    """Yield one (name, size, status, url) tuple per checked Putlocker URL.

    Fix: the original accumulated tuples in a list and executed
    ``yield result`` *inside* the loop, yielding the same growing list once
    per URL instead of the individual info tuples that the sibling getInfo
    helpers produce. Status 1 = offline, 2 = online; size is unknown (0).
    """
    for url in urls:
        html = getURL(url)
        if re.search(PutlockerCom.PATTERN_OFFLINE, html):
            yield (url, 0, 1, url)
            continue
        name = re.search(PutlockerCom.PATTERN_FILENAME_1, html)
        if name is None:
            name = re.search(PutlockerCom.PATTERN_FILENAME_2, html)
        if name is None:
            # No file name found at all -> report as offline.
            yield (url, 0, 1, url)
            continue
        # size = re.search(PutlockerCom.PATTERN_FILESIZE, html)
        # if size is None:
        #     result.append((url, 0, 1, url))
        #     continue
        # size = size.group(1)
        yield (name.group(1), 0, 2, url)
示例12: process
def process(self, pyfile):
    """Entry point for downloading *pyfile* from an XFileSharing-style hoster.

    URLs that do not match this plugin's pattern are treated as leeched
    links from other hosters (premium only). Otherwise the page is fetched,
    parsed for file info and a direct link, and dispatched to the premium
    or free download handler.
    """
    self.prepare()
    if not re.match(self.__pattern__, self.pyfile.url):
        # Foreign-hoster URL: only premium users may leech it.
        if self.premium:
            self.handleOverriden()
        else:
            self.fail("Only premium users can download from other hosters with %s" % self.HOSTER_NAME)
    else:
        try:
            # Due to a 0.4.9 core bug self.load would use cookies even if
            # cookies=False. Workaround using getURL to avoid cookies.
            # Can be reverted in 0.5 as the cookies bug has been fixed.
            self.html = getURL(pyfile.url, decode=True)
            self.file_info = self.getFileInfo()
        except PluginParseError:
            self.file_info = None
        self.location = self.getDirectDownloadLink()
        if not self.file_info:
            # Fall back to deriving the file name from the URL path.
            pyfile.name = html_unescape(
                unquote(urlparse(self.location if self.location else pyfile.url).path.split("/")[-1])
            )
        if self.location:
            self.startDownload(self.location)
        elif self.premium:
            self.handlePremium()
        else:
            self.handleFree()
示例13: check_for_new_or_removed_hosters
def check_for_new_or_removed_hosters(self, hosters):
    """Log differences between the known hoster list and vipleech4u.com's.

    :param hosters: mapping whose keys are the currently known hoster names.
    Nothing is modified; differences are only logged.

    Fix: the final check used ``not (new_hosters and removed_hosters)``,
    which reported the list as "still valid" whenever *either* list was
    empty. The list is only valid when NEITHER new nor removed hosters
    exist, i.e. ``not (new_hosters or removed_hosters)``.
    """
    # get the old hosters
    old_hosters = hosters.keys()
    # load the current hosters from vipleech4u.com
    page = getURL('http://vipleech4u.com/hosts.php')
    current_hosters = [x.lower() for x in self.HOSTER_PATTERN.findall(page)]
    # hosters present remotely but unknown locally
    new_hosters = [h for h in current_hosters if h not in old_hosters]
    # hosters known locally but gone from the remote list
    removed_hosters = [h for h in old_hosters if h not in current_hosters]
    if new_hosters:
        self.logDebug('The following new hosters were found on vipleech4u.com: %s' % str(new_hosters))
    if removed_hosters:
        self.logDebug('The following hosters were removed from vipleech4u.com: %s' % str(removed_hosters))
    if not (new_hosters or removed_hosters):
        self.logDebug('The hoster list is still valid.')
示例14: getCredits
def getCredits(self):
    """Return the number of captcha credits left on the account.

    The API answers with "key value" lines; the integer value of the
    'Left' field is returned.
    """
    reply = getURL(self.GETCREDITS_URL,
                   post={"key": self.getConfig("passkey")})
    fields = {}
    for line in reply.splitlines():
        k, v = line.split(' ', 1)
        fields[k] = v
    return int(fields['Left'])
示例15: periodical
def periodical(self):
    """Fetch the configured user's trakt.tv watchlist and search new titles.

    Titles already stored as "downloaded" are skipped; afterwards a push
    notification is sent if any items were added to pyLoad.

    Fixes: the loop variable ``all`` shadowed the builtin, and the push
    notifications abused conditional expressions for their side effect
    (``notify(...) if len(...) > 0 else True``); both rewritten with
    identical behaviour.
    """
    html_parser = HTMLParser.HTMLParser()
    self.items_to_pyload = []
    address = "https://trakt.tv/users/%s/watchlist" % self.get_config("traktuser")
    page = getURL(address)
    soup = BeautifulSoup(page)
    trakttitles = []
    # Get Trakt Watchlist Titles
    for block in soup.findAll("div", {"class": "titles"}):
        for title in block.findAll("h3"):
            title = title.get_text()
            title = replaceUmlauts(html_parser.unescape(title))
            storage = self.retrieve(title)
            if storage == "downloaded":
                self.log_debug(title + ": already found and downloaded")
            else:
                trakttitles.append(title)
    self.search(trakttitles)
    # Pushnotification (API keys shorter than 3 chars mean "not configured").
    if len(self.get_config("pushoverapi")) > 2:
        if self.items_to_pyload:
            notifyPushover(self.get_config("pushoverapi"), self.items_to_pyload)
    if len(self.get_config("pushbulletapi")) > 2:
        if self.items_to_pyload:
            notifyPushbullet(self.get_config("pushbulletapi"), self.items_to_pyload)