本文整理汇总了Python中urllib.urlopen函数的典型用法代码示例。如果您正苦于以下问题:Python urlopen函数的具体用法?Python urlopen怎么用?Python urlopen使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了urlopen函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: getRss
def getRss():
codert = urllib.urlopen("http://actualidad.rt.com/feeds/all.rss?rss=1")
codetn = urllib.urlopen("http://www.tn.com.ar/rss.xml")
codeinfobae = urllib.urlopen("http://cdn01.ib.infobae.com/adjuntos/162/rss/Infobae.xml")
#codetelam = urllib.urlopen("http://www.telam.com.ar/rss2/ultimasnoticas.xml")
jsonrt = {"rss" : base64.b64encode(codert.read())}
filert = open("../servicios/rt/serv.json", 'w')
filert.write(json.dumps(jsonrt))
filert.close()
jsontn = {"rss" : base64.b64encode(codetn.read())}
filetn = open("../servicios/tn/serv.json", 'w')
filetn.write(json.dumps(jsontn))
filetn.close()
jsoninfobae = {"rss" : base64.b64encode(codeinfobae.read())}
fileinfobae = open("../servicios/infobae/serv.json", 'w')
fileinfobae.write(json.dumps(jsoninfobae))
filert.close()
'''filetelam = open("../servicios/telam/rss.xml", 'w')
filetelam.write(codetelam.read())
filetelam.close()'''
print getTime(),"[RSS] RSS's actualizados"
threading.Timer(300.0, getRss).start()
示例2: wait_on_app
def wait_on_app(port):
    """ Waits for the application hosted on this machine, on the given port,
        to respond to HTTP requests.

    Retries with exponential backoff: the wait starts at
    INITIAL_BACKOFF_TIME seconds and doubles after each failed attempt,
    for at most MAX_FETCH_ATTEMPTS attempts.

    Args:
      port: Port where app is hosted on the local machine
    Returns:
      True on success, False otherwise
    """
    backoff = INITIAL_BACKOFF_TIME
    retries = MAX_FETCH_ATTEMPTS
    private_ip = appscale_info.get_private_ip()
    url = "http://" + private_ip + ":" + str(port) + FETCH_PATH
    while retries > 0:
        try:
            # Any completed HTTP exchange counts as "up"; only a failed
            # connection raises IOError here.
            urllib.urlopen(url)
            return True
        except IOError:
            retries -= 1
            logging.warning("Application was not up at %s, retrying in %d seconds"%\
                (url, backoff))
            time.sleep(backoff)
            backoff *= 2
    # Typo fix in the message: "attemps" -> "attempts".
    logging.error("Application did not come up on %s after %d attempts"%\
        (url, MAX_FETCH_ATTEMPTS))
    return False
示例3: find_proxy
def find_proxy( url, timeout, testing_url):
try:
response = urllib.urlopen( url )
except:
if Debug: print "Request to get proxy failed."
return (False, False)
result=response.getcode()
content = response.read()
data = json.loads( content )
if Debug: print data['curl']
start_time = time.time()
try:
response = urllib.urlopen(testing_url, proxies={'http':data['curl']})
except:
if Debug: print "Proxy test request failed."
return (False, False)
result=response.getcode()
request_time = time.time() - start_time
if result == 200:
if Debug: print "\n\nGot test url with %d in %f seconds" % (result, request_time)
return (data['curl'], request_time)
else:
if Debug: print "Failed with %d" % result
return (False, False)
示例4: get
def get(self, action=""):
    """Shorten the ``url`` request parameter, trying bit.ly first, then
    goo.gl, then tinyurl; writes the short URL to the response, or
    answers 400 when every service fails."""
    url = self.request.get("url")
    try: #bit.ly
        result = urllib.urlopen("http://api.bit.ly/v3/shorten?login=crowy&apiKey=R_57bab6c0fb01da4e1e0a5e22f73c3a4a&format=json&longUrl=%s" % urllib.quote(url)).read()
        # Renamed from ``json`` -- the original shadowed the json module name.
        data = simplejson.loads(result)
        if data['status_code'] == 200:
            self.response.out.write(data['data']['url'])
            return
        else:
            logging.warn(result)
    except Exception:  # narrowed from bare except; fall through to goo.gl
        logging.warn("Unexpected error.")
    try: #goo.gl
        api_url = 'https://www.googleapis.com/urlshortener/v1/url?key=AIzaSyBRoz9ItBIQgHwWbZbmkF45dFiRKub2XzI&userip='+self.request.remote_addr
        post_data = simplejson.dumps({'longUrl':url})
        result = urlfetch.fetch(url=api_url,
                                payload=post_data,
                                method=urlfetch.POST,
                                headers={'Content-Type': 'application/json'})
        if result.status_code == 200:
            result = simplejson.loads(result.content)
            self.response.out.write(result['id'])
            return
        else:
            logging.warn(result.content)
    except Exception:  # narrowed from bare except; fall through to tinyurl
        logging.warn("Unexpected error.")
    try: #tinyurl
        short_url = urllib.urlopen("http://tinyurl.com/api-create.php?url=%s" % urllib.quote(url))
        self.response.out.write(short_url.read())
        return
    except Exception:  # narrowed from bare except
        logging.warn("Unexpected error.")
    # All three shorteners failed.
    self.error(400)
示例5: run
def run( self ):
    """Executes the body of the script.

    Downloads PacBio secondary-analysis output for self.jobNumber from
    the portal at self.server. Depending on self.opts.ext it either
    shells out to wget (fastq / cmp.h5) or scrapes the job's
    smrtpipe.log for raw-data paths and downloads each bax.h5/bas.h5
    pair over HTTP. Returns 0 unconditionally.
    """
    logging.info("Log level set to INFO")
    logging.debug("Log Level set to DEBUG")
    jobNumber = self.jobNumber
    #print jobNumber
    #print jobNumber[:3]
    portalURL = self.server
    # Path prefixes: ``home`` is the literal path as it appears in the
    # log file; ``splicehome`` is the suffix used to splice a portal URL.
    home = '/home/sbsuser/pacbio/raw/'
    splicehome = '/pacbio/raw'
    ext = self.opts.ext
    #print ext
    records = set()
    if ext == "ccs_reads.fastq":
        # Job numbers are sharded by their first three digits on the portal.
        cmd = 'wget http://node1.1425mad.mssm.edu/pacbio/secondary/%s/%s/data/ccs_reads.fastq' % (jobNumber[:3],jobNumber)
        logging.info(cmd)
        os.system(cmd)
    elif ext == "cmp.h5":
        #logIn = urllib.urlopen('http://node1.1425mad.mssm.edu/pacbio/secondary/%s/%s/data/aligned_reads.cmp.h5' % (jobNumber[:3],jobNumber))
        cmd = 'wget http://node1.1425mad.mssm.edu/pacbio/secondary/%s/%s/data/aligned_reads.cmp.h5' % (jobNumber[:3],jobNumber)
        logging.info(cmd)
        os.system(cmd)
    elif ext == "bax.h5":
        # Scrape the pipeline log for lines mentioning the raw-data dir.
        logIn = urllib.urlopen('http://node1.1425mad.mssm.edu/pacbio/secondary/%s/%s/log/smrtpipe.log' % (jobNumber[:3],jobNumber))
        for line in logIn:
            if home in line:
                ll = line.split(" ")
                #print "starting a new set:"
                #print ll
                #print "raw: ", line
                # The path is assumed to be the third-to-last token --
                # depends on the exact smrtpipe.log format (TODO confirm).
                line = ll[-3]
                #print "split: ", line
                #print portalURL
                #print line[line.find(splicehome):line.find("bax.h5")]
                #print ext
                # Skip entries whose spliced path lacks a movie name ("m...").
                if not "m" in line[line.find(splicehome):line.find("bax.h5")]:
                    continue
                # Queue both the bax.h5 and the matching bas.h5 file
                # (the -2 trims the trailing "x." before "bax.h5").
                records.add(portalURL+line[line.find(splicehome):line.find("bax.h5")] + ext)
                records.add(portalURL+line[line.find(splicehome):line.find("bax.h5")-2] + "bas.h5")
                #print records
    else:
        print >>sys.stderr, "Not supported file type!"
    # Download every queued URL, optionally prefixing the local filename.
    for address in records:
        logging.info(address)
        fileIn = urllib.urlopen(address)
        if self.opts.noprefix:
            fileout = open(address.split('/')[-1],'w')
        else:
            fileout = open(self.prefix+address.split('/')[-1],'w')
        fileout.write(fileIn.read())
        fileout.close()
    return 0
示例6: downloadPSSOPage
def downloadPSSOPage(self):
    """Log in to the PSSO portal with the credentials from the two line
    edits, crawl through to the grade-summary page and display the
    parsed results, reporting failures via error popups."""
    user = self.lineEdit.text()
    pw = self.lineEdit_2.text()
    login_url = 'https://psso.fh-koeln.de/qisserver/rds?state=user&type=1&category=auth.login&startpage=portal.vm&breadCrumbSource=portal'
    params = urllib.urlencode({'asdf': user, 'fdsa': pw, 'submit':'Login'}) # lol
    login_page = urllib.urlopen(login_url, params).read()
    if not noten.checkLogin(login_page):
        self.showErrorPopup(u"Sind die Login Daten möglicherweise falsch?")
        return
    try:
        # From here on we behave like a crawler, following links page by page.
        exam_page = urllib.urlopen(noten.getLinkByName(login_page, "Prüfungsverwaltung")).read()
        grades_page = urllib.urlopen(noten.getLinkByName(exam_page, "Notenspiegel")).read()
        degree_page = urllib.urlopen(noten.getLinkByName(grades_page, re.compile("Abschluss"))).read()
        detail_page = urllib.urlopen(noten.getLinkByGraphic(degree_page, "/QIS/images//his_info3.gif")).read()
    except TypeError as e:
        # A helper returned None: the portal layout has changed.
        self.showErrorPopup(u"Scheinbar haben sich die PSSO Seiten verändert… Sag' bitte Hugo bescheid, damit er das Programm reparieren kann!")
        return
    try:
        anz_noten, anz_credits, schnitt = noten.getInfos(detail_page)
        name = noten.getStudentName(detail_page)
        self.presentResults(anz_noten, anz_credits, schnitt, name)
    except noten.ParsingError as e:
        self.showErrorPopup(str(e))
示例7: parseresultpage
def parseresultpage(page, search, order, sort, regex):
    """Scrape one page of GitHub code-search results and feed every raw
    file URL found there into the configured scanner (custom regex or
    the WordPress-specific search)."""
    logger.info(" [+] Pulling results from page " + str(page))
    base = "https://github.com/search?"
    query = {"o": order, "p": page, "q": search, "s": sort, "type": "Code", "ref": "searchresults"}
    listing_html = urlopen(base + str(urlencode(query))).read()
    listing = BeautifulSoup(listing_html, "html.parser")
    # Each hit lives in a div.code-list-item on the results page.
    hits = listing.findAll("div", attrs={"class": "code-list-item"})
    hits_soup = BeautifulSoup(str(hits), "html.parser")
    for path_tag in hits_soup.findAll("p", attrs={"class": "full-path"}):
        path_soup = BeautifulSoup(str(path_tag), "html.parser")
        for anchor in path_soup.findAll("a"):
            file_url = "https://github.com" + str(anchor["href"])
            file_html = urlopen(file_url).read()
            file_soup = BeautifulSoup(str(file_html), "html.parser")
            # The file view links to its raw version via a#raw-url.
            for raw_anchor in file_soup.findAll("a", attrs={"id": "raw-url"}):
                raw_url = "https://github.com" + str(raw_anchor["href"])
                if args.custom_regex:
                    searchcode(raw_url, regex)
                else:
                    wpsearchcode(raw_url, regex)
示例8: main_loop
def main_loop(self):
while True:
try:
time.sleep(1)
this_dir = os.listdir(os.getcwd())
that_dir = eval(urllib.urlopen(self.url + "/list/" + self.username + "/" + self.password).read())
if str(this_dir) != str(that_dir):
for this in this_dir:
if this not in self.files and this != sys.argv[0]:
with open(this, "rb") as md5file:
print "added", this
self.files[this] = hashlib.md5(md5file.read()).hexdigest()
if this not in that_dir and this != sys.argv[0]:
thread.start_new_thread(self.upload, (this,))
for that in that_dir:
if that not in this_dir:
thread.start_new_thread(self.download, (that,))
for file in self.files:
try:
with open(file, "rb") as check_file:
check = hashlib.md5(check_file.read()).hexdigest()
if check != self.files[file]:
print file, "changed"
urllib.urlopen(
self.url + "/delete/" + self.username + "/" + self.password + "/" + file
)
self.files[file] = check
thread.start_new_thread(self.upload, (file,))
except IOError:
pass
except IOError:
print "It seems as though your server is down, please check it."
time.sleep(60)
示例9: get_cur
def get_cur(self):
    """Resolve the version/type/date triple for the current build.

    Reads overrides from self.opts (version, type, branch, hash, date),
    picks the MythTV git branch to track, and -- unless building a
    tagged release -- queries the GitHub API to fill in the missing
    commit hash and/or commit date. Stores the result in self.cur as
    (version, type, date).
    """
    # Index into this tuple is the numeric release "type"; 4 (None)
    # means a tagged/final release.
    types = ('alpha','beta','pre','rc',None,'p')
    version = self.base[0]
    if self.opts.version is not None:
        version = self.opts.version
    type = self.base[1]
    if self.opts.type == 'tagged':
        # Tagged builds pin to the tag itself, so no commit hash applies.
        type = 4
        self.opts.hash = None
    elif self.opts.type is not None:
        type = types.index(self.opts.type)
    if self.opts.branch:
        self.branch = self.opts.branch
    elif type < 4:
        # Pre-release types track master.
        self.branch = 'master'
    else:
        # Releases track the fixes/X.Y branch for the major.minor version.
        self.branch = 'fixes/{0}'.format('.'.join(version.split('.')[0:2]))
    if type != 4:
        if self.opts.hash is None:
            # No hash given: take the branch tip from the GitHub API.
            commit = json.load(urllib.urlopen("https://api.github.com/repos/mythtv/MythTV/commits/" + urllib.quote(self.branch, '')))
            self.opts.hash = commit['sha']
            if self.opts.date is None:
                self.opts.date = process_date(commit['commit']['committer']['date']).strftime('%Y%m%d')
            print "Autoselecting hash: "+self.opts.hash
        elif self.opts.date is None:
            # Hash given but no date: look up that commit's date.
            commit = json.load(urllib.urlopen("https://api.github.com/repos/mythtv/MythTV/commits/" + self.opts.hash))
            self.opts.date = process_date(commit['commit']['committer']['date']).strftime('%Y%m%d')
    self.cur = (version, type, self.opts.date)
    if self.opts.verbose: print 'New version set to: {0}-{1}'.format(self.name,self.get_version(self.cur))
示例10: update_urls
def update_urls():
    """Check every registered URL using a round-robin pool of workers and
    push each response to that URL's subscriber callbacks."""
    worker_pool = itertools.cycle(models.get_workers())
    for target in models.get_urls_to_check():
        result = _make_request(worker_pool.next(), target)
        # Fan the response out to everyone subscribed to this URL.
        for sub in target.subscribers:
            urllib.urlopen(sub.callback, data=result)
示例11: __init__
def __init__(self, versions):
    """Collect all Fixed JIRA issues for the given fix versions.

    Builds a field-name -> field-id map from the JIRA /field endpoint,
    then pages through the search API 100 issues at a time, storing the
    results in self.jiras and an iterator over them in self.iter.

    Raises:
      Exception: when the JIRA search response contains errorMessages.
    """
    self.versions = versions
    resp = urllib.urlopen("https://issues.apache.org/jira/rest/api/2/field")
    data = json.loads(resp.read())
    self.fieldIdMap = {}
    for part in data:
        self.fieldIdMap[part['name']] = part['id']
    self.jiras = []
    at=0
    end=1
    count=100
    # ``end`` is updated from the response's 'total' on each page.
    while (at < end):
        params = urllib.urlencode({'jql': "project in (HADOOP,HDFS,MAPREDUCE,YARN) and fixVersion in ('"+"' , '".join(versions)+"') and resolution = Fixed", 'startAt':at, 'maxResults':count})
        resp = urllib.urlopen("https://issues.apache.org/jira/rest/api/2/search?%s"%params)
        data = json.loads(resp.read())
        # ``in`` replaces the deprecated (removed in Python 3) dict.has_key().
        if 'errorMessages' in data:
            raise Exception(data['errorMessages'])
        at = data['startAt'] + data['maxResults']
        end = data['total']
        self.jiras.extend(data['issues'])
    self.iter = self.jiras.__iter__()
示例12: SendSms
def SendSms(self,text="Default text message"):
    """Send *text* as an SMS through the configured HTTP gateway.

    Rate-limited: a message is only sent when more than self.min_period
    seconds have elapsed since the last send (or nothing was sent yet).
    In debug mode the send is simulated. Returns 1 on (simulated)
    success, 0 on delivery failure; returns None implicitly when the
    rate limit suppresses the send.

    NOTE(review): deltaT.seconds ignores the .days component, so gaps
    longer than a day wrap around -- probably unintended; confirm before
    changing. Indentation reconstructed from an unindented source; the
    debug-vs-send branch pairing is the most plausible reading.
    """
    deltaT=datetime.now()-self.last_sms
    if deltaT.seconds>self.min_period or self.sent==0:
        if self.settings.debug>0:
            # Debug mode: log instead of contacting the gateway.
            logging.info("Debug SMS sent %s " %time.strftime("%I:%M:%S %p", time.localtime()))
            self.sent=1
            return 1
        else:
            self.last_sms=datetime.now()
            self.config={}
            self.config['username']=self.username
            self.config['password']=self.password
            #needs to be changed with international prefix what an hassle!
            self.config['to']=self.to
            self.config['from']=self.sender
            self.config['text']=text
            self.config['maxconcat']=self.MaxConCatMsgs
            query = urllib.urlencode(self.config)
            try:
                if self.UseProxies>0 :
                    file = urllib.urlopen(self.url1+self.pushpage, query,proxies=self.proxies)
                else:
                    file = urllib.urlopen(self.url1+self.pushpage, query,proxies=None)
            except IOError, (errno):
                logging.error ("Error delivering online SMS %s " %errno)
                return 0
            self.sent=1
            self.output = file.read()
            file.close()
            logging.info("Message sent to %s from %s" % (self.to, self.sender))
            #return self.ParseRequest()
            return 1
示例13: _TreeStatusTestHelper
def _TreeStatusTestHelper(self, tree_status, general_state, expected_return,
                          retries_500=0, max_timeout=0):
    """Tests whether we return the correct value based on tree_status.

    Sets up ordered mox expectations for urllib.urlopen, time.time,
    time.sleep and cros_build_lib.Info, then asserts that
    cros_build_lib.TreeOpen returns expected_return. retries_500
    controls how many initial HTTP-500 responses are simulated before
    the status endpoint answers 200.
    """
    return_status = self._TreeStatusFile(tree_status, general_state)
    self.mox.StubOutWithMock(urllib, 'urlopen')
    status_url = 'https://chromiumos-status.appspot.com/current?format=json'
    backoff = 1
    sleep_timeout = 1
    # Expect one 500 response plus an exponentially growing sleep for
    # each simulated retry.
    for _attempt in range(retries_500):
        urllib.urlopen(status_url).AndReturn(return_status)
        return_status.getcode().AndReturn(500)
        time.sleep(backoff)
        backoff *= 2
    urllib.urlopen(status_url).MultipleTimes().AndReturn(return_status)
    # Time is checked twice to bootstrap.
    start_time = 1
    self.mox.StubOutWithMock(time, 'time')
    time.time().AndReturn(start_time)
    time.time().AndReturn(start_time)
    if expected_return == False:
        # Walk the mocked clock forward until TreeOpen hits max_timeout.
        for time_plus in xrange(max_timeout + 1):
            time.time().AndReturn(start_time + time_plus)
    self.mox.StubOutWithMock(cros_build_lib, 'Info')
    cros_build_lib.Info(mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes()
    time.sleep(sleep_timeout).MultipleTimes()
    return_status.getcode().MultipleTimes().AndReturn(200)
    return_status.read().MultipleTimes().AndReturn(return_status.json)
    self.mox.ReplayAll()
    self.assertEqual(cros_build_lib.TreeOpen(status_url, sleep_timeout,
                                             max_timeout), expected_return)
    self.mox.VerifyAll()
示例14: getTiempo
def getTiempo():
dicc = {}
code = urllib.urlopen("http://weather.yahooapis.com/forecastrss?w=468739&u=c")
tempsmn = urllib.urlopen("http://www.smn.gov.ar/layouts/temperatura_layout.php").read().decode('iso-8859-1').encode("utf-8").split("º")[0]
parse = et.parse(code)
root = parse.getroot()
tiempo = root[0][12][5].attrib
dicc['temperatura'] = str(tempsmn)
if (dicc['temperatura'] == ""):
print getTime(), "[Tiempo] ADV: Temperatura de Yahoo"
dicc['temperatura'] = str(tiempo['temp'])
estadosArray = ['Tornado', 'Tormenta tropical', 'Huracán', "Tormentas fuertes", "Tormenta", "Lluvia y nieve", "Lluvia y aguanieve", "Aguanieve y nieve", "Llovizna helada", "Llovizna", "Lluvia helada", "Lluvia", "Lluvia", "Copos de nieve", "Lluvia ligera", "Nieve y viento", "Nieve", "Granizo", "Aguanieve", "Polvo", "Brumoso", "Neblina", "Humo", "Un poco ventoso", "Ventoso", "Frío", "Nublado", "Parcialmente nublado", "Parcialmente nublado", "Un poco nublado", "Un poco nublado", "Despejado", "Soleado", "Templado", "Templado", "Lluvia con piedras", "Caluroso", "Tormentas aisladas", "Tormentas dispersas", "Tormentas dispersas", "Lluvias Dispersas", "Fuertes Nevadas", "Nevada Leve Dispersa", "Fuertes Nevadas", "Parcialmente Nublado", "Chaparrón", "Nevada Leve", "Chaparrones Aislados"]
dicc['estado'] = estadosArray[int(tiempo['code'])]
dicc['icono'] = 'http://l.yimg.com/a/i/us/nws/weather/gr/'+ str(tiempo['code']) + dayOrNight(tiempo['date']) + '.png'
dicc['minima'] = root[0][12][7].attrib['low']
dicc['maxima'] = root[0][12][7].attrib['high']
pronextarray = []
dicc['extendido'] = pronextarray
for x in range(8,12):
pronextarray.append({"dia" : root[0][12][x].attrib['day'], "fecha" : root[0][12][x].attrib['date'], "minima" : root[0][12][x].attrib['low'], "maxima" : root[0][12][x].attrib['high'], "estado" : estadosArray[int(root[0][12][x].attrib['code'])], "icono" : 'http://l.yimg.com/a/i/us/nws/weather/gr/'+ str(root[0][12][x].attrib['code']) + 'd.png'})
jsonString = json.dumps(dicc)
jsonFile = open("../servicios/tiempo/serv.json", 'w')
jsonFile.write(jsonString)
jsonFile.close()
print getTime(), "[Tiempo] JSON editado"
threading.Timer(1800.0, getTiempo).start()
示例15: _stop_server
def _stop_server(cls):
    """Ask the test server to shut itself down over HTTP, then wait for
    its serving thread to exit."""
    import urllib # Yup, that's right.
    shutdown_url = '%s://%s:%s/shutdown' % (cls.scheme, cls.host, str(cls.port))
    try:
        urllib.urlopen(shutdown_url)
    except IOError:
        # The server may already be gone; that's fine.
        pass
    cls.server_thread.join()