This article collects typical usage examples of `parse` functions in Python, centered on the standard-library urllib.parse module. If you have been wondering what `parse` does, how it is used, or what real code that calls it looks like, the curated samples below may help.
Fifteen code examples of `parse` follow, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps surface better Python code samples.
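Before diving into the examples, here is a minimal sketch of the module's two namesake helpers, urlparse and parse_qs, both from the standard-library urllib.parse (the toy URL is made up for illustration):

from urllib.parse import urlparse, parse_qs

parts = urlparse('https://example.com/search?q=python&page=2')
print(parts.scheme)           # 'https'
print(parts.netloc)           # 'example.com'
print(parts.path)             # '/search'
print(parse_qs(parts.query))  # {'q': ['python'], 'page': ['2']}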
Example 1: main
def main():
    # Assumes a module-level Slack RTM client instance `client`,
    # the `parse` helper under discussion, and `import time`.
    connect = client.rtm_connect()
    if not connect:
        print('Slack RTM Connect Error!')
        return
    print('Slack RTM Connect Success!')
    while True:
        for data in client.rtm_read():
            if data['type'] == 'message' and 'bot_id' not in data:
                parse(data['text'])
        time.sleep(0.1)
Example 2: __init__
def __init__(self, url, key=None, secret=None, expiration_days=0,
             private=False, content_type=None, create=True):
    # Assumes `from io import StringIO`, `import mimetypes`, and a
    # module-level `parse` (here urllib.parse.urlparse).
    from boto.s3.connection import S3Connection
    from boto.s3.key import Key
    self.url = parse(url)
    self.expiration_days = expiration_days
    self.buffer = StringIO()
    self.private = private
    self.closed = False
    self._readreq = True
    self._writereq = False
    self.content_type = content_type or mimetypes.guess_type(self.url.path)[0]
    bucket = self.url.netloc
    if bucket.endswith(".s3.amazonaws.com"):
        bucket = bucket[:-len(".s3.amazonaws.com")]
    self.client = S3Connection(key, secret)
    self.name = "s3://" + bucket + self.url.path
    if create:
        self.bucket = self.client.create_bucket(bucket)
    else:
        self.bucket = self.client.get_bucket(bucket, validate=False)
    self.key = Key(self.bucket)
    self.key.key = self.url.path.lstrip("/")
    self.buffer.truncate(0)
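For reference, a quick sketch of how urllib.parse.urlparse splits the kind of s3:// URL this constructor expects (bucket and key names are made up):

from urllib.parse import urlparse

u = urlparse('s3://my-bucket.s3.amazonaws.com/reports/2024.csv')
print(u.netloc)  # 'my-bucket.s3.amazonaws.com'  -> bucket
print(u.path)    # '/reports/2024.csv'           -> key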
Example 3: is_open
def is_open(dpmt, course, crn):
    base = "http://my.illinois.edu"
    page = blogotubes('http://www.courses.illinois.edu')
    if not page:
        print(page); return -1
    url = geturl(page, 'Class Schedule')
    if not url:
        print(url); return -1
    page = blogotubes(base+url)
    if not page:
        # The original printed 'lol'+page here, which raises TypeError
        # when page is None; printing the value matches the other branches.
        print(page); return -1
    url = geturl(page, dpmt)
    if not url:
        print(url); return -1
    page = blogotubes(base+url)  # Get list of courses in dpmt
    if not page:
        print(page); return -1
    url = geturl(page, course)
    if not url:
        print(url); return -1
    page = blogotubes(base+url)  # Get list of sections in course
    if not page:
        print(page); return -1
    result = parse(page, crn)  # Parse openness of section
    if result:
        return 1
    else:
        return 0
Example 4: migrate
def migrate(path, name):
    print('----%s----' % name)
    input_f = open(path, 'r', encoding='utf-8')
    quotes = []
    prev = ''
    for line in input_f.readlines():
        text, page = parse(line, prev)
        if len(page) > 0:
            verifyPage(page, line)
            pair = {'text': text.lstrip(), 'page': page}
            quotes.append(pair)
            prev = ''
        else:
            prev = text
    input_f.close()
    if len(prev):
        # Flush any leftover text as a final quote. (The original mutated
        # the last already-appended pair here, which looks like a bug.)
        quotes.append({'text': prev, 'page': 0})
    book = {
        'title': name,
        'quotes': quotes
    }
    return book
Example 5: onSend
def onSend(self):
    self.getFields()
    message = self.generateMessage().toLatin1()
    subject = self.subject_.toLatin1()
    # The original called urllib.parse(...) directly, but the module is not
    # callable; urllib.parse.urlencode is what builds a form-encoded body.
    params = urllib.parse.urlencode({
        "kontakt": "cad",
        "from_name": self.name_,
        "from_mail": self.email_,
        "subject": subject,
        "kommentar": message,
    })
    headers = {"Content-type": "application/x-www-form-urlencoded",
               "Accept": "text/plain"}
    conn = http.client.HTTPConnection("www.ipek.uni-karlsruhe.de:80")
    conn.request("POST", "/cms/de/kontakt/kontakt.php", params, headers)
    response = conn.getresponse()
    print(response.status, response.reason)
    data = response.read()
    conn.close()
    self.close()
    return
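As a standalone illustration, urllib.parse.urlencode turns a dict like the one above into an application/x-www-form-urlencoded body (the field values here are made up):

from urllib.parse import urlencode

print(urlencode({'kontakt': 'cad', 'from_name': 'Jane Doe'}))
# kontakt=cad&from_name=Jane+Doe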
Example 6: process_response
def process_response(self, r):
    # Assumes `from lxml import html` and `from itertools import chain`;
    # the nested `parse` shadows the module-level name on purpose.
    def parse(item, type):
        text = item.xpath('.//td[3]/text()')[0].strip()
        context = item.xpath('.//td[@class="codeContext"]/text()')
        where = item.xpath('.//td[@class="linenumber"]/text()')[0]
        return {
            'type': type,
            'text': text,
            'context': context[0] if context else '',
            'where': where
        }
    doc = html.document_fromstring(r)
    return chain((parse(item, 'Error')
                  for item in doc.xpath('//div[@id="errors"]//tr')),
                 (parse(item, 'Warning')
                  for item in doc.xpath('//div[@id="warnings"]//tr')))
Example 7: list_pages
def list_pages(namespace_url=None):
    # `parse` here appears to be lxml.html.parse (it returns an element
    # tree); urllib.parse.urljoin resolves the relative namespace links.
    list_url = namespace_url or INDEX_INDEX
    print('Crawling {}'.format(list_url))
    tree = parse(list_url)
    for a in tree.xpath('//a[@class="twikilink"]'):
        name = a.text.strip()
        url = a.attrib['href']
        if namespace_url:
            yield (name,), url
        else:
            yield ('Main', name), url
    if not namespace_url:
        namespaces = tree.xpath(
            '//a[starts-with(@href, "index_report.php?groupname=")]'
        )
        for a in namespaces:
            namespace = a.text.strip()
            url = urllib.parse.urljoin(INDEX_INDEX, a.attrib['href'])
            for key, value in list_pages(url):
                assert len(key) == 1
                yield (namespace,) + key, value
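The urljoin call above resolves a relative href against the index page it was scraped from; a tiny sketch with a made-up base URL:

from urllib.parse import urljoin

base = 'http://example.com/wiki/index_report.php'
print(urljoin(base, 'index_report.php?groupname=Main'))
# http://example.com/wiki/index_report.php?groupname=Main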
Example 8: convert
def convert(data, field):
    if isinstance(data, Literal):
        data = data.value
    if isinstance(data, URIRef):
        return str(data)
    if isinstance(field, IndexedLanguageField):
        lng = {}
        for d in data:
            lang = d.language
            if not lang:
                lang = 'null'
            lng[lang] = str(d)
        return lng
    if isinstance(data, list):
        return [x for x in [convert(x, field) for x in data] if x]
    elif isinstance(field, IndexedDateTimeField):
        if data is None:
            return None
        if isinstance(data, str):
            data = parse(data)  # `parse` here is a date-string parser
        return data.strftime('%Y-%m-%dT%H:%M:%S')
    elif data and isinstance(data, FedoraObject):
        return data.id
    return data
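In this example `parse` is clearly a date-string parser rather than anything from urllib.parse; a likely candidate (an assumption, since the imports are not shown) is dateutil:

# Assumption: the example's `parse` behaves like dateutil.parser.parse
from dateutil.parser import parse

dt = parse('2015-03-14 09:26:53')
print(dt.strftime('%Y-%m-%dT%H:%M:%S'))  # 2015-03-14T09:26:53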
Example 9: fetch_full_search_results
def fetch_full_search_results(course_search_url, partial_search_results):
    params = {
        'ICAJAX': '1',
        'ICType': 'Panel',
        'ICElementNum': '0',
        'ICStateNum': '57',
        'ICAction': '$ICField106$hviewall$0',
        'ICXPos': '0',
        'ICYPos': '0',
        'ICFocus': '',
        'ICSaveWarningFilter': '0',
        'ICChanged': '-1',
        'ICResubmit': '0',
        'ICModalWidget': '0',
        'ICZoomGrid': '0',
        'ICZoomGridRt': '0',
        'ICModalLongClosed': '',
        'ICActionPrompt': 'false',
        'ICFind': '',
        'ICAddCount': '',
    }
    dynamic_keys = ('ICSID', 'ICStateNum')
    dynamic_params = {}
    for key in dynamic_keys:
        # `parse` here looks like a CSS-selector helper that pulls hidden
        # form fields out of the previous response.
        dynamic_params[key] = parse('form[name=win0] input[name=%s]' % key,
                                    partial_search_results)[0]['value']
    params.update(dynamic_params)
    return fetch(course_search_url, params)
Example 10: main
def main():
    ''' Download the page, solve the captcha image via OCR, and post the result. '''
    cj = http.cookiejar.CookieJar()
    opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
    urllib.request.install_opener(opener)
    inputhtml = urllib.request.urlopen(URL1).readlines()
    print(cj)
    imgdata = parse(inputhtml)  # custom parser extracting the image data
    writedata('img.png', imgdata)
    ocrfix()
    password = ocrdecode()
    print(password)
    postdata = post_data(password)
    print(postdata)
    responsehtml = urllib.request.urlopen(URL1, postdata).readlines()
    resultlines = [line.decode('utf-8') for line in responsehtml]
    for r in resultlines:
        print(r)
Example 11: detect_redirect
def detect_redirect(self):
    # urlparse is canonically imported from urllib.parse; the original's
    # urllib.request.urlparse only works as an incidental re-export.
    parse = urllib.parse.urlparse
    # the original url
    org_url = self.url_data
    # get an opener doing redirections
    try:
        opener = self._create_fetcher(redirect_handler=False)
        response = opener.open(self.url)
    except:
        raise UnknownHostName(self.url)
    # the new url
    new_url = parse(response.geturl())
    # detect a redirection
    new_loc = new_url.scheme + '://' + new_url.netloc
    org_loc = org_url.scheme + '://' + org_url.netloc
    self.is_redirected = new_loc != org_loc
    if self.is_redirected:
        self.printer.print_debug_line('%s redirects to %s' % (org_loc, new_loc), 2)
    else:
        self.printer.print_debug_line('%s does not redirect' % (org_loc,), 2)
    # create a response object and add it to the cache
    R = _create_response(response)
    self.cache[new_loc] = R
    self.cache[self.url] = R
    return (self.is_redirected, new_loc)
Example 12: iratingchart
def iratingchart(self, custid=None, category=ct.IRATING_ROAD_CHART):
    """ Gets the irating data of a driver by customer id (custid),
    which generates the chart shown in the driver's profile. """
    r = self.__req(ct.URL_STATS_CHART % (custid, category),
                   cookie=self.last_cookie)
    return parse(r)
Example 13: parseParms
def parseParms(xfile):
    # `parse` here is an XML parser, most likely xml.etree.ElementTree.parse.
    if debugMode():
        print("parseParms:", xfile)
    pdict = {}
    try:
        os.stat(xfile)  # existence check; the stat result itself is unused
    except OSError:
        print("Error, file", xfile, "not found")
        return None
    try:
        t = parse(xfile)
    except Exception:
        print("Error, could not parse", xfile)
        return None
    root = t.getroot()
    for k in list(root):
        pdict[k.tag] = k.text
    return pdict
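Assuming the ElementTree reading of `parse`, the tag-to-text extraction can be sketched self-contained (the XML snippet is made up):

import xml.etree.ElementTree as ET

root = ET.fromstring('<parms><host>localhost</host><port>8080</port></parms>')
print({child.tag: child.text for child in root})
# {'host': 'localhost', 'port': '8080'}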
Example 14: __check_cookie
def __check_cookie(self):
    """ Checks the cookie by testing a request response. """
    r = parse(self.__req(ct.URL_DRIVER_COUNTS, cookie=self.last_cookie))
    return isinstance(r, dict)
Example 15: get_article
def get_article(self, candidates, best_candidate):
    # Now that we have the top candidate, look through its siblings for
    # content that might also be related: preambles, content split by ads
    # that we removed, etc.  `parse` here builds a soup element from markup
    # (BeautifulSoup-style, judging by .append and NavigableString).
    sibling_score_threshold = max(10, best_candidate['content_score'] * 0.2)
    output = parse("<div/>")
    for sibling in best_candidate['elem'].parent.contents:
        if isinstance(sibling, NavigableString):
            continue
        append = False
        if sibling is best_candidate['elem']:
            append = True
        sibling_key = HashableElement(sibling)
        if sibling_key in candidates and \
                candidates[sibling_key]['content_score'] >= sibling_score_threshold:
            append = True
        if sibling.name == "p":
            link_density = self.get_link_density(sibling)
            node_content = sibling.string or ""
            node_length = len(node_content)
            if node_length > 80 and link_density < 0.25:
                append = True
            elif node_length < 80 and link_density == 0 \
                    and re.search(r'\.( |$)', node_content):
                append = True
        if append:
            output.append(sibling)
    if not output:
        output.append(best_candidate)
    return output