本文整理汇总了Python中urllib.request.unquote函数的典型用法代码示例。如果您正苦于以下问题:Python unquote函数的具体用法?Python unquote怎么用?Python unquote使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了unquote函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: handle_request
def handle_request(self, data=None):
    """Handles both POST and GET reqs.

    In case of GET there's no data.  The request path is resolved against the
    registered URL patterns; regex capture groups extracted from the path are
    passed as extra positional args to the matched end-handler func.
    """
    thread_name = threading.current_thread().name
    print(thread_name, self.raw_requestline)
    # (url) unquote the request path ONCE so that eventual unicode codes
    # (%<code>) are converted back to unicode chars.  The original version
    # ran unquote + re.fullmatch twice per pattern (once for the value,
    # once in the filter) -- match once and reuse the result.
    path = unquote(self.path)
    candidates = ((re.fullmatch(url_pattern, path), action)
                  for url_pattern, action in BackOfficeReqHandler.REQUEST_HANDLERS.items())
    delegations = [(match, action) for match, action in candidates if match is not None]
    # for an existing request path there should be exactly one handler func.
    if len(delegations) == 1:
        match, handler = delegations[0]
        args = (self,)
        if data is not None:  # if there is POST data
            args += (data,)
        # extra args extracted from the request url (e.g. user, month, year)
        args += match.groups()
        try:
            return handler(*args)  # call the appropriate handler func
        finally:
            self.wfile.flush()
    else:  # error: page doesn't exist
        self.send_response(404)
        self.end_headers()
        self.wfile.write(str.encode("The requested page {page} is not found!".format(page=self.path), 'utf-8'))
        self.wfile.flush()
        return
示例2: google
def google(bot, nick, chan, arg):
    """ google <arg> -> Return the google result for <arg> """
    if not arg:
        return bot.msg(chan, get_doc())
    args = arg.split()
    print(args)
    # a leading "-N" flag selects how many results to report;
    # \d+ (not \d*) so a bare "-" doesn't crash on int('')
    if re.match(r"-\d+", args[0]):
        count = int(args[0][1:])
        query = ' '.join(args[1:])
        print(count, query)
    else:
        count = 1
        query = arg
    url = "http://ajax.googleapis.com/ajax/services/search/web"
    params = {"v": "1.0", "safe": "off", "q": query}
    data = requests.get(url, params=params)
    data = data.json()
    results = data["responseData"]["results"]
    if not results:
        # BUG FIX: must return here -- previously execution fell through
        # into the loop below and raised IndexError on results[0]
        return bot.msg(chan, "%s: No results found." % (nick))
    # never report more results than the API actually returned
    for i in range(min(count, len(results))):
        result_url = unquote(unquote(results[i]["url"]))
        result_title = unescape(results[i]["titleNoFormatting"])
        bot.msg(chan, "\x02%s\x02 ⟶ %s" % (bot.style.color(result_title, color="grey"), bot.style.underline(bot.hicolor(result_url))))
示例3: get_file_route
def get_file_route(path, filename):
    """Serve *filename* from below LOCAL_SAVE_LOCATION.

    Directory targets get a permanent redirect to their trailing-slash form;
    plain files are streamed back (with url-unquoted path components).
    """
    clean_path = path.replace('//', '/')
    base_dir = client_settings['LOCAL_SAVE_LOCATION']
    file_dir = '%s/%s' % (base_dir, clean_path)
    target = '%s/%s' % (file_dir, filename)
    if os.path.isdir(target):
        return redirect('%s/' % target, 301)
    return send_from_directory(unquote(file_dir), unquote(filename))
示例4: prefix_query
def prefix_query(self, prefix, include_doc=True):
    """Return every stored entry whose key starts with *prefix*'s path.

    Matching keys are url-unquoted and split on '!'.  When *include_doc* is
    true each result is a ``{'key': ..., 'doc': ...}`` dict, otherwise only
    the split key list is returned.
    """
    prefix_path = self.get_path(prefix)
    if include_doc:
        return [{'key': unquote(key).split('!'), 'doc': doc}
                for key, doc in self.data.items()
                if key.startswith(prefix_path)]
    return [unquote(key).split('!')
            for key in self.data
            if key.startswith(prefix_path)]
示例5: add_song
def add_song(self):
    """Parse artist/album/song names out of the request URL and queue the song."""
    # URL layout assumed: /<prefix>/<artist>/<album>/<song>
    segments = self.path.split("/")
    artist_name, album_name, song_name = (unquote(s) for s in segments[2:5])
    # resolve the decoded names to collection objects
    artist_obj = collection.get_artist(artist_name)
    album_obj = artist_obj.find_album(album_name)
    song_obj = album_obj.find_song(song_name)
    # hand the resolved file path to the player queue
    music.add_song(song_obj.path)
示例6: parseRequest
def parseRequest(self, strng, encoding):
    """Parse a url-encoded query string into ``self.parameters``.

    Each decoded key maps to a list of decoded values; repeated keys append
    to the existing list.  Keys and values are unquoted with *encoding* and
    '+' is converted back to a space.  Pairs without '=' are ignored.
    """
    self.original_request = strng
    for pair in strng.split('&'):
        parts = pair.split('=')
        if len(parts) > 1:
            # decode once and reuse (the original decoded each side twice)
            key = REQUEST.unquote(parts[0], encoding).replace('+', ' ')
            value = REQUEST.unquote(parts[1], encoding).replace('+', ' ')
            # BUG FIX: the membership test previously used the RAW key
            # while entries were stored under the DECODED key, so any
            # encoded/'+'-containing repeated key overwrote its list
            # instead of appending to it
            if key in self.parameters:
                self.parameters[key].append(value)
            else:
                self.parameters[key] = [value]
示例7: buildRequest
def buildRequest(self, strVar, query, isCmd, isHeader, header=None):
    """Expand the placeholder tokens in the request template *strVar*.

    Placeholders: ``[random]`` -> random 16-char string, ``[eq]`` -> '=',
    ``[cmd]``/``[sub]`` -> *query* (which one depends on *isCmd*),
    ``[blind]`` -> *query*, ``[semicolon]`` -> ';' (cookie mode only).

    :param strVar: request template containing the placeholders
    :param query: payload substituted for [cmd]/[sub]/[blind]
    :param isCmd: when true *query* replaces [cmd] (and [sub] becomes
        "null"); otherwise [cmd] fragments are stripped and *query*
        replaces [sub]
    :param isHeader: true when the template is an HTTP header value
    :param header: header name; "cookie" triggers the special quoting path
    :return: the expanded (and, for cookies, re-quoted) request string
    """
    if "[random]" in strVar:
        strVar = strVar.replace("[random]", core.txtproc.rndString(16))
    if isHeader:
        if (header == "cookie"):
            # Cookie values must be url-quoted, but the cookie STRUCTURE
            # ('; ' separators, '=' between name and value, '[' ']') has
            # to survive the quoting.  Structural chars are protected with
            # sentinel tokens, the whole string is quoted, then the
            # sentinels are restored.  The replacement ORDER below matters:
            # "; " must be replaced before the bare ";" fallback, and
            # [semicolon]/[eq] are expanded before '['/']' are protected.
            query = request.quote(query)
            strVar = strVar.replace("%3b", "[semicolon]")
            strVar = request.unquote(strVar)
            strVar = strVar.replace("; ", "COOKIESEPARATOR").replace("=", "COOKIEEQUAL").replace(";", "COOKIESEPARATOR")
            strVar = strVar.replace("[semicolon]", ";")
            strVar = strVar.replace("[eq]", "=")
            strVar = strVar.replace("[", "LEFTSQBRK").replace("]", "RIGHTSQBRK")
            strVar = request.quote(strVar)
            strVar = strVar.replace("COOKIESEPARATOR", "; ").replace("COOKIEEQUAL", "=")\
                .replace("LEFTSQBRK", "[").replace("RIGHTSQBRK", "]")
        else:
            strVar = strVar.replace("[eq]", "=")
    if isCmd:
        if "[cmd]" in strVar:
            strVar = strVar.replace("[cmd]", query)
        if "[sub]" in strVar:
            # a command request neutralises the [sub] slot
            strVar = strVar.replace("[sub]", "null")
    else:
        if "[cmd]" in strVar:
            # drop the whole ";[cmd]" fragment (plain or %-encoded form)
            strVar = strVar.replace(";[cmd]", "").replace("%3B[cmd]", "")
        strVar = strVar.replace("[sub]", query)
    if "[blind]" in strVar:
        strVar = strVar.replace("[blind]", query)
    return strVar
示例8: _imageinfo_from_filename
def _imageinfo_from_filename(self, path):
    """Build a RemoteImage by parsing a filename of the form
    ``rootfs:<vendor>:<arch>:<version>.<suffix.es>``.
    """
    filename = os.path.basename(path)
    log.debug("Parsing filename: %s" % filename)
    # unquote first: the filename can come from an URL with escaped
    # chars (like the ':' field separators)
    fields = unquote(filename).split(":")
    assert fields.pop(0) == "rootfs", "Only supporting rootfs images"
    image = RemoteImage(self.remote)
    image.path = path
    image.vendorid = fields.pop(0)
    image.arch = fields.pop(0)
    # split an eventual suffix off the version at the first dot
    image.version, _, image.suffix = fields.pop(0).partition(".")
    return image
示例9: __init__
def __init__(self, url, request = None, is_url_page = False):
    """Initialise a catalogue page and read its pagination info.

    :param url: base url of the page; when *is_url_page* is True this is
        the raw page HTML itself rather than an address
    :param request: optional extra path segment appended to *url*
    :param is_url_page: selects between fetching by address (False) and
        using *url* as already-downloaded page content (True)

    NOTE(review): nesting below was reconstructed from a flat listing;
    the base-class __init__ presumably sets self.page -- confirm.
    """
    if request:
        url += '{}/'.format(request)
    if not is_url_page:
        super().__init__(url)
        # canonical url: strip the query string, keep a trailing slash
        self.url = unquote(split(r'\?', url)[0]) + '/'
        # get name of the city (4th path segment), mapped back to the
        # Russian name via the `cities` table
        city = split(r'/', url)[3]
        self.city = ''
        for (ru, en) in cities.items():
            if en == city:
                self.city = ru
                break
    else:
        self.page = url
        self.url = None
    self._bs = BeautifulSoup(self.page, 'html.parser')
    # first whitespace-separated token of the results header is the
    # total number of firms found
    firm_num = split(' ',
                     self._bs.find('h1', class_='searchResults__headerName').text
                     )[0]
    num = toint(firm_num)
    if num:
        # 12 results per page
        self.num_pages = ceil(num/12)
    else:
        self.num_pages = 1
    self.page_num = int(self._bs.find('span', class_='pagination__page _current').string)
示例10: _on_success
def _on_success(self, resp, paging):
    """ This can be overridden in user-defined blocks.
    Defines how successful polling requests will be handled.
    """
    self._reset_retry_cycle()
    signals, paging = self._process_response(resp)
    self.logger.debug('signals pre-remove-duplicates: %s' % signals)
    signals = self._discard_duplicate_posts(signals)
    self.logger.debug('signals post-remove-duplicates: %s' % signals)
    # add the include_query attribute if it is configured
    query_attr = self.include_query()
    if query_attr and signals is not None:
        decoded_query = unquote(self.current_query)
        for signal in signals:
            setattr(signal, query_attr, decoded_query)
    if signals:
        self.notify_signals(signals)
    if not paging:
        self._epilogue()
    else:
        # more pages to fetch: advance and poll again
        self.page_num += 1
        self._paging()
示例11: do_GET
def do_GET(self):
    """Serve an HTML dump of the ninja build node named in the query string."""
    assert self.path[0] == "/"
    target = unquote(self.path[1:])
    # bare "/" redirects to the configured initial target
    if target == "":
        self.send_response(302)
        self.send_header("Location", "?" + args.initial_target)
        self.end_headers()
        return
    # every meaningful request lives behind "?<target>"
    if not target.startswith("?"):
        self.send_response(404)
        self.end_headers()
        return
    ninja_output, ninja_error, exit_code = ninja_dump(target[1:])
    if exit_code == 0:
        page_body = generate_html(parse(ninja_output.strip()))
    else:
        # Relay ninja's error message.
        page_body = "<h1><tt>%s</tt></h1>" % ninja_error
    self.send_response(200)
    self.end_headers()
    self.wfile.write(create_page(page_body).encode("utf-8"))
示例12: xml_get_text
def xml_get_text(_node):
    """Helper function to get character data from an XML tree"""
    # collect every direct text child, then url-unquote the joined result
    fragments = [child.data
                 for child in _node.childNodes
                 if child.nodeType == child.TEXT_NODE]
    return unquote(''.join(fragments))
示例13: get_mp3_from_url
def get_mp3_from_url(url, folder, converse=True):
    """Download the mp3 linked from an incompetech page.

    Results are saved in folder *folder*.
    folder must end with / or \\.
    Relative and absolute paths accepted.

    :param url: page path relative to http://incompetech.com
    :param converse: when true, print a one-line download report
    :return: size in bytes of the downloaded file, or -1 on failure
    """
    prefix = 'http://incompetech.com'
    split_by = '/music/royalty-free/mp3-royaltyfree/'
    middle = split_by
    try:
        webpage = urlreq.urlopen(prefix + url).read().decode('utf-8')
    except Exception as e:
        print(e, 'while doing', url)
        return -1
    pieces = webpage.split(split_by)
    if len(pieces) < 2:
        # BUG FIX: previously an unguarded [1] raised IndexError when the
        # page contained no mp3 link
        print('no mp3 link found while doing', url)
        return -1
    extracted = pieces[1].split('>')[0].strip('"')
    # the link is percent-encoded; decode it for a readable filename
    prettier_name = urlreq.unquote(extracted)
    mp3_data = urlreq.urlopen(prefix + middle + extracted).read()
    # with-statement closes the file even if write fails (the original
    # leaked the handle on error)
    with open(folder + prettier_name, 'wb') as f:
        f.write(mp3_data)
    size = os.stat(folder + prettier_name).st_size
    if converse:
        print('Downloaded', prettier_name, '\twith size of', size_to_units(size), '.')
    return size
示例14: match_data_request
def match_data_request(url):
    """Scrape the ids and the xhash needed for the match-data request
    out of the HTML behind *url*.

    :return: tuple (id_version, id_sport, id_match, xhash)
    """
    html = page_request(url)
    # the xhash is stored percent-encoded in the page source
    xhash = unquote(re.search('"xhash":"(.+?)"', html).group(1))
    id_match = re.search('"id":"(.+?)"', html).group(1)
    # BUG FIX: a trailing non-greedy (.+?) with nothing after it matches
    # exactly ONE character, truncating the ids -- capture the full
    # numeric value instead
    id_sport = re.search(r'"sportId":(\d+)', html).group(1)
    id_version = re.search(r'"versionId":(\d+)', html).group(1)
    return id_version, id_sport, id_match, xhash
示例15: get_access_token
def get_access_token(self,xrenew=False):
    """
    Build the Renren (renren.com) OAuth authorization url, open it in a new
    browser tab, then read the access_token the user pastes back in and
    persist it in the config.

    :param xrenew: when true, force re-acquisition of the access_token
        (used to switch users)
    """
    # Renren authorization endpoint
    url = self.info["AUTHORIZE"]
    # assemble the oauth request fields
    param = { "client_id": self.info["API_KEY"],
              "redirect_uri": self.info["REDIRECT_URL"],
              "response_type": "token",
              "display": "popup"
              }
    if xrenew:
        param["x_renew"] = "True"
    # build the request link and open it in the user's browser
    request = urlencode(param)
    r_url = "%s?%s" % (url,request)
    open_new_tab(r_url)
    # prompt (in Chinese): "please enter the access_token from the browser"
    self.info["ACCESS_TOKEN"] = \
        unquote(input("请输入浏览器中的access_token:\n"))
    self.config.set("Renren", "access_token", self.info["ACCESS_TOKEN"])