This article collects typical usage examples of the unquote function from Python's urlparse module (moved to urllib.parse in Python 3). If you are wondering what unquote does, how to call it, or how it is used in practice, the curated examples below may help.
The following 15 code examples of unquote are drawn from real-world projects and are ordered by popularity by default.
Example 1: _parse_qs
def _parse_qs(qs):
    pairs = (s2 for s1 in qs.split('&') for s2 in s1.split(';'))
    retval = odict()
    for name_value in pairs:
        if name_value is None or len(name_value) == 0:
            continue
        nv = name_value.split('=', 1)
        if len(nv) != 2:
            # Handle case of a control-name with no equal sign
            nv.append(None)
        name = unquote(nv[0].replace('+', ' '))
        value = None
        if nv[1] is not None:
            value = unquote(nv[1].replace('+', ' '))
        l = retval.get(name, None)
        if l is None:
            l = retval[name] = []
        l.append(value)
    return retval
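
A minimal way to exercise _parse_qs, assuming unquote comes from the urlparse module and odict is an ordered mapping (collections.OrderedDict stands in for it here):

from collections import OrderedDict as odict
from urlparse import unquote  # urllib.parse.unquote in Python 3

print(_parse_qs('a=1&a=2;b=hello%20world&flag'))
# OrderedDict([('a', ['1', '2']), ('b', ['hello world']), ('flag', [None])])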
Example 2: db_url_parse
def db_url_parse(url, engine=None, conn_max_age=0):
    """
    Parses a database URL.
    """
    if url == "sqlite://:memory:":
        # urlparse will choke on :memory:
        return {
            "ENGINE": DATABASE_ENGINE_SCHEMES["sqlite"],
            "NAME": ":memory:",
        }
    config = {}
    url = urlparse.urlparse(url)
    # split query strings from path
    path = url.path[1:]
    if "?" in path and not url.query:
        # maxsplit=1: only the first "?" separates the path from the query
        path, query = path.split("?", 1)
    else:
        path, query = path, url.query
    query = urlparse.parse_qs(query)
    # sqlite with no path should assume :memory: (sqlalchemy behavior)
    if url.scheme == "sqlite" and path == "":
        path = ":memory:"
    # handle postgresql percent-encoded paths
    hostname = url.hostname or ""
    if "%2f" in hostname.lower():
        hostname = hostname.replace("%2f", "/").replace("%2F", "/")
    config.update({
        "NAME": urlparse.unquote(path or ""),
        "USER": urlparse.unquote(url.username or ""),
        "PASSWORD": urlparse.unquote(url.password or ""),
        "HOST": hostname,
        "PORT": url.port or "",
        "CONN_MAX_AGE": conn_max_age,
    })
    engine = DATABASE_ENGINE_SCHEMES[url.scheme] if engine is None else engine
    # pass the query string into OPTIONS
    options = {}
    for key, values in query.items():
        if url.scheme == "mysql" and key == "ssl-ca":
            options["ssl"] = {"ca": values[-1]}
            continue
        options[key] = values[-1]
    # postgresql schema URLs
    if "currentSchema" in options and engine == "django.db.backends.postgresql_psycopg2":
        options["options"] = "-c search_path={0}".format(options["currentSchema"])
    if options:
        config["OPTIONS"] = options
    if engine:
        config["ENGINE"] = engine
    return config
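
A quick illustration of the expected output; DATABASE_ENGINE_SCHEMES lives elsewhere in the project, so a hypothetical stand-in is defined for the demo:

import urlparse

DATABASE_ENGINE_SCHEMES = {  # hypothetical stand-in for the demo
    "postgres": "django.db.backends.postgresql_psycopg2",
    "sqlite": "django.db.backends.sqlite3",
}

cfg = db_url_parse("postgres://user:p%40ss@localhost:5432/mydb?currentSchema=app")
# cfg["NAME"] == "mydb", cfg["PASSWORD"] == "p@ss" (%40 decoded by unquote),
# cfg["PORT"] == 5432, and cfg["OPTIONS"]["options"] == "-c search_path=app"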
Example 3: get
def get(self, moduleID, title, institution, teachDate, instructors, description):
    account = get_account()
    if account:
        newCourse = dict()
        newCourse["CourseURL"] = urlparse.unquote(
            self.request.get('courseURL'))
        newCourse["Title"] = title
        newCourse["Institution"] = institution
        newCourse["TeachingDate"] = teachDate
        newCourse["Instructors"] = instructors
        newCourse["Description"] = description
        newCourse["DownloadPageLink"] = urlparse.unquote(
            self.request.get('materials'))
        newCourse["scoreRanking"] = 1
        moduleID = int(moduleID)
        match = Module.query(Module.category == moduleID).fetch()
        match = match[0]
        moduleCourses = match.courses
        newCourse['ID'] = len(moduleCourses)
        moduleCourses.append(newCourse)
        match.courses = moduleCourses
        match.courses = sorted(
            match.courses, key=lambda k: k['scoreRanking'], reverse=True)
        match.put()
        response = {'success': 'Course submitted successfully.'}
    else:
        response = {'error': 'You are not logged in.'}
    self.response.headers['Content-Type'] = 'application/json'
    self.response.write(json.dumps(response))
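
The handler assumes the client percent-encoded the courseURL and materials parameters, so unquote restores the original URLs; a round trip looks like this (illustrative values):

from urllib import quote      # urllib.parse.quote in Python 3
from urlparse import unquote  # urllib.parse.unquote in Python 3

encoded = quote('http://example.com/course?id=42', safe='')
# 'http%3A%2F%2Fexample.com%2Fcourse%3Fid%3D42'
assert unquote(encoded) == 'http://example.com/course?id=42'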
Example 4: parse_url
def parse_url(url):
    port = path = auth = userid = password = None
    # parse with HTTP URL semantics
    scheme = urlparse(url).scheme
    parts = urlparse(url.replace("%s://" % (scheme, ), "http://"))
    # The first pymongo.Connection() argument (host) can be
    # a mongodb connection URI. If this is the case, don't
    # use port but let pymongo get the port(s) from the URI instead.
    # This enables the use of replica sets and sharding.
    # See pymongo.Connection() for more info.
    if scheme == 'mongodb':
        # strip the scheme since it is appended automatically.
        hostname = url[len('mongodb://'):]
    else:
        netloc = parts.netloc
        if '@' in netloc:
            auth, _, netloc = parts.netloc.partition('@')
            userid, _, password = auth.partition(':')
        hostname, _, port = netloc.partition(':')
        path = parts.path or ""
        if path and path[0] == '/':
            path = path[1:]
        port = port and int(port) or port
    return dict({"hostname": unquote(hostname or "") or None,
                 "port": port or None,
                 "userid": unquote(userid or "") or None,
                 "password": unquote(password or "") or None,
                 "transport": scheme,
                 "virtual_host": unquote(path or "") or None},
                **kwdict(dict(parse_qsl(parts.query))))
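
Here unquote decodes percent-encoded credentials and the virtual host; kwdict is a kombu helper that normalizes keyword-argument keys, so a plain dict works as a stand-in for a demo:

from urlparse import urlparse, parse_qsl, unquote
kwdict = dict  # stand-in for kombu's helper

parse_url("amqp://guest:g%2Fpass@localhost:5672/my%2Fvhost")
# {'hostname': 'localhost', 'port': 5672, 'userid': 'guest',
#  'password': 'g/pass', 'transport': 'amqp', 'virtual_host': 'my/vhost'}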
Example 5: __init__
def __init__(self, parsed_url):
    super(S3Source, self).__init__(parsed_url)
    self.access_key_id = urlparse.unquote(parsed_url.username)
    self.secret_access_key = urlparse.unquote(parsed_url.password)
    self.conn = boto.connect_s3(self.access_key_id, self.secret_access_key)
    self.bucket_name = parsed_url.hostname
    self.bucket = self.conn.get_bucket(self.bucket_name, validate=False)
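
Credentials embedded in the URL must be percent-encoded so that urlparse does not split on reserved characters such as '/'; unquote then restores the raw values (the key and bucket below are made up):

import urlparse

url = urlparse.urlparse("s3://AKIAEXAMPLE:secret%2Fwith%2Fslash@backup-bucket")
urlparse.unquote(url.password)  # 'secret/with/slash'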
Example 6: getSongs
def getSongs(self):
    format = "%Y-%m-%d %H:%M:%S"
    for trackid, attributes in self.il['Tracks'].iteritems():
        s = Song()
        s.name = attributes.get('Name')
        s.artist = attributes.get('Artist')
        s.album_artist = attributes.get('Album Artist')
        s.composer = attributes.get('Composer')
        s.album = attributes.get('Album')
        s.genre = attributes.get('Genre')
        s.kind = attributes.get('Kind')
        if attributes.get('Size'):
            s.size = int(attributes.get('Size'))
        s.total_time = attributes.get('Total Time')
        s.track_number = attributes.get('Track Number')
        if attributes.get('Track Count'):
            s.track_count = int(attributes.get('Track Count'))
        if attributes.get('Disc Number'):
            s.disc_number = int(attributes.get('Disc Number'))
        if attributes.get('Disc Count'):
            s.disc_count = int(attributes.get('Disc Count'))
        if attributes.get('Year'):
            s.year = int(attributes.get('Year'))
        if attributes.get('Date Modified'):
            s.date_modified = time.strptime(str(attributes.get('Date Modified')), format)
        if attributes.get('Date Added'):
            s.date_added = time.strptime(str(attributes.get('Date Added')), format)
        if attributes.get('Bit Rate'):
            s.bit_rate = int(attributes.get('Bit Rate'))
        if attributes.get('Sample Rate'):
            s.sample_rate = int(attributes.get('Sample Rate'))
        s.comments = attributes.get('Comments')
        if attributes.get('Rating'):
            s.rating = int(attributes.get('Rating'))
        if attributes.get('Play Count'):
            s.play_count = int(attributes.get('Play Count'))
        if attributes.get('Location'):
            if self.musicPathXML is None or self.musicPathSystem is None:
                s.location = unicode(urlparse.unquote(urlparse.urlparse(attributes.get('Location')).path[1:]), "utf8")
            else:
                s.location = unicode(urlparse.unquote(urlparse.urlparse(attributes.get('Location')).path[1:]).replace(self.musicPathXML, self.musicPathSystem), "utf8")
        s.compilation = 'Compilation' in attributes
        if attributes.get('Play Date UTC'):
            s.lastplayed = time.strptime(str(attributes.get('Play Date UTC')), format)
        if attributes.get('Total Time'):
            s.length = int(attributes.get('Total Time'))
        if attributes.get('Grouping'):
            s.grouping = attributes.get('Grouping')
        if self.filesOnly == True and attributes.get('Track Type') == 'File':
            if self.legacymode:
                self.songs.append(s)
            else:
                self.songs[int(trackid)] = s
        elif self.filesOnly == False:
            if self.legacymode:
                self.songs.append(s)
            else:
                self.songs[int(trackid)] = s
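
The Location handling turns an iTunes file:// URL into a relative filesystem path: the [1:] slice drops the leading slash and unquote decodes the percent escapes:

import urlparse

loc = 'file://localhost/Users/me/Music/01%20Some%20Track.mp3'
urlparse.urlparse(loc).path[1:]                    # 'Users/me/Music/01%20Some%20Track.mp3'
urlparse.unquote(urlparse.urlparse(loc).path[1:])  # 'Users/me/Music/01 Some Track.mp3'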
Example 7: internalapi_testcaps
def internalapi_testcaps(args):
    indexer = urlparse.unquote(args["indexer"])
    apikey = args["apikey"]
    host = urlparse.unquote(args["host"])
    logger.debug("Check caps for %s" % indexer)
    try:
        result = check_caps(host, apikey)
        return jsonify({"success": True, "result": result})
    except IndexerResultParsingException as e:
        return jsonify({"success": False, "message": e.message})
Example 8: parse_qsl
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
    """This was a slightly modified version of the function with the same name
    that is defined in urlparse.py. I modified it, and then reverted the patch
    to have different handling of '+':

        - name = unquote(nv[0].replace('+', ' '))
        - value = unquote(nv[1].replace('+', ' '))
        + name = unquote(nv[0])
        + value = unquote(nv[1])

    Due to this [0] bug: "Proxy (and maybe others) affected by querystring +
    not being decoded by URL class #9139", I reverted my changes to the function
    but kept it here for better docs.

    [0] https://github.com/andresriancho/w3af/issues/9139

    Arguments:

    qs: percent-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        percent-encoded queries should be treated as blank strings. A
        true value indicates that blanks should be retained as blank
        strings. The default false value indicates that blank values
        are to be ignored and treated as if they were not included.

    strict_parsing: flag indicating what to do with parsing errors. If
        false (the default), errors are silently ignored. If true,
        errors raise a ValueError exception.

    Returns a list, as G-d intended.
    """
    pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
    r = []
    for name_value in pairs:
        if not name_value and not strict_parsing:
            continue
        nv = name_value.split('=', 1)
        if len(nv) != 2:
            if strict_parsing:
                raise ValueError("bad query field: %r" % name_value)
            # Handle case of a control-name with no equal sign
            if keep_blank_values:
                nv.append('')
            else:
                continue
        if len(nv[1]) or keep_blank_values:
            name = urlparse.unquote(nv[0].replace('+', ' '))
            value = urlparse.unquote(nv[1].replace('+', ' '))
            r.append((name, value))
    return r
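
In this reverted variant '+' is translated to a space before decoding, matching the stdlib's parse_qsl (the module's urlparse import is assumed to be in place):

parse_qsl('q=hello+world&tag=a%2Bb')
# [('q', 'hello world'), ('tag', 'a+b')]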
Example 9: compare_urls
def compare_urls(url_a, url_b):
    if url_a.netloc != url_b.netloc or url_a.query != url_b.query:
        return False
    # remove / from the end of the url if required
    path_a = url_a.path[:-1] if url_a.path.endswith('/') else url_a.path
    path_b = url_b.path[:-1] if url_b.path.endswith('/') else url_b.path
    return unquote(path_a) == unquote(path_b)
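
Two URLs that differ only in a trailing slash or in percent-encoding therefore compare as equal:

from urlparse import urlparse, unquote

a = urlparse('https://example.com/some%20path/')
b = urlparse('https://example.com/some path')
compare_urls(a, b)  # True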
Example 10: parse_qsl
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
    """This is a slightly modified version of the function with the same name
    that is defined in urlparse.py. I had to modify it in order to have
    '+' handled in the way w3af needed it. Note that the only change is:

        - name = unquote(nv[0].replace('+', ' '))
        - value = unquote(nv[1].replace('+', ' '))
        + name = unquote(nv[0])
        + value = unquote(nv[1])

    In other words, keep those + !

    Parse a query given as a string argument.

    Arguments:

    qs: percent-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        percent-encoded queries should be treated as blank strings. A
        true value indicates that blanks should be retained as blank
        strings. The default false value indicates that blank values
        are to be ignored and treated as if they were not included.

    strict_parsing: flag indicating what to do with parsing errors. If
        false (the default), errors are silently ignored. If true,
        errors raise a ValueError exception.

    Returns a list, as G-d intended.
    """
    pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
    r = []
    for name_value in pairs:
        if not name_value and not strict_parsing:
            continue
        nv = name_value.split('=', 1)
        if len(nv) != 2:
            if strict_parsing:
                raise ValueError("bad query field: %r" % (name_value,))
            # Handle case of a control-name with no equal sign
            if keep_blank_values:
                nv.append('')
            else:
                continue
        if len(nv[1]) or keep_blank_values:
            name = urlparse.unquote(nv[0])
            value = urlparse.unquote(nv[1])
            r.append((name, value))
    return r
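
Contrast with Example 8: the same input keeps its literal '+' characters here, which is the behavior w3af needed:

parse_qsl('q=hello+world&tag=a%2Bb')
# [('q', 'hello+world'), ('tag', 'a+b')]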
Example 11: handle
def handle(self, *args, **options):
    for templatename in args:
        # if templatename[0:1] == 't':
        print templatename
        f = open(templatename, "r")
        lines = f.readlines()
        f.close()
        # print lines
        attribs = []
        for line in lines:
            currentattrib = line.split('=', 1)
            attribs.append(currentattrib)
            print currentattrib[0]
            print urlparse.unquote(currentattrib[1]).strip()
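
The command expects files of percent-encoded name=value lines; a single line would be processed like this (hypothetical input):

import urlparse

line = 'title=My%20Page%20%26%20more\n'
name, value = line.split('=', 1)
print urlparse.unquote(value).strip()
# My Page & more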
Example 12: internalapi_testcaps
def internalapi_testcaps(args):
    indexer = urlparse.unquote(args["indexer"])
    apikey = args["apikey"]
    host = urlparse.unquote(args["host"])
    logger.debug("Check caps for %s" % indexer)
    try:
        ids, types = check_caps(host, apikey)
        ids = sorted(list(ids))
        types = sorted(list(types))
        return jsonify({"success": True, "ids": ids, "types": types})
    except IndexerResultParsingException as e:
        return jsonify({"success": False, "message": e.message})
Example 13: download_request
def download_request(self, request, spider):
    p = urlparse_cached(request)
    scheme = 'https' if request.meta.get('is_secure') else 'http'
    bucket = p.hostname
    path = p.path + '?' + p.query if p.query else p.path
    url = '%s://%s.s3.amazonaws.com%s' % (scheme, bucket, path)
    signed_headers = self.conn.make_request(
        method=request.method,
        bucket=bucket,
        key=unquote(p.path),
        query_args=unquote(p.query),
        headers=request.headers,
        data=request.body)
    httpreq = request.replace(url=url, headers=signed_headers)
    return self._download_http(httpreq, spider)
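
The key and query string are unquoted before being passed to the connection's make_request, whose returned headers carry the S3 signature, while the rewritten URL keeps the original encoding; the decoding step itself is plain unquote:

from urlparse import urlparse, unquote

p = urlparse('s3://mybucket/reports/2015%2001.csv')
unquote(p.path)  # '/reports/2015 01.csv'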
Example 14: do_GET
def do_GET(self):
    """Respond to GET requests"""
    from urlparse import unquote
    path, headers, data = self._ParseBaseRequest('GET')
    if path is None:
        return
    if ('mpd' == path[0]) and (2 == len(path)):
        self._AlterMPD(unquote(path[1]), headers, data)
    elif ('subtitles' == path[0]) and (3 == len(path)):
        self._TranscodeSubtitle(unquote(path[1]), headers, data, path[2])
    else:
        Log('[PS] Invalid request received', Log.DEBUG)
        self.send_error(501, 'Invalid request')
Example 15: score_results
def score_results(results):
    flat_res = filter(
        None, chain.from_iterable(izip_longest(*results.values())))
    flat_len = len(flat_res)
    engines_len = len(results)
    results = []
    # deduplication + scoring
    for i, res in enumerate(flat_res):
        res['parsed_url'] = urlparse(res['url'])
        res['host'] = res['parsed_url'].netloc
        if res['host'].startswith('www.'):
            res['host'] = res['host'].replace('www.', '', 1)
        res['engines'] = [res['engine']]
        weight = 1.0
        if hasattr(engines[res['engine']], 'weight'):
            weight = float(engines[res['engine']].weight)
        score = int((flat_len - i) / engines_len) * weight + 1
        duplicated = False
        for new_res in results:
            p1 = res['parsed_url'].path[:-1] if res['parsed_url'].path.endswith('/') else res['parsed_url'].path  # noqa
            p2 = new_res['parsed_url'].path[:-1] if new_res['parsed_url'].path.endswith('/') else new_res['parsed_url'].path  # noqa
            if res['host'] == new_res['host'] and\
                    unquote(p1) == unquote(p2) and\
                    res['parsed_url'].query == new_res['parsed_url'].query and\
                    res.get('template') == new_res.get('template'):
                duplicated = new_res
                break
        if duplicated:
            if res.get('content') > duplicated.get('content'):
                duplicated['content'] = res['content']
            duplicated['score'] += score
            duplicated['engines'].append(res['engine'])
            if duplicated['parsed_url'].scheme == 'https':
                continue
            elif res['parsed_url'].scheme == 'https':
                duplicated['url'] = res['parsed_url'].geturl()
                duplicated['parsed_url'] = res['parsed_url']
        else:
            res['score'] = score
            results.append(res)
    return sorted(results, key=itemgetter('score'), reverse=True)
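
The position-based score rewards results that appear early across engines; as a worked instance of the formula above, with 6 flattened results from 2 engines, the result at index 0 from an engine of weight 1.5 gets:

flat_len, engines_len, i, weight = 6, 2, 0, 1.5
score = int((flat_len - i) / engines_len) * weight + 1
# int(6 / 2) * 1.5 + 1 == 5.5

The duplicate check is the same netloc/unquoted-path/query comparison as compare_urls in Example 9.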