本文整理汇总了Python中urllib2.quote函数的典型用法代码示例。如果您正苦于以下问题:Python quote函数的具体用法?Python quote怎么用?Python quote使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了quote函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _update_request_uri_query
def _update_request_uri_query(request):
'''pulls the query string out of the URI and moves it into
the query portion of the request object. If there are already
query parameters on the request the parameters in the URI will
appear after the existing parameters'''
if '?' in request.path:
request.path, _, query_string = request.path.partition('?')
if query_string:
query_params = query_string.split('&')
for query in query_params:
if '=' in query:
name, _, value = query.partition('=')
request.query.append((name, value))
request.path = urllib2.quote(request.path, '/()$=\',')
#add encoded queries to request.path.
if request.query:
request.path += '?'
for name, value in request.query:
if value is not None:
request.path += name + '=' + urllib2.quote(value, '/()$=\',') + '&'
request.path = request.path[:-1]
return request.path, request.query
示例2: set_language
def set_language(self):
    "Set the language"
    # Work out where to send the user afterwards: explicit ?next=
    # parameter, then the Referer header, then the site root.
    nextpage = request.params.get('next', None)
    if not nextpage:
        nextpage = request.headers.get('Referer', None)
    if not nextpage:
        nextpage = '/'
    # Open-redirect guard: keep only the path component of absolute URLs.
    if '://' in nextpage:
        from_url = urlparse(nextpage)
        nextpage = from_url[2]
    # Persist the requested language in the session when it is supported.
    lang_code = request.params.get('language', None)
    if lang_code and check_language(lang_code):
        session['lang'] = lang_code
        session.save()
    params = []
    # Carry the remaining query parameters over to the redirect target,
    # re-encoded; 'language' and 'amp' are deliberately dropped.
    for param in request.params:
        if not param in ['language', 'amp']:
            value = request.params[param]
            if value:
                # 'came_from' may itself contain an absolute URL; reduce
                # it to its path component as well (same redirect guard).
                if (param == 'came_from' and
                        '://' in urllib2.unquote(value)):
                    urlparts = urlparse(urllib2.unquote(value))
                    value = urlparts[2] or '/'
                params.append('%s=%s' % (urllib2.quote(param),
                                         urllib2.quote(value)))
    # Marker so the destination page knows a language change happened.
    if 'lc=1' not in params:
        params.append('lc=1')
    if params:
        nextpage = "%s?%s" % (nextpage, '&'.join(params))
    redirect(nextpage)
示例3: test_import_to_shape
def test_import_to_shape(self):
    """import_to_shape should reject calls with no source, and must
    double-encode the uri parameter regardless of what is already
    percent-encoded or '+'-escaped in it."""
    from gnmvidispine.vs_item import VSItem
    item = VSItem(host=self.fake_host, port=self.fake_port, user=self.fake_user, passwd=self.fake_passwd)
    item.name = "VX-123"
    item.sendAuthorized = MagicMock(return_value=self.MockedResponse(200, self.import_job_doc))
    # With neither a uri nor a file ref there is nothing to import.
    with self.assertRaises(ValueError):
        item.import_to_shape()
    uris_to_try = (
        "file:///path/to/newmedia.mxf",
        "file:///path/to/" + quote("media with spaces.mxf", safe="/"),
        "file:///path/to/" + quote("media+with+plusses.mxf", safe="/+"),
    )
    for fake_uri in uris_to_try:
        # We are embedding a URI as a parameter within another URL so it
        # must be double-encoded (nothing left in the safe set).
        quoted_uri = quote(fake_uri, "")
        item.import_to_shape(uri=fake_uri, shape_tag="shapetagname", priority="HIGH")
        item.sendAuthorized.assert_called_with(
            'POST',
            '/API/item/VX-123/shape?priority=HIGH&essence=false&tag=shapetagname&thumbnails=true&uri={0}'.format(quoted_uri),
            "",
            {'Accept': 'application/xml'},
            rawData=False)
示例4: get_lyrics
def get_lyrics(entry, db):
    """Fetch the lyrics for one song from lyrics.wikia.com and store them.

    entry: document with 'title', 'artist' and 'year' keys; on success
    an extra 'lyrics' key is added and the entry is saved via
    db.entries.save(). Updates the module-level errors/successes
    counters. (Python 2 code: print statements, byte-string handling.)
    """
    global errors
    global successes
    title = entry['title'].encode('utf-8')
    artist = entry['artist'].encode('utf-8')
    year = entry['year']
    # Wikia page names use underscores in place of spaces.
    artist_clean = urllib2.quote(sanitize_artist(artist).replace(" ", "_"))
    title_clean = urllib2.quote(sanitize_title(title).replace(" ", "_"))
    url = 'http://lyrics.wikia.com/' + artist_clean + ':' + title_clean
    page = requests.get(url)
    if page.status_code != 200:
        # NOTE(review): any non-200 status is reported as "404" here.
        print "404 error getting lyrics for " + title + " by " + artist + ", " + str(year)
        errors += 1
    else:
        page_soup = BeautifulSoup(page.text)
        lyrics = page_soup.select(".lyricbox")
        if len(lyrics) == 0:
            print "Parsing error getting lyrics for " + title + " by " + artist + ", " + str(year)
            errors += 1
            return
        lyrics = lyrics[0]
        # Remove embedded <script> tags before extracting the text.
        [x.extract() for x in lyrics.findAll('script')]
        lyrics = lyrics.get_text(' ', strip=True).encode('utf-8')
        # NOTE(review): external helper; name looks like a typo for
        # "sanitize" — confirm against its definition elsewhere.
        lyrics = santize(lyrics)
        entry['lyrics'] = lyrics
        db.entries.save(entry)
        successes += 1
        print "Successfully extracted lyrics for " + title + " by " + artist
示例5: get
def get(self,method,args=None):
    """ GET to DeepDetect server

    method: API path appended to the base server URL.
    args: optional dict of query parameters; entries are URL-encoded and
    a key with a None value is emitted without '=value'.
    Returns the decoded response; raises DDCommunicationError on any
    transport failure and DDDataError on a malformed payload.
    """
    u = self.__ddurl
    u += method
    headers = {}
    if args is not None:
        # Append args as URL-encoded query parameters ('?' first, '&' after).
        sep = "?"
        for arg,argv in args.iteritems():
            u += sep
            sep = "&"
            u += urllib2.quote(arg)
            u += '='
            if argv is not None:
                u += urllib2.quote(argv)
    LOG("GET %s"%u)
    response = None
    try:
        req = urllib2.Request(u)
        response = urllib2.urlopen(req, timeout=DD_TIMEOUT)
        jsonresponse=response.read()
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
    except:
        raise DDCommunicationError(u,"GET",headers,None,response)
    LOG(jsonresponse)
    try:
        return self.__return_format(jsonresponse)
    except:
        raise DDDataError(u,"GET",headers,None,jsonresponse)
示例6: get_lat_lng
def get_lat_lng(address, city, state):
c = config.load()
# If address is a PO Box, skip
if re.search('P(\.)?O(\.)?(\sBox\s)[0-9]+', address) is not None or address == '':
return None
else:
url = 'https://api.smartystreets.com/street-address?'
url += 'state=' + urllib2.quote(str(state))
url += '&city=' + urllib2.quote(str(city))
url += '&auth-id=' + c['ss_id']
url += '&auth-token=' + c['ss_token']
url += '&street=' + urllib2.quote(str(address))
result = json.load(urllib2.urlopen(url))
if len(result) == 1:
lat_lng = {'lat': result[0]['metadata']['latitude'], 'lng': result[0]['metadata']['longitude']}
return lat_lng
elif len(result) == 0:
# return generic lat/lng if zero results so we can come back later to fix it
lat_lng = {'lat': 36.0, 'lng': -76.0}
return lat_lng
else:
print result
exit(-1)
示例7: build_query
def build_query(self):
    """
    Assemble the query string used to access the cghub server.

    Filter values that are lists/tuples become an OR group
    '(v1+OR+v2)'; offset/limit map to 'start'/'rows'; a sort_by value
    with a leading '-' requests descending order.
    """
    parts = []
    for field, raw in self.query.iteritems():
        if isinstance(raw, (list, tuple)):
            # Multi-valued filter: escape each value, then OR them together.
            joined = '+OR+'.join(self.escape_query_value(field, item)
                                 for item in raw)
            encoded = '(%s)' % joined
        else:
            encoded = self.escape_query_value(field, raw)
        parts.append('%s=%s' % (field, encoded))
    if self.offset:
        parts.append('start=%s' % self.offset)
    if self.limit:
        parts.append('rows=%s' % self.limit)
    if self.sort_by:
        # A leading '-' means descending sort on the remaining field name.
        if self.sort_by.startswith('-'):
            parts.append('sort_by=%s:desc' % urllib2.quote(self.sort_by[1:]))
        else:
            parts.append('sort_by=%s:asc' % urllib2.quote(self.sort_by))
    return '&'.join(parts)
示例8: get_SIMBAD_coordinates
def get_SIMBAD_coordinates(name):
    """Query SIMBAD for an object's coordinates and return a persisted
    AstronomicalCoordinates instance, or None when the lookup or the
    VOTable parse fails. Tries the primary mirror first, then the backup.
    """
    url = VOTABLE_OPTIONS + SIMBAD_VOTABLE_SCRIPT_START + QUERY_VOTABLE_FULLCOORDINATES + SIMBAD_VOTABLE_SCRIPT_MIDDLE + name + SIMBAD_VOTABLE_SCRIPT_END
    try:
        response = urllib2.urlopen(SIMBAD_ROOT_1+NAME_SCRIPT+urllib2.quote(url))
    except urllib2.URLError:
        try:
            # Primary server unreachable: fall back to the mirror.
            response = urllib2.urlopen(SIMBAD_ROOT_2+NAME_SCRIPT+urllib2.quote(url))
        except urllib2.URLError:
            return None
    try:
        # Parse the VOTable payload; the first row holds ra and dec.
        response_votable = votable.parse(response.fp)
        first_table = response_votable.get_first_table()
    except:
        return None
    else:
        ra = float(first_table.array[0][0])
        dec = float(first_table.array[0][1])
        try:
            coords, created = AstronomicalCoordinates.objects.get_or_create(right_ascension=ra, declination=dec)
        except MultipleObjectsReturned:
            # Duplicate rows already exist for this position; reuse the first.
            coords = AstronomicalCoordinates.objects.filter(right_ascension=ra, declination=dec).first()
        return coords
示例9: get_SIMBAD_object_types
def get_SIMBAD_object_types(name):
    """Query SIMBAD for an object's otype values and return a list of
    persisted ObjectType instances; None when both mirrors are down.
    """
    url = SIMBAD_BASIC_SCRIPT + QUERY_OTYPES + name
    try:
        response = urllib2.urlopen(SIMBAD_ROOT_1+NAME_SCRIPT+urllib2.quote(url))
    except urllib2.URLError:
        try:
            # Primary server unreachable: fall back to the mirror.
            response = urllib2.urlopen(SIMBAD_ROOT_2+NAME_SCRIPT+urllib2.quote(url))
        except urllib2.URLError:
            return None
    otypes = []
    ok = False
    value_line = None
    # Scan for the data delimiter; keep the last non-empty line that
    # FOLLOWS it (the delimiter line itself is never captured because
    # `ok` is only set after the capture check).
    for line in response.readlines():
        if ok and len(line.strip()) > 0:
            value_line = line.strip()
        if line.find(QUERY_DATA_DELIMITER) >= 0:
            ok = True
    # The captured line is a comma-separated list of otype codes.
    if value_line is not None and len(value_line) > 0:
        values = value_line.split(",")
        for value in values:
            otype, created = ObjectType.objects.get_or_create(value=value)
            otypes.append(otype)
    return otypes
示例10: decorated_function
def decorated_function(*args, **kwargs):
    """Page-cache wrapper: serve the rendered view from cache when
    possible; otherwise render it, stamp it with a timestamp comment,
    and cache it under a key derived from the request path and page.
    (Closure variables: f, key, timeout, cache, quote.)
    """
    page = int(request.args.get('page', 1))
    # Must be converted to str type here, otherwise a type error is raised.
    _path = request.path.encode("utf-8")
    # Non-ASCII URLs need URL-encoding; skip when the path is already
    # percent-encoded (quoting again would produce '%25').
    if quote(_path).count('%25') <= 0:
        _path = quote(_path)
    # Mobile clients get their own cache namespace.
    _viewkey = 'mobile%s' % _path if request.MOBILE else _path
    cache_key = str(key % _viewkey)
    if page > 1:
        cache_key = '%s_%s' % (cache_key, page)
    rv = cache.get(cache_key)
    if rv is not None:
        return rv
    rv = f(*args, **kwargs)
    # Tag the response so cached copies can be recognised in the markup.
    _suffix = u"\n<!-- cached at %s -->" % str(datetime.datetime.now())
    if hasattr(rv, "data"):
        rv.data += _suffix
    if isinstance(rv, unicode):
        rv += _suffix
    cache.set(cache_key, rv, timeout)
    return rv
示例11: plos_search
def plos_search(query, query_type = None, rows = 20, more_parameters = None, fq = '''doc_type:full AND article_type:"Research Article"''', output = "json", verbose = False):
'''
Accesses the PLOS search API.
query: the text of your query.
query_type: subject, author, etc.
rows: maximum number of results to return.
more_parameters: an optional dictionary; key-value pairs are parameter names and values for the search api.
fq: determines what kind of results are returned.
Set by default to return only full documents that are research articles (almost always what you want).
output: determines output type. Set to JSON by default, XML is also possible, along with a few others.
'''
api_key = "..."
query_string = ""
if query_type:
query_string += query_type + ":"
query_string += '"' + query + '"'
params_string = ""
if more_parameters:
params_string = "&" + "&".join([key + "=" + quote(value) for key, value in more_parameters.iteritems()])
fq_string = "&fq=" + quote(fq)
url = "http://api.plos.org/search?q=" + query_string + params_string + fq_string + "&wt=" + output + "&rows=" + str(rows) + "&api_key=" + api_key
headers = {'Content-Type': 'application/' + output}
if verbose:
print url
r = requests.get(url, headers=headers)
r.encoding = "UTF-8" # just to be sure
return r.json()["response"]["docs"]
示例12: _generate_url
def _generate_url(self, options):
    """Build a signed (AWS Signature Version 2) Product Advertising API
    request URL.

    options: dict of request parameters; it is mutated in place (service
    identity fields are added, None-valued entries removed).
    Returns the full http URL including the Signature parameter.
    NOTE: Python 2 code (uses the `unicode` builtin).
    """
    options['Service'] = 'AWSECommerceService'
    options['AWSAccessKeyId'] = self.access_key_id
    options['AssociateTag'] = self.associate_tag
    options['Timestamp'] = self._generate_timestamp()
    # Remove entries whose value is None. Iterate over a snapshot of the
    # keys so the dict can be mutated safely during the scan.
    for k in [k for k, v in options.items() if v is None]:
        del options[k]
    # Build the canonical query string: keys sorted, values UTF-8 encoded
    # and percent-escaped with '~' left unescaped (RFC 3986, as required
    # by Signature v2).
    keys = sorted(options.keys())
    args = '&'.join('%s=%s' % (key, urllib2.quote(unicode(options[key])
                                                  .encode('utf-8'), safe='~'))
                    for key in keys)
    # String-to-sign: HTTP verb, host, path, canonical query string.
    msg = 'GET'
    msg += '\n' + self.uri
    msg += '\n' + self.end_point
    msg += '\n' + args
    # Sign once with HMAC-SHA256 (the original computed a second, unused
    # digest here; that dead statement has been removed).
    signature = urllib2.quote(
        base64.b64encode(hmac.new(self.secret_key or '', msg, hashlib.sha256).digest()))
    url = "http://%s%s?%s&Signature=%s" % (self.uri, self.end_point, args, signature)
    return url
示例13: _get_archived_json_results
def _get_archived_json_results(self):
    """Download JSON file that only contains test
    name list from test-results server. This is for generating incremental
    JSON so the file generated has info for tests that failed before but
    pass or are skipped from current run.
    Returns (archived_results, error) tuple where error is None if results
    were successfully read.
    """
    results_json = {}
    old_results = None
    error = None
    # No server configured: nothing archived to merge against.
    if not self._test_results_server:
        return {}, None
    # Each URL component is quoted individually before substitution.
    results_file_url = (self.URL_FOR_TEST_LIST_JSON %
        (urllib2.quote(self._test_results_server),
         urllib2.quote(self._builder_name),
         self.RESULTS_FILENAME,
         urllib2.quote(self._test_type),
         urllib2.quote(self._master_name)))
    try:
        # FIXME: We should talk to the network via a Host object.
        results_file = urllib2.urlopen(results_file_url)
        info = results_file.info()
        old_results = results_file.read()
    except urllib2.HTTPError, http_error:
        # A non-4xx status code means the bot is hosed for some reason
        # and we can't grab the results.json file off of it.
        # NOTE(review): this condition can never be True — a code cannot
        # be both < 400 and >= 500; 'and' was presumably meant to be 'or'.
        if (http_error.code < 400 and http_error.code >= 500):
            error = http_error
    # NOTE(review): the function appears truncated in this excerpt; the
    # final parse of old_results and the return statement are not visible.
示例14: translate
def translate(phrase, in_lang):
    """Translate `phrase` between English and Japanese.

    in_lang == "en" translates to Japanese; any other value translates
    to English. Uses the Microsoft Translator AJAX API; the else branch
    is a disabled Google Translate alternative (unreachable behind the
    hard-coded `if True:` toggle).
    """
    if in_lang == "en":
        out_lang = "ja"
    else:
        out_lang = "en"
    # Hard-coded backend toggle: True = Microsoft, False = Google.
    if True:
        url = (
            "http://api.microsofttranslator.com/V2/Ajax.svc/GetTranslations?appId=F2926FC35C3732CEC3E9C92913745F9C28912821&from="
            + in_lang
            + "&to="
            + out_lang
            + "&maxTranslations=1"
        )
        url += "&text=" + quote(phrase.encode("utf-8"))
        response = urlfetch.fetch(url=url)
        # Strip the UTF-8 BOM the service prepends before JSON-decoding.
        content = re.sub(u"\xEF\xBB\xBF", "", response.content)
        data = json.loads(content)
        translated_text = data["Translations"][0]["TranslatedText"]
        time.sleep(0.1)  # crude rate limiting between API calls
    else:
        url = "https://www.googleapis.com/language/translate/v2?"
        url += "&source=" + in_lang
        url += "&target=" + out_lang
        url += "&q=" + quote(phrase.encode("utf-8"))
        url += "&key=" + "AIzaSyAI3PoUAJ_uP0o33EDgUfSEUMALepQAaNA"
        content = urlfetch.fetch(url=url).content
        data = json.loads(content)
        translated_text = data["data"]["translations"][0]["translatedText"]
    return translated_text
示例15: searchBook
def searchBook(isbn_num):
    """Look up a book by ISBN through the Amazon Product Advertising API
    (ItemSearch, Signature v2), log its title and price, and store them
    via storeInDB. (Python 2 code: print statements, encodestring.)
    """
    logText("Searching for: ", isbn_num)
    # Parameters are assembled already sorted by name, since Signature v2
    # signs the query string exactly as sent.
    query = "AWSAccessKeyId=" + AWSAccessKeyID + "&AssociateTag=abc&Keywords="
    query += isbn_num
    query += "&Operation=ItemSearch&ResponseGroup=ItemAttributes&SearchIndex=Books&Service=AWSECommerceService"
    # NOTE(review): the trailing [:-1] here drops the final 'Z' of the
    # quoted timestamp — confirm this is intended (elsewhere in this file
    # [:-1] strips a trailing newline, not a meaningful character).
    query += "&Timestamp=" + urllib2.quote(datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.000Z"))[:-1]
    # query += "&Version=2011-08-01"
    # String-to-sign: HTTP verb, host, path, query string.
    data = "GET\n"
    data += "ecs.amazonaws.com\n"
    data += "/onca/xml\n"
    data += query
    a = hmac.new(AWSSecret, data, hashlib.sha256)
    # base64.encodestring appends '\n'; [:-1] strips it before quoting.
    signature = urllib2.quote(base64.encodestring(a.digest())[:-1])
    url = "http://ecs.amazonaws.com/onca/xml?" + query + "&Signature=" + signature
    # print "URL : ", url
    url_obj = urllib2.urlopen(url)
    data = url_obj.read()
    book_info = getInfoFromXML(data)
    logText( " - Title: ", book_info[0])
    logText( " - Price: ", book_info[1])
    storeInDB( (book_info[0], isbn_num, book_info[1]) )