This article collects typical usage examples of Python's urllib.request add_header function (in practice, the add_header method of urllib.request.Request). If you are wondering what add_header does, how to call it, or what real-world uses look like, the hand-picked examples here may help.
Below are 15 code examples of add_header, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
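Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: build a urllib.request.Request, attach headers with add_header, then open it. The URL and header values below are placeholders chosen for illustration only and do not come from any of the examples.

import json
import urllib.error
import urllib.request

# Placeholder endpoint used purely for illustration.
request = urllib.request.Request('https://httpbin.org/get')
# add_header(key, value) stores a single header; the key is normalized
# (capitalized) internally by Request.
request.add_header('User-Agent', 'example-client/1.0')
request.add_header('Accept', 'application/json')
try:
    with urllib.request.urlopen(request, timeout=10) as response:
        print(json.loads(response.read().decode('utf-8')))
except urllib.error.URLError as error:
    print('request failed:', error)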
Example 1: post
def post(build: Build):
    if not SLACK_NOTIFICATION and SLACK_NOTIFICATION_URL and AUR_PACKAGER_BASE_URL:
        return
    detail_url = AUR_PACKAGER_BASE_URL + str(reverse_lazy('manager:build_detail',
                                                          kwargs={'package_name': build.package.name,
                                                                  'build_number': 1}))
    base = '<{}|{}> {}: <{}|{}>'.format(
        package_url(aur_server_tag=build.package.server, package_name=build.package.name), build.package.name,
        build.version, detail_url, build.status)
    if build.status == Build.SUCCESS:
        emoji = ':+1:'
        sha256s = json.loads(build.sha256)
        artifacts = []
        for artifact in Artifact.objects.filter(package=build.package):
            download_url = AUR_PACKAGER_BASE_URL + str(reverse_lazy('manager:build_download',
                                                                    kwargs={'package_name': artifact.name,
                                                                            'build_number': 1}))
            sha256 = sha256s[artifact.name]
            s = '<{}|:arrow_down: {}> sha256: {}'.format(download_url, artifact.name, sha256)
            artifacts.append(s)
        text = '\n'.join([base] + artifacts)
    else:
        emoji = ':ghost:'
        text = base
    name = '{}: {} {}'.format(build.status, build.package.name, build.version)
    data = {'text': text, 'username': name, 'icon_emoji': emoji}
    request = urllib.request.Request(SLACK_NOTIFICATION_URL)
    request.add_header('Content-type', 'application/json')
    try:
        urllib.request.urlopen(request, json.dumps(data).encode())
    except urllib.error.URLError:
        pass
Example 2: get_blob
def get_blob(self, thread_id, blob_id):
    """Return a file-like object with the contents of the given blob.

    The object is described in detail here:
    https://docs.python.org/2/library/urllib2.html#urllib2.urlopen
    """
    request = urllib.request.Request(
        url=self._url("blob/%s/%s" % (thread_id, blob_id)))
    if self.access_token:
        request.add_header("Authorization", "Bearer " + self.access_token)
    try:
        return urllib.request.urlopen(request,
                                      timeout=self.request_timeout)
    except urllib.request.HTTPError as error:
        try:
            # Extract the developer-friendly error message
            message = json.loads(error.read())["error_description"]
        except Exception:
            raise error
        if (self.retry_rate_limit and error.code == 503 and
                message == "Over Rate Limit"):
            # Retry later.
            reset_time = float(error.headers.get("X-RateLimit-Reset"))
            delay = max(2, reset_time - time.time() + 1)
            logging.warning("Rate Limit, delaying for %d seconds" % delay)
            time.sleep(delay)
            return self.get_blob(thread_id, blob_id)
        else:
            raise QuipError(error.code, message, error)
Example 3: __send_xml_str
def __send_xml_str(self, xml_str):
    logger.debug("Sending: %s" % xml_str)
    xml_data = urllib.parse.urlencode({'XML': xml_str})
    request = urllib.request.Request(self.door_url(), xml_data)
    base64string = base64.encodestring('%s:%s' % (self.door_user, self.door_pass)).replace('\n', '')
    request.add_header("Authorization", "Basic %s" % base64string)
    context = ssl._create_unverified_context()
    context.set_ciphers('RC4-SHA')
    self.lock.acquire()
    try:
        result = urllib.request.urlopen(request, context=context)
        return_code = result.getcode()
        return_xml = result.read()
        result.close()
    finally:
        self.lock.release()
    logger.debug("Response code: %d" % return_code)
    logger.debug("Response: %s" % return_xml)
    if return_code != 200:
        raise Exception("Did not receive 200 return code")
    error = get_attribute(return_xml, "errorMessage")
    if error:
        raise Exception("Received an error: %s" % error)
    return return_xml
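Example 3 builds its Basic authentication header with base64.encodestring, which expects bytes on Python 3 and was removed entirely in Python 3.9. A minimal sketch of the same header construction on current Python 3, using placeholder credentials and a placeholder URL rather than the original class attributes:

import base64
import urllib.request

user, password = 'door_user', 'secret'  # placeholders for self.door_user / self.door_pass
# b64encode takes bytes and returns bytes without a trailing newline,
# so the .replace('\n', '') from the example is not needed.
token = base64.b64encode(('%s:%s' % (user, password)).encode('utf-8')).decode('ascii')

request = urllib.request.Request('https://door.example.com/xml')  # placeholder URL
request.add_header('Authorization', 'Basic %s' % token)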
Example 4: _defaultFetcher
def _defaultFetcher(url):
    """Retrieve data from ``url``. cssutils default implementation of fetch
    URL function.

    Returns ``(encoding, string)`` or ``None``
    """
    try:
        request = urllib.request.Request(url)
        request.add_header('User-agent',
                           'cssutils %s (http://www.cthedot.de/cssutils/)' % VERSION)
        res = urllib.request.urlopen(request)
    except urllib.error.HTTPError as e:
        # http error, e.g. 404, e can be raised
        log.warn('HTTPError opening url=%s: %s %s' %
                 (url, e.code, e.msg), error=e)
    except urllib.error.URLError as e:
        # URLError like mailto: or other IO errors, e can be raised
        log.warn('URLError, %s' % e.reason, error=e)
    except OSError as e:
        # e.g. if file URL and not found
        log.warn(e, error=OSError)
    except ValueError as e:
        # invalid url, e.g. "1"
        log.warn('ValueError, %s' % e.args[0], error=ValueError)
    else:
        if res:
            mimeType, encoding = encutils.getHTTPInfo(res)
            if mimeType != 'text/css':
                log.error('Expected "text/css" mime type for url=%r but found: %r' %
                          (url, mimeType), error=ValueError)
            content = res.read()
            if hasattr(res, 'close'):
                res.close()
            return encoding, content
Example 5: query
def query(resource, mbid, includes=[]):
    """Queries MusicBrainz' web service for *resource* with *mbid* and the given list of includes.

    Returns an LXML ElementTree root node. All namespaces are removed from the result.
    """
    url = '{}/{}/{}'.format(wsURL, resource, mbid)
    if queryCallback:
        queryCallback(url)
    if len(includes) > 0:
        url += '?inc={}'.format('+'.join(includes))
    logging.debug(__name__, 'querying {}'.format(url))
    ans = db.query("SELECT xml FROM {}musicbrainzqueries WHERE url=?".format(db.prefix), url)
    try:
        data = ans.getSingle()
    except db.EmptyResultException:
        try:
            request = urllib.request.Request(url)
            request.add_header('User-Agent',
                               'Maestro/0.4.0 (https://github.com/maestromusic/maestro)')
            with urllib.request.urlopen(request) as response:
                data = response.read()
        except urllib.error.HTTPError as e:
            if e.code == 404:
                raise e
            else:
                raise ConnectionError(e.msg)
        db.query("INSERT INTO {}musicbrainzqueries (url, xml) VALUES (?,?)"
                 .format(db.prefix), url, data)
    root = etree.fromstring(data)
    # remove namespace tags
    for node in root.iter():
        if node.tag.startswith('{'):
            node.tag = node.tag.rsplit('}', 1)[-1]
    return root
Example 6: glsrequest
def glsrequest(uri, method, data=None):
    '''
    Returns xml node tree as Element instance.

    'uri' may be absolute or relative to _BASEURI.
    'method' in ('GET', 'POST', 'PUT')
    'data' can be a string or Element instance
    '''
    if method not in {'GET', 'POST', 'PUT'}:
        raise GlslibException(MSGUNSUPPORTEDMETHOD % method)
    if not uri.startswith(_BASEURI):
        uri = _BASEURI.rstrip('/') + '/' + uri.lstrip('/')
    request = urllib.request.Request(uri)
    request.add_header("Authorization", "Basic %s" % _AUTHSTR)
    if etree.iselement(data):
        # tostring generates bytestring (as required for data)
        data = etree.tostring(data)
        request.add_header('Content-Type', 'application/xml')
        # Note: Request.add_data() was removed in Python 3.4; on current
        # versions assign request.data = data instead.
        request.add_data(data)
    request.get_method = lambda: method
    msg = '%s %s\n%s\n%s' % (request.get_method(),
                             request.get_full_url(),
                             request.headers,
                             data.decode('utf-8') if data else '')
    logger.debug(msg)
    try:
        r = urllib.request.urlopen(request)
        return etree.XML(r.read())
    except urllib.error.HTTPError as httperr:
        logger.error(httperr.read())
        raise
    except urllib.error.URLError as urlerr:
        logger.error(request.get_full_url())
        raise
Example 7: get_recent_jobs
def get_recent_jobs(self, n_jobs=10):
    """
    Returns the user's N most recently submitted jobs on the GenePattern server.

    Args: If not specified, n_jobs = 10.
    Returns: An array of GPJob objects.
    """
    # Query the server for the list of jobs
    request = urllib.request.Request(self.url + '/rest/v1/jobs/?pageSize=' +
                                     str(n_jobs) + '&userId=' + str(urllib.parse.quote(self.username)) +
                                     '&orderBy=-dateSubmitted')
    if self.authorization_header() is not None:
        request.add_header('Authorization', self.authorization_header())
    request.add_header('User-Agent', 'GenePatternRest')
    response = urllib.request.urlopen(request)
    response_string = response.read().decode('utf-8')
    response_json = json.loads(response_string)

    # For each job in the JSON Array, build a GPJob object and add to the job list
    job_list = []
    for job_json in response_json['items']:
        job_id = job_json['jobId']
        job = GPJob(self, job_id)
        job.info = job_json
        job.load_info()
        job_list.append(job)
    return job_list
Example 8: sendRequest
def sendRequest(self, path, data={}, token=True, post=True, headers={}):
    response = None
    request = None
    if post:
        headers['Content-Type'] = 'application/xml; charset=UTF-8'
    if token:
        headers['Authorization'] = '%s' % self.token
    try:
        if post:
            request = urllib.request.Request(self.apiURL + path, data.encode('utf8'))
        elif len(data) == 0:
            request = urllib.request.Request(self.apiURL + path)
        else:
            print('I have data in sendRequest but i don\'t know what i should do with it :D')
        if request is not None:
            for k, v in headers.items():
                request.add_header(k, v)
            response = urllib.request.urlopen(request)
    except urllib.error.HTTPError as e:
        print('Error while requesting API call: %s (%s)' % (e.msg, e.code))
        print('URL: %s' % (self.apiURL + path))
    except urllib.error.URLError as e:
        print('Error while requesting API call: %s' % (e.reason))
    return response
Example 9: _request
def _request(self, method, url, get=None, post=None, auth=False):
    if get:
        url = "{}?{}".format(url, urllib.parse.urlencode(get))
    if post:
        post = urllib.parse.urlencode(post).encode('utf-8')
    request = urllib.request.Request(self.url + url, post)
    request.get_method = lambda: method
    if auth:
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        request.add_header('Authorization', '{0} {1}'.format(
            self._get_userconfig('token_type').capitalize(),
            self._get_userconfig('access_token'),
        ))
    try:
        response = self.opener.open(request, timeout=10)
        return json.loads(response.read().decode('utf-8'))
    except urllib.request.HTTPError as e:
        if e.code == 400:
            raise utils.APIError("Invalid PIN. It is either probably expired or meant for another application.")
        else:
            raise utils.APIError("Connection error: %s" % e)
    except socket.timeout:
        raise utils.APIError("Connection timed out.")
Example 10: retrieve_page
def retrieve_page(dbinfo, url):
    """
    Retrieve a web page, with retries if necessary.
    """
    crawl_delay = CRAWL_DELAY
    html = ''
    attempt = 1
    while True:
        try:
            request = urllib.request.Request(url)
            request.add_header(
                'User-Agent',
                ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:21.0) ' +
                 'Gecko/20100101 Firefox/21.0'))
            request.add_header(
                'Accept',
                ('text/html,application/xhtml+xml,application/xml;' +
                 'q=0.9,*/*;q=0.8'))
            html = urllib.request.urlopen(request).read().decode('utf-8')
            return html
        except:
            if attempt >= RETRY_ATTEMPTS:
                log(dbinfo, 'ERROR',
                    'Error retrieving web page, too many retries: ' + url)
                return None
            else:
                log(dbinfo, 'WARNING',
                    'Problem retrieving web page, retrying: ' + url)
                sleep(crawl_delay)
                crawl_delay = crawl_delay * 2
                attempt += 1
Example 11: make_call
def make_call(api_url, query_args=None):
    # api_url is expected to be the fully constructed URL, with any needed
    # arguments appended.
    # This function will simply make the call, and return the response as
    # an ElementTree object for parsing. If the response cannot be parsed
    # because it is not valid XML, this function assumes an API error and
    # raises an APIException, passing forward the page's contents (which
    # generally give some indication of the error).
    if query_args is not None:
        get_params = urlencode_no_plus.urlencode_no_plus(query_args)
        request = urllib.request.Request(api_url + '%s' % get_params)
    else:
        request = urllib.request.Request(api_url)
    # Added these headers to avoid some weird errors from the host.
    request.add_header('Referer', 'http://thegamesdb.net/')
    request.add_header('User-agent', 'Mozilla/5.0')
    response = urllib.request.urlopen(request)
    page = response.read()
    # Make sure the XML parser doesn't raise a ParseError. If it does,
    # it's probably an API issue, so raise an exception, printing the
    # response from the API call.
    try:
        xml_response = ET.fromstring(page)
    except ET.ParseError:
        raise APIException(page)
    return xml_response
Example 12: get_credit
def get_credit(opener):
    url = emuch_url + '/memcp.php?action=getcredit'
    values = {'formhash': '2c8099cd',
              'getmode': '1',  # 2
              'message': '',
              'creditsubmit': b'\xc1\xec\xc8\xa1\xba\xec\xb0\xfc'  # u'领取红包'.encode('gbk') -- "claim the red packet"
              }
    data = urllib.parse.urlencode(values)
    data = data.encode('utf-8')
    request = urllib.request.Request(url, data)  # , method='POST')
    request.add_header("Content-Type", "application/x-www-form-urlencoded;charset=utf-8")
    request.add_header('User-Agent', user_agent)
    print_log('try to get credit...')
    r = opener.open(request)
    body = r.read().decode('gbk')
    info = [b'\xb9\xa7\xcf\xb2\xa3\xa1\xc4\xe3\xbb\xf1\xb5\xc3'.decode('gbk'),  # u'恭喜!你获得' -- "Congratulations! You received ..."
            b'\xbd\xf1\xcc\xec\xb5\xc4\xba\xec\xb0\xfc\xa3\xac\xc4\xfa\xd2\xd1\xbe\xad\xc1\xec\xc8\xa1\xc1\xcb\xa3\xac\xd2\xbb\xcc\xec\xbe\xcd\xd2\xbb\xb4\xce\xbb\xfa\xbb\xe1'.decode('gbk'),  # u'今天的红包,您已经领取了,一天就一次机会' -- "You already claimed today's red packet; one chance per day"
            '',
            ]
    msgs = ['get credit successfully!', 'can not get twice!', 'undefined error!']
    # out_html(body, "get_credit")
    for i, s in enumerate(info):
        if s in body:
            print_log(msgs[i])
            return i
Example 13: get_msgbox
def get_msgbox(opener):
    url = emuch_url + '/box.php'
    request = urllib.request.Request(url)
    request.add_header("Content-Type", "application/x-www-form-urlencoded;charset=utf-8")
    request.add_header('User-Agent', user_agent)
    return opener.open(request)
Example 14: login_emuch
def login_emuch(opener, username, passwd):
    url = emuch_url + '/logging.php?action=login'
    values = {'formhash': 'f6ac2e8a',
              'referer': 'http://emuch.net/bbs/index.php',
              'username': username,
              'password': passwd,
              'cookietime': '31536000',
              'loginsubmit': b'\xbb\xe1\xd4\xb1\xb5\xc7\xc2\xbc'  # u'会员登录'.encode('gbk') -- "member login"
              }
    data = urllib.parse.urlencode(values)
    data = data.encode('utf-8')
    request = urllib.request.Request(url, data)  # , method='POST')
    request.add_header("Content-Type", "application/x-www-form-urlencoded;charset=utf-8")
    request.add_header('User-Agent', user_agent)
    r = opener.open(request)
    body = r.read().decode('gbk')
    # out_html(body, 'login')
    es = b'\xca\xe4\xc8\xeb\xb5\xc4\xd5\xca\xba\xc5\xc3\xdc\xc2\xeb\xb4\xed\xce\xf3\xa3\xac\xc7\xeb\xd6\xd8\xca\xd4'.decode('gbk')
    # 输入的帐号密码错误,请重试 -- "wrong account or password, please retry"
    f = es in body
    print_log({0: "logged in successfully!", 1: 'wrong username or password!'}[f])
    return not f
Example 15: SendRequest
def SendRequest(host, session, requestString):
    data = bytes(json.dumps({"query": requestString}), "ASCII")
    request = urllib.request.Request(host + ":16742", data)
    request.add_header("Cookie", "session=" + session)
    # Note: HTTPResponse has no readall() on current Python 3; read() with no
    # arguments returns the full body and is the portable call here.
    response = json.loads(urllib.request.urlopen(request).readall().decode('ascii'))
    # sys.stderr.write(response + "\n")
    return response