This article collects typical usage examples of the request.add_header method from Python's urllib.request module. If you are unsure what request.add_header does or how to use it, the curated examples below should help. You can also explore further usage examples of urllib.request.Request, the class that actually defines this method.
The following 15 code examples of request.add_header are ordered by popularity, with the most widely used patterns first.
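Before turning to the project examples, here is a minimal, self-contained sketch of the method itself (the target URL and header values are purely illustrative and not taken from any of the projects below): build a urllib.request.Request, call add_header() once per extra header, then pass the request to urlopen().

import json
import urllib.request

# Build the request first, then attach headers one at a time.
# Request.add_header() stores the header under a capitalized key
# ('user-agent' becomes 'User-agent'), and adding the same header
# again simply overwrites the earlier value.
request = urllib.request.Request('https://httpbin.org/headers')
request.add_header('User-Agent', 'example-client/1.0')
request.add_header('Accept', 'application/json')

with urllib.request.urlopen(request, timeout=10) as response:
    body = json.loads(response.read().decode('utf-8'))
    print(body['headers'])  # the headers the server reports having received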
Example 1: get_web_page
# Required import: from urllib import request [as alias]
# Or: from urllib.request import add_header [as alias]
def get_web_page(url, headers, cookies):
    try:
        logging.info(f'Fetching {url}')
        request = urllib.request.Request(url, None, headers)
        request.add_header('Authorization', cookies)
        response = urllib.request.urlopen(request)
        if response.info().get('Content-Encoding') == 'gzip':
            buf = BytesIO(response.read())
            f = gzip.GzipFile(fileobj=buf)
            r = f.read()
        else:
            r = response.read()
        return r
    except urllib.error.HTTPError as e:
        logging.info(f"Error processing webpage: {e}")
        if e.code == ALREADY_CLICKED_CODE:
            return ALREADY_CLICKED_CODE
        if e.code == UNAUTHORIZED:
            return UNAUTHORIZED
        return None
Example 2: makePut
# Required import: from urllib import request [as alias]
# Or: from urllib.request import add_header [as alias]
def makePut(self, url):
    """Puts data to Canvas (passes token as header)"""
    try:
        # Tack on http://.../ to the beginning of the url if needed
        if self.CANVAS_API not in url:
            urlString = self.CANVAS_API + url
        else:
            urlString = url
        print("Putting: " + urlString)
        request = urllib.request.Request(urlString, method='PUT')
        request.add_header("Authorization", "Bearer " + self.CANVAS_TOKEN)
        response = urllib.request.urlopen(request)
        json_string = response.read().decode('utf-8')
        retVal = dict(json.loads(json_string))
        #print(retVal)
        if response.status == 200:
            return True
        else:
            return False
    except Exception as ex:
        print(ex)
        e = sys.exc_info()[0]
        print(e)
        raise
Example 3: get_blob
# Required import: from urllib import request [as alias]
# Or: from urllib.request import add_header [as alias]
def get_blob(self, thread_id, blob_id):
    """Returns a file-like object with the contents of the given blob from
    the given thread.

    The object is described in detail here:
    https://docs.python.org/2/library/urllib2.html#urllib2.urlopen
    """
    request = Request(
        url=self._url("blob/%s/%s" % (thread_id, blob_id)))
    if self.access_token:
        request.add_header("Authorization", "Bearer " + self.access_token)
    try:
        return urlopen(request, timeout=self.request_timeout)
    except HTTPError as error:
        try:
            # Extract the developer-friendly error message from the response
            message = json.loads(error.read().decode())["error_description"]
        except Exception:
            raise error
        raise QuipError(error.code, message, error)
Example 4: _fetch_json
# Required import: from urllib import request [as alias]
# Or: from urllib.request import add_header [as alias]
def _fetch_json(self, path, post_data=None, **args):
    request = Request(url=self._url(path, **args))
    if post_data:
        post_data = dict((k, v) for k, v in post_data.items()
                         if v or isinstance(v, int))
        request_data = urlencode(self._clean(**post_data))
        if PY3:
            request.data = request_data.encode()
        else:
            request.data = request_data
    if self.access_token:
        request.add_header("Authorization", "Bearer " + self.access_token)
    try:
        return json.loads(
            urlopen(
                request, timeout=self.request_timeout).read().decode())
    except HTTPError as error:
        try:
            # Extract the developer-friendly error message from the response
            message = json.loads(error.read().decode())["error_description"]
        except Exception:
            raise error
        raise QuipError(error.code, message, error)
Example 5: download
# Required import: from urllib import request [as alias]
# Or: from urllib.request import add_header [as alias]
def download(self, source: str, destination: str) -> int:
    destination_path: str = os.path.join(DATA_BASE_PATH, destination)
    url: str = os.path.join(STORAGE_BASE_URL, source)
    request = urllib.request.Request(url)
    request.add_header('User-Agent', self.USERAGENT)
    response = urllib.request.urlopen(request)
    with open(destination_path, 'wb') as output:
        filesize: int = 0
        while source:
            chunk = response.read(self.CHUNK_SIZE)
            if not chunk:
                break
            filesize += len(chunk)
            output.write(chunk)
    # assume that we always distribute data as .tar.xz archives
    with lzma.open(destination_path) as f:
        with tarfile.open(fileobj=f) as tar:
            tar.extractall(os.path.dirname(destination_path))
    return filesize
Example 6: ip_adress_proxies
# Required import: from urllib import request [as alias]
# Or: from urllib.request import add_header [as alias]
def ip_adress_proxies(url='https://www.ip-adress.com/proxy_list/'):
    # Downloading without proxy
    opener = urllib.request.build_opener(urllib.request.ProxyHandler())
    urllib.request.install_opener(opener)
    request = urllib.request.Request(url)
    request.add_header('user-agent', USER_AGENT)
    parsed_uri = urlparse(url)
    host = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)
    request.add_header('referer', host)
    s = ''  # empty string keeps re.findall() below valid even if the request fails
    try:
        context = ssl._create_unverified_context()
        with urlopen(request, context=context, timeout=3000) as response:
            s = response.read().decode('utf-8')
    except Exception as er:
        print(er)
    pattern = r'\d*\.\d*\.\d*\.\d*\</a>:\d*'
    found = [i.replace('</a>', '') + '\n' for i in re.findall(pattern, s)]
    return found
Example 7: downloadUrls
# Required import: from urllib import request [as alias]
# Or: from urllib.request import add_header [as alias]
def downloadUrls(self, urls):
    url_data = {}
    for u in urls:
        url = self.base_url + u
        request = urllib.request.Request(url)
        # the .htaccess file checks for the header, and if it exists returns unprocessed data.
        request.add_header('User-agent', 'our-web-crawler')
        try:
            response = urllib.request.urlopen(request)
            data = response.read()
        except urllib.request.HTTPError:
            log(url)
            raise
        except urllib.request.URLError:
            log(url)
            raise
        yield (u, data)
Example 8: perform_metadata_exchange
# Required import: from urllib import request [as alias]
# Or: from urllib.request import add_header [as alias]
def perform_metadata_exchange(self, endpoint, xaddr):
    if not (xaddr.startswith('http://') or xaddr.startswith('https://')):
        logger.debug('invalid XAddr: {}'.format(xaddr))
        return

    host = None
    url = xaddr
    if self.mch.family == socket.AF_INET6:
        host = '[{}]'.format(url.partition('[')[2].partition(']')[0])
        url = url.replace(']', '%{}]'.format(self.mch.interface.name))

    body = self.build_getmetadata_message(endpoint)
    request = urllib.request.Request(url, data=body, method='POST')
    request.add_header('Content-Type', 'application/soap+xml')
    request.add_header('User-Agent', 'wsdd')
    if host is not None:
        request.add_header('Host', host)

    try:
        with urllib.request.urlopen(request, None, 2.0) as stream:
            self.handle_metadata(stream.read(), endpoint, xaddr)
    except urllib.error.URLError as e:
        logger.warn('could not fetch metadata from: {} ({})'.format(url, e))
Example 9: put
# Required import: from urllib import request [as alias]
# Or: from urllib.request import add_header [as alias]
def put(self, location, params=None):
    """Dispatch a PUT request to a SeaMicro chassis.

    The seamicro box has order-dependent HTTP parameters, so we build
    our own get URL, and use a list vs. a dict for data, as the order is
    implicit.
    """
    opener = urllib.request.build_opener(urllib.request.HTTPHandler)
    url = self.build_url(location, params)
    request = urllib.request.Request(url)
    request.get_method = lambda: "PUT"
    request.add_header("content-type", "text/json")
    response = opener.open(request)
    json_data = self.parse_response(url, response)
    return json_data["result"]
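The docstring above explains why the caller passes parameters as a list rather than a dict: the SeaMicro API is order-sensitive. As a small illustration of that design choice (the parameter names below are made up, and build_url itself is not shown in this excerpt), urllib.parse.urlencode preserves order when given a sequence of key/value pairs:

from urllib.parse import urlencode

# A list of (key, value) pairs makes the intended order explicit;
# plain dicts only guarantee insertion order from Python 3.7 onward.
params = [('username', 'admin'), ('password', 'secret'), ('action', 'reset')]
print(urlencode(params))  # username=admin&password=secret&action=reset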
Example 10: getHtml
# Required import: from urllib import request [as alias]
# Or: from urllib.request import add_header [as alias]
def getHtml(url, headers=None, encode=None, maxError=3, timeout=10):
    error = 0
    while error < maxError:
        try:
            if not headers:
                headers = c_spider.defaultHeaders
            headers['Referer'] = url
            request = urllib.request.Request(url)
            for key in headers:
                request.add_header(key, headers[key])
            response = urllib.request.urlopen(request, timeout=timeout)
            html = response.read()
            if encode:
                return html.decode(encode)
            else:
                return html
        except:
            error += 1
# Fetch the page source
Example 11: plugin
# Required import: from urllib import request [as alias]
# Or: from urllib.request import add_header [as alias]
def plugin(srv, item):
    """ addrs: (node, name) """
    srv.logging.debug("*** MODULE=%s: service=%s, target=%s", __file__, item.service, item.target)

    url = item.config['url']
    apikey = item.config['apikey']
    timeout = item.config['timeout']

    node = item.addrs[0]
    name = item.addrs[1]
    value = item.payload

    try:
        params = {'apikey': apikey, 'node': node, 'json': json.dumps({name: value})}
        resource = url + '/input/post.json?' + urllib.parse.urlencode(params)
        request = urllib.request.Request(resource)
        request.add_header('User-agent', srv.SCRIPTNAME)
        response = urllib.request.urlopen(request, timeout=timeout)
        data = response.read()
    except Exception as e:
        srv.logging.warn("Failed to send GET request to EmonCMS using %s: %s" % (resource, e))
        return False

    return True
Example 12: get_raw
# Required import: from urllib import request [as alias]
# Or: from urllib.request import add_header [as alias]
def get_raw(self, url):
    # print("Raw request:", url)
    request = urllib.request.Request(url)
    context = ssl._create_unverified_context()

    # setup private request headers if appropriate
    if self._engine.token is not None:
        if self._engine.name == "gitlab":
            request.add_header('PRIVATE-TOKEN', self._engine.token)
        else:
            if self._verbose:
                print("Tokens not setup for engine yet")

    # run the request
    try:
        result = urllib.request.urlopen(request, context=context)
    except urllib.error.HTTPError as e:
        self._error = "HTTP error"
        self._error_msg = str(e.code)
        self._update_ready = None
    except urllib.error.URLError as e:
        self._error = "URL error, check internet connection"
        self._error_msg = str(e.reason)
        self._update_ready = None
        return None
    else:
        result_string = result.read()
        result.close()
        return result_string.decode()

# result of all api calls, decoded into json format
Example 13: _make_request
# Required import: from urllib import request [as alias]
# Or: from urllib.request import add_header [as alias]
def _make_request(self, url):
    request = urllib.request.Request(url)
    request.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 '
                                     '(KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36')
    with urllib.request.urlopen(request, timeout=5) as response:
        html = response.read()
    return html
Example 14: __send_request
# Required import: from urllib import request [as alias]
# Or: from urllib.request import add_header [as alias]
def __send_request(self, method, uri, data):
    url = self.__url + uri
    request = urllib.request.Request(url)
    if method == 'POST':
        request.data = bytes(json.dumps(data), 'utf-8')
    auth = str(
        base64.b64encode(
            bytes('%s:%s' % (self.user, self.password), 'utf-8')
        ),
        'ascii'
    ).strip()
    request.add_header('Authorization', 'Basic %s' % auth)
    request.add_header('Content-Type', 'application/json')

    e = None
    try:
        response = urllib.request.urlopen(request).read()
    except urllib.error.HTTPError as ex:
        response = ex.read()
        e = ex

    if response:
        result = json.loads(response.decode())
    else:
        result = {}

    if e is not None:
        if result and 'error' in result:
            error = '"' + result['error'] + '"'
        else:
            error = 'No additional error message received'
        raise APIError('TestRail API returned HTTP %s (%s)' %
                       (e.code, error))

    return result
Example 15: start_cron_threads
# Required import: from urllib import request [as alias]
# Or: from urllib.request import add_header [as alias]
def start_cron_threads():
    """Start threads to trigger essential cron jobs."""
    request_timeout = 10 * 60  # 10 minutes.

    def trigger(interval_seconds, target):
        """Trigger a cron job."""
        while True:
            time.sleep(interval_seconds)
            try:
                url = 'http://{host}/{target}'.format(
                    host=constants.CRON_SERVICE_HOST, target=target)
                request = urllib.request.Request(url)
                request.add_header('X-Appengine-Cron', 'true')
                response = urllib.request.urlopen(request, timeout=request_timeout)
                response.read(60)  # wait for request to finish.
            except Exception:
                continue

    crons = (
        (90, 'cleanup'),
        (60, 'triage'),
        (6 * 3600, 'schedule-progression-tasks'),
        (12 * 3600, 'schedule-corpus-pruning'),
    )

    for interval, cron in crons:
        thread = threading.Thread(target=trigger, args=(interval, cron))
        thread.daemon = True
        thread.start()