本文整理汇总了Python中six.moves.urllib.request方法的典型用法代码示例。如果您正苦于以下问题:Python urllib.request方法的具体用法?Python urllib.request怎么用?Python urllib.request使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类six.moves.urllib
的用法示例。
在下文中一共展示了urllib.request方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _request
# 需要导入模块: from six.moves import urllib [as 别名]
# 或者: from six.moves.urllib import request [as 别名]
def _request(self, arguments, timeout=None):
    """Assemble the url and perform a GET request to
    the MicroStrategy Task Service API.

    Args:
        arguments (dict): Maps get key parameters to values
        timeout: seconds to wait for a response before giving up
            (passed through to ``requests.get``); ``None`` waits forever

    Returns:
        str: the xml text response

    Raises:
        MstrClientException: if the request times out or the server
            returns a non-2xx response
    """
    arguments.update(BASE_PARAMS)
    request = self._base_url + six.moves.urllib.parse.urlencode(arguments)
    # Lazy %-style logging args: the message is only formatted if the
    # INFO level is actually enabled.
    logger.info("submitting request %s", request)
    try:
        response = requests.get(request, timeout=timeout)
    except requests.exceptions.Timeout as e:
        raise MstrClientException(str(e))
    if not response.ok:
        raise MstrClientException(response.text)
    logger.info("received response %s", response.text)
    return response.text
示例2: prefetch_metrics
# 需要导入模块: from six.moves import urllib [as 别名]
# 或者: from six.moves.urllib import request [as 别名]
def prefetch_metrics(self, container_id):
    """Initiates requesting invoking `stats` for the specified container.

    If you invoke this, you must also eventually invoke `get_metrics` with
    the same container. By invoking this first, the `get_metrics` call will
    take less time when issuing many `stats` requests.

    Whenever possible, you should first invoke this method for all containers
    whose metrics you wish to request before any call to `get_metrics`.

    The behavior is not well defined if you invoke `prefetch_metrics` multiple
    times for a container before invoking `get_metrics` for it.

    @param container_id: The id of the container to fetch.
    @type container_id: str
    """
    # `with` guarantees the lock is released even if _add_fetch_task raises,
    # replacing the manual acquire()/try/finally/release() pattern.
    with self.__lock:
        if container_id not in self.__container_scoreboard:
            self._add_fetch_task(container_id)
示例3: __start_workers
# 需要导入模块: from six.moves import urllib [as 别名]
# 或者: from six.moves.urllib import request [as 别名]
def __start_workers(self, count):
    """Start `count` worker threads that will fetch metrics results.

    @param count: The number of threads to start.
    @type count: int
    """
    # Never exceed the configured concurrency limit.
    new_number_workers = min(self.__concurrency, count + self.__num_worker_threads)
    for _ in range(self.__num_worker_threads, new_number_workers):
        worker = threading.Thread(target=self.__worker)
        # Set daemon so this thread does not need to be finished for the overall
        # process to stop. This allows the process to terminate even if a `stats`
        # request is still in-flight. `Thread.setDaemon` is deprecated (removed
        # in Python 3.13); assign the `daemon` attribute instead.
        worker.daemon = True
        worker.start()
        self.__num_worker_threads += 1
        # For accounting purposes, we consider the thread idle until it actually
        # has a container it is fetching.
        self.__idle_workers_count += 1
示例4: setUp
# 需要导入模块: from six.moves import urllib [as 别名]
# 或者: from six.moves.urllib import request [as 别名]
def setUp(self):
    """Build the connector under test with urllib's opener mocked out,
    then verify the expected authentication Request was constructed."""
    super(XMLAPIConnectorTest, self).setUp()
    emc_share_driver = fakes.FakeEMCShareDriver()
    self.configuration = emc_share_driver.configuration

    # Fake socket whose read() yields the canned XML response.
    fake_socket = mock.Mock()
    fake_socket.read = mock.Mock(return_value=XML_CONN_TD.FAKE_RESP)
    fake_opener = mock.Mock()
    fake_opener.open = mock.Mock(return_value=fake_socket)

    with mock.patch.object(url_request, 'build_opener',
                           mock.Mock(return_value=fake_opener)):
        self.XmlConnector = connector.XMLAPIConnector(
            configuration=self.configuration, debug=False)

    expected_calls = [
        mock.call(XML_CONN_TD.req_auth_url(),
                  XML_CONN_TD.req_credential(),
                  XML_CONN_TD.req_url_encode()),
    ]
    url_request.Request.assert_has_calls(expected_calls)
示例5: test_heal_vnf_instance_already_not_instantiated
# 需要导入模块: from six.moves import urllib [as 别名]
# 或者: from six.moves.urllib import request [as 别名]
def test_heal_vnf_instance_already_not_instantiated(self,
        mock_log, mock_get_lock):
    """Healing a NOT_INSTANTIATED vnf instance must be rejected and logged."""
    vnf_package_vnfd = self._create_and_upload_vnf_package()
    instance_data = fake_obj.get_vnf_instance_data(vnf_package_vnfd.vnfd_id)
    instance_data['instantiation_state'] = (
        fields.VnfInstanceState.NOT_INSTANTIATED)
    vnf_instance = objects.VnfInstance(context=self.context, **instance_data)
    vnf_instance.create()

    heal_request = objects.HealVnfRequest(cause="healing request")
    self.conductor.heal(self.context, vnf_instance, heal_request)

    # The driver must not be invoked; an error must be logged instead.
    self.vnflcm_driver.heal_vnf.assert_not_called()
    expected_log = ('Heal action cannot be performed on vnf %(id)s '
                    'which is in %(state)s state.')
    mock_log.error.assert_called_once_with(
        expected_log,
        {'id': vnf_instance.id,
         'state': fields.VnfInstanceState.NOT_INSTANTIATED})
示例6: fetch_bibtex_by_fulltext_scholar
# 需要导入模块: from six.moves import urllib [as 别名]
# 或者: from six.moves.urllib import request [as 别名]
def fetch_bibtex_by_fulltext_scholar(txt, assess_results=True):
    """Search Google Scholar for `txt` and return the bibtex of the best match.

    Args:
        txt (str): full-text query string
        assess_results (bool): when True and several results are returned,
            score each candidate with _scholar_score and keep the best one;
            otherwise just take the first result

    Returns:
        str: the bibtex entry fetched from the result's scholarbib url

    Raises:
        NotImplementedError: when the chosen result has no bibtex link
    """
    import scholarly.scholarly
    scholarly._get_page = _get_page_fast  # remove waiting time
    logger.debug(txt)
    search_query = scholarly.search_pubs_query(txt)
    # get the most likely match of the first results
    results = list(search_query)
    if len(results) > 1 and assess_results:
        maxscore = 0
        result = results[0]
        for res in results:
            score = _scholar_score(txt, res.bib)
            if score > maxscore:
                maxscore = score
                result = res
    else:
        result = results[0]
    # use url_scholarbib to get bibtex from google
    if getattr(result, 'url_scholarbib', ''):
        bibtex = scholarly._get_page(result.url_scholarbib).strip()
    else:
        raise NotImplementedError('no bibtex import link. Make crossref request using title?')
    return bibtex
示例7: test_check_phone_verification_error
# 需要导入模块: from six.moves import urllib [as 别名]
# 或者: from six.moves.urllib import request [as 别名]
def test_check_phone_verification_error(self, phone_service):
    """A failing pin check returns an error status and stores no number hash."""
    with h.push_config(config, **{'project.verify_phone': 'true'}):
        phone_service.check.return_value = {'status': 'error'}
        req_id = 'request-id'
        # make request to verify first to initialize session
        phone_service.verify.return_value = {
            'request_id': req_id, 'status': 'ok'}
        self.app.get('/p/verify_phone', {'number': '1234567890'})
        r = self.app.get('/p/check_phone_verification', {'pin': '1234'})
        assert_equal(r.json, {'status': 'error'})
        phone_service.check.assert_called_once_with(req_id, '1234')
        user = M.User.by_username('test-admin')
        number_hash = user.get_tool_data('phone_verification', 'number_hash')
        assert_equal(number_hash, None)
示例8: test_check_phone_verification_ok
# 需要导入模块: from six.moves import urllib [as 别名]
# 或者: from six.moves.urllib import request [as 别名]
def test_check_phone_verification_ok(self, phone_service):
    """A successful pin check returns ok and persists the phone-number hash."""
    with h.push_config(config, **{'project.verify_phone': 'true'}):
        phone_service.check.return_value = {'status': 'ok'}
        req_id = 'request-id'
        # make request to verify first to initialize session
        phone_service.verify.return_value = {
            'request_id': req_id, 'status': 'ok'}
        self.app.get('/p/verify_phone', {'number': '11234567890'})
        r = self.app.get('/p/check_phone_verification', {'pin': '1234'})
        assert_equal(r.json, {'status': 'ok'})
        phone_service.check.assert_called_once_with(req_id, '1234')
        user = M.User.by_username('test-admin')
        number_hash = user.get_tool_data('phone_verification', 'number_hash')
        assert_equal(number_hash, '54c61c96d5d5aea5254c2d4f41508a938e5501b4')
示例9: call
# 需要导入模块: from six.moves import urllib [as 别名]
# 或者: from six.moves.urllib import request [as 别名]
def call(self, url, **params):
    """POST a signed request to the Import API and return the JSON response.

    Args:
        url: endpoint path, joined against `self.base_url`
        **params: request parameters; signed via `self.sign` before sending

    Returns:
        The decoded JSON response body.

    Raises:
        six.moves.urllib.error.HTTPError: on an HTTP error response, with
            the url (and optionally the error body) appended to its message
        six.moves.urllib.error.URLError / IOError: on transport errors,
            unless `self.retry` is set, in which case the request is retried
    """
    url = six.moves.urllib.parse.urljoin(self.base_url, url)
    if self.verbose:
        log.info("Import API URL: %s", url)
    params = self.sign(six.moves.urllib.parse.urlparse(url).path, list(params.items()))
    while True:
        try:
            # urlopen requires the POST body to be bytes on Python 3;
            # urlencode returns str, so encode it explicitly.
            data = six.moves.urllib.parse.urlencode(params).encode('utf-8')
            result = six.moves.urllib.request.urlopen(url, data)
            resp = result.read()
            return json.loads(resp)
        except six.moves.urllib.error.HTTPError as e:
            e.msg += ' ({0})'.format(url)
            if self.verbose:
                # e.read() returns bytes on Python 3; decode before
                # concatenating into the str message.
                error_content = e.read()
                if isinstance(error_content, bytes):
                    error_content = error_content.decode('utf-8', 'replace')
                e.msg += '. Error response:\n' + error_content
            raise e
        except (six.moves.urllib.error.URLError, IOError):
            if self.retry:
                log.exception('Error making API request, will retry')
                continue
            raise
示例10: mtranslate_google
# 需要导入模块: from six.moves import urllib [as 别名]
# 或者: from six.moves.urllib import request [as 别名]
def mtranslate_google(word):
    """Translate `word` via Google Translate's mobile page.

    Uses the module-level `lang_to`/`lang_from` globals for the language
    pair.  Returns a `([[word, translation]], ['', ''])` tuple matching the
    other translation backends in this module.
    """
    import html
    import urllib.request
    import urllib.parse
    agent = {'User-Agent':
             "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36"}

    def translate(to_translate, to_language="auto", from_language="auto"):
        base_link = "http://translate.google.com/m?hl=%s&sl=%s&q=%s"
        to_translate = urllib.parse.quote(to_translate)
        link = base_link % (to_language, from_language, to_translate)
        request = urllib.request.Request(link, headers=agent)
        raw_data = urllib.request.urlopen(request).read()
        data = raw_data.decode("utf-8")
        expr = r'class="t0">(.*?)<'
        re_result = re.findall(expr, data)
        if len(re_result) == 0:
            return ""
        # html.unescape replaces HTMLParser().unescape, which was deprecated
        # and removed in Python 3.9.
        return html.unescape(re_result[0])

    return [[word, translate(word, lang_to, lang_from)]], ['', '']
# reverso.net
示例11: calculate_token
# 需要导入模块: from six.moves import urllib [as 别名]
# 或者: from six.moves.urllib import request [as 别名]
def calculate_token(self, text, seed=None):
    """Calculate the request token (`tk`) of a string.

    :param text: str The text to calculate a token for
    :param seed: str The seed to use. By default this is the number of
        hours since epoch (as returned by ``self._get_token_key()``).
    """
    if seed is None:
        seed = self._get_token_key()
    [first_seed, second_seed] = seed.split(".")

    try:
        data = bytearray(text.encode('UTF-8'))
    except UnicodeDecodeError:
        # This will probably only occur when text is actually a str containing
        # UTF-8 chars, which means we don't need to encode.
        data = bytearray(text)

    # Fold every byte into the accumulator with the first salt, then apply
    # the second salt once and mix in the second seed.
    acc = int(first_seed)
    for byte in data:
        acc = self._work_token(acc + byte, self.SALT_1)
    acc = self._work_token(acc, self.SALT_2)
    acc ^= int(second_seed)
    if acc < 0:
        # Wrap into the unsigned 32-bit range.
        acc = (acc & 2147483647) + 2147483648
    acc = int(acc % 1E6)
    return "{0}.{1}".format(acc, acc ^ int(first_seed))
示例12: save
# 需要导入模块: from six.moves import urllib [as 别名]
# 或者: from six.moves.urllib import request [as 别名]
def save(self, savefile):
    """Do the Web request and save the audio to the path `savefile`."""
    with open(savefile, 'wb') as output:
        self.write_to_fp(output)
示例13: write_to_fp
# 需要导入模块: from six.moves import urllib [as 别名]
# 或者: from six.moves.urllib import request [as 别名]
def write_to_fp(self, fp):
    """Do the Web request for each text part and stream the audio into the
    file-like object `fp`.

    Raises:
        requests.exceptions.HTTPError: if any part's request returns a
            non-2xx response (via ``raise_for_status``)
    """
    for idx, part in enumerate(self.text_parts):
        payload = {'ie': 'UTF-8',
                   'q': part,
                   'tl': self.lang,
                   'ttsspeed': self.speed,
                   'total': len(self.text_parts),
                   'idx': idx,
                   'client': 'tw-ob',
                   'textlen': self._len(part),
                   'tk': self.token.calculate_token(part)}
        headers = {
            "Referer": "http://translate.google.com/",
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36"
        }
        if self.debug:
            print(payload)
        # Disable requests' ssl verify to accommodate certain proxies and
        # firewalls. Filter out urllib3's insecure warnings — we can live
        # without ssl verify here.
        # (The original `except Exception as e: raise` wrapper was a no-op
        # and has been removed; exceptions propagate unchanged.)
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore",
                category=requests.packages.urllib3.exceptions.InsecureRequestWarning)
            r = requests.get(self.GOOGLE_TTS_URL,
                             params=payload,
                             headers=headers,
                             proxies=urllib.request.getproxies(),
                             verify=False)
        if self.debug:
            print("Headers: {}".format(r.request.headers))
            print("Request url: {}".format(r.request.url))
            print("Response: {}, Redirects: {}".format(r.status_code, r.history))
        r.raise_for_status()
        for chunk in r.iter_content(chunk_size=1024):
            fp.write(chunk)
示例14: logRequest
# 需要导入模块: from six.moves import urllib [as 别名]
# 或者: from six.moves.urllib import request [as 别名]
def logRequest(self, body, timeout=None, _async=True):
    """Log the request that is about to be issued.

    Derives the effective HTTP method: `self.method` when set, otherwise
    POST when a body is present and GET when it is not.
    """
    method = self.method
    if not method:
        # Conditional expression instead of the fragile `and/or` idiom
        # (which breaks whenever the middle operand is falsy).
        method = "POST" if body is not None else "GET"
    util.LOG(
        "Starting request: {0} {1} (async={2} timeout={3})".format(method, util.cleanToken(self.url),
                                                                   _async, timeout)
    )
示例15: _get_cookie_crumb
# 需要导入模块: from six.moves import urllib [as 别名]
# 或者: from six.moves.urllib import request [as 别名]
def _get_cookie_crumb():
    '''
    Query Yahoo finance for the S&P 500 quote page and extract the matching
    session cookie and 'crumb' token into the module globals `_cookie` and
    `_crumb`.
    '''
    global cookier, _cookie, _crumb
    # Perform a Yahoo financial lookup on SP500
    cookier.cookiejar.clear()
    req = urllib.request.Request(
        'https://finance.yahoo.com/quote/^GSPC', headers=_headers)
    body = urllib.request.urlopen(req, timeout=5).read().decode('utf-8')
    # Extract the crumb from the response: the value is the quoted string
    # following the 'crumb' key inside the CrumbStore object.
    store_pos = body.find('CrumbStore')
    crumb_pos = body.find('crumb', store_pos + 10)
    colon_pos = body.find(':', crumb_pos + 5)
    open_quote = body.find('"', colon_pos + 1)
    close_quote = body.find('"', open_quote + 1)
    _crumb = body[open_quote + 1:close_quote]
    # Extract the cookie from cookiejar: the 'B' cookie on the .yahoo.com domain
    for cookie in cookier.cookiejar:
        if cookie.domain == '.yahoo.com' and cookie.name == 'B':
            _cookie = cookie.value
    # Print the cookie and crumb
    #print('Cookie:', _cookie)
    #print('Crumb:', _crumb)