This article collects typical usage examples of the Python method requests_futures.sessions.FuturesSession.get. If you have been wondering what FuturesSession.get does, how to call it, or what it looks like in real code, the curated samples below should help. You can also read further about the containing class, requests_futures.sessions.FuturesSession.
The following presents 15 code examples of FuturesSession.get, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
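Before the examples, here is a minimal sketch of the core pattern (the httpbin.org URL and the worker count are illustrative assumptions, not taken from any example on this page): FuturesSession.get submits the request to a thread pool and immediately returns a concurrent.futures.Future, and calling .result() blocks until the underlying requests.Response is available.

# Minimal sketch; the URL and max_workers value are illustrative, not from the examples below.
from requests_futures.sessions import FuturesSession

session = FuturesSession(max_workers=4)
future = session.get('https://httpbin.org/get')  # returns a concurrent.futures.Future right away
response = future.result()                       # blocks until the requests.Response is ready
print(response.status_code)

Several of the examples below also pass a background_callback keyword, which older requests-futures releases invoke with (session, response) in the worker thread before the future resolves.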
Example 1: _get_raw_data
# Required import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import get [as alias]
def _get_raw_data(self):
    docktor_config = providers_config.providers['docktor']
    apps = []
    session = FuturesSession(max_workers=CONCURRENT_JOBS_LIMIT)
    session.mount('https://', self.__requests_http_adapter)
    session.mount('http://', self.__requests_http_adapter)
    for stage in docktor_config:
        for zone in docktor_config[stage]:
            apps_uri = '{uri}/apps/'.format(uri=docktor_config[stage][zone]['uri'])
            try:
                r = session.get(apps_uri, timeout=REQUEST_TIMEOUT).result()
                r.raise_for_status()
                apps_list = r.json()
            except ValueError as e:
                logger.error("Non json response {} from {}-{} docktor".format(r.content, stage, zone))
                raise e
            except Exception as e:
                logger.error("Exception raised on {}-{} docktor".format(stage, zone))
                raise e
            future_apps_details = [session.get('{apps_uri}{app}'.format(apps_uri=apps_uri, app=app), timeout=REQUEST_TIMEOUT) for app in apps_list]
            try:
                apps_details = [a.result() for a in future_apps_details]
            except Exception as e:
                logger.error("Exception raised on {}-{} docktor".format(stage, zone))
                raise e
            partial_get_app_info = partial(self.get_app_info, stage, zone)
            apps.extend(map(lambda a: partial_get_app_info(a), apps_details))
    return apps
Example 2: CustomStreamListener
# Required import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import get [as alias]
class CustomStreamListener(tweepy.StreamListener):
    def __init__(self, socketio, track):
        super(CustomStreamListener, self).__init__()
        self.socketio = socketio
        self.room = track
        self.session = FuturesSession()

    def get_geonames_username(self):
        return "yasyf{}".format(random.randint(1, 5))

    def on_status(self, status):
        if status.coordinates or status.author.location:
            data = {'text': status.text.encode('utf-8')}
            data.update({k: getattr(status.author, k) for k in ['time_zone', 'location']})
            data.update({k: getattr(status, k) for k in ['lang', 'coordinates']})

            def add_sentiment(session, response):
                data['sentiment'] = response.json()['results']
                self.socketio.emit('status', data, room=self.room)

            def add_country_code(session, response):
                try:
                    json = response.json()
                    if json['totalResultsCount'] > 0:
                        result = json['geonames'][0]
                        data['country'] = result['countryCode']
                        data['coordinates'] = {'coordinates': [float(result['lng']), float(result['lat'])]}
                    else:
                        return
                except:
                    data['country'] = response.text.strip()
                if TEST_MODE:
                    data['sentiment'] = random.random()
                    self.socketio.emit('status', data, room=self.room)
                else:
                    url = "http://apiv2.indico.io/sentiment"
                    args = {'key': os.getenv('INDICOIO_API_KEY')}
                    self.session.post(url, data={'data': data['text']}, params=args, background_callback=add_sentiment)

            if status.coordinates:
                url = "http://ws.geonames.org/countryCode"
                args = {'lat': status.coordinates['coordinates'][1], 'lng': status.coordinates['coordinates'][0],
                        'username': self.get_geonames_username()}
                self.session.get(url, params=args, background_callback=add_country_code)
            else:
                url = "http://api.geonames.org/search"
                args = {'q': status.author.location, 'username': self.get_geonames_username(),
                        'maxRows': 1, 'type': 'json'}
                self.session.get(url, params=args, background_callback=add_country_code)
        return True

    def on_error(self, status_code):
        print 'Encountered error with status code:', status_code
        self.socketio.emit('error', {'status_code': status_code}, room=self.room)
        return True

    def on_timeout(self):
        print 'Timeout...'
        return True
Example 3: add_list_new
# Required import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import get [as alias]
def add_list_new() -> None:
    requester = FuturesSession(executor=ProcessPoolExecutor(30), session=requests.session())
    api_key = settings.TBA_API_HEADERS

    team_list_get = lambda p: requester.get(team_by_page_url_template(page=p), headers=api_key)
    team_participation_get = lambda tn: requester.get(team_participation_url_template(team=tn), headers=api_key)

    page_range = get_page_range()

    print("\nStarting %d HTTP requests for team lists, split between %d processes..." % (
        page_range[1] - page_range[0], requester.executor._max_workers))
    team_list_futures = [team_list_get(p) for p in range(*page_range)]
    print("Waiting...")
    wait(team_list_futures)
    print("Done!\n")

    teams_lists = map(lambda f: f.result().json(), team_list_futures)
    teams_data = [item for page_data in teams_lists for item in page_data]
    team_numbers = [*map(lambda t: t['team_number'], teams_data)]

    print("Starting %d HTTP requests for team participation data, split between %d processes..." % (
        len(team_numbers), requester.executor._max_workers))
    team_participation_futures = [team_participation_get(tn) for tn in team_numbers]
    print("Waiting...")
    wait(team_participation_futures)
    print("Done!\n")

    team_participations = map(lambda f: f.result().json(), team_participation_futures)

    arg_list = zip(team_numbers, teams_data, team_participations)
    for args in arg_list:
        add_team(*args)
Example 4: test_futures_session
# Required import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import get [as alias]
def test_futures_session(self):
    # basic futures get
    sess = FuturesSession()
    future = sess.get(httpbin('get'))
    self.assertIsInstance(future, Future)
    resp = future.result()
    self.assertIsInstance(resp, Response)
    self.assertEqual(200, resp.status_code)

    # non-200, 404
    future = sess.get(httpbin('status/404'))
    resp = future.result()
    self.assertEqual(404, resp.status_code)

    def cb(s, r):
        self.assertIsInstance(s, FuturesSession)
        self.assertIsInstance(r, Response)
        # add the parsed json data to the response
        r.data = r.json()

    future = sess.get(httpbin('get'), background_callback=cb)
    # this should block until complete
    resp = future.result()
    self.assertEqual(200, resp.status_code)
    # make sure the callback was invoked
    self.assertTrue(hasattr(resp, 'data'))

    def rasing_cb(s, r):
        raise Exception('boom')

    future = sess.get(httpbin('get'), background_callback=rasing_cb)
    with self.assertRaises(Exception) as cm:
        resp = future.result()
    self.assertEqual('boom', cm.exception.args[0])
Example 5: bench_requests_futures_async
# Required import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import get [as alias]
def bench_requests_futures_async(number_reqs, nb_worker):
    # https://pypi.python.org/pypi/requests-futures
    l = []
    start = datetime.datetime.now()
    print('Start : ', start)

    def bg_cb(sess, resp):
        # resp.text
        if resp.status_code != requests.codes.ok:
            print(resp.status_code)
            resp.raise_for_status()
        # print(dir(resp))
        l.append(1)
        l_size = len(l)
        print(l_size)
        # print(len(response.body))
        if l_size == number_reqs:
            tornado.ioloop.IOLoop.instance().stop()
        # compare against a timedelta, not a bare int, so the 60-second cutoff actually triggers
        if datetime.datetime.now() - start >= datetime.timedelta(seconds=60):
            tornado.ioloop.IOLoop.instance().stop()

    session = FuturesSession(max_workers=10)
    for elem in range(int(number_reqs / nb_worker)):
        for e in range(nb_worker):
            session.get(
                "http://www.leboncoin.fr/",
                background_callback=bg_cb
            )
        time.sleep(1)
    print('[Rq TURFU] Done :', datetime.datetime.now() - start)
Example 6: fetchReviews
# Required import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import get [as alias]
def fetchReviews(unique_id):
    s = FuturesSession()

    # Hand shake proc. to figure out how many calls we send to server
    api_format = 'https://watcha.net/comment/list?unique_id={unique_id}&start_index={start_index}&count=10&type=like'
    handshake = api_format.format(unique_id=unique_id, start_index=str(0))
    hs = s.get(handshake).result().content
    json_hs = json.loads(hs)
    total_count = int(json_hs['meta']['total_count'])
    how_many_queries = total_count / 10 + 1

    query_urls = [api_format.format(unique_id=unique_id, start_index=str(i * 10)) for i in xrange(0, how_many_queries, 1)]
    reviews = [
        {
            'movie_title': r['movie_title'],
            'rating': r['rating'],
            'text': r['text'],
            'updated_at': time.mktime(dateutil.parser.parse(r['updated_at']).timetuple()),
            'comment_id': r['comment_id']
        }
        for qu in query_urls
        for r in json.loads(s.get(qu).result().content)['data']
    ]
    return reviews
Example 7: test_redirect
# Required import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import get [as alias]
def test_redirect(self):
    """ Tests for the ability to cleanly handle redirects. """
    sess = FuturesSession()
    future = sess.get(httpbin('redirect-to?url=get'))
    self.assertIsInstance(future, Future)
    resp = future.result()
    self.assertIsInstance(resp, Response)
    self.assertEqual(200, resp.status_code)

    future = sess.get(httpbin('redirect-to?url=status/404'))
    resp = future.result()
    self.assertEqual(404, resp.status_code)
Example 8: _chapter_pages
# Required import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import get [as alias]
def _chapter_pages(self, soup, html):
    # a <select> tag has options that each points to a page
    neighbour = soup.find('select', id='combobox').find_next_sibling('select')
    opts = neighbour.find_all('option')
    urls = [opt['value'] for opt in opts]

    # Page 1 has already been fetched (stored in this html param, duh!)
    # so let's save ourselves an http request
    pages_htmls = [html]
    urls = urls[1:]
    session = FuturesSession()

    for order, url in enumerate(urls):
        uri = self.netlocs[2] + url
        print(uri)
        res = session.get(uri).result()
        if res.status_code != 200:
            raise HtmlError('cannot fetch')
        pages_htmls.append(res.content)

    returns = []
    for page_html in pages_htmls:
        soup = BeautifulSoup(page_html)
        img_url = soup.find('img', id='mainImg')['src']
        returns.append(img_url)
    return returns
Example 9: BlueFloodMetricsClient
# Required import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import get [as alias]
class BlueFloodMetricsClient(object):
    def __init__(self, token, project_id, executors):
        self.token = token
        self.project_id = project_id
        self.session = FuturesSession(max_workers=executors)
        self.headers = {
            'X-Project-ID': self.project_id
        }
        if self.token:
            self.headers.update({
                'X-Auth-Token': self.token
            })
        self.session.headers.update(self.headers)

    def async_requests(self, urls):
        futures_results = []
        for url in urls:
            LOG.info("Request made to URL: {0}".format(url))
            futures_results.append(self.session.get(url))

        responses = []
        for future in futures.as_completed(fs=futures_results):
            resp = future.result()
            LOG.info("Request completed to URL: {0}".format(resp.url))
            responses.append(resp)
        return responses
Example 10: _chapter_pages
# Required import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import get [as alias]
def _chapter_pages(self, soup, html):
    # For webtoons, all pages are shown in a single page.
    # When that's the case, there's this element that asks if you want to
    # view page-by-page instead. Let's use this element to check if we're
    # parsing a webtoon chapter.
    webtoon = soup.find("a", href="?supress_webtoon=t")
    if webtoon is not None:
        img_tags = soup.find_all(_page_img_tag)
        return [tag["src"] for tag in img_tags]

    # a <select> tag has options that each points to a page
    opts = soup.find("select", id="page_select").find_all("option")
    urls = [opt["value"] for opt in opts]

    # Page 1 has already been fetched (stored in this html param, duh!)
    # so let's save ourselves an http request
    pages_htmls = [html]
    urls = urls[1:]
    session = FuturesSession()

    for order, url in enumerate(urls):
        res = session.get(url).result()
        if res.status_code != 200:
            raise HtmlError("cannot fetch")
        pages_htmls.append(res.content)

    returns = []
    for page_html in pages_htmls:
        soup = BeautifulSoup(page_html)
        img_url = soup.find("img", id="comic_page")["src"]
        returns.append(img_url)
    return returns
Example 11: get_blocks
# Required import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import get [as alias]
def get_blocks(*heights):
    urls = [get_block_coinsecrets_url(h) for h in heights]
    session = FuturesSession()
    reqs = [session.get(url) for url in urls]
    responses = [r.result() for r in reqs]
    resps_json = [json.loads(r.content.decode()) for r in responses]
    return resps_json
Example 12: get_frames
# Required import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import get [as alias]
def get_frames(self, count):
    """Get a list of images from Environment Canada."""
    soup = BeautifulSoup(requests.get(self.IMAGES_URL.format(self.station_code)).text, 'html.parser')
    image_links = [tag['href'] for tag in soup.find_all('a') if '.gif' in tag['href']]

    if len([i for i in image_links[:8] if 'COMP' in i]) > 4:
        image_string = '_'.join([self.station_code, 'COMP_PRECIPET', self.get_precip_type() + '.gif'])
    else:
        image_string = '_'.join([self.station_code, 'PRECIPET', self.get_precip_type() + '.gif'])

    images = [tag['href'] for tag in soup.find_all('a') if image_string in tag['href']]

    futures = []
    session = FuturesSession(max_workers=count)
    for i in reversed(images[:count]):
        url = self.FRAME_URL.format(self.station_code, i)
        futures.append(session.get(url=url).result().content)

    def add_layers(frame):
        frame_bytesio = BytesIO()
        base = Image.open(BytesIO(frame)).convert('RGBA')
        base.alpha_composite(self.roads)
        base.alpha_composite(self.cities)
        base.save(frame_bytesio, 'GIF')
        frame_bytesio.seek(0)
        return frame_bytesio.read()

    frames = [add_layers(f) for f in futures if f[0:3] == b'GIF']

    """Repeat last frame."""
    for i in range(0, 2):  # pylint: disable=unused-variable
        frames.append(frames[count - 1])

    return frames
Example 13: search
# Required import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import get [as alias]
def search(self, q='', cat='', indexer='all', **kwargs):
    self.logger.debug("Searching for %s category %s on indexer %s" % (q, cat, indexer))
    if cat:
        cat = '&cat=' + cat

    sess = FuturesSession(max_workers=8)
    job_list = []

    if indexer == 'all':
        for i in NewznabIndexers.select():
            cmd = 'search&q=' + urllib2.quote(q.encode(encoding="UTF-8")) + cat + '&extended=1'
            u = i.apiurl
            u += cmd
            u = u.replace('o=json', 'o=xml')
            job_list.append(u)
    else:
        for i in NewznabIndexers.select():
            if i.name == indexer:
                cmd = 'search&q=' + urllib2.quote(q.encode(encoding="UTF-8")) + cat + '&extended=1'
                u = i.apiurl
                u += cmd
                u = u.replace('o=json', 'o=xml')
                job_list.append(u)

    result = []
    future = []

    for url in job_list:
        try:
            self.logger.debug('Fetching search results from %s' % url)
            t = sess.get(url, timeout=60, headers=self.headers)
        except Exception as e:
            self.logger.error('%s when fetching %s' % (e, url))
            continue
        future.append(t)

    for future in cf.as_completed(future):
        if future.exception() is not None:
            self.logger.error('Failed to fetch results %s' % (future.exception()))
        else:
            f = []
            res = future.result()
            try:
                provider_res = xmltodict.parse(res.content, attr_prefix='')
                if provider_res:
                    if 'rss' in provider_res:
                        if 'channel' in provider_res['rss']:
                            if 'item' in provider_res['rss']['channel']:
                                f.append(provider_res['rss']['channel'])
                    if 'error' in provider_res:
                        self.logger.debug('%s %s' % (provider_res['rss']['channel']['title'], provider_res['error']['description']))
            except Exception as e:
                self.logger.error(res.url, e, exc_info=True)
            result.append(f)

    return result
Example 14: get_usgs_nearby_cities
# Required import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import get [as alias]
def get_usgs_nearby_cities(self, earthquake):
    """
    performs request on local earthquake nearby cities url and returns the data
    """
    try:
        nearest_cities_object = earthquake[
            "properties"]["products"]["nearby-cities"]
        nearest_cities_url = nearest_cities_object[0][
            "contents"]["nearby-cities.json"]["url"]
    except:
        nearest_cities_url = None

    if nearest_cities_url:
        session = FuturesSession(max_workers=1)
        nearest_cities_response = session.get(
            nearest_cities_url, headers=app.config["API_MANAGER_HEADERS"])
        nearest_cities_details = nearest_cities_response.result().json()
        list_of_nearby_cities = []
        for item in nearest_cities_details:
            city = NearestCity(
                id=None,
                distance=item["distance"],
                direction=item["direction"],
                name=item["name"],
                latitude=item["latitude"],
                longitude=item["longitude"],
                population=item["population"],
                earthquake_id=None
            )
            list_of_nearby_cities.append(city)
        earthquake["properties"]["nearest_cities_url"] = nearest_cities_url
        earthquake["properties"]["nearest_cities"] = list_of_nearby_cities
    else:
        earthquake["properties"]["nearest_cities_url"] = None
        earthquake["properties"]["nearest_cities"] = []
    return earthquake
Example 15: async_requests
# Required import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import get [as alias]
def async_requests(locations, site=None):
    session = FuturesSession()
    check_date = datetime.now() + timedelta(hours=-4)
    for location in locations:
        gig = Gigs.select().where(Gigs.location.contains(location)).order_by(Gigs.datetime.desc()).first()
        if (gig is None) or ((datetime.strptime(gig.datetime, '%Y-%m-%d %H:%M') < check_date)):
            url = "https://{}.craigslist.org/search/{}/".format(location, (site or CRAIGSLIST_SITE))
            future = session.get(url, background_callback=insert_callback)