

Python FuturesSession.mount Method Code Examples

This article compiles typical usage examples of the Python method requests_futures.sessions.FuturesSession.mount. If you are wondering what FuturesSession.mount does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples of requests_futures.sessions.FuturesSession, the class this method belongs to.


Below are 14 code examples of the FuturesSession.mount method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
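Before the individual examples, here is a minimal sketch of the pattern nearly all of them share: create a FuturesSession, mount an HTTPAdapter (with a connection pool and retry policy) on one or more URL prefixes, then issue requests that return futures. The worker count, retry settings, and the example.com URL below are illustrative assumptions, not values taken from the examples that follow.

from requests.adapters import HTTPAdapter
from requests_futures.sessions import FuturesSession
from urllib3.util.retry import Retry

# Illustrative settings (assumptions); tune for your workload
MAX_WORKERS = 10
MAX_RETRIES = 5

session = FuturesSession(max_workers=MAX_WORKERS)
adapter = HTTPAdapter(pool_connections=MAX_WORKERS,
                      pool_maxsize=MAX_WORKERS,
                      max_retries=Retry(total=MAX_RETRIES, backoff_factor=0.5))
# mount() attaches the adapter to every URL that starts with the given prefix
session.mount('http://', adapter)
session.mount('https://', adapter)

# get() returns a future immediately; result() blocks until the response arrives
future = session.get('https://example.com/api', timeout=5)
response = future.result()
print(response.status_code)

Several of the examples below also pass a background_callback= argument to the request methods; newer releases of requests-futures dropped that parameter in favor of the standard requests hooks mechanism, so treat those calls as written for older versions of the library.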

Example 1: APNsClient

# Required module import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import mount [as alias]
class APNsClient(object):
    def __init__(self, cert_file, use_sandbox=False, use_alternative_port=False):
        server = 'api.development.push.apple.com' if use_sandbox else 'api.push.apple.com'
        port = 2197 if use_alternative_port else 443
        self.cert = cert_file
        self.server = "https://{}:{}".format(server, port)
        self.__connection = FuturesSession()
        self.__connection.mount('https://', HTTP20Adapter())

    def send_notification(self, tokens, notification, priority=NotificationPriority.Immediate, topic=None):
        # print(notification.dict())
        json_payload = json.dumps(notification.dict(), ensure_ascii=False, separators=(',', ':')).encode('utf-8')

        headers = {
            'apns-priority': priority.value
        }
        if topic:
            headers['apns-topic'] = topic

        if not isinstance(tokens, list):
            tokens = [tokens]

        for token in tokens:
            url = '{}/3/device/{}'.format(self.server, token)
            self.__connection.post(url, json_payload, headers=headers, cert=self.cert, background_callback=req_callback)
Author: yichengchen, Project: PyAPNs2, Lines: 27, Source: client.py

Example 2: _get_raw_data

# Required module import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import mount [as alias]
    def _get_raw_data(self):
        docktor_config = providers_config.providers['docktor']
        apps = []
        session = FuturesSession(max_workers=CONCURRENT_JOBS_LIMIT)
        session.mount('https://', self.__requests_http_adapter)
        session.mount('http://', self.__requests_http_adapter)
        for stage in docktor_config:
            for zone in docktor_config[stage]:
                apps_uri = '{uri}/apps/'.format(uri=docktor_config[stage][zone]['uri'])
                try:
                    r = session.get(apps_uri, timeout=REQUEST_TIMEOUT).result()
                    r.raise_for_status()
                    apps_list = r.json()
                except ValueError as e:
                    logger.error("Non json response {} from {}-{} docktor".format(r.content, stage, zone))
                    raise e
                except Exception as e:
                    logger.error("Exception raised on {}-{} docktor".format(stage, zone))
                    raise e

                future_apps_details = [session.get('{apps_uri}{app}'.format(apps_uri=apps_uri, app=app), timeout=REQUEST_TIMEOUT) for app in apps_list]

                try:
                    apps_details = [a.result() for a in future_apps_details]
                except Exception as e:
                    logger.error("Exception raised on {}-{} docktor".format(stage, zone))
                    raise e

                partial_get_app_info = partial(self.get_app_info, stage, zone)

                apps.extend(map(lambda a: partial_get_app_info(a), apps_details))
        return apps
Author: hmrc, Project: wristband, Lines: 34, Source: providers.py

Example 3: Scraper

# Required module import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import mount [as alias]
class Scraper(object):
    def __init__(self, url):
        self.url = url
        self.session = FuturesSession(max_workers=100)
        adapter = requests.adapters.HTTPAdapter(pool_connections=100, pool_maxsize=100)
        self.session.mount('http://', adapter)
        self.session.mount('https://', adapter)

    def _extract_image_urls(self, soup):
        for img in soup.findAll("img", src=True):
            yield urljoin(self.url, img["src"])

    def _find_thumbnail_image(self):
        content_type, content = _fetch_url(self.url, session=self.session)
        soup = BeautifulSoup(content)
        image_urls = self._extract_image_urls(soup)
        image_urls = [u for u in image_urls] # turn to list
        image_urls = list(set(image_urls)) # lose duplicates
        image_sizes = _parallel_get_sizes(image_urls, self.session)
        logging.debug('got sizes for {} images'.format(len(image_sizes)))
        # find biggest
        max_area = 0
        max_url = None
        for image_url in image_urls:
            size = image_sizes[image_url]
            if not size:
                continue

            # ignore little images
            area = size[0] * size[1]
            if area < 5000:
                logging.debug('ignore little {}'.format(image_url))
                continue

            # ignore excessively long/wide images
            if max(size) / min(size) > 1.5:
                logging.debug('ignore dimensions {}'.format(image_url))
                continue

            # penalize images with "sprite" in their name
            if 'sprite' in image_url.lower():
                logging.debug('penalizing sprite {}'.format(image_url))
                area /= 10

            if area > max_area:
                max_area = area
                max_url = image_url
        return max_url


    def scrape(self):
        thumbnail_url = self._find_thumbnail_image()
        #thumbnail = _make_thumbnail_from_url(thumbnail_url, referer=self.url)
        return thumbnail_url
Author: metakermit, Project: posterwall, Lines: 56, Source: scraper.py

Example 4: CrashInfo

# Required module import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import mount [as alias]
class CrashInfo(object):

    # TODO: count is probably erroneous since there is a range by default in supersearch...
    CRASH_STATS_URL = 'https://crash-stats.mozilla.com'
    SUPERSEARCH_URL = CRASH_STATS_URL + '/api/SuperSearch'
    TIMEOUT = 5
    MAX_RETRIES = 5
    MAX_WORKERS = multiprocessing.cpu_count()

    def __init__(self, paths, credentials = None):
        self.results = [ ]
        self.credentials = credentials
        self.info = { }
        self.paths = [paths] if type(paths) == str else paths 
        for path in self.paths:
            self.info[path] = { 'crashes': -1 }
                                
        self.session = FuturesSession(max_workers = self.MAX_WORKERS)
        self.session.mount(self.CRASH_STATS_URL, HTTPAdapter(max_retries = self.MAX_RETRIES))
        self.__get_info()

    def get(self):
        for r in self.results:
            r.result()
        return self.info

    def __get_apikey(self):
        if self.credentials:
            return self.credentials['tokens'][self.CRASH_STATS_URL]
        else:
            return ''
    
    def __info_cb(self, path):
        def cb(sess, res):
            self.info[path]['crashes'] = res.json()['total']

        return cb

    def __get_info(self):
        header = { 'Auth-Token': self.__get_apikey() }
        for path in self.paths:
            filename = os.path.basename(path)
            self.results.append(self.session.get(self.SUPERSEARCH_URL,
                                                 params = { 'product': 'Firefox',
                                                            'topmost_filenames': filename, 
                                                            '_results_number': 0,
                                                            '_facets': 'product',
                                                            '_facets_size': 1 },
                                                 headers = header,
                                                 timeout = self.TIMEOUT,
                                                 background_callback = self.__info_cb(path)))
Author: lizzard, Project: clouseau, Lines: 53, Source: CrashInfo.py

Example 5: FXRevision

# Required module import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import mount [as alias]
class FXRevision(object):

    ARCHIVES_URL = 'http://archive.mozilla.org'
    NIGHTLY_URL = ARCHIVES_URL + '/pub/firefox/nightly/'
    TIMEOUT = 5
    MAX_RETRIES = 5

    def __init__(self, versions, fx_version, os):
        self.results = [ ]
        self.dates = { }
        self.fx_version = fx_version
        self.os = os
        self.info = { }
        pattern = re.compile('([0-9]{4})([0-9]{2})([0-9]{2})([0-9]{2})([0-9]{2})([0-9]{2})')
        for version in versions:
            m = pattern.search(version)
            self.dates[version] = [m.group(i) for i in range(1, 7)]

        self.session = FuturesSession()
        self.session.mount(self.ARCHIVES_URL, HTTPAdapter(max_retries = self.MAX_RETRIES))
        self.__get_info()

    def get(self):
        for r in self.results:
            r.result()
        return self.info
        
    def __make_url(self, l):
        return self.NIGHTLY_URL + l[0] + '/' + l[1] + '/' + '-'.join(l) + '-mozilla-central/firefox-' + self.fx_version + '.en-US.' + self.os + '.json'

    def __info_cb(self, sess, res):
        json = res.json()
        self.info[json['buildid']] = json['moz_source_stamp']
    
    def __get_info(self):
        for date in self.dates.itervalues():
            self.results.append(self.session.get(self.__make_url(date),
                                                 timeout = self.TIMEOUT,
                                                 background_callback = self.__info_cb))
Author: lizzard, Project: clouseau, Lines: 41, Source: FXRevision.py

Example 6: HTTPDriver

# Required module import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import mount [as alias]
class HTTPDriver(BaseDriver):
  """HTTPDriver

  The :class:`HTTPDriver` class reads SBP messages from an HTTP
  service for a device and writes out to a stream. This driver is like
  a file-handle with read and writes over two separately HTTP
  connections, but can also be enabled and disabled by its consumer.

  Parameters
  ----------
  device_uid : uid
    Device unique id
  url : str
    HTTP endpoint
  retries : tuple
    Configure connect and read retry count. Defaults to
    (MAX_CONNECT_RETRIES, MAX_READ_RETRIES).
  timeout : tuple
    Configure connect and read timeouts. Defaults to
    (DEFAULT_CONNECT_TIMEOUT, DEFAULT_READ_TIMEOUT).

  """

  def __init__(self,
               device_uid=None,
               url="https://broker.staging.skylark.swiftnav.com",
               retries=DEFAULT_RETRIES,
               timeout=DEFAULT_TIMEOUT,):
    self._retry = Retry(connect=DEFAULT_RETRIES[0],
                        read=DEFAULT_RETRIES[1],
                        redirect=MAX_REDIRECTS,
                        status_forcelist=[500],
                        backoff_factor=DEFAULT_BACKOFF_FACTOR)
    self.url = url
    self.read_session = requests.Session()
    self.read_session.mount("http://",
                            HTTPAdapter(pool_connections=DEFAULT_POOLSIZE,
                                        pool_maxsize=DEFAULT_POOLSIZE,
                                        pool_block=DEFAULT_POOLBLOCK,
                                        max_retries=self._retry))
    self.read_session.mount("https://",
                            HTTPAdapter(pool_connections=DEFAULT_POOLSIZE,
                                        pool_maxsize=DEFAULT_POOLSIZE,
                                        pool_block=DEFAULT_POOLBLOCK,
                                        max_retries=self._retry))
    self.write_session = None
    self.device_uid = device_uid
    self.timeout = timeout
    self.read_response = None
    self.write_response = None
    self.source = None

  def flush(self):
    """File-flush wrapper (noop).

    """
    pass

  def close(self):
    """File-handle close wrapper (noop).

    """
    try:
      self.read_close()
      self.write_close()
    except:
      pass

  @property
  def write_ok(self):
    """
    Are we connected for writes?
    """
    # Note that self.write_response is either None or a Response
    # object, which cast to False for 4xx and 5xx HTTP codes.
    return bool(self.write_response)

  def connect_write(self, source, whitelist, device_uid=None, pragma=None):
    """Initialize a streaming write HTTP response. Manually connects the
    underlying file-handle. In the event of a network disconnection,
    use to manually reinitiate an HTTP session.

    Parameters
    ----------
    source : sbp.client.handler.Handler
      Iterable source of SBP messages.
    whitelist : [int]
      Whitelist of messages to write

    """
    header_device_uid = device_uid or self.device_uid
    headers = {'Device-Uid': header_device_uid, 'Content-Type': BROKER_SBP_TYPE, 'Pragma': pragma}
    if not pragma:
      del headers['Pragma']
    try:
      self.executor = ThreadPoolExecutor(max_workers=DEFAULT_POOLSIZE)
      self.write_session = FuturesSession(executor=self.executor)
      self.write_session.mount("http://",
                               HTTPAdapter(pool_connections=DEFAULT_POOLSIZE,
                                           pool_maxsize=DEFAULT_POOLSIZE,
#......... remainder of code omitted .........
Author: wltr, Project: libsbp, Lines: 103, Source: network_drivers.py

Example 7: cert_verify

# Required module import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import mount [as alias]
    it is simply not checked
    
    Generously provided by Juan Luis Boya
    """
    def cert_verify(self, conn, *args, **kwargs):
        """
        Avoids the verification of the SSL Hostname field

        :param Connection conn: The connection object
        """
        super(NotCheckingHostnameHTTPAdapter, self).cert_verify(conn, *args, **kwargs)
        conn.assert_hostname = False

# With this adapter mounted, the hostname is not checked
futures_session = FuturesSession()
futures_session.mount('https://', NotCheckingHostnameHTTPAdapter())

# Create the temporary directory if it does not exist
if not os.path.exists(conf.TMPDIR):
    os.makedirs(conf.TMPDIR)

__UPLOADS__ = conf.TMPDIR # temporary directory where files will be stored

open_ws = set() # Set of currently alive websockets

class BaseHandler(RequestHandler):
    """
    The base class which the rest of HTTP handlers extends.
    Provides secure cookie decryption and error handling
    """
    def get_current_user(self):
Author: Alternhuman, Project: deployer, Lines: 33, Source: deployer.py

Example 8: again

# Required module import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import mount [as alias]
)
# Enable For Debugging:
logging.disable(logging.INFO)

DELAY = .05  # Second delay between calculating trades.
RGAP = .005 # Max gap before cancelling a robux split trade
TGAP = .0025 # Max gap before cancelling a tix split trade
TRADE_LAG_TIME = 1.25 # Estimate of how long it takes for Roblox to process our requests
RESET_TIME = 240 # Number of seconds the bot goes without trading before resetting last rates to be able to trade again (might result in loss)
DEQUE_SIZE = 15 # Max number of past trade rates to keep track of, to prevent money loss
NUM_TRADES = 19 # Number of trades displayed on the trade currency page
# Initializing requests.Session for frozen application
os.environ["REQUESTS_CA_BUNDLE"] = find_data_file('cacert.pem')
session = FuturesSession(max_workers=15)
adapter = requests.adapters.HTTPAdapter(max_retries=Retry(total=20,connect=10,read=10,backoff_factor=.5))
session.mount("http://", adapter)
session.mount("https://", adapter)
# Storing variables since they can't be stored in QObject
rates = DottedDict(
    dict(
        last_tix_rate = 0,
        last_robux_rate = 0,
        current_tix_rate = 0,
        current_robux_rate = 0,
        past_tix_rates = deque(maxlen=DEQUE_SIZE),
        past_robux_rates = deque(maxlen=DEQUE_SIZE),
    )
)

class Trader(QtCore.QObject):
Author: cqian19, Project: Roblox-Valk-TC-Bot, Lines: 32, Source: actions.py

Example 9: BZInfo

# Required module import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import mount [as alias]
class BZInfo(object):

    BZ_URL = 'https://bugzilla.mozilla.org'
    API_URL = BZ_URL + '/rest/bug'
    TIMEOUT = 60
    MAX_RETRIES = 5
    MAX_WORKERS = multiprocessing.cpu_count()
    CHUNK_SIZE = 8
    
    def __init__(self, bugids, credentials = None):
        self.results = [ ]
        self.credentials = credentials
        self.bugids = map(str, bugids)
        self.info = { }
        for bugid in self.bugids:
            self.info[bugid] = { 'ownership': [],
                                 'reviewers': set(),
                                 'commenters': { },
                                 'authorized': False }
        self.session = FuturesSession(max_workers = self.MAX_WORKERS)
        self.session.mount(self.BZ_URL, HTTPAdapter(max_retries = self.MAX_RETRIES))
        self.reply_pattern = re.compile('^\(In reply to .* comment #([0-9]+)\)')
        self.dupbug_pattern = re.compile('\*\*\* Bug [0-9]+ has been marked as a duplicate of this bug. \*\*\*')
        self.review_pattern= re.compile('review\?\(([^\)]+)\)')
        self.needinfo_pattern= re.compile('needinfo\?\(([^\)]+)\)')
        self.feedback_pattern= re.compile('feedback\?\(([^\)]+)\)')
        self.__get_info()
        self.__analyze_history()
        self.__analyze_comment()

    def get(self):
        for r in self.results:
            r.result()
        return self.info

    def get_best_collaborator(self):
        # a collaboration between A & B is when A reviews a patch of B (or reciprocally)
        # in terms of a graph:
        #   - each node represents a reviewer or a writer (owner)
        #   - each edge represents a collaboration
        # here we count the degree of each node and find out who's the best collaborator
        # TODO: use this graph to get other metrics (??)

        # it could be interesting to weight each contribution according to its date
        # someone who made 20 contribs recently is probably better than someone who made 50 contribs
        # two years ago...
        # TODO: We could weight a contrib with a gaussian that depends on the time
        
        collaborations = { }
        for info in self.get().itervalues():
            if info['authorized']:
                owner = info['owner']
                if owner not in collaborations:
                    collaborations[owner] = 0
                reviewers = info['reviewers']
                feedbacks = info['feedbacks']
                collabs = set()
                if reviewers and owner in reviewers:
                    collabs |= reviewers[owner]
                if feedbacks and owner in feedbacks:
                    collabs |= feedbacks[owner]
                if collabs:
                    collaborations[owner] += len(collabs)
                    for person in collabs:
                        collaborations[person] = collaborations[person] + 1 if person in collaborations else 1
 
        # maybe we should compute the percentage of collaborations just to give an idea
 
        return utils.get_best(collaborations)

    def get_best_component_product(self):
        # Just get stats on components and products
        comps_prods = { }
        for info in self.get().itervalues():
            if info['authorized']:
                comp_prod = (info['component'], info['product'])
                comps_prods[comp_prod] = comps_prods[comp_prod] + 1 if comp_prod in comps_prods else 1

        if comps_prods:
            return utils.get_best(comps_prods)
        else:        
            return None

    def __get_apikey(self):
        if self.credentials:
            return self.credentials['tokens'][self.BZ_URL]
        else:
            return ''

    def __info_cb(self, sess, res):
        bugs = res.json()['bugs']
        for bug in bugs:
            self.info[str(bug['id'])].update({ 'authorized': True,
                                               'severity': bug['severity'],
                                               'votes': bug['votes'],
                                               'component': bug['component'],
                                               'product': bug['product'],
                                               'nbcc': len(bug['cc']),
                                               'reporter': bug['creator'],
                                               'owner': bug['assigned_to_detail']['email']})
#......... remainder of code omitted .........
Author: lizzard, Project: clouseau, Lines: 103, Source: BZInfo.py

Example 10: Track

# Required module import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import mount [as alias]
class Track(object):

    CRASH_STATS_URL = 'https://crash-stats.mozilla.com'
    SUPERSEARCH_URL = CRASH_STATS_URL + '/api/SuperSearch'
    TIMEOUT = 5
    MAX_RETRIES = 5
    MAX_WORKERS = multiprocessing.cpu_count()
    HG_PATTERN = re.compile('hg:hg.mozilla.org/mozilla-central:([^:]*):([a-z0-9]+)')
    
    def __init__(self, signature, day, day_delta = 1, credentials = None):
        self.results = [ ]
        self.credentials = credentials
        self.has_results = False
        self.day_delta = day_delta
        self.signature = signature
        self.info = { }
        self.date = utils.get_date_ymd(day)
        self.session = FuturesSession(max_workers = self.MAX_WORKERS)
        self.session.mount(self.CRASH_STATS_URL, HTTPAdapter(max_retries = self.MAX_RETRIES))
        self.__get_info()

    def get(self):
        if not self.has_results:
            for r in self.results:
                r.result()
            self.has_results = True
        return self.info

    def has_addons(self):
        return len(self.get()['addons']) != 0

    def __get_apikey(self):
        if self.credentials:
            return self.credentials['tokens'][self.CRASH_STATS_URL]
        else:
            return ''

    @staticmethod
    def __get_stats(info, field):
        l = info[field]
        total = float(info['total'])
        stats = { }
        for e in l:
            stats[e['term']] = utils.percent(float(e['count']) / total)
        return stats
    
    @staticmethod
    def __get_system_memory_use_mean(info):
        l = info['system_memory_use_percentage']
        total = float(info['total'])
        l = [(float(e['count']) / total, float(e['term'])) for e in l]  
        m = 0.
        for e in l:
            m += e[0] * e[1]

        v = 0.
        for e in l:
            v += e[0] * (m - e[1]) ** 2

        return {'mean': utils.simple_percent(round(m, 0)), 'stddev': utils.simple_percent(round(math.sqrt(v), 0))}

    @staticmethod
    def __is_weird_address(addr, cpu_name):
        if addr == '0x0':
            return True
        if utils.is64(cpu_name):
            if len(addr) <= 10:
                val = long(addr, 16)
                if val <= 1L << 16: # val <= 0xffff (ie: first 64k)
                    return True
            elif addr.startswith('0xffffffff'):
                addr = addr[10:] # 10 == len('0xffffffff')
                val = long(addr, 16)
                if val >= ((1L << 32) - (1L << 16)): # val >= 0xfffffffffff0000 (ie: last 64k)
                    return True
Author: lizzard, Project: clouseau, Lines: 77, Source: Track.py

Example 11: Backtrace

# Required module import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import mount [as alias]
class Backtrace(object):

    CRASH_STATS_URL = 'https://crash-stats.mozilla.com'
    PROCESSED_URL = CRASH_STATS_URL + '/api/ProcessedCrash/'
    TIMEOUT = 5
    MAX_RETRIES = 5
    MAX_WORKERS = multiprocessing.cpu_count()
    
    def __init__(self, uuids, fraction = 0.2, just_hg = False, credentials = None):
        self.just_hg = just_hg
        self.results = [ ]
        self.credentials = credentials
        self.uuids = uuids
        self.fraction = max(0., min(fraction, 1.))
        self.info = { }
        self.session = FuturesSession(max_workers = self.MAX_WORKERS)
        self.session.mount(self.CRASH_STATS_URL, HTTPAdapter(max_retries = self.MAX_RETRIES))
        self.__get_info()

    def get(self):
        for r in self.results:
            r.result()
        return self.info

    def __get_apikey(self):
        if self.credentials:
            return self.credentials['tokens'][self.CRASH_STATS_URL]
        else:
            return ''
    
    @staticmethod
    def __cycles_detection(funs):
        # TODO: improve this algorithm (not sure that's a good one)
        positions = { }
        # we get the function positions in the trace
        for i in range(len(funs)):
            fun = funs[i]
            if fun in positions:
                positions[fun].append(i)
            else:
                positions[fun] = [ i ]

        lengths = { }
        for k, v in positions.iteritems():
            if len(v) >= 2:
                l = v[1] - v[0]
                good = True
                for i in range(2, len(v)):
                    if v[i] - v[i - 1] != l:
                        good = False
                        break
                if good:
                    if l in lengths:
                        lengths[l].append((k, v))
                    else:
                        lengths[l] = [ (k, v) ]

        cycles = [ ]
        for k, v in lengths.iteritems():
            l = sorted(v, cmp = lambda x, y: cmp(x[1][0], y[1][0]))
            pat = [ ]
            container = [ l[0][0] ]
            pos = l[0][1][0]
            for i in range(1, len(l)):
                _pos = l[i][1][0]
                if _pos == pos + 1:
                    container.append(l[i][0])
                    pos = _pos
                else:
                    pat.append(tuple(container))
                    container = [ l[i][0] ]
                    pos = _pos

            pat.append(tuple(container))
            cycles += pat

        cycles = tuple(cycles)
        
        return cycles
    
    def __info_cb(self, sess, res):
        json = res.json()
        if 'json_dump' in json:
            uuid = json['uuid']
            jd = json['json_dump']
            if 'crashedThread' in json and 'threads' in jd:
                ct = json['crashedThread']
                ct = jd['threads'][ct]
                self.info[uuid] = { 'cycles': [ ],
                                        'address': '',
                                        'cpu_name': json['cpu_name'],
                                        'cpu_info': json['cpu_info'],
                                        'reason': json['reason'],
                                        'os': json['os_pretty_version'] }
                if 'frames' in ct:
                    frames = ct['frames']
                    functions = [ ]
                    # we get the functions in the backtrace (to check if there is a recursion)
                    for frame in frames:
                        if 'function' in frame:
#......... remainder of code omitted .........
Author: lizzard, Project: clouseau, Lines: 103, Source: Backtrace.py

Example 12: retrieve

# Required module import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import mount [as alias]
    def retrieve(self, catalog, *, dry_run=False, media_type=''):
        if not dry_run:
            distributions = Distribution.objects.filter(
                division_id=catalog.division_id, http_status_code__isnull=True)

            if media_type:
                distributions = distributions.filter(mediaType=media_type)

            if not distributions.exists():
                return

            # Collect the distribution-response pairs.
            def callback(distribution, response):
                results.append([distribution, response])

            # Create a closure.
            def factory(distribution):
                return lambda session, response: callback(distribution, response)

            # @see http://docs.python-requests.org/en/latest/api/#requests.adapters.HTTPAdapter
            # @see https://github.com/ross/requests-futures/blob/master/requests_futures/sessions.py
            session = FuturesSession()
            # Avoids "Connection pool is full, discarding connection" warnings.
            adapter_kwargs = {'pool_maxsize': 10}
            session.mount('https://',
                          requests.adapters.HTTPAdapter(**adapter_kwargs))
            session.mount('http://',
                          requests.adapters.HTTPAdapter(**adapter_kwargs))

            # @see https://djangosnippets.org/snippets/1949/
            pk = 0
            last_pk = distributions.order_by('-pk')[0].pk
            distributions = distributions.order_by('pk')
            while pk < last_pk:
                # @see https://github.com/ross/requests-futures/issues/18
                # @see https://github.com/ross/requests-futures/issues/5
                futures = []
                results = []

                # If an exception occurs, we lose progress on at most 100 requests.
                for distribution in distributions.filter(pk__gt=pk)[:100]:
                    pk = distribution.pk

                    # @see http://docs.python-requests.org/en/latest/user/advanced/#body-content-workflow
                    # @see http://stackoverflow.com/a/845595/244258
                    futures.append(
                        session.get(
                            quote(
                                distribution.accessURL,
                                safe="%/:=&?~#+!$,;'@()*[]"),
                            stream=True,
                            verify=False,
                            background_callback=factory(distribution)))

                for future in futures:
                    try:
                        future.result()
                    except (requests.exceptions.ConnectionError,
                            requests.exceptions.InvalidSchema,
                            requests.exceptions.InvalidURL,
                            requests.exceptions.MissingSchema,
                            requests.exceptions.ReadTimeout,
                            requests.exceptions.SSLError,
                            requests.exceptions.TooManyRedirects,
                            requests.packages.urllib3.exceptions.ProtocolError
                            ):
                        self.exception('')

                for distribution, response in results:
                    status_code = response.status_code
                    charset = ''

                    content_length = response.headers.get('content-length')
                    if content_length:
                        content_length = int(content_length)

                    # @see https://github.com/kennethreitz/requests/blob/b137472936cbe6a6acabab538c1d05ed4c7da638/requests/utils.py#L308
                    content_type = response.headers.get('content-type', '')
                    if content_type:
                        content_type, params = cgi.parse_header(content_type)
                        if 'charset' in params:
                            charset = params['charset'].strip("'\"")

                    distribution.http_headers = dict(response.headers)
                    distribution.http_status_code = status_code
                    distribution.http_content_length = content_length
                    distribution.http_content_type = content_type
                    distribution.http_charset = charset
                    distribution.save()

                    self.debug('{} {} {}'.format(
                        status_code, number_to_human_size(content_length),
                        content_type))

                    response.close()
        self.info('{} done'.format(catalog))
Author: jpmckinney, Project: inventory, Lines: 98, Source: headers.py

Example 13: ResourceSyncPuSH

# Required module import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import mount [as alias]
class ResourceSyncPuSH(object):
    """
    The base class for the publisher, hub and resource. Contains
    methods for reading config files, making http requests, error handling,
    etc.
    """

    def __init__(self):
        """
        Initializes the Futures-Requests session with the
        max number of workers and retries.
        """

        # max workers and retries should be configurable?
        self.session = FuturesSession(max_workers=10)
        adapter = HTTPAdapter(max_retries=3)
        self.session.mount("http://", adapter)
        self._start_response = None

        # config parameters
        self.config = {}
        self.config['log_mode'] = ""
        self.config['mimetypes'] = []
        self.config['trusted_publishers'] = []
        self.config['trusted_topics'] = []
        self.config['my_url'] = ""
        self.config['hub_url'] = ""
        self.config['topic_url'] = ""
        self.config['subscribers_file'] = ""
        self.config['server_path'] = ""

        # logging messages
        self.log_msg = {}
        self.log_msg['payload'] = ""
        self.log_msg['msg'] = []
        self.log_msg['link_header'] = ""
        self.log_msg['module'] = ""

    def get_config(self, classname=None):
        """
        Finds and reads the config file. Reads the appropriate config values
        for the classname provided. For eg: if the classname is hub, it will
        read from the [hub] section in the config file.
        """

        if not classname:
            classname = self.__class__.__name__.lower()

        self.log_msg['module'] = classname

        # NOTE: more paths can be added to look for the config files.
        # order of files matters: the config in the first file
        # will be overwritten by the values in the next file.
        cnf_file = []
        cnf_file.extend([
            os.path.join(os.path.dirname(__file__),
                         "../conf/resourcesync_push.ini"),
            "/etc/resourcesync_push.ini",
            "/etc/resourcesync_push/resourcesync_push.ini",
        ])

        # loading values from configuration file
        conf = ConfigParser.ConfigParser()
        conf.read(cnf_file)
        if not conf:
            raise IOError("Unable to read config file")

        if classname == "hub":
            self.get_hub_config(conf)
        elif classname == "publisher":
            self.get_publisher_config(conf)
        elif classname == "subscriber":
            try:
                self.config['my_url'] = conf.get("subscriber", "url")
            except (NoSectionError, NoOptionError):
                print("The url value for subscriber is required \
                      in the config file.")
                raise

        self.get_demo_config(conf)

    def get_demo_config(self, conf):
        """
        Reads the [demo_hub] section from the config file if the
        log mode is set to 'demo'.
        """
        try:
            self.config['log_mode'] = conf.get("general", "log_mode")
        except (NoSectionError, NoOptionError):
            pass

        if not self.config['log_mode'] == "demo":
            return

        try:
            self.config['demo_hub_url'] = conf.get("demo_mode", "hub_url")
        except (NoSectionError, NoOptionError):
            print("Demo log mode requires a hub_url in the \
                  [demo_mode] section")
            raise
#......... remainder of code omitted .........
Author: erinspace, Project: resourcesync_push, Lines: 103, Source: __init__.py

Example 14: Connection

# Required module import: from requests_futures.sessions import FuturesSession [as alias]
# Or: from requests_futures.sessions.FuturesSession import mount [as alias]
class Connection(object):
    """Represents a connection to a server
    """

    TIMEOUT = 30
    MAX_RETRIES = 256
    MAX_WORKERS = multiprocessing.cpu_count()
    CHUNK_SIZE = 32
    TOKEN = ''

    # Error 429 is for 'Too many requests' => we retry
    STATUS_FORCELIST = [429]

    def __init__(self, base_url, queries=None, **kwargs):
        """Constructor

        Args:
            base_url (str): the server's url
            queries (Optional[Query]): the queries
        """

        self.session = FuturesSession(max_workers=self.MAX_WORKERS)
        retries = Retry(total=Connection.MAX_RETRIES, backoff_factor=1, status_forcelist=Connection.STATUS_FORCELIST)
        self.session.mount(base_url, HTTPAdapter(max_retries=retries))
        self.results = []
        self.queries = queries

        if kwargs:
            if 'timeout' in kwargs:
                self.TIMEOUT = kwargs['timeout']
            if 'max_retries' in kwargs:
                self.MAX_RETRIES = kwargs['max_retries']
            if 'max_workers' in kwargs:
                self.MAX_WORKERS = kwargs['max_workers']

        self.exec_queries()

    def __get_cb(self, query):
        """Get the callback to use when data have been retrieved

        Args:
            query (Query): the query

        Returns:
            function: the callback for the query
        """
        def cb(sess, res):
            if res.status_code == 200:
                try:
                    response = res.json()
                except:
                    response = res.text

                if query.handlerdata is not None:
                    query.handler(response, query.handlerdata)
                else:
                    query.handler(response)
            else:
                print('Connection error:')
                print('   url: ', res.url)
                print('   text: ', res.text)

        return cb

    def wait(self):
        """Just wait that all the queries have been treated
        """
        for r in self.results:
            r.result()

    def get_apikey(self):
        """Get the api key

        Returns:
            str: the api key
        """
        return self.TOKEN

    def get_header(self):
        """Get the header to use each query

        Returns:
            dict: the header
        """
        return {'User-Agent': 'clouseau', 'Connection': 'close'}

    def get_auth(self):
        """Get the auth to use each query

        Returns:
            dict: the auth
        """
        return None

    def exec_queries(self, queries=None):
        """Set and exec some queries

        Args:
            queries (Optional[Query]): the queries to exec
        """
#......... remainder of code omitted .........
Author: La0, Project: clouseau, Lines: 103, Source: connection.py


Note: The requests_futures.sessions.FuturesSession.mount examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright in the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code, and do not reproduce it without permission.