

Python cache.MetricCache Class Code Examples

This article collects typical usage examples of the MetricCache class from Python's carbon.cache module. If you are unsure what MetricCache is for or how to use it, the curated examples below should help.


Fifteen MetricCache code examples are presented below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python examples.
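For orientation, here is a minimal sketch of the MetricCache surface the snippets below rely on. It mimics the older module-level-singleton API (store/pop/counts/size) that most of these examples use; it is an illustration, not carbon's actual implementation, and it omits the argument-less pop() used by the strategy-based variants.

from collections import defaultdict
import threading

class _MetricCache(defaultdict):
    """Illustrative stand-in for carbon.cache.MetricCache (older singleton API)."""
    def __init__(self):
        defaultdict.__init__(self, dict)  # metric name -> {timestamp: value}
        self.lock = threading.Lock()

    @property
    def size(self):
        return sum(len(points) for points in self.values())

    def store(self, metric, datapoint):
        timestamp, value = datapoint
        with self.lock:
            self[metric][timestamp] = value  # a later point for the same second overwrites

    def counts(self):
        with self.lock:
            return [(metric, len(points)) for metric, points in self.items()]

    def pop(self, metric):
        with self.lock:
            datapoints = dict.pop(self, metric)  # raises KeyError if a writer raced us
        return sorted(datapoints.items())  # (timestamp, value) pairs in time order

MetricCache = _MetricCache()  # module-level singleton, as the examples assume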

Example 1: optimalWriteOrder

def optimalWriteOrder():
  "Generates metrics with the most cached values first and applies a soft rate limit on new metrics"
  global lastCreateInterval
  global createCount
  metrics = MetricCache.counts()

  t = time.time()
  metrics.sort(key=lambda item: item[1], reverse=True) # by queue size, descending
  log.msg("Sorted %d cache queues in %.6f seconds" % (len(metrics), time.time() - t))

  for metric, queueSize in metrics:
    if state.cacheTooFull and MetricCache.size < CACHE_SIZE_LOW_WATERMARK:
      events.cacheSpaceAvailable()

    # Let our persister do its own check, and ignore the metric if needed.
    if not persister.pre_get_datapoints_check(metric):
      continue

    try: # metrics can momentarily disappear from the MetricCache due to the implementation of MetricCache.store()
      datapoints = MetricCache.pop(metric)
    except KeyError:
      log.msg("MetricCache contention, skipping %s update for now" % metric)
      continue # we simply move on to the next metric when this race condition occurs

    dbInfo = persister.get_dbinfo(metric)
    dbIdentifier = dbInfo[0]
    dbExists = dbInfo[1]

    yield (metric, datapoints, dbIdentifier, dbExists)
Developer: arowser, Project: carbon-postgres-patches, Lines: 29, Source: writer.py
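A hedged sketch of how a writer loop might consume this generator. The persister calls below (create_db, update_many) are hypothetical placeholders for whatever persistence API this project exposes; only the tuple shape comes from the example above.

def writeCachedDataPoints():
  "Illustrative consumer: drain the generator and persist each metric's queue"
  for (metric, datapoints, dbIdentifier, dbExists) in optimalWriteOrder():
    if not dbExists:
      persister.create_db(metric)  # hypothetical: create backing storage for a new metric
    persister.update_many(dbIdentifier, datapoints)  # hypothetical: bulk-write the datapoints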

Example 2: optimalWriteOrder

def optimalWriteOrder():
  "Generates metrics with the most cached values first and applies a soft rate limit on new metrics"
  global lastCreateInterval
  global createCount
  metrics = [(metric, len(datapoints)) for metric, datapoints in MetricCache.items()]

  t = time.time()
  metrics.sort(key=lambda item: item[1], reverse=True) # by queue size, descending
  log.msg("Sorted %d cache queues in %.6f seconds" % (len(metrics), time.time() - t))

  for metric, queueSize in metrics:
    dbFilePath = getFilesystemPath(metric)
    dbFileExists = exists(dbFilePath)

    if not dbFileExists:
      createCount += 1
      now = time.time()

      if now - lastCreateInterval >= 60:
        lastCreateInterval = now
        createCount = 1

      elif createCount >= settings.MAX_CREATES_PER_MINUTE:
        continue

    try: # metrics can momentarily disappear from the MetricCache due to the implementation of MetricCache.store()
      datapoints = MetricCache.pop(metric)
    except KeyError:
      log.msg("MetricCache contention, skipping %s update for now" % metric)
      continue # we simply move on to the next metric when this race condition occurs

    yield (metric, datapoints, dbFilePath, dbFileExists)
Developer: ZachGoldberg, Project: Graphite, Lines: 32, Source: writer.py

Example 3: getMetrics

 def getMetrics(self):
     metrics = MetricCache.counts()
     for metric, queueSize in metrics:
         try:  # metrics can momentarily disappear from the cache due to MetricCache.store()
             datapoints = MetricCache.pop(metric)
         except KeyError:
             continue
         if state.cacheTooFull and MetricCache.size < CACHE_SIZE_LOW_WATERMARK:
             events.cacheSpaceAvailable()
         yield (metric, datapoints)
Developer: niteshjain, Project: CarbonAMQP_Producer, Lines: 7, Source: amqp_publisher.py

Example 4: _flush

def _flush(prefix=None):
    """ Write/create whisped files at maximal speed """
    assert(prefix==None or hasattr(prefix, 'startswith'))
    log.msg("flush started (prefix: %s)" % prefix)
    started = time.time()
    metrics = MetricCache.counts()
    updates = 0
    write_lock.acquire()
    try:
        for metric, queueSize in metrics:
            if prefix and not metric.startswith(prefix):
                continue
            dbFilePath = getFilesystemPath(metric)
            dbFileExists = exists(dbFilePath)
            try:
                datapoints = MetricCache.pop(metric)
            except KeyError:
                continue
            if not createWhisperFile(metric, dbFilePath, dbFileExists):
                continue
            if not writeWhisperFile(dbFilePath, datapoints):
                continue
            updates += 1
    finally:
        write_lock.release()
    log.msg('flush finished (updates: %d, time: %.5f sec)' % (updates, time.time()-started))
    return updates
Developer: penpen, Project: carbon, Lines: 27, Source: writer.py
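Usage is simple; the optional prefix limits the flush to one metric subtree (the prefix string below is illustrative):

updates = _flush(prefix='carbon.agents.')  # flush only self-instrumentation metrics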

Example 5: backIntoCache

def backIntoCache(metricList):
  for (metric, datapoints) in metricList:
    for point in datapoints:
      try:
        MetricCache.store(metric, point)
      except Exception:  # avoid a bare except; re-queue the point on failure
        datapoints.append(point)  # appending extends this iteration, so the point is retried
  log.msg("Failed to publish to RabbitMQ. Pushed the metrics back to cache")
Developer: pratX, Project: CarbonRelay_AckConsumer_d9-graphite, Lines: 8, Source: amqp_publisher.py

Example 6: optimalWriteOrder

def optimalWriteOrder():
  "Generates metrics with the most cached values first and applies a soft rate limit on new metrics"
  global lastCreateInterval
  global createCount
  metrics = MetricCache.counts()

  t = time.time()
  #metrics.sort(key=lambda item: item[1], reverse=True) # sorting by queue size is disabled in this variant
  log.msg("Prepared %d cache queues in %.6f seconds" % (len(metrics), time.time() - t))

  if state.cacheTooFull and MetricCache.size < CACHE_SIZE_LOW_WATERMARK:
    events.cacheSpaceAvailable()

  for metric, queueSize in metrics:
    # Handling special characters in metric names: replace anything outside
    # printable ASCII (0x20-0x7e) with an underscore
    metric_sanit = ''.join(c if '\x20' <= c <= '\x7e' else '_' for c in metric)

    try:
      dbFilePath = getFilesystemPath(metric_sanit)
      dbFileExists = exists(dbFilePath)
    except Exception:
      log.err()
      log.msg("could not resolve db file path for metric: %s" % metric_sanit)
      continue

    if not dbFileExists:
      createCount += 1
      now = time.time()

      if now - lastCreateInterval >= 60:
        lastCreateInterval = now
        createCount = 1

      elif createCount >= settings.MAX_CREATES_PER_MINUTE:
        # dropping queued up datapoints for new metrics prevents filling up the entire cache
        # when a bunch of new metrics are received.
        try:
          MetricCache.pop(metric)
        except KeyError:
          pass

        continue

    try: # metrics can momentarily disappear from the MetricCache due to the implementation of MetricCache.store()
      datapoints = MetricCache.pop(metric)
    except KeyError:
      log.msg("MetricCache contention, skipping %s update for now" % metric)
      continue # we simply move on to the next metric when this race condition occurs

    yield (metric_sanit, datapoints, dbFilePath, dbFileExists)
Developer: pratX, Project: CarbonCacheSSD_d9-graphite, Lines: 54, Source: writer.py

Example 7: optimalWriteOrder

def optimalWriteOrder():
  log.msg("Entered optimalWriteOrder")
  metrics = MetricCache.counts()
  t = time.time()
  metrics.sort(key=lambda item: item[1], reverse=True) # by queue size, descending
  log.msg("Sorted %d cache queues in %.6f seconds" % (len(metrics), time.time() - t))

  for metric, queueSize in metrics:
    if state.cacheTooFull and MetricCache.size < CACHE_SIZE_LOW_WATERMARK:
      events.cacheSpaceAvailable()
    try: # metrics can momentarily disappear from the MetricCache due to the implementation of MetricCache.store()
      datapoints = MetricCache.pop(metric)
    except KeyError:
      log.msg("MetricCache contention, skipping %s update for now" % metric)
      continue # we simply move on to the next metric when this race condition occurs
    yield (metric, datapoints)
Developer: pratX, Project: CarbonRelay_AckConsumer_d9-graphite, Lines: 16, Source: amqp_pub.py

Example 8: optimalWriteOrder

def optimalWriteOrder():
  """Generates metrics with the most cached values first and applies a soft
  rate limit on new metrics"""
  global lastCreateInterval
  global createCount
  metrics = MetricCache.counts()

  time_ = time.time()
  metrics.sort(key=lambda item: item[1], reverse=True)  # by queue size, descending
  log.debug("Sorted %d cache queues in %.6f seconds" % (len(metrics),
                                                        time.time() - time_))

  for metric, queueSize in metrics:
    if state.cacheTooFull and MetricCache.size < CACHE_SIZE_LOW_WATERMARK:
      events.cacheSpaceAvailable()

    dbFileExists = APP_DB.exists(metric)

    if not dbFileExists:
      createCount += 1
      now = time.time()

      if now - lastCreateInterval >= 60:
        lastCreateInterval = now
        createCount = 1

      elif createCount >= settings.MAX_CREATES_PER_MINUTE:
        # dropping queued up datapoints for new metrics prevents filling up the entire cache
        # when a bunch of new metrics are received.
        try:
          MetricCache.pop(metric)
        except KeyError:
          pass

        continue

    try:  # metrics can momentarily disappear from the MetricCache due to the implementation of MetricCache.store()
      datapoints = MetricCache.pop(metric)
    except KeyError:
      log.msg("MetricCache contention, skipping %s update for now" % metric)
      continue  # we simply move on to the next metric when this race condition occurs

    yield (metric, datapoints, dbFileExists)
Developer: jbooth, Project: carbon, Lines: 43, Source: writer.py

Example 9: stringReceived

  def stringReceived(self, rawRequest):
    request = self.unpickler.loads(rawRequest)
    cache = MetricCache()
    if request['type'] == 'cache-query':
      metric = request['metric']
      datapoints = list(cache.get(metric, {}).items())
      result = dict(datapoints=datapoints)
      if settings.LOG_CACHE_HITS:
        log.query('[%s] cache query for \"%s\" returned %d values' % (
          self.peerAddr, metric, len(datapoints)
        ))
      instrumentation.increment('cacheQueries')

    elif request['type'] == 'cache-query-bulk':
      datapointsByMetric = {}
      metrics = request['metrics']
      for metric in metrics:
        datapointsByMetric[metric] = list(cache.get(metric, {}).items())

      result = dict(datapointsByMetric=datapointsByMetric)

      if settings.LOG_CACHE_HITS:
        log.query('[%s] cache query bulk for \"%d\" metrics returned %d values' % (
          self.peerAddr,
          len(metrics),
          sum([len(datapoints) for datapoints in datapointsByMetric.values()])
        ))
      instrumentation.increment('cacheBulkQueries')
      instrumentation.append('cacheBulkQuerySize', len(metrics))

    elif request['type'] == 'get-metadata':
      result = management.getMetadata(request['metric'], request['key'])

    elif request['type'] == 'set-metadata':
      result = management.setMetadata(request['metric'], request['key'], request['value'])

    else:
      result = dict(error="Invalid request type \"%s\"" % request['type'])

    response = pickle.dumps(result, protocol=2)
    self.sendString(response)
Developer: NixM0nk3y, Project: carbon, Lines: 41, Source: protocols.py
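A minimal client sketch for exercising the handler above. It assumes the server frames each pickled message with a 4-byte big-endian length prefix (Twisted's Int32StringReceiver convention, which carbon's cache-query port uses); the host and port below are illustrative defaults.

import pickle
import socket
import struct

def cache_query(metric, host='127.0.0.1', port=7002):
  "Send a 'cache-query' request and return the cached datapoints for `metric`"
  request = pickle.dumps({'type': 'cache-query', 'metric': metric}, protocol=2)
  with socket.create_connection((host, port)) as sock:
    sock.sendall(struct.pack('!I', len(request)) + request)
    (length,) = struct.unpack('!I', sock.recv(4))  # response length prefix
    payload = b''
    while len(payload) < length:
      payload += sock.recv(length - len(payload))
  return pickle.loads(payload)['datapoints']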

Example 10: optimalWriteOrder

def optimalWriteOrder():
  """Generates metrics with the most cached values first and applies a soft
  rate limit on new metrics"""
  cache = MetricCache()
  while cache:
    (metric, datapoints) = cache.drain_metric()
    dbFileExists = state.database.exists(metric)

    if not dbFileExists and CREATE_BUCKET:
      # If our tokenbucket has enough tokens available to create a new metric
      # file then yield the metric data to complete that operation. Otherwise
      # we'll just drop the metric on the ground and move on to the next
      # metric.
      # XXX This behavior should probably be configurable to not drop metrics
      # when rate limiting unless our cache is too big or some other legit
      # reason.
      if CREATE_BUCKET.drain(1):
        yield (metric, datapoints, dbFileExists)
      continue

    yield (metric, datapoints, dbFileExists)
Developer: criteo-forks, Project: carbon, Lines: 21, Source: writer.py
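CREATE_BUCKET above is a token bucket used to rate-limit metric-file creation. A minimal sketch of such a bucket follows, assuming only the drain(cost) interface the snippet calls; it is an illustration, not carbon's actual TokenBucket implementation.

import time

class TokenBucket(object):
  "Illustrative token bucket: holds up to `capacity` tokens, refilled at `fill_rate` per second"
  def __init__(self, capacity, fill_rate):
    self.capacity = float(capacity)
    self.fill_rate = float(fill_rate)
    self.tokens = float(capacity)
    self.timestamp = time.time()

  def drain(self, cost):
    "Take `cost` tokens if available; return True on success, False otherwise"
    now = time.time()
    # refill proportionally to elapsed time, capped at capacity
    self.tokens = min(self.capacity, self.tokens + (now - self.timestamp) * self.fill_rate)
    self.timestamp = now
    if self.tokens >= cost:
      self.tokens -= cost
      return True
    return False

# e.g. CREATE_BUCKET = TokenBucket(settings.MAX_CREATES_PER_MINUTE,
#                                  settings.MAX_CREATES_PER_MINUTE / 60.0)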

Example 11: test_write_strategy_sorted

    def test_write_strategy_sorted(self):
        """Create a metric cache, insert metrics, ensure sorted writes"""
        self.assertEqual("sorted", MetricCache.method)
        now = time.time()
        datapoint1 = (now - 10, float(1))
        datapoint2 = (now, float(2))
        MetricCache.store("d.e.f", datapoint1)
        MetricCache.store("a.b.c", datapoint1)
        MetricCache.store("a.b.c", datapoint2)

        (m, d) = MetricCache.pop()
        self.assertEqual(("a.b.c", deque([datapoint1, datapoint2])), (m, d))
        (m, d) = MetricCache.pop()
        self.assertEqual(("d.e.f", deque([datapoint1])), (m, d))

        self.assertEqual(0, MetricCache.size)
Developer: opentable, Project: carbon, Lines: 16, Source: test_cache.py
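The test pins MetricCache.method to "sorted". In carbon this strategy is selected with the CACHE_WRITE_STRATEGY setting in carbon.conf; a hedged excerpt (setting name per mainline carbon, where "sorted" drains the largest queues first):

# carbon.conf (excerpt)
[cache]
CACHE_WRITE_STRATEGY = sorted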

Example 12: pre_retrieve_metric_check

  def pre_retrieve_metric_check(self, metric):
    # lastCreateInterval and createCount are module-level counters, as in this project's writer.py
    global lastCreateInterval
    global createCount
    dbInfo = self.get_dbinfo(metric)
    dbFilePath = dbInfo[0]
    dbFileExists = dbInfo[1]

    if not dbFileExists:
      createCount += 1
      now = time.time()

      if now - lastCreateInterval >= 60:
        lastCreateInterval = now
        createCount = 1

      elif createCount >= settings.MAX_CREATES_PER_MINUTE:
        # dropping queued up datapoints for new metrics prevents filling up the entire cache
        # when a bunch of new metrics are received.
        try:
          MetricCache.pop(metric)
        except KeyError:
          pass

        return False

    return True
Developer: arowser, Project: carbon-postgres-patches, Lines: 24, Source: persister.py

Example 13: writeCachedDataPoints

def writeCachedDataPoints(channel, exchange):
  log.msg("Entered function writeCachedDataPoints")
  log.msg("MetricCache count: %d" % (len(MetricCache.counts())))
  while MetricCache:
    dataWritten = False
    log.msg("Calling optimalWriteOrder")
    for (metric, datapoints) in optimalWriteOrder():
      dataWritten = True
      body = ""
      for point in datapoints:
        body += "%f %d\n" % (point[1], point[0])  # value, then timestamp
      message = Content(body)
      message["delivery mode"] = 2  # persistent delivery

      channel.basic_publish(exchange=exchange, content=message, routing_key=metric)
      log.updates("Published %d datapoints of metric %s" % (len(datapoints), metric))
Developer: pratX, Project: CarbonRelay_AckConsumer_d9-graphite, Lines: 17, Source: amqp_pub.py

Example 14: optimalWriteOrder

def optimalWriteOrder():
  """Generates metrics with the most cached values first and applies a soft
  rate limit on new metrics"""
  while MetricCache:
    (metric, datapoints) = MetricCache.pop()
    if state.cacheTooFull and MetricCache.size < CACHE_SIZE_LOW_WATERMARK:
      events.cacheSpaceAvailable()

    dbFilePath = getFilesystemPath(metric)
    dbFileExists = exists(dbFilePath)

    if not dbFileExists and CREATE_BUCKET:
      # If our tokenbucket has enough tokens available to create a new metric
      # file then yield the metric data to complete that operation. Otherwise
      # we'll just drop the metric on the ground and move on to the next
      # metric.
      # XXX This behavior should probably be configurable to not drop metrics
      # when rate limiting unless our cache is too big or some other legit
      # reason.
      if CREATE_BUCKET.drain(1):
        yield (metric, datapoints, dbFilePath, dbFileExists)
      continue

    yield (metric, datapoints, dbFilePath, dbFileExists)
Developer: jacklesplat, Project: ql_emc_graphite, Lines: 24, Source: writer.py

Example 15: store

def store(metric, value):
  fullMetric = 'carbon.agents.%s.%s' % (HOSTNAME, metric)
  datapoint = (time.time(), value)
  MetricCache.store(fullMetric, datapoint)
Developer: ZachGoldberg, Project: Graphite, Lines: 4, Source: instrumentation.py
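A quick usage sketch for the helper above (the metric name and value are illustrative): the datapoint is buffered under the carbon.agents.<HOSTNAME> namespace and flushed by the writer like any other metric.

store('metricsReceived', 1042)  # buffered as carbon.agents.<HOSTNAME>.metricsReceived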


Note: the carbon.cache.MetricCache class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs; the snippets were selected from open-source projects contributed by their respective developers. Copyright of the source code belongs to the original authors; consult each project's License before redistributing or using it. Do not reproduce this article without permission.