本文整理汇总了Python中carbon.cache.MetricCache.counts方法的典型用法代码示例。如果您正苦于以下问题:Python MetricCache.counts方法的具体用法?Python MetricCache.counts怎么用?Python MetricCache.counts使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类carbon.cache.MetricCache
的用法示例。
在下文中一共展示了MetricCache.counts方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _flush
# 需要导入模块: from carbon.cache import MetricCache [as 别名]
# 或者: from carbon.cache.MetricCache import counts [as 别名]
def _flush(prefix=None):
    """Write/create whisper files at maximal speed.

    Drains the MetricCache — optionally restricted to metrics whose name
    starts with *prefix* — and writes every queued datapoint to disk.

    :param prefix: optional metric-name prefix filter; None means all metrics.
    :returns: the number of metrics successfully written.
    """
    # PEP 8: identity comparison for None; 'startswith' duck-types a str prefix.
    assert prefix is None or hasattr(prefix, 'startswith')
    log.msg("flush started (prefix: %s)" % prefix)
    started = time.time()
    metrics = MetricCache.counts()
    updates = 0
    # 'with' guarantees the lock is released even if a write raises,
    # replacing the manual acquire()/try/finally/release() dance.
    with write_lock:
        for metric, queueSize in metrics:
            if prefix and not metric.startswith(prefix):
                continue
            dbFilePath = getFilesystemPath(metric)
            dbFileExists = exists(dbFilePath)
            try:
                # Metrics can vanish between counts() and pop() (race with
                # concurrent consumers), so KeyError just means nothing to do.
                datapoints = MetricCache.pop(metric)
            except KeyError:
                continue
            if not createWhisperFile(metric, dbFilePath, dbFileExists):
                continue
            if not writeWhisperFile(dbFilePath, datapoints):
                continue
            updates += 1
    log.msg('flush finished (updates: %d, time: %.5f sec)' % (updates, time.time() - started))
    return updates
示例2: optimalWriteOrder
# 需要导入模块: from carbon.cache import MetricCache [as 别名]
# 或者: from carbon.cache.MetricCache import counts [as 别名]
def optimalWriteOrder():
    "Generates metrics with the most cached values first and applies a soft rate limit on new metrics"
    global lastCreateInterval
    global createCount
    queued = MetricCache.counts()
    sort_begin = time.time()
    # Largest queues first so the busiest metrics get persisted soonest.
    queued.sort(key=lambda pair: pair[1], reverse=True)
    log.msg("Sorted %d cache queues in %.6f seconds" % (len(queued), time.time() - sort_begin))
    for metric, queueSize in queued:
        if state.cacheTooFull and MetricCache.size < CACHE_SIZE_LOW_WATERMARK:
            events.cacheSpaceAvailable()
        # Let our persister do its own check, and ignore the metric if needed.
        if not persister.pre_get_datapoints_check(metric):
            continue
        # Metrics can momentarily disappear from the MetricCache due to the
        # implementation of MetricCache.store(); skip on that race.
        try:
            datapoints = MetricCache.pop(metric)
        except KeyError:
            log.msg("MetricCache contention, skipping %s update for now" % metric)
            continue
        dbInfo = persister.get_dbinfo(metric)
        yield (metric, datapoints, dbInfo[0], dbInfo[1])
示例3: getMetrics
# 需要导入模块: from carbon.cache import MetricCache [as 别名]
# 或者: from carbon.cache.MetricCache import counts [as 别名]
def getMetrics(self):
    """Yield (metric, datapoints) pairs drained from the MetricCache."""
    for metric, queueSize in MetricCache.counts():
        datapoints = MetricCache.pop(metric)
        # Signal listeners once the cache has drained below the low watermark.
        if state.cacheTooFull and MetricCache.size < CACHE_SIZE_LOW_WATERMARK:
            events.cacheSpaceAvailable()
        yield (metric, datapoints)
示例4: optimalWriteOrder
# 需要导入模块: from carbon.cache import MetricCache [as 别名]
# 或者: from carbon.cache.MetricCache import counts [as 别名]
def optimalWriteOrder():
    """Generates metrics with the most cached values first and applies a soft
    rate limit on new metrics.

    Yields (sanitized_metric_name, datapoints, dbFilePath, dbFileExists)
    tuples; metric names are sanitized to printable ASCII before being
    mapped to a filesystem path.
    """
    global lastCreateInterval
    global createCount
    metrics = MetricCache.counts()
    t = time.time()
    #metrics.sort(key=lambda item: item[1], reverse=True) # by queue size, descending
    log.msg("Sorted %d cache queues in %.6f seconds" % (len(metrics), time.time() - t))
    if state.cacheTooFull and MetricCache.size < CACHE_SIZE_LOW_WATERMARK:
        events.cacheSpaceAvailable()
    for metric, queueSize in metrics:
        # Handling special characters in metric names: replace anything
        # outside printable ASCII (0x20-0x7e) in one join() pass instead of
        # mutating a char list index by index.
        metric_sanit = "".join(
            c if '\x20' <= c <= '\x7e' else '_' for c in metric)
        # BUG FIX: initialize dbFilePath so the except branch below logs
        # None instead of raising NameError (or logging a stale value from
        # a previous iteration) when getFilesystemPath() itself raises.
        dbFilePath = None
        try:
            dbFilePath = getFilesystemPath(metric_sanit)
            dbFileExists = exists(dbFilePath)
        except Exception:  # BUG FIX: was a bare 'except:' (caught SystemExit/KeyboardInterrupt too)
            log.err()
            log.msg("dbFilePath: %s" % (dbFilePath))
            continue
        if not dbFileExists:
            createCount += 1
            now = time.time()
            if now - lastCreateInterval >= 60:
                # New one-minute window: reset the soft create-rate counter.
                lastCreateInterval = now
                createCount = 1
            elif createCount >= settings.MAX_CREATES_PER_MINUTE:
                # dropping queued up datapoints for new metrics prevents filling up the entire cache
                # when a bunch of new metrics are received.
                try:
                    MetricCache.pop(metric)
                except KeyError:
                    pass
                continue
        try:  # metrics can momentarily disappear from the MetricCache due to the implementation of MetricCache.store()
            datapoints = MetricCache.pop(metric)
        except KeyError:
            log.msg("MetricCache contention, skipping %s update for now" % metric)
            continue  # we simply move on to the next metric when this race condition occurs
        yield (metric_sanit, datapoints, dbFilePath, dbFileExists)
示例5: optimalWriteOrder
# 需要导入模块: from carbon.cache import MetricCache [as 别名]
# 或者: from carbon.cache.MetricCache import counts [as 别名]
def optimalWriteOrder():
    """Yield (metric, datapoints) pairs, draining the largest cache queues first."""
    log.msg("Entered optimalWriteOrder")
    queues = MetricCache.counts()
    sort_start = time.time()
    queues.sort(key=lambda entry: entry[1], reverse=True)  # by queue size, descending
    log.msg("Sorted %d cache queues in %.6f seconds" % (len(queues), time.time() - sort_start))
    for metric, queueSize in queues:
        if state.cacheTooFull and MetricCache.size < CACHE_SIZE_LOW_WATERMARK:
            events.cacheSpaceAvailable()
        # Metrics can momentarily disappear from the MetricCache due to the
        # implementation of MetricCache.store(); skip when that race occurs.
        try:
            datapoints = MetricCache.pop(metric)
        except KeyError:
            log.msg("MetricCache contention, skipping %s update for now" % metric)
            continue
        yield (metric, datapoints)
示例6: writeCachedDataPoints
# 需要导入模块: from carbon.cache import MetricCache [as 别名]
# 或者: from carbon.cache.MetricCache import counts [as 别名]
def writeCachedDataPoints(channel, exchange):
    """Publish every cached datapoint to the given AMQP exchange.

    Drains the MetricCache via optimalWriteOrder() and publishes one
    persistent message per metric, whose body is one "value timestamp"
    line per datapoint.

    :param channel: AMQP channel used for basic_publish.
    :param exchange: name of the exchange messages are routed through.
    """
    log.msg("Entered function writeCachedDataPoints")
    log.msg("MetricCache count: %d" % (len(MetricCache.counts())))
    while MetricCache:
        # NOTE(review): dataWritten is set but never checked — if the cache
        # stays non-empty while nothing is yielded this loop spins; confirm intent.
        dataWritten = False
        log.msg("Calling optimalWriteOrder")
        for (metric, datapoints) in optimalWriteOrder():
            dataWritten = True
            # BUG FIX: build the body with one join() pass instead of the
            # quadratic 'body = body + temp' concatenation loop.
            body = "".join("%f %d\n" % (point[1], point[0]) for point in datapoints)
            message = Content(body)
            message["delivery mode"] = 2  # 2 = persistent delivery
            channel.basic_publish(exchange=exchange, content=message, routing_key=metric)
            log.updates("Published %d datapoints of metric %s" % (len(datapoints), metric))
示例7: optimalWriteOrder
# 需要导入模块: from carbon.cache import MetricCache [as 别名]
# 或者: from carbon.cache.MetricCache import counts [as 别名]
def optimalWriteOrder():
    """Generates metrics with the most cached values first and applies a soft
    rate limit on new metrics"""
    global lastCreateInterval
    global createCount
    cached = MetricCache.counts()
    sort_begin = time.time()
    cached.sort(key=lambda entry: entry[1], reverse=True)  # by queue size, descending
    log.debug("Sorted %d cache queues in %.6f seconds" % (len(cached),
                                                          time.time() - sort_begin))
    for metric, queueSize in cached:
        if state.cacheTooFull and MetricCache.size < CACHE_SIZE_LOW_WATERMARK:
            events.cacheSpaceAvailable()
        dbFileExists = APP_DB.exists(metric)
        if not dbFileExists:
            createCount += 1
            now = time.time()
            if now - lastCreateInterval >= 60:
                # New one-minute window: reset the soft create-rate counter.
                lastCreateInterval = now
                createCount = 1
            elif createCount >= settings.MAX_CREATES_PER_MINUTE:
                # Dropping queued up datapoints for new metrics prevents
                # filling up the entire cache when a bunch of new metrics
                # are received.
                try:
                    MetricCache.pop(metric)
                except KeyError:
                    pass
                continue
        # Metrics can momentarily disappear from the MetricCache due to the
        # implementation of MetricCache.store(); skip when that race occurs.
        try:
            datapoints = MetricCache.pop(metric)
        except KeyError:
            log.msg("MetricCache contention, skipping %s update for now" % metric)
            continue
        yield (metric, datapoints, dbFileExists)