本文整理汇总了Python中statsd.StatsClient.timer方法的典型用法代码示例。如果您正苦于以下问题:Python StatsClient.timer方法的具体用法?Python StatsClient.timer怎么用?Python StatsClient.timer使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类statsd.StatsClient
的用法示例。
在下文中一共展示了StatsClient.timer方法的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: StatsdMetrics
# 需要导入模块: from statsd import StatsClient [as 别名]
# 或者: from statsd.StatsClient import timer [as 别名]
class StatsdMetrics(Metrics):
    """Metrics implementation that reports feed activity to a statsd server."""

    def __init__(self, host='localhost', port=8125, prefix=None):
        # One UDP statsd client shared by every metric call.
        self.statsd = StatsClient(host, port, prefix)

    def fanout_timer(self, feed_class):
        # Timer (context manager) measuring fanout latency per feed class.
        return self.statsd.timer(feed_class.__name__ + '.fanout_latency')

    def feed_reads_timer(self, feed_class):
        # Timer (context manager) measuring read latency per feed class.
        return self.statsd.timer(feed_class.__name__ + '.read_latency')

    def on_feed_read(self, feed_class, activities_count):
        self.statsd.incr(feed_class.__name__ + '.reads', activities_count)

    def on_feed_write(self, feed_class, activities_count):
        self.statsd.incr(feed_class.__name__ + '.writes', activities_count)

    def on_feed_remove(self, feed_class, activities_count):
        self.statsd.incr(feed_class.__name__ + '.deletes', activities_count)

    def on_fanout(self, feed_class, operation, activities_count=1):
        # Counter name combines the feed class and the fanout operation.
        counter = '%s.fanout.%s' % (feed_class.__name__, operation.__name__)
        self.statsd.incr(counter, activities_count)

    def on_activity_published(self):
        self.statsd.incr('activities.published')

    def on_activity_removed(self):
        self.statsd.incr('activities.removed')
示例2: FlaskStat
# 需要导入模块: from statsd import StatsClient [as 别名]
# 或者: from statsd.StatsClient import timer [as 别名]
class FlaskStat(object):
    """Flask extension that times every request via statsd.

    Two timers are recorded per request: one keyed by the matched
    endpoint and one aggregate 'all' timer. Timers are started in a
    before_request hook and stopped in a teardown_request hook.
    """

    _xstat_title = None
    _xstat_host = None
    _xstat_port = None
    _stat_client = None

    def __init__(self, app=None):
        super(FlaskStat, self).__init__()
        if app:
            self.init_app(app)

    def init_app(self, app):
        """Bind this extension to *app* and register the request hooks."""
        from flask import request, g

        self._xstat_title = app.config.get('XSTAT_TITLE')
        self._xstat_host = app.config.get('XSTAT_HOST')
        self._xstat_port = app.config.get('XSTAT_PORT') or constants.XSTAT_PORT
        self._stat_client = StatsClient(host=self._xstat_host,
                                        port=self._xstat_port)

        @app.before_request
        @catch_exc
        def prepare_stat():
            # Nothing matched this URL; there is no endpoint to time.
            if not request.endpoint:
                return
            names = (
                '.'.join([self._xstat_title, 'endpoint', request.endpoint]),
                '.'.join([self._xstat_title, 'all']),
            )
            g.xstat_timers = [self._stat_client.timer(name) for name in names]
            for timer in g.xstat_timers:
                timer.start()

        @app.teardown_request
        @catch_exc
        def send_stat(exc):
            # prepare_stat may have bailed out (no endpoint) — be defensive.
            if not hasattr(g, 'xstat_timers'):
                return
            for timer in g.xstat_timers:
                timer.stop()
示例3: MapleStat
# 需要导入模块: from statsd import StatsClient [as 别名]
# 或者: from statsd.StatsClient import timer [as 别名]
class MapleStat(object):
    """Request-timing extension for a maple-style app.

    Unlike the Flask variant, the before/after hooks here receive the
    request object directly, and timers are stashed on the request.
    """

    _xstat_title = None
    _xstat_host = None
    _xstat_port = None
    _stat_client = None

    def __init__(self, app=None, config=None):
        super(MapleStat, self).__init__()
        if app:
            self.init_app(app, config)

    def init_app(self, app, config):
        """Bind to *app*, reading XSTAT_* settings from *config*."""
        self._xstat_title = config.get('XSTAT_TITLE')
        self._xstat_host = config.get('XSTAT_HOST')
        self._xstat_port = config.get('XSTAT_PORT') or constants.XSTAT_PORT
        self._stat_client = StatsClient(host=self._xstat_host,
                                        port=self._xstat_port)

        @app.before_request
        @catch_exc
        def prepare_stat(request):
            # Unroutable request: nothing to time.
            if not request.endpoint:
                return
            names = (
                '.'.join([self._xstat_title, 'endpoint', request.endpoint]),
                '.'.join([self._xstat_title, 'all']),
            )
            request.xstat_timers = [self._stat_client.timer(name)
                                    for name in names]
            for timer in request.xstat_timers:
                timer.start()

        @app.after_request
        @catch_exc
        def send_stat(request, exc):
            # prepare_stat may not have run for this request.
            if not hasattr(request, 'xstat_timers'):
                return
            for timer in request.xstat_timers:
                timer.stop()
示例4: DjangoStat
# 需要导入模块: from statsd import StatsClient [as 别名]
# 或者: from statsd.StatsClient import timer [as 别名]
class DjangoStat(MiddlewareMixin):
    """Django middleware that records per-view and aggregate statsd timers."""

    _xstat_title = None
    _xstat_host = None
    _xstat_port = None
    _stat_client = None

    def __init__(self, *args, **kwargs):
        from django.conf import settings
        super(DjangoStat, self).__init__(*args, **kwargs)
        self._xstat_title = getattr(settings, 'XSTAT_TITLE', None)
        self._xstat_host = getattr(settings, 'XSTAT_HOST', None)
        self._xstat_port = (getattr(settings, 'XSTAT_PORT', None)
                            or constants.XSTAT_PORT)
        self._stat_client = StatsClient(host=self._xstat_host,
                                        port=self._xstat_port)

    @catch_exc
    def process_view(self, request, view_func, view_args, view_kwargs):
        """Start the timers.

        request.resolver_match.url_name is only available once
        process_view runs, which is why the timers start here rather
        than in process_request.
        """
        names = (
            '.'.join([self._xstat_title, 'endpoint',
                      request.resolver_match.url_name]),
            '.'.join([self._xstat_title, 'all']),
        )
        request.xstat_timers = [self._stat_client.timer(name)
                                for name in names]
        for timer in request.xstat_timers:
            timer.start()

    @catch_exc
    def process_response(self, request, response):
        """Stop the timers; this runs whether or not the view raised."""
        if hasattr(request, 'xstat_timers'):
            for timer in request.xstat_timers:
                timer.stop()
        return response
示例5: _Statsd
# 需要导入模块: from statsd import StatsClient [as 别名]
# 或者: from statsd.StatsClient import timer [as 别名]
class _Statsd(object):
def __init__(self, config):
if config.get('datadog', True):
initialize(statsd_host=config['host'],
statsd_port=config['port'],
prefix=config['prefix'])
self.datadog = True
self._statsd = statsd
else:
self.datadog = False
self._statsd = StatsClient(config['host'],
config['port'],
config['prefix'])
def incr(self, metric, count=1, rate=1, **kw):
if self.datadog:
return self._statsd.increment(metric, value=count,
sample_rate=rate, **kw)
else:
return self._statsd.incr(metric, count=count, rate=rate)
def timer(self, metric, rate=1, **kw):
if self.datadog:
return self._statsd.timed(metric, sample_rate=rate, **kw)
else:
return self._statsd.timer(metric, rate=rate)
示例6: time_stack_list
# 需要导入模块: from statsd import StatsClient [as 别名]
# 或者: from statsd.StatsClient import timer [as 别名]
def time_stack_list(username, password, tenant, auth_url, heat_url, region,
                    statsd_server):
    """Time one full heat stack listing and report it to statsd.

    The elapsed time is recorded under the 'uptime.<region>' metric on
    *statsd_server* (default statsd port).
    """
    keystone = keystone_client(username=username, password=password,
                               tenant_name=tenant, auth_url=auth_url)
    heat = heat_client('1', endpoint=heat_url, region_name=region,
                       token=keystone.auth_token)
    statsd = StatsClient(host=statsd_server)
    with statsd.timer('uptime.{}'.format(region)):
        # Drain the generator so the timer covers the whole listing,
        # not just the first page request.
        list(heat.stacks.list())
示例7: StatsD
# 需要导入模块: from statsd import StatsClient [as 别名]
# 或者: from statsd.StatsClient import timer [as 别名]
class StatsD(object):
    """Flask-style statsd extension that proxies to a StatsClient."""

    def __init__(self, app=None, config=None):
        self.config = None
        self.statsd = None
        if app is None:
            self.app = None
        else:
            self.init_app(app)

    def init_app(self, app, config=None):
        """Bind to *app*; an explicit *config* wins over app.config."""
        if config is not None:
            self.config = config
        elif self.config is None:
            self.config = app.config
        # Fill in any missing connection settings with sane defaults.
        for key, default in (('STATSD_HOST', 'localhost'),
                             ('STATSD_PORT', 8125),
                             ('STATSD_PREFIX', None)):
            self.config.setdefault(key, default)
        self.app = app
        self.statsd = StatsClient(self.config['STATSD_HOST'],
                                  self.config['STATSD_PORT'],
                                  self.config['STATSD_PREFIX'])

    # Thin delegation layer: each method forwards to the underlying client.
    def timer(self, *args, **kwargs):
        return self.statsd.timer(*args, **kwargs)

    def timing(self, *args, **kwargs):
        return self.statsd.timing(*args, **kwargs)

    def incr(self, *args, **kwargs):
        return self.statsd.incr(*args, **kwargs)

    def decr(self, *args, **kwargs):
        return self.statsd.decr(*args, **kwargs)

    def gauge(self, *args, **kwargs):
        return self.statsd.gauge(*args, **kwargs)

    def set(self, *args, **kwargs):
        return self.statsd.set(*args, **kwargs)
示例8: StatsD
# 需要导入模块: from statsd import StatsClient [as 别名]
# 或者: from statsd.StatsClient import timer [as 别名]
class StatsD(object):
    """Statsd proxy extension (variant without the set() passthrough)."""

    def __init__(self, app=None, config=None):
        self.config = None
        self.statsd = None
        if app is None:
            self.app = None
        else:
            self.init_app(app)

    def init_app(self, app, config=None):
        """Bind to *app*; an explicit *config* takes precedence."""
        if config is not None:
            self.config = config
        elif self.config is None:
            self.config = app.config
        # Default connection settings for a local statsd daemon.
        for key, default in (("STATSD_HOST", "localhost"),
                             ("STATSD_PORT", 8125),
                             ("STATSD_PREFIX", None)):
            self.config.setdefault(key, default)
        self.app = app
        self.statsd = StatsClient(host=self.config["STATSD_HOST"],
                                  port=self.config["STATSD_PORT"],
                                  prefix=self.config["STATSD_PREFIX"])

    # Delegation layer: forward each call to the underlying client.
    def timer(self, *args, **kwargs):
        return self.statsd.timer(*args, **kwargs)

    def timing(self, *args, **kwargs):
        return self.statsd.timing(*args, **kwargs)

    def incr(self, *args, **kwargs):
        return self.statsd.incr(*args, **kwargs)

    def decr(self, *args, **kwargs):
        return self.statsd.decr(*args, **kwargs)

    def gauge(self, *args, **kwargs):
        return self.statsd.gauge(*args, **kwargs)
示例9: ns
# 需要导入模块: from statsd import StatsClient [as 别名]
# 或者: from statsd.StatsClient import timer [as 别名]
# NOTE(review): fragment of a larger monitoring script — `pipe`, `ns`,
# `statsd`, `loadavg`, `ram`, `network`, `os_status` and the imports of
# `uptime`/`ntplib`/`time` are defined outside this excerpt. Python 2
# syntax (print statement).

# Uptime
pipe.gauge('os.uptime', uptime.uptime())

# Host clock offset: query a public NTP pool. A failed query is simply
# skipped rather than aborting the metrics loop.
try:
    response = ntplib.NTPClient().request('pool.ntp.org')
except ntplib.NTPException:
    pass
else:
    pipe.gauge('os.ntp.offset', response.offset)

start = used = 0
metric = ns('watchtower', 'gathering')

# Main gathering loop: each pass is timed under the gathering metric,
# then we sleep out the remainder of a 10-second interval.
while True:
    start = int(time.time())
    with statsd.timer(metric):
        loadavg()
        ram()
        network()
        os_status()
    used = int(time.time()) - start
    try:
        time.sleep(10 - used)  # sleep for the remainder of the interval
    except IOError as exc:
        # presumably raised when gathering took >10s and the sleep
        # argument went negative — TODO confirm on target platform
        print "IOError on time.sleep(10 - %r): %s" % (used, exc)
        # Default sleep after an error
        time.sleep(8)
示例10: get_option
# 需要导入模块: from statsd import StatsClient [as 别名]
# 或者: from statsd.StatsClient import timer [as 别名]
# NOTE(review): fragment — `util`, `q`, `get_option`, `process_option`,
# `add_bookmark`, `pbsearch_tag`, `pbsearch_sql` and `main` come from
# earlier in the script (not visible here).

# Open the bookmark database and make sure its schema exists.
conn = util.opendb()
c = conn.cursor()
util.create_schema(c)
auth = util.authinfo(c)

# Split any option flag off the query; the last word decides whether
# this is a tag-expansion query.
(q,option) = get_option(c,q)
last_q = q.split(' ')[-1]
if q.startswith('_'): # option
    process_option(c,q)
elif q.startswith('+'): # add bookmark
    add_bookmark(c,q)
elif last_q.startswith('#') and (':' not in q): # tag expansion
    pbsearch_tag(c,'',last_q[1:])
else:
    pbsearch_sql(c,option,q)
util.closedb(conn)


if __name__ == '__main__':
    # Best effort: time and count the launch via statsd, but if the
    # statsd client cannot be set up, still run main().
    try:
        statsd = StatsClient(host='g.jmjeong.com',
                             port=8125,
                             prefix='jmjeong.alfred.bookmark')
        with statsd.timer('main'):
            statsd.incr('launch');
            main()
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
    # and re-runs main(); consider narrowing to Exception.
    except:
        main()
示例11: Worker
# 需要导入模块: from statsd import StatsClient [as 别名]
# 或者: from statsd.StatsClient import timer [as 别名]
#.........这里部分代码省略.........
if job_id:
self._update_status('jobs_count')
logging.info("[%s] Processing %s", self.name, job_id)
self._fork_and_process(job_id)
if self.child_pid == 0:
return
else:
time.sleep(self.sleep_time)
def _update_status(self, counter):
self.status['updated_at'] = time.time()
self.status[counter] += 1
self._save_status()
@property
def _key(self):
return 'worker:%s' % self.worker_id
def _save_status(self):
self.manager.redis_connection.hmset(self._key, self.status)
def _fork_and_process(self, job_id):
    """Fork; the child runs the job while the parent waits and reconciles.

    In the child this returns with self.child_pid == 0 — the caller
    checks that and exits its loop. The parent blocks in waitpid, then
    marks jobs that died without finishing as interrupted.
    """
    self.current_job_id = job_id
    self.child_pid = os.fork()
    if self.child_pid == 0:
        # Child process: run the query.
        self.set_title("processing %s" % job_id)
        self._process(job_id)
    else:
        # Parent process: wait for the child to exit.
        logging.info("[%s] Waiting for pid: %d", self.name, self.child_pid)
        try:
            _, status = os.waitpid(self.child_pid, 0)
        except OSError:
            logging.info("[%s] OSError while waiting for child to finish", self.name)
            # setting status to >0, so the job cleanup is triggered
            status = 1
        self._update_status('done_jobs_count')
        job = Job.load(self.manager.redis_connection, job_id)
        if status > 0 and not job.is_finished():
            # The child exited abnormally before finishing: record the
            # interruption on the job so waiting clients are unblocked.
            self._update_status('cancelled_jobs_count')
            logging.info("[%s] process interrupted and job %s hasn't finished; registering interruption in job",
                         self.name, job_id)
            job.done(None, "Interrupted/Cancelled while running.")
            job.expire(settings.JOB_EXPIRY_TIME)
        logging.info("[%s] Finished Processing %s (pid: %d status: %d)",
                     self.name, job_id, self.child_pid, status)
        # Reset bookkeeping so the worker loop can pick up the next job.
        self.child_pid = None
        self.current_job_id = None
def _process(self, job_id):
    """Child-process body: run the query for *job_id* and store the result.

    Runs in the forked child, so it opens its own redis connection
    instead of sharing the parent's.
    """
    redis_connection = redis.StrictRedis(**self.redis_connection_params)
    job = Job.load(redis_connection, job_id)
    if job.is_finished():
        # Another worker (or a previous run) already completed this job.
        logging.warning("[%s][%s] tried to process finished job.", self.name, job)
        return
    pid = os.getpid()
    job.processing(pid)
    logging.info("[%s][%s] running query...", self.name, job.id)
    start_time = time.time()
    self.set_title("running query %s" % job_id)
    logging.info("[%s][%s] Loading query runner (%s, %s)...", self.name, job.id,
                 job.data_source_name, job.data_source_type)
    query_runner = get_query_runner(job.data_source_type, job.data_source_options)
    # Prefix the query with identifying metadata unless the runner opts
    # out via annotate_query = False.
    if getattr(query_runner, 'annotate_query', True):
        annotated_query = "/* Pid: %s, Job Id: %s, Query hash: %s, Priority: %s */ %s" % \
            (pid, job.id, job.query_hash, job.priority, job.query)
    else:
        annotated_query = job.query
    # TODO: here's the part that needs to be forked, not all of the worker process...
    with self.statsd_client.timer('worker_{}.query_runner.{}.{}.run_time'.format(self.worker_id,
                                                                                 job.data_source_type,
                                                                                 job.data_source_name)):
        data, error = query_runner(annotated_query)
    run_time = time.time() - start_time
    logging.info("[%s][%s] query finished... data length=%s, error=%s",
                 self.name, job.id, data and len(data), error)
    # TODO: it is possible that storing the data will fail, and we will need to retry
    # while we already marked the job as done
    query_result_id = None
    if not error:
        self.set_title("storing results %s" % job_id)
        query_result_id = self.manager.store_query_result(job.data_source_id,
                                                          job.query, data, run_time,
                                                          datetime.datetime.utcnow())
    self.set_title("marking job as done %s" % job_id)
    job.done(query_result_id, error)
示例12: StatsClient
# 需要导入模块: from statsd import StatsClient [as 别名]
# 或者: from statsd.StatsClient import timer [as 别名]
from statsd import StatsClient
from datetime import datetime
from time import sleep

# Demo: emit nine timer samples to a statsd daemon on host 'metrics'
# (default port), each wrapping a sleep of 100..900 ms.
statsd_client = StatsClient(host='metrics')

for x in range(100, 1000, 100):  # sleep durations in milliseconds
    # Fix: the original used a Python-2-only print statement; calling
    # print() with a single argument behaves identically on Python 2
    # and is valid Python 3.
    print('sleeping for {0} ms'.format(x))
    with statsd_client.timer('sd_timer'):  # time the sleep under 'sd_timer'
        sleep(float(x) / float(1000))  # milliseconds -> seconds
示例13: StatsClient
# 需要导入模块: from statsd import StatsClient [as 别名]
# 或者: from statsd.StatsClient import timer [as 别名]
from statsd import StatsClient
from datetime import datetime
from time import sleep

statsd_client = StatsClient(host='metrics')

# Starting to include tags in the metric name (remember: tags have to
# be strings) — the x value is embedded as a tag per sample.
for x in range(100, 1000, 100):
    # Fix: the original used a Python-2-only print statement; print()
    # with one argument works identically on Python 2 and Python 3.
    print('sleeping for {0} ms'.format(x))
    with statsd_client.timer('sd_timer,tag1=foo,x={}'.format(x)):
        sleep(float(x) / float(1000))
示例14: Client
# 需要导入模块: from statsd import StatsClient [as 别名]
# 或者: from statsd.StatsClient import timer [as 别名]
class Client(object):
def __init__(self, server, zero_fill=True, **kw):
    """Connect to a monolith server and discover its ES endpoint.

    Performs a blocking GET against *server* to read the endpoint and
    field metadata. Statsd settings may be passed via the
    'statsd.host' / 'statsd.port' / 'statsd.prefix' keyword options.
    """
    self.server = server.rstrip('/')
    self.session = requests.session()
    # getting monolith info; .json is a method on older requests
    # releases and a property on newer ones, so handle both.
    info = self.session.get(server).json
    if callable(info):
        info = info()
    self.es = self.server + info['es_endpoint']
    self.fields = info['fields']
    self.zero_fill = zero_fill
    # statsd settings
    self.statsd = StatsClient(
        host=kw.get('statsd.host', 'localhost'),
        port=int(kw.get('statsd.port', 8125)),
        prefix=kw.get('statsd.prefix', 'monolith.client'))
def __call__(self, field, start, end, interval=DAY, strict_range=False,
**terms):
if isinstance(interval, basestring):
interval = _str2interval[interval.encode()]
if isinstance(start, basestring):
start = datetime.datetime.strptime(start.encode(),
'%Y-%m-%d').toordinal()
start = datetime.date.fromordinal(start)
end = datetime.datetime.strptime(end.encode(),
'%Y-%m-%d').toordinal()
end = datetime.date.fromordinal(end)
if interval == DAY:
drange = util.iterdays(start, end)
elif interval == WEEK:
drange = util.iterweeks(start, end)
elif interval == MONTH:
drange = util.itermonths(start, end)
else:
drange = util.iteryears(start, end)
# building the query
start_date_str = start.strftime('%Y-%m-%d')
end_date_str = end.strftime('%Y-%m-%d')
if isinstance(interval, int):
interval = _interval2str[interval]
# XXX we'll see later if we want to provide a
# nicer query interface
# we need a facet query
if strict_range:
greater = "gt"
lower = "lt"
else:
greater = "gte"
lower = "lte"
query = {
"query": {
"match_all": {},
},
"size": 0, # we aren't interested in the hits
"facets": {
"histo1": {
"date_histogram": {
"value_field": field,
"interval": interval,
"key_field": "date",
},
"facet_filter": {
"range": {
"date": {
greater: start_date_str,
lower: end_date_str,
}
}
}
}
}
}
if len(terms) > 0:
term = {}
for key, value in terms.items():
term[key] = value
range_ = query['facets']['histo1']['facet_filter']['range']
filter_ = {'and': [{'term': term},
{'range': range_}]}
query['facets']['histo1']['facet_filter'] = filter_
with self.statsd.timer('elasticsearch-query'):
res = self.session.post(self.es, data=json.dumps(query))
if res.status_code != 200:
#.........这里部分代码省略.........