This article collects typical usage examples of the Python function sentry.utils.metrics.timing. If you are wondering what the timing function does, or how to use it in practice, the curated examples below may help.
The following presents 15 code examples of the timing function, drawn from the Sentry codebase and sorted by popularity by default.
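Across the examples below the call shape is consistent: metrics.timing(key, value, instance=None, tags=None) records a single numeric observation, whether that is a duration in seconds, a byte count, or an item count. Here is a minimal sketch of the pattern; the workload and metric names are placeholders, and the keyword arguments are inferred from the examples that follow:

from time import time

from sentry.utils import metrics

def do_work():
    # Hypothetical workload standing in for real application code.
    return [1, 2, 3]

start = time()
rows = do_work()
# A duration, tagged with an instance name for per-callsite aggregation.
metrics.timing('mytask.duration', time() - start, instance='mytask')
# A plain count; tags add extra dimensions to the emitted metric.
metrics.timing('mytask.row-count', len(rows), tags={'source': 'example'})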
Example 1: __call__
def __call__(self, function):
    start = self.clock.time()
    try:
        for i in itertools.count(1):
            try:
                return function()
            except self.exceptions as error:
                delay = self.delay(i)
                now = self.clock.time()
                if (now + delay) > (start + self.timeout):
                    raise RetryException(
                        'Could not successfully execute %r within %.3f seconds (%s attempts.)' %
                        (function, now - start, i),
                        error,
                    )
                else:
                    logger.debug(
                        'Failed to execute %r due to %r on attempt #%s, retrying in %s seconds...',
                        function,
                        error,
                        i,
                        delay,
                    )
                    self.clock.sleep(delay)
    finally:
        if self.metric_instance:
            metrics.timing(
                'timedretrypolicy.duration',
                self.clock.time() - start,
                instance=self.metric_instance,
                tags=self.metric_tags,
            )
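Example 1 is the __call__ method of Sentry's timed retry policy; the metric key 'timedretrypolicy.duration' points at the TimedRetryPolicy class in sentry.utils.retries. A hedged usage sketch follows, assuming the constructor wires up the timeout and metric_instance attributes referenced above:

from sentry.utils.retries import TimedRetryPolicy

# Assumption: the constructor sets self.timeout and self.metric_instance.
# Retries the callable until it succeeds or 10 seconds have elapsed, then
# reports the total duration under 'timedretrypolicy.duration'.
retry = TimedRetryPolicy(10, metric_instance='cache.delete')
retry(lambda: cache.delete(key))  # `cache` and `key` are placeholders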
Example 2: process_pending
def process_pending(self):
    client = self.cluster.get_routing_client()
    lock_key = self._make_lock_key(self.pending_key)
    # prevent a stampede due to celerybeat + periodic task
    if not client.set(lock_key, '1', nx=True, ex=60):
        return

    try:
        keycount = 0
        with self.cluster.all() as conn:
            results = conn.zrange(self.pending_key, 0, -1)

        with self.cluster.all() as conn:
            for host_id, keys in six.iteritems(results.value):
                if not keys:
                    continue
                keycount += len(keys)
                for key in keys:
                    process_incr.apply_async(kwargs={
                        'key': key,
                    })
                conn.target([host_id]).zrem(self.pending_key, *keys)
        metrics.timing('buffer.pending-size', keycount)
    finally:
        client.delete(lock_key)
Example 3: index_event_tags
def index_event_tags(organization_id, project_id, event_id, tags,
                     group_id, environment_id, date_added=None, **kwargs):
    from sentry import tagstore

    with configure_scope() as scope:
        scope.set_tag("project", project_id)

    create_event_tags_kwargs = {}
    if date_added is not None:
        create_event_tags_kwargs['date_added'] = date_added

    metrics.timing(
        'tagstore.tags_per_event',
        len(tags),
        tags={
            'organization_id': organization_id,
        }
    )

    tagstore.create_event_tags(
        project_id=project_id,
        group_id=group_id,
        environment_id=environment_id,
        event_id=event_id,
        tags=tags,
        **create_event_tags_kwargs
    )
Example 4: putfile
def putfile(self, fileobj, blob_size=DEFAULT_BLOB_SIZE, commit=True):
    """
    Save a fileobj into a number of chunks.

    Returns a list of `FileBlobIndex` items.

    >>> indexes = file.putfile(fileobj)
    """
    results = []
    offset = 0
    checksum = sha1(b'')

    while True:
        contents = fileobj.read(blob_size)
        if not contents:
            break

        checksum.update(contents)

        blob_fileobj = ContentFile(contents)
        blob = FileBlob.from_file(blob_fileobj)
        results.append(FileBlobIndex.objects.create(
            file=self,
            blob=blob,
            offset=offset,
        ))
        offset += blob.size

    self.size = offset
    self.checksum = checksum.hexdigest()
    metrics.timing('filestore.file-size', offset)
    if commit:
        self.save()
    return results
Example 5: process_pending
def process_pending(self):
    client = self.cluster.get_routing_client()
    lock_key = self._make_lock_key(self.pending_key)
    # prevent a stampede due to celerybeat + periodic task
    if not client.set(lock_key, '1', nx=True, ex=60):
        return

    try:
        for host_id in self.cluster.hosts.iterkeys():
            conn = self.cluster.get_local_client(host_id)
            keys = conn.zrange(self.pending_key, 0, -1)
            if not keys:
                continue
            keycount = 0
            for key in keys:
                keycount += 1
                process_incr.apply_async(kwargs={
                    'key': key,
                })
            pipe = conn.pipeline()
            pipe.zrem(self.pending_key, *keys)
            pipe.execute()
            metrics.timing('buffer.pending-size', keycount)
    finally:
        client.delete(lock_key)
Example 6: putfile
def putfile(self, fileobj, blob_size=DEFAULT_BLOB_SIZE):
    """
    Save a fileobj into a number of chunks.

    Returns a list of `FileBlobIndex` items.

    >>> indexes = file.putfile(fileobj)
    """
    results = []
    offset = 0
    while True:
        contents = fileobj.read(blob_size)
        if not contents:
            break
        blob_fileobj = ContentFile(contents)
        blob = FileBlob.from_file(blob_fileobj)
        results.append(
            FileBlobIndex.objects.create(
                file=self,
                blob=blob,
                offset=offset,
            )
        )
        offset += blob.size
    metrics.timing('filestore.file-size', offset)
    return results
Example 7: save_event
def save_event(cache_key=None, data=None, start_time=None, event_id=None, **kwargs):
    """
    Saves an event to the database.
    """
    from sentry.event_manager import EventManager

    if cache_key:
        data = default_cache.get(cache_key)

    if event_id is None and data is not None:
        event_id = data['event_id']

    if data is None:
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'post'})
        return

    project = data.pop('project')

    delete_raw_event(project, event_id)

    Raven.tags_context({
        'project': project,
    })

    try:
        manager = EventManager(data)
        manager.save(project)
    finally:
        if cache_key:
            default_cache.delete(cache_key)
        if start_time:
            metrics.timing('events.time-to-process', time() - start_time,
                           instance=data['platform'])
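The 'events.time-to-process' value is only meaningful if start_time was captured when the event first entered the pipeline, well before this task ran. A sketch of the dispatching side under that assumption (save_event is treated as a Celery-style task here, and the call site is illustrative):

from time import time

# Hypothetical dispatch: stamp the task with the enqueue time so that
# 'events.time-to-process' measures end-to-end latency rather than just
# the final database save.
save_event.delay(cache_key=cache_key, start_time=time())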
Example 8: try_repeated
def try_repeated(func):
    """
    Runs a function a few times ignoring errors we see from GCS
    due to what appears to be network issues. This is a temporary workaround
    until we can find the root cause.
    """
    if hasattr(func, '__name__'):
        func_name = func.__name__
    elif hasattr(func, 'func'):
        # Partials
        func_name = getattr(func.func, '__name__', '__unknown__')
    else:
        func_name = '__unknown__'

    metrics_key = 'filestore.gcs.retry'
    metrics_tags = {'function': func_name}
    idx = 0
    while True:
        try:
            result = func()
            metrics_tags.update({'success': '1'})
            metrics.timing(metrics_key, idx, tags=metrics_tags)
            return result
        except (DataCorruption, TransportError, RefreshError, RequestException, OpenSSLError) as e:
            if idx >= GCS_RETRIES:
                metrics_tags.update({'success': '0', 'exception_class': e.__class__.__name__})
                metrics.timing(metrics_key, idx, tags=metrics_tags)
                raise
        idx += 1
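Note that metrics.timing is used here to record a retry count rather than a duration, one data point per overall attempt. Since try_repeated takes a zero-argument callable, call sites wrap their work in a lambda or functools.partial (the "Partials" branch above extracts a name from the latter). An illustrative call, with a hypothetical GCS download:

from functools import partial

# Hypothetical blob object; each failed call bumps `idx`, and the final
# metrics.timing('filestore.gcs.retry', idx, ...) reports how many retries
# this download needed.
data = try_repeated(partial(blob.download_as_string))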
Example 9: index_event_tags
def index_event_tags(organization_id, project_id, event_id, tags,
                     group_id, environment_id, date_added=None, **kwargs):
    from sentry import tagstore

    Raven.tags_context({
        'project': project_id,
    })

    create_event_tags_kwargs = {}
    if date_added is not None:
        create_event_tags_kwargs['date_added'] = date_added

    metrics.timing(
        'tagstore.tags_per_event',
        len(tags),
        tags={
            'organization_id': organization_id,
        }
    )

    tagstore.create_event_tags(
        project_id=project_id,
        group_id=group_id,
        environment_id=environment_id,
        event_id=event_id,
        tags=tags,
        **create_event_tags_kwargs
    )
Example 10: normalize
def normalize(self):
    with metrics.timer('events.store.normalize.duration'):
        self._normalize_impl()

    metrics.timing(
        'events.store.normalize.errors',
        len(self._data.get("errors") or ()),
    )
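This example pairs the two metric helpers: metrics.timer is a context manager that measures the wall time of a block, while metrics.timing records a value you have already computed (here, the number of normalization errors). A small sketch of the distinction, with placeholder keys and a hypothetical workload:

with metrics.timer('example.duration'):   # times the whole block
    items = expensive_step()              # hypothetical work

metrics.timing('example.item-count', len(items))  # records a precomputed value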
Example 11: set
def set(self, key, attachments, timeout=None):
    key = self.make_key(key)
    for index, attachment in enumerate(attachments):
        compressed = zlib.compress(attachment.data)
        self.inner.set(u'{}:{}'.format(key, index), compressed, timeout, raw=True)

        metrics_tags = {'type': attachment.type}
        metrics.incr('attachments.received', tags=metrics_tags, skip_internal=False)
        metrics.timing('attachments.blob-size.raw', len(attachment.data), tags=metrics_tags)
        metrics.timing('attachments.blob-size.compressed', len(compressed), tags=metrics_tags)

    meta = [attachment.meta() for attachment in attachments]
    self.inner.set(key, meta, timeout, raw=False)
Example 12: _record_time
def _record_time(self, request, status_code):
    if not hasattr(request, "_view_path"):
        return

    metrics.incr(
        "view.response", instance=request._view_path, tags={"method": request.method, "status_code": status_code}
    )

    if not hasattr(request, "_start_time"):
        return

    ms = int((time.time() - request._start_time) * 1000)
    metrics.timing("view.duration", ms, instance=request._view_path, tags={"method": request.method})
Example 13: _capture_stats
def _capture_stats(event, is_new):
    # TODO(dcramer): limit platforms to... something?
    group = event.group
    platform = group.platform
    if not platform:
        return
    platform = platform.split('-', 1)[0].split('_', 1)[0]

    if is_new:
        metrics.incr('events.unique')

    metrics.incr('events.processed')
    metrics.incr('events.processed.{platform}'.format(platform=platform))
    metrics.timing('events.size.data', event.size)
Example 14: _capture_stats
def _capture_stats(event, is_new):
    group = event.group
    platform = group.platform or group.project.platform
    if not platform:
        return
    platform = PLATFORM_ROOTS.get(platform, platform)
    if platform not in PLATFORM_LIST:
        return

    if is_new:
        metrics.incr("events.unique", 1)

    metrics.incr("events.processed", 1)
    metrics.incr("events.processed.{platform}".format(platform=platform), 1)
    metrics.timing("events.size.data", len(unicode(event.data)))
Example 15: process
def process(self, request, project, key, auth, helper, data, attachments=None, **kwargs):
    metrics.incr('events.total', skip_internal=False)

    if not data:
        track_outcome(project.organization_id, project.id, key.id, Outcome.INVALID, "no_data")
        raise APIError('No JSON data was found')

    remote_addr = request.META['REMOTE_ADDR']

    event_manager = EventManager(
        data,
        project=project,
        key=key,
        auth=auth,
        client_ip=remote_addr,
        user_agent=helper.context.agent,
        version=auth.version,
        content_encoding=request.META.get('HTTP_CONTENT_ENCODING', ''),
    )
    del data

    self.pre_normalize(event_manager, helper)
    event_manager.normalize()

    data = event_manager.get_data()
    dict_data = dict(data)
    data_size = len(json.dumps(dict_data))

    if data_size > 10000000:
        metrics.timing('events.size.rejected', data_size)
        track_outcome(
            project.organization_id,
            project.id,
            key.id,
            Outcome.INVALID,
            'too_large',
            event_id=dict_data.get('event_id')
        )
        raise APIForbidden("Event size exceeded 10MB after normalization.")

    metrics.timing(
        'events.size.data.post_storeendpoint',
        data_size,
        tags={'project_id': project.id}
    )

    return process_event(event_manager, project,
                         key, remote_addr, helper, attachments)