

Python default_cache.set Function Code Examples

This article collects and summarizes typical usage examples of the Python function sentry.cache.default_cache.set. If you are unsure what the set function does, how to call it, or what real-world usage looks like, the curated examples below may help.


The following shows 15 code examples of the set function, sorted by popularity by default.
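
Before diving in, here is a minimal sketch of the interface the examples rely on. It assumes default_cache exposes a Django-style get/set/delete API in which set takes a key, a value, and a timeout in seconds; the key and payload below are made up for illustration.

from sentry.cache import default_cache

# Store a value under a key for one hour (the third argument is a
# timeout in seconds); both the key and the payload are hypothetical.
default_cache.set('example:key', {'status': 'ok'}, 3600)

# get() returns None once the entry expires or was never written,
# which is why the examples below treat a cache miss as a failure.
value = default_cache.get('example:key')

# delete() removes an entry explicitly (see Example 7).
default_cache.delete('example:key')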

Example 1: process_event

def process_event(cache_key, start_time=None, **kwargs):
    from sentry.plugins import plugins

    data = default_cache.get(cache_key)

    if data is None:
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'process'})
        error_logger.error('process.failed.empty', extra={'cache_key': cache_key})
        return

    project = data['project']
    Raven.tags_context({
        'project': project,
    })

    # TODO(dcramer): ideally we would know if data changed by default
    has_changed = False
    for plugin in plugins.all(version=2):
        processors = safe_execute(plugin.get_event_preprocessors, data=data, _with_transaction=False)
        for processor in (processors or ()):
            result = safe_execute(processor, data)
            if result:
                data = result
                has_changed = True

    assert data['project'] == project, 'Project cannot be mutated by preprocessor'

    if has_changed:
        default_cache.set(cache_key, data, 3600)

    save_event.delay(cache_key=cache_key, data=None, start_time=start_time)
Developer: mvaled, Project: sentry, Lines: 31, Source: store.py

Example 2: preprocess_event

def preprocess_event(cache_key=None, data=None, start_time=None, **kwargs):
    from sentry.plugins import plugins

    if cache_key:
        data = default_cache.get(cache_key)

    if data is None:
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'pre'})
        logger.error('Data not available in preprocess_event (cache_key=%s)', cache_key)
        return

    project = data['project']

    # TODO(dcramer): ideally we would know if data changed by default
    has_changed = False
    for plugin in plugins.all(version=2):
        for processor in (safe_execute(plugin.get_event_preprocessors) or ()):
            result = safe_execute(processor, data)
            if result:
                data = result
                has_changed = True

    assert data['project'] == project, 'Project cannot be mutated by preprocessor'

    if has_changed and cache_key:
        default_cache.set(cache_key, data, 3600)

    if cache_key:
        data = None
    save_event.delay(cache_key=cache_key, data=data, start_time=start_time)
Developer: noah-lee, Project: sentry, Lines: 30, Source: store.py

Example 3: _update_cachefiles

    def _update_cachefiles(self, project, debug_files, cls):
        rv = []
        conversion_errors = {}

        for debug_file in debug_files:
            debug_id = debug_file.debug_id

            # Find all the known bad files we could not convert last time. We
            # use the debug identifier and file checksum to identify the source
            # DIF for historic reasons (debug_file.id would do, too).
            cache_key = 'scbe:%s:%s' % (debug_id, debug_file.file.checksum)
            err = default_cache.get(cache_key)
            if err is not None:
                conversion_errors[debug_id] = err
                continue

            # Download the original debug symbol and convert the object file to
            # a cache. This can either yield a cache object, an error or none of
            # the above. THE FILE DOWNLOAD CAN TAKE SIGNIFICANT TIME.
            with debug_file.file.getfile(as_tempfile=True) as tf:
                file, cache, err = self._update_cachefile(debug_file, tf.name, cls)

            # Store this conversion error so that we can skip subsequent
            # conversions. There might be concurrent conversions running for the
            # same debug file, however.
            if err is not None:
                default_cache.set(cache_key, err, CONVERSION_ERROR_TTL)
                conversion_errors[debug_id] = err
                continue

            if file is not None or cache is not None:
                rv.append((debug_id, file, cache))

        return rv, conversion_errors
Developer: getsentry, Project: sentry, Lines: 34, Source: debugfile.py

Example 4: insert_data_to_database

    def insert_data_to_database(self, data):
        # we might be passed LazyData
        if isinstance(data, LazyData):
            data = dict(data.items())
        cache_key = 'e:{1}:{0}'.format(data['project'], data['event_id'])
        default_cache.set(cache_key, data, timeout=3600)
        preprocess_event.delay(cache_key=cache_key, start_time=time())
Developer: mvaled, Project: sentry, Lines: 7, Source: coreapi.py
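
A note on the key format above: the positional indices in 'e:{1}:{0}' are swapped on purpose, so the event ID ends up before the project ID in the final key. The same format recurs in Examples 8 and 14. A quick illustration with made-up values:

# Hypothetical values, for illustration only.
data = {'project': 42, 'event_id': 'abc123'}
cache_key = 'e:{1}:{0}'.format(data['project'], data['event_id'])
# {0} is the first argument (the project) and {1} the second (the
# event ID), so the result is 'e:<event_id>:<project>'.
assert cache_key == 'e:abc123:42'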

Example 5: get

    def get(self, request, wizard_hash=None):
        """
        This tries to retrieve and return the cached content if possible;
        otherwise it creates a new cache entry.
        """
        if wizard_hash is not None:
            key = '%s%s' % (SETUP_WIZARD_CACHE_KEY, wizard_hash)
            wizard_data = default_cache.get(key)

            if wizard_data is None:
                return Response(status=404)
            elif wizard_data == 'empty':
                # when we just created a clean cache
                return Response(status=400)

            return Response(serialize(wizard_data))
        else:
            # This creates a new available hash url for the project wizard
            rate_limited = ratelimits.is_limited(
                key='rl:setup-wizard:ip:%s' % request.META['REMOTE_ADDR'],
                limit=10,
            )
            if rate_limited:
                logger.info('setup-wizard.rate-limit')
                return Response(
                    {
                        'Too many wizard requests',
                    }, status=403
                )
            wizard_hash = get_random_string(
                64, allowed_chars='abcdefghijklmnopqrstuvwxyz0123456789')

            key = '%s%s' % (SETUP_WIZARD_CACHE_KEY, wizard_hash)
            default_cache.set(key, 'empty', SETUP_WIZARD_CACHE_TIMEOUT)
            return Response(serialize({'hash': wizard_hash}))
Developer: Kayle009, Project: sentry, Lines: 35, Source: setup_wizard.py

Example 6: set_assemble_status

def set_assemble_status(project, checksum, state, detail=None):
    cache_key = 'assemble-status:%s' % _get_idempotency_id(
        project, checksum)

    # NB: Also cache successfully created debug files to avoid races between
    # multiple DIFs with the same identifier. On the downside, this blocks
    # re-uploads for 10 minutes.
    default_cache.set(cache_key, (state, detail), 600)
Developer: getsentry, Project: sentry, Lines: 8, Source: debugfile.py

Example 7: set_assemble_status

def set_assemble_status(project, checksum, state, detail=None):
    cache_key = 'assemble-status:%s' % _get_idempotency_id(
        project, checksum)

    # If the state is okay we actually clear it from the cache because in
    # that case a project dsym file was created.
    if state == ChunkFileState.OK:
        default_cache.delete(cache_key)
    else:
        default_cache.set(cache_key, (state, detail), 300)
Developer: hosmelq, Project: sentry, Lines: 10, Source: dsymfile.py
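
Examples 6 and 7 are two revisions of the same helper and illustrate a design trade-off. Example 7 deletes the key once the state is OK so a later upload starts fresh; Example 6 caches the success state for ten minutes to avoid races between concurrent uploads of the same file, at the cost of blocking re-uploads for that window. A minimal sketch of the two strategies, with hypothetical function names:

from sentry.cache import default_cache

def set_status_always_cache(cache_key, state, detail):
    # Example 6's approach: cache every state, including success, so
    # concurrent uploads of the same file see a consistent answer.
    default_cache.set(cache_key, (state, detail), 600)

def set_status_clear_on_success(cache_key, state, detail, ok_state):
    # Example 7's approach: drop the key on success so re-uploads are
    # never blocked, accepting a small race window between workers.
    if state == ok_state:
        default_cache.delete(cache_key)
    else:
        default_cache.set(cache_key, (state, detail), 300)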

Example 8: insert_data_to_database

    def insert_data_to_database(self, data, start_time=None, from_reprocessing=False):
        if start_time is None:
            start_time = time()
        # we might be passed LazyData
        if isinstance(data, LazyData):
            data = dict(data.items())
        cache_key = 'e:{1}:{0}'.format(data['project'], data['event_id'])
        default_cache.set(cache_key, data, timeout=3600)
        task = from_reprocessing and \
            preprocess_event_from_reprocessing or preprocess_event
        task.delay(cache_key=cache_key, start_time=start_time,
                   event_id=data['event_id'])
Developer: NuttasitBoonwat, Project: sentry, Lines: 12, Source: coreapi.py

Example 9: public_dsn

def public_dsn():
    project_id = settings.SENTRY_FRONTEND_PROJECT or settings.SENTRY_PROJECT
    cache_key = 'dsn:%s' % (project_id, )

    result = default_cache.get(cache_key)
    if result is None:
        key = _get_project_key(project_id)
        if key:
            result = key.dsn_public
        else:
            result = ''
        default_cache.set(cache_key, result, 60)
    return result
Developer: alexandrul, Project: sentry, Lines: 13, Source: sentry_dsn.py

Example 10: _update_cachefile

    def _update_cachefile(self, debug_file, tf):
        try:
            fo = FatObject.from_path(tf.name)
            o = fo.get_object(id=debug_file.debug_id)
            if o is None:
                return None, None
            symcache = o.make_symcache()
        except SymbolicError as e:
            default_cache.set('scbe:%s:%s' % (
                debug_file.debug_id, debug_file.file.checksum), e.message,
                CONVERSION_ERROR_TTL)

            if not isinstance(e, (SymCacheErrorMissingDebugSection, SymCacheErrorMissingDebugInfo)):
                logger.error('dsymfile.symcache-build-error',
                             exc_info=True, extra=dict(debug_id=debug_file.debug_id))

            return None, e.message

        # This task may run concurrently, or some other task might delete
        # symcaches while it is running, which is why this requires a loop
        # instead of just a retry on get.
        for iteration in range(5):
            file = File.objects.create(
                name=debug_file.debug_id,
                type='project.symcache',
            )
            file.putfile(symcache.open_stream())
            try:
                with transaction.atomic():
                    return ProjectSymCacheFile.objects.get_or_create(
                        project=debug_file.project,
                        cache_file=file,
                        dsym_file=debug_file,
                        defaults=dict(
                            checksum=debug_file.file.checksum,
                            version=symcache.file_format_version,
                        )
                    )[0], None
            except IntegrityError:
                file.delete()
                try:
                    return ProjectSymCacheFile.objects.get(
                        project=debug_file.project,
                        dsym_file=debug_file,
                    ), None
                except ProjectSymCacheFile.DoesNotExist:
                    continue

        raise RuntimeError('Concurrency error on symcache update')
Developer: yogeshmangaj, Project: sentry, Lines: 50, Source: dsymfile.py

Example 11: _do_process_event

def _do_process_event(cache_key, start_time, event_id):
    from sentry.plugins import plugins

    data = default_cache.get(cache_key)

    if data is None:
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'process'})
        error_logger.error('process.failed.empty',
            extra={'cache_key': cache_key})
        return

    project = data['project']
    Raven.tags_context({
        'project': project,
    })
    has_changed = False

    # Stacktrace based event processors.  These run before anything else.
    new_data = process_stacktraces(data)
    if new_data is not None:
        has_changed = True
        data = new_data

    # TODO(dcramer): ideally we would know if data changed by default
    # Default event processors.
    for plugin in plugins.all(version=2):
        processors = safe_execute(plugin.get_event_preprocessors,
                                  data=data, _with_transaction=False)
        for processor in (processors or ()):
            result = safe_execute(processor, data)
            if result:
                data = result
                has_changed = True

    assert data['project'] == project, 'Project cannot be mutated by preprocessor'

    if has_changed:
        issues = data.get('processing_issues')
        if issues:
            create_failed_event(cache_key, project, list(issues.values()),
                event_id=event_id)
            return

        default_cache.set(cache_key, data, 3600)

    save_event.delay(cache_key=cache_key, data=None, start_time=start_time,
        event_id=event_id)
Developer: rlugojr, Project: sentry, Lines: 47, Source: store.py

Example 12: reprocess_minidump

def reprocess_minidump(data):
    project = Project.objects.get_from_cache(id=data['project'])

    minidump_is_reprocessed_cache_key = minidump_reprocessed_cache_key_for_event(data)
    if default_cache.get(minidump_is_reprocessed_cache_key):
        return

    if not _is_symbolicator_enabled(project, data):
        rv = reprocess_minidump_with_cfi(data)
        default_cache.set(minidump_is_reprocessed_cache_key, True, 3600)
        return rv

    minidump = get_attached_minidump(data)

    if not minidump:
        logger.error("Missing minidump for minidump event")
        return

    request_id_cache_key = request_id_cache_key_for_event(data)

    response = run_symbolicator(
        project=project,
        request_id_cache_key=request_id_cache_key,
        create_task=create_minidump_task,
        minidump=make_buffered_slice_reader(minidump.data, None)
    )

    if handle_symbolicator_response_status(data, response):
        merge_symbolicator_minidump_response(data, response)

    event_cache_key = cache_key_for_event(data)
    default_cache.set(event_cache_key, dict(data), 3600)
    default_cache.set(minidump_is_reprocessed_cache_key, True, 3600)

    return data
Developer: yaoqi, Project: sentry, Lines: 35, Source: plugin.py
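
Beyond persisting the updated event payload, Example 12 uses default_cache.set as an idempotency flag that stops the same minidump from being reprocessed within the hour. A reduced sketch of that guard pattern, with a hypothetical helper name:

from sentry.cache import default_cache

def run_at_most_hourly(flag_key, work):
    # A truthy flag means the work already ran recently; skip it.
    if default_cache.get(flag_key):
        return None
    result = work()
    # Remember the run for an hour; the flag then expires on its own.
    default_cache.set(flag_key, True, 3600)
    return result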

Example 13: insert_data_to_database

    def insert_data_to_database(self, data, start_time=None,
                                from_reprocessing=False, attachments=None):
        if start_time is None:
            start_time = time()

        # we might be passed some subclasses of dict that fail dumping
        if isinstance(data, CANONICAL_TYPES):
            data = dict(data.items())

        cache_timeout = 3600
        cache_key = cache_key_for_event(data)
        default_cache.set(cache_key, data, cache_timeout)

        # Attachments will be empty or None if the "event-attachments" feature
        # is turned off. For native crash reports it will still contain the
        # crash dump (e.g. minidump) so we can load it during processing.
        if attachments is not None:
            attachment_cache.set(cache_key, attachments, cache_timeout)

        # NOTE: Project is bound to the context in most cases in production, which
        # is enough for us to do `projects:kafka-ingest` testing.
        project = self.context and self.context.project

        if project and features.has('projects:kafka-ingest', project=project):
            kafka.produce_sync(
                settings.KAFKA_PREPROCESS,
                value=json.dumps({
                    'cache_key': cache_key,
                    'start_time': start_time,
                    'from_reprocessing': from_reprocessing,
                    'data': data,
                }),
            )
        else:
            task = from_reprocessing and \
                preprocess_event_from_reprocessing or preprocess_event
            task.delay(cache_key=cache_key, start_time=start_time,
                       event_id=data['event_id'])
Developer: getsentry, Project: sentry, Lines: 38, Source: coreapi.py

Example 14: insert_data_to_database

    def insert_data_to_database(self, data, start_time=None,
                                from_reprocessing=False, attachments=None):
        if start_time is None:
            start_time = time()

        # we might be passed some subclasses of dict that fail dumping
        if isinstance(data, DOWNGRADE_DATA_TYPES):
            data = dict(data.items())

        cache_timeout = 3600
        cache_key = u'e:{1}:{0}'.format(data['project'], data['event_id'])
        default_cache.set(cache_key, data, cache_timeout)

        # Attachments will be empty or None if the "event-attachments" feature
        # is turned off. For native crash reports it will still contain the
        # crash dump (e.g. minidump) so we can load it during processing.
        if attachments is not None:
            attachment_cache.set(cache_key, attachments, cache_timeout)

        task = from_reprocessing and \
            preprocess_event_from_reprocessing or preprocess_event
        task.delay(cache_key=cache_key, start_time=start_time,
                   event_id=data['event_id'])
Developer: alexandrul, Project: sentry, Lines: 23, Source: coreapi.py

Example 15: _do_process_event

def _do_process_event(cache_key, start_time, event_id, process_task):
    from sentry.plugins import plugins

    data = default_cache.get(cache_key)

    if data is None:
        metrics.incr(
            'events.failed',
            tags={
                'reason': 'cache',
                'stage': 'process'},
            skip_internal=False)
        error_logger.error('process.failed.empty', extra={'cache_key': cache_key})
        return

    data = CanonicalKeyDict(data)
    project = data['project']

    with configure_scope() as scope:
        scope.set_tag("project", project)

    has_changed = False

    # Fetch the reprocessing revision
    reprocessing_rev = reprocessing.get_reprocessing_revision(project)

    # Event enhancers.  These run before anything else.
    for plugin in plugins.all(version=2):
        enhancers = safe_execute(plugin.get_event_enhancers, data=data)
        for enhancer in (enhancers or ()):
            enhanced = safe_execute(enhancer, data)
            if enhanced:
                data = enhanced
                has_changed = True

    # Stacktrace based event processors.
    new_data = process_stacktraces(data)
    if new_data is not None:
        has_changed = True
        data = new_data

    # TODO(dcramer): ideally we would know if data changed by default
    # Default event processors.
    for plugin in plugins.all(version=2):
        processors = safe_execute(
            plugin.get_event_preprocessors, data=data, _with_transaction=False
        )
        for processor in (processors or ()):
            result = safe_execute(processor, data)
            if result:
                data = result
                has_changed = True

    assert data['project'] == project, 'Project cannot be mutated by preprocessor'

    if has_changed:
        issues = data.get('processing_issues')
        try:
            if issues and create_failed_event(
                cache_key, project, list(issues.values()),
                event_id=event_id, start_time=start_time,
                reprocessing_rev=reprocessing_rev
            ):
                return
        except RetryProcessing:
            # If `create_failed_event` indicates that we need to retry we
            # invoke ourselves again.  This happens when the reprocessing
            # revision changed while we were processing.
            process_task.delay(cache_key, start_time=start_time,
                               event_id=event_id)
            return

        # We cannot persist canonical types in the cache, so we need to
        # downgrade this.
        if isinstance(data, CANONICAL_TYPES):
            data = dict(data.items())
        default_cache.set(cache_key, data, 3600)

    save_event.delay(
        cache_key=cache_key, data=None, start_time=start_time, event_id=event_id,
        project_id=project
    )
Developer: Kayle009, Project: sentry, Lines: 82, Source: store.py


Note: The sentry.cache.default_cache.set examples in this article were compiled by 纯净天空 from open source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open source projects contributed by various developers; copyright belongs to the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.