

Python Linearizer.queue Method Code Examples

This article collects typical usage examples of the Python method synapse.util.async_helpers.Linearizer.queue. If you have been wondering what Linearizer.queue does, how to call it, or what real-world uses look like, the curated code examples below should help. You can also explore further usage examples of the containing class, synapse.util.async_helpers.Linearizer.


Fifteen code examples of the Linearizer.queue method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
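
As a quick orientation before the examples, here is a minimal sketch of the pattern they all share, written against the Deferred-based API used in these snippets (the function name do_work and the linearizer name are illustrative, not taken from the examples): Linearizer.queue(key) returns a Deferred that fires with a context manager once it is the caller's turn for that key, and leaving the with block releases the slot to the next queued caller for the same key.

from twisted.internet import defer

from synapse.util.async_helpers import Linearizer

# One Linearizer serialises work per key; callers queued on the same key run
# one at a time (or up to max_count at a time, if max_count was given).
linearizer = Linearizer(name="example_linearizer")


@defer.inlineCallbacks
def do_work(key):
    # queue() returns a Deferred that fires with a context manager once it is
    # our turn for this key.
    with (yield linearizer.queue(key)):
        # Critical section: only one caller per key executes here at a time.
        yield defer.succeed("done")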

Example 1: test_cancellation

# Required import: from synapse.util.async_helpers import Linearizer [as alias]
# Or: from synapse.util.async_helpers.Linearizer import queue [as alias]
# (this excerpt also relies on: from twisted.internet import defer
#  and: from twisted.internet.defer import CancelledError)
    @defer.inlineCallbacks  # decorator omitted from the excerpt; required for the yields below
    def test_cancellation(self):
        linearizer = Linearizer()

        key = object()

        d1 = linearizer.queue(key)
        cm1 = yield d1

        d2 = linearizer.queue(key)
        self.assertFalse(d2.called)

        d3 = linearizer.queue(key)
        self.assertFalse(d3.called)

        d2.cancel()

        with cm1:
            pass

        self.assertTrue(d2.called)
        try:
            yield d2
            self.fail("Expected d2 to raise CancelledError")
        except CancelledError:
            pass

        with (yield d3):
            pass
Developer: DoubleMalt, Project: synapse, Lines: 30, Source: test_linearizer.py

Example 2: test_linearizer

# Required import: from synapse.util.async_helpers import Linearizer [as alias]
# Or: from synapse.util.async_helpers.Linearizer import queue [as alias]
# (this excerpt also relies on: from twisted.internet import defer)
    @defer.inlineCallbacks  # decorator omitted from the excerpt; required for the yields below
    def test_linearizer(self):
        linearizer = Linearizer()

        key = object()

        d1 = linearizer.queue(key)
        cm1 = yield d1

        d2 = linearizer.queue(key)
        self.assertFalse(d2.called)

        with cm1:
            self.assertFalse(d2.called)

        with (yield d2):
            pass
Developer: DoubleMalt, Project: synapse, Lines: 18, Source: test_linearizer.py

Example 3: _LimitedHostnameResolver

# Required import: from synapse.util.async_helpers import Linearizer [as alias]
# Or: from synapse.util.async_helpers.Linearizer import queue [as alias]
class _LimitedHostnameResolver(object):
    """Wraps an IHostnameResolver, limiting the number of in-flight DNS lookups.
    """

    def __init__(self, resolver, max_dns_requests_in_flight):
        self._resolver = resolver
        self._limiter = Linearizer(
            name="dns_client_limiter", max_count=max_dns_requests_in_flight,
        )

    def resolveHostName(self, resolutionReceiver, hostName, portNumber=0,
                        addressTypes=None, transportSemantics='TCP'):
        # Note this is happening deep within the reactor, so we don't need to
        # worry about log contexts.

        # We need this function to return `resolutionReceiver` so we do all the
        # actual logic involving deferreds in a separate function.
        self._resolve(
            resolutionReceiver, hostName, portNumber,
            addressTypes, transportSemantics,
        )

        return resolutionReceiver

    @defer.inlineCallbacks
    def _resolve(self, resolutionReceiver, hostName, portNumber=0,
                 addressTypes=None, transportSemantics='TCP'):

        with (yield self._limiter.queue(())):
            # resolveHostName doesn't return a Deferred, so we need to hook into
            # the receiver interface to get told when resolution has finished.

            deferred = defer.Deferred()
            receiver = _DeferredResolutionReceiver(resolutionReceiver, deferred)

            self._resolver.resolveHostName(
                receiver, hostName, portNumber,
                addressTypes, transportSemantics,
            )

            yield deferred
Developer: matrix-org, Project: synapse, Lines: 43, Source: _base.py

Example 4: test_multiple_entries

# Required import: from synapse.util.async_helpers import Linearizer [as alias]
# Or: from synapse.util.async_helpers.Linearizer import queue [as alias]
# (this excerpt also relies on: from twisted.internet import defer)
    @defer.inlineCallbacks  # decorator omitted from the excerpt; required for the yields below
    def test_multiple_entries(self):
        limiter = Linearizer(max_count=3)

        key = object()

        d1 = limiter.queue(key)
        cm1 = yield d1

        d2 = limiter.queue(key)
        cm2 = yield d2

        d3 = limiter.queue(key)
        cm3 = yield d3

        d4 = limiter.queue(key)
        self.assertFalse(d4.called)

        d5 = limiter.queue(key)
        self.assertFalse(d5.called)

        with cm1:
            self.assertFalse(d4.called)
            self.assertFalse(d5.called)

        cm4 = yield d4
        self.assertFalse(d5.called)

        with cm3:
            self.assertFalse(d5.called)

        cm5 = yield d5

        with cm2:
            pass

        with cm4:
            pass

        with cm5:
            pass

        d6 = limiter.queue(key)
        with (yield d6):
            pass
Developer: DoubleMalt, Project: synapse, Lines: 46, Source: test_linearizer.py

Example 5: StateResolutionHandler

# Required import: from synapse.util.async_helpers import Linearizer [as alias]
# Or: from synapse.util.async_helpers.Linearizer import queue [as alias]
class StateResolutionHandler(object):
    """Responsible for doing state conflict resolution.

    Note that the storage layer depends on this handler, so all functions must
    be storage-independent.
    """
    def __init__(self, hs):
        self.clock = hs.get_clock()

        # dict of set of event_ids -> _StateCacheEntry.
        self._state_cache = None
        self.resolve_linearizer = Linearizer(name="state_resolve_lock")

        self._state_cache = ExpiringCache(
            cache_name="state_cache",
            clock=self.clock,
            max_len=SIZE_OF_CACHE,
            expiry_ms=EVICTION_TIMEOUT_SECONDS * 1000,
            iterable=True,
            reset_expiry_on_get=True,
        )

    @defer.inlineCallbacks
    @log_function
    def resolve_state_groups(
        self, room_id, room_version, state_groups_ids, event_map, state_res_store,
    ):
        """Resolves conflicts between a set of state groups

        Always generates a new state group (unless we hit the cache), so should
        not be called for a single state group

        Args:
            room_id (str): room we are resolving for (used for logging)
            room_version (str): version of the room
            state_groups_ids (dict[int, dict[(str, str), str]]):
                 map from state group id to the state in that state group
                (where 'state' is a map from state key to event id)

            event_map(dict[str,FrozenEvent]|None):
                a dict from event_id to event, for any events that we happen to
                have in flight (eg, those currently being persisted). This will be
                used as a starting point for finding the state we need; any missing
                events will be requested via state_res_store.

                If None, all events will be fetched via state_res_store.

            state_res_store (StateResolutionStore)

        Returns:
            Deferred[_StateCacheEntry]: resolved state
        """
        logger.debug(
            "resolve_state_groups state_groups %s",
            state_groups_ids.keys()
        )

        group_names = frozenset(state_groups_ids.keys())

        with (yield self.resolve_linearizer.queue(group_names)):
            if self._state_cache is not None:
                cache = self._state_cache.get(group_names, None)
                if cache:
                    defer.returnValue(cache)

            logger.info(
                "Resolving state for %s with %d groups", room_id, len(state_groups_ids)
            )

            # start by assuming we won't have any conflicted state, and build up the new
            # state map by iterating through the state groups. If we discover a conflict,
            # we give up and instead use `resolve_events_with_store`.
            #
            # XXX: is this actually worthwhile, or should we just let
            # resolve_events_with_store do it?
            new_state = {}
            conflicted_state = False
            for st in itervalues(state_groups_ids):
                for key, e_id in iteritems(st):
                    if key in new_state:
                        conflicted_state = True
                        break
                    new_state[key] = e_id
                if conflicted_state:
                    break

            if conflicted_state:
                logger.info("Resolving conflicted state for %r", room_id)
                with Measure(self.clock, "state._resolve_events"):
                    new_state = yield resolve_events_with_store(
                        room_version,
                        list(itervalues(state_groups_ids)),
                        event_map=event_map,
                        state_res_store=state_res_store,
                    )

            # if the new state matches any of the input state groups, we can
            # use that state group again. Otherwise we will generate a state_id
            # which will be used as a cache key for future resolutions, but
            # not get persisted.
#.........some code omitted here.........
Developer: matrix-org, Project: synapse, Lines: 103, Source: __init__.py

Example 6: RegistrationHandler

# Required import: from synapse.util.async_helpers import Linearizer [as alias]
# Or: from synapse.util.async_helpers.Linearizer import queue [as alias]

#.........some code omitted here.........

        # Now we have a matrix ID, bind it to the threepids we were given
        for c in threepidCreds:
            # XXX: This should be a deferred list, shouldn't it?
            yield self.identity_handler.bind_threepid(c, user_id)

    def check_user_id_not_appservice_exclusive(self, user_id, allowed_appservice=None):
        # don't allow people to register the server notices mxid
        if self._server_notices_mxid is not None:
            if user_id == self._server_notices_mxid:
                raise SynapseError(
                    400, "This user ID is reserved.",
                    errcode=Codes.EXCLUSIVE
                )

        # valid user IDs must not clash with any user ID namespaces claimed by
        # application services.
        services = self.store.get_app_services()
        interested_services = [
            s for s in services
            if s.is_interested_in_user(user_id)
            and s != allowed_appservice
        ]
        for service in interested_services:
            if service.is_exclusive_user(user_id):
                raise SynapseError(
                    400, "This user ID is reserved by an application service.",
                    errcode=Codes.EXCLUSIVE
                )

    @defer.inlineCallbacks
    def _generate_user_id(self, reseed=False):
        if reseed or self._next_generated_user_id is None:
            with (yield self._generate_user_id_linearizer.queue(())):
                if reseed or self._next_generated_user_id is None:
                    self._next_generated_user_id = (
                        yield self.store.find_next_generated_user_id_localpart()
                    )

        id = self._next_generated_user_id
        self._next_generated_user_id += 1
        defer.returnValue(str(id))

    @defer.inlineCallbacks
    def _validate_captcha(self, ip_addr, private_key, challenge, response):
        """Validates the captcha provided.

        Used only by c/s api v1

        Returns:
            dict: Containing 'valid'(bool) and 'error_url'(str) if invalid.

        """
        response = yield self._submit_captcha(ip_addr, private_key, challenge,
                                              response)
        # parse Google's response. Lovely format..
        lines = response.split('\n')
        json = {
            "valid": lines[0] == 'true',
            "error_url": "http://www.recaptcha.net/recaptcha/api/challenge?" +
                         "error=%s" % lines[1]
        }
        defer.returnValue(json)

    @defer.inlineCallbacks
    def _submit_captcha(self, ip_addr, private_key, challenge, response):
Developer: matrix-org, Project: synapse, Lines: 70, Source: register.py

Example 7: FederationSenderHandler

# Required import: from synapse.util.async_helpers import Linearizer [as alias]
# Or: from synapse.util.async_helpers.Linearizer import queue [as alias]
class FederationSenderHandler(object):
    """Processes the replication stream and forwards the appropriate entries
    to the federation sender.
    """
    def __init__(self, hs, replication_client):
        self.store = hs.get_datastore()
        self._is_mine_id = hs.is_mine_id
        self.federation_sender = hs.get_federation_sender()
        self.replication_client = replication_client

        self.federation_position = self.store.federation_out_pos_startup
        self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer")

        self._last_ack = self.federation_position

        self._room_serials = {}
        self._room_typing = {}

    def on_start(self):
        # There may be some events that are persisted but haven't been sent,
        # so send them now.
        self.federation_sender.notify_new_events(
            self.store.get_room_max_stream_ordering()
        )

    def stream_positions(self):
        return {"federation": self.federation_position}

    def process_replication_rows(self, stream_name, token, rows):
        # The federation stream contains things that we want to send out, e.g.
        # presence, typing, etc.
        if stream_name == "federation":
            send_queue.process_rows_for_federation(self.federation_sender, rows)
            run_in_background(self.update_token, token)

        # We also need to poke the federation sender when new events happen
        elif stream_name == "events":
            self.federation_sender.notify_new_events(token)

        # ... and when new receipts happen
        elif stream_name == ReceiptsStream.NAME:
            run_as_background_process(
                "process_receipts_for_federation", self._on_new_receipts, rows,
            )

    @defer.inlineCallbacks
    def _on_new_receipts(self, rows):
        """
        Args:
            rows (iterable[synapse.replication.tcp.streams.ReceiptsStreamRow]):
                new receipts to be processed
        """
        for receipt in rows:
            # we only want to send on receipts for our own users
            if not self._is_mine_id(receipt.user_id):
                continue
            receipt_info = ReadReceipt(
                receipt.room_id,
                receipt.receipt_type,
                receipt.user_id,
                [receipt.event_id],
                receipt.data,
            )
            yield self.federation_sender.send_read_receipt(receipt_info)

    @defer.inlineCallbacks
    def update_token(self, token):
        try:
            self.federation_position = token

            # We linearize here to ensure we don't have races updating the token
            with (yield self._fed_position_linearizer.queue(None)):
                if self._last_ack < self.federation_position:
                    yield self.store.update_federation_out_pos(
                        "federation", self.federation_position
                    )

                    # We ACK this token over replication so that the master can drop
                    # its in memory queues
                    self.replication_client.send_federation_ack(self.federation_position)
                    self._last_ack = self.federation_position
        except Exception:
            logger.exception("Error updating federation stream position")
Developer: matrix-org, Project: synapse, Lines: 85, Source: federation_sender.py

Example 8: _JoinedHostsCache

# Required import: from synapse.util.async_helpers import Linearizer [as alias]
# Or: from synapse.util.async_helpers.Linearizer import queue [as alias]
class _JoinedHostsCache(object):
    """Cache for joined hosts in a room that is optimised to handle updates
    via state deltas.
    """

    def __init__(self, store, room_id):
        self.store = store
        self.room_id = room_id

        self.hosts_to_joined_users = {}

        self.state_group = object()

        self.linearizer = Linearizer("_JoinedHostsCache")

        self._len = 0

    @defer.inlineCallbacks
    def get_destinations(self, state_entry):
        """Get set of destinations for a state entry

        Args:
            state_entry(synapse.state._StateCacheEntry)
        """
        if state_entry.state_group == self.state_group:
            defer.returnValue(frozenset(self.hosts_to_joined_users))

        with (yield self.linearizer.queue(())):
            if state_entry.state_group == self.state_group:
                pass
            elif state_entry.prev_group == self.state_group:
                for (typ, state_key), event_id in iteritems(state_entry.delta_ids):
                    if typ != EventTypes.Member:
                        continue

                    host = intern_string(get_domain_from_id(state_key))
                    user_id = state_key
                    known_joins = self.hosts_to_joined_users.setdefault(host, set())

                    event = yield self.store.get_event(event_id)
                    if event.membership == Membership.JOIN:
                        known_joins.add(user_id)
                    else:
                        known_joins.discard(user_id)

                        if not known_joins:
                            self.hosts_to_joined_users.pop(host, None)
            else:
                joined_users = yield self.store.get_joined_users_from_state(
                    self.room_id, state_entry
                )

                self.hosts_to_joined_users = {}
                for user_id in joined_users:
                    host = intern_string(get_domain_from_id(user_id))
                    self.hosts_to_joined_users.setdefault(host, set()).add(user_id)

            if state_entry.state_group:
                self.state_group = state_entry.state_group
            else:
                self.state_group = object()
            self._len = sum(len(v) for v in itervalues(self.hosts_to_joined_users))
        defer.returnValue(frozenset(self.hosts_to_joined_users))

    def __len__(self):
        return self._len
Developer: matrix-org, Project: synapse, Lines: 68, Source: roommember.py

Example 9: MediaRepository

# Required import: from synapse.util.async_helpers import Linearizer [as alias]
# Or: from synapse.util.async_helpers.Linearizer import queue [as alias]

#.........some code omitted here.........

        responder = yield self.media_storage.fetch_media(file_info)
        yield respond_with_responder(
            request, responder, media_type, media_length, upload_name,
        )

    @defer.inlineCallbacks
    def get_remote_media(self, request, server_name, media_id, name):
        """Respond to requests for remote media.

        Args:
            request(twisted.web.http.Request)
            server_name (str): Remote server_name where the media originated.
            media_id (str): The media ID of the content (as defined by the
                remote server).
            name (str|None): Optional name that, if specified, will be used as
                the filename in the Content-Disposition header of the response.

        Returns:
            Deferred: Resolves once a response has successfully been written
                to request
        """
        if (
            self.federation_domain_whitelist is not None and
            server_name not in self.federation_domain_whitelist
        ):
            raise FederationDeniedError(server_name)

        self.mark_recently_accessed(server_name, media_id)

        # We linearize here to ensure that we don't try and download remote
        # media multiple times concurrently
        key = (server_name, media_id)
        with (yield self.remote_media_linearizer.queue(key)):
            responder, media_info = yield self._get_remote_media_impl(
                server_name, media_id,
            )

        # We deliberately stream the file outside the lock
        if responder:
            media_type = media_info["media_type"]
            media_length = media_info["media_length"]
            upload_name = name if name else media_info["upload_name"]
            yield respond_with_responder(
                request, responder, media_type, media_length, upload_name,
            )
        else:
            respond_404(request)

    @defer.inlineCallbacks
    def get_remote_media_info(self, server_name, media_id):
        """Gets the media info associated with the remote file, downloading
        if necessary.

        Args:
            server_name (str): Remote server_name where the media originated.
            media_id (str): The media ID of the content (as defined by the
                remote server).

        Returns:
            Deferred[dict]: The media_info of the file
        """
        if (
            self.federation_domain_whitelist is not None and
            server_name not in self.federation_domain_whitelist
        ):
Developer: DoubleMalt, Project: synapse, Lines: 70, Source: media_repository.py

Example 10: EventCreationHandler

# Required import: from synapse.util.async_helpers import Linearizer [as alias]
# Or: from synapse.util.async_helpers.Linearizer import queue [as alias]

#.........some code omitted here.........
        """
        prev_state_ids = yield context.get_prev_state_ids(self.store)
        prev_event_id = prev_state_ids.get((event.type, event.state_key))
        prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
        if not prev_event:
            return

        if prev_event and event.user_id == prev_event.user_id:
            prev_content = encode_canonical_json(prev_event.content)
            next_content = encode_canonical_json(event.content)
            if prev_content == next_content:
                defer.returnValue(prev_event)
        return

    @defer.inlineCallbacks
    def create_and_send_nonmember_event(
        self,
        requester,
        event_dict,
        ratelimit=True,
        txn_id=None
    ):
        """
        Creates an event, then sends it.

        See self.create_event and self.send_nonmember_event.
        """

        # We limit the number of concurrent event sends in a room so that we
        # don't fork the DAG too much. If we don't limit then we can end up in
        # a situation where event persistence can't keep up, causing
        # extremities to pile up, which in turn leads to state resolution
        # taking longer.
        with (yield self.limiter.queue(event_dict["room_id"])):
            event, context = yield self.create_event(
                requester,
                event_dict,
                token_id=requester.access_token_id,
                txn_id=txn_id
            )

            spam_error = self.spam_checker.check_event_for_spam(event)
            if spam_error:
                if not isinstance(spam_error, string_types):
                    spam_error = "Spam is not permitted here"
                raise SynapseError(
                    403, spam_error, Codes.FORBIDDEN
                )

            yield self.send_nonmember_event(
                requester,
                event,
                context,
                ratelimit=ratelimit,
            )
        defer.returnValue(event)

    @measure_func("create_new_client_event")
    @defer.inlineCallbacks
    def create_new_client_event(self, builder, requester=None,
                                prev_events_and_hashes=None):
        """Create a new event for a local client

        Args:
            builder (EventBuilder):
Developer: matrix-org, Project: synapse, Lines: 69, Source: message.py

Example 11: RoomMemberHandler

# Required import: from synapse.util.async_helpers import Linearizer [as alias]
# Or: from synapse.util.async_helpers.Linearizer import queue [as alias]

#.........some code omitted here.........
                    # Save back to user's m.direct account data
                    yield self.store.add_account_data_for_user(
                        user_id, "m.direct", direct_rooms,
                    )
                    break

        # Copy room tags if applicable
        room_tags = yield self.store.get_tags_for_room(
            user_id, old_room_id,
        )

        # Copy each room tag to the new room
        for tag, tag_content in room_tags.items():
            yield self.store.add_tag_to_room(
                user_id, new_room_id, tag, tag_content
            )

    @defer.inlineCallbacks
    def update_membership(
            self,
            requester,
            target,
            room_id,
            action,
            txn_id=None,
            remote_room_hosts=None,
            third_party_signed=None,
            ratelimit=True,
            content=None,
            require_consent=True,
    ):
        key = (room_id,)

        with (yield self.member_linearizer.queue(key)):
            result = yield self._update_membership(
                requester,
                target,
                room_id,
                action,
                txn_id=txn_id,
                remote_room_hosts=remote_room_hosts,
                third_party_signed=third_party_signed,
                ratelimit=ratelimit,
                content=content,
                require_consent=require_consent,
            )

        defer.returnValue(result)

    @defer.inlineCallbacks
    def _update_membership(
            self,
            requester,
            target,
            room_id,
            action,
            txn_id=None,
            remote_room_hosts=None,
            third_party_signed=None,
            ratelimit=True,
            content=None,
            require_consent=True,
    ):
        content_specified = bool(content)
        if content is None:
            content = {}
Developer: matrix-org, Project: synapse, Lines: 70, Source: room_member.py

Example 12: DeviceListEduUpdater

# Required import: from synapse.util.async_helpers import Linearizer [as alias]
# Or: from synapse.util.async_helpers.Linearizer import queue [as alias]
class DeviceListEduUpdater(object):
    "Handles incoming device list updates from federation and updates the DB"

    def __init__(self, hs, device_handler):
        self.store = hs.get_datastore()
        self.federation = hs.get_federation_client()
        self.clock = hs.get_clock()
        self.device_handler = device_handler

        self._remote_edu_linearizer = Linearizer(name="remote_device_list")

        # user_id -> list of updates waiting to be handled.
        self._pending_updates = {}

        # Recently seen stream ids. We don't bother keeping these in the DB,
        # but they're useful to have them about to reduce the number of spurious
        # resyncs.
        self._seen_updates = ExpiringCache(
            cache_name="device_update_edu",
            clock=self.clock,
            max_len=10000,
            expiry_ms=30 * 60 * 1000,
            iterable=True,
        )

    @defer.inlineCallbacks
    def incoming_device_list_update(self, origin, edu_content):
        """Called on incoming device list update from federation. Responsible
        for parsing the EDU and adding to pending updates list.
        """

        user_id = edu_content.pop("user_id")
        device_id = edu_content.pop("device_id")
        stream_id = str(edu_content.pop("stream_id"))  # They may come as ints
        prev_ids = edu_content.pop("prev_id", [])
        prev_ids = [str(p) for p in prev_ids]   # They may come as ints

        if get_domain_from_id(user_id) != origin:
            # TODO: Raise?
            logger.warning("Got device list update edu for %r from %r", user_id, origin)
            return

        room_ids = yield self.store.get_rooms_for_user(user_id)
        if not room_ids:
            # We don't share any rooms with this user. Ignore update, as we
            # probably won't get any further updates.
            return

        self._pending_updates.setdefault(user_id, []).append(
            (device_id, stream_id, prev_ids, edu_content)
        )

        yield self._handle_device_updates(user_id)

    @measure_func("_incoming_device_list_update")
    @defer.inlineCallbacks
    def _handle_device_updates(self, user_id):
        "Actually handle pending updates."

        with (yield self._remote_edu_linearizer.queue(user_id)):
            pending_updates = self._pending_updates.pop(user_id, [])
            if not pending_updates:
                # This can happen since we batch updates
                return

            # Given a list of updates we check if we need to resync. This
            # happens if we've missed updates.
            resync = yield self._need_to_do_resync(user_id, pending_updates)

            if resync:
                # Fetch all devices for the user.
                origin = get_domain_from_id(user_id)
                try:
                    result = yield self.federation.query_user_devices(origin, user_id)
                except NotRetryingDestination:
                    # TODO: Remember that we are now out of sync and try again
                    # later
                    logger.warn(
                        "Failed to handle device list update for %s,"
                        " we're not retrying the remote",
                        user_id,
                    )
                    # We abort on exceptions rather than accepting the update
                    # as otherwise synapse will 'forget' that its device list
                    # is out of date. If we bail then we will retry the resync
                    # next time we get a device list update for this user_id.
                    # This makes it more likely that the device lists will
                    # eventually become consistent.
                    return
                except FederationDeniedError as e:
                    logger.info(e)
                    return
                except Exception:
                    # TODO: Remember that we are now out of sync and try again
                    # later
                    logger.exception(
                        "Failed to handle device list update for %s", user_id
                    )
                    return

#.........some code omitted here.........
Developer: DoubleMalt, Project: synapse, Lines: 103, Source: device.py

Example 13: PresenceHandler

# Required import: from synapse.util.async_helpers import Linearizer [as alias]
# Or: from synapse.util.async_helpers.Linearizer import queue [as alias]

#.........some code omitted here.........
                if affect_presence:
                    run_in_background(_end)

        defer.returnValue(_user_syncing())

    def get_currently_syncing_users(self):
        """Get the set of user ids that are currently syncing on this HS.
        Returns:
            set(str): A set of user_id strings.
        """
        if self.hs.config.use_presence:
            syncing_user_ids = {
                user_id for user_id, count in self.user_to_num_current_syncs.items()
                if count
            }
            for user_ids in self.external_process_to_current_syncs.values():
                syncing_user_ids.update(user_ids)
            return syncing_user_ids
        else:
            return set()

    @defer.inlineCallbacks
    def update_external_syncs_row(self, process_id, user_id, is_syncing, sync_time_msec):
        """Update the syncing users for an external process as a delta.

        Args:
            process_id (str): An identifier for the process the users are
                syncing against. This allows synapse to process updates
                as user start and stop syncing against a given process.
            user_id (str): The user who has started or stopped syncing
            is_syncing (bool): Whether or not the user is now syncing
            sync_time_msec(int): Time in ms when the user was last syncing
        """
        with (yield self.external_sync_linearizer.queue(process_id)):
            prev_state = yield self.current_state_for_user(user_id)

            process_presence = self.external_process_to_current_syncs.setdefault(
                process_id, set()
            )

            updates = []
            if is_syncing and user_id not in process_presence:
                if prev_state.state == PresenceState.OFFLINE:
                    updates.append(prev_state.copy_and_replace(
                        state=PresenceState.ONLINE,
                        last_active_ts=sync_time_msec,
                        last_user_sync_ts=sync_time_msec,
                    ))
                else:
                    updates.append(prev_state.copy_and_replace(
                        last_user_sync_ts=sync_time_msec,
                    ))
                process_presence.add(user_id)
            elif user_id in process_presence:
                updates.append(prev_state.copy_and_replace(
                    last_user_sync_ts=sync_time_msec,
                ))

            if not is_syncing:
                process_presence.discard(user_id)

            if updates:
                yield self._update_states(updates)

            self.external_process_last_updated_ms[process_id] = self.clock.time_msec()
Developer: DoubleMalt, Project: synapse, Lines: 69, Source: presence.py

Example 14: E2eRoomKeysHandler

# Required import: from synapse.util.async_helpers import Linearizer [as alias]
# Or: from synapse.util.async_helpers.Linearizer import queue [as alias]
class E2eRoomKeysHandler(object):
    """
    Implements an optional realtime backup mechanism for encrypted E2E megolm room keys.
    This gives a way for users to store and recover their megolm keys if they lose all
    their clients. It should also extend easily to future room key mechanisms.
    The actual payload of the encrypted keys is completely opaque to the handler.
    """

    def __init__(self, hs):
        self.store = hs.get_datastore()

        # Used to lock whenever a client is uploading key data.  This prevents collisions
        # between clients trying to upload the details of a new session, given all
        # clients belonging to a user will receive and try to upload a new session at
        # roughly the same time.  Also used to lock out uploads when the key is being
        # changed.
        self._upload_linearizer = Linearizer("upload_room_keys_lock")

    @defer.inlineCallbacks
    def get_room_keys(self, user_id, version, room_id=None, session_id=None):
        """Bulk get the E2E room keys for a given backup, optionally filtered to a given
        room, or a given session.
        See EndToEndRoomKeyStore.get_e2e_room_keys for full details.

        Args:
            user_id(str): the user whose keys we're getting
            version(str): the version ID of the backup we're getting keys from
            room_id(string): room ID to get keys for, or None to get keys for all rooms
            session_id(string): session ID to get keys for, or None to get keys for all
                sessions
        Raises:
            NotFoundError: if the backup version does not exist
        Returns:
            A deferred list of dicts giving the session_data and message metadata for
            these room keys.
        """

        # we deliberately take the lock to get keys so that changing the version
        # works atomically
        with (yield self._upload_linearizer.queue(user_id)):
            # make sure the backup version exists
            try:
                yield self.store.get_e2e_room_keys_version_info(user_id, version)
            except StoreError as e:
                if e.code == 404:
                    raise NotFoundError("Unknown backup version")
                else:
                    raise

            results = yield self.store.get_e2e_room_keys(
                user_id, version, room_id, session_id
            )

            defer.returnValue(results)

    @defer.inlineCallbacks
    def delete_room_keys(self, user_id, version, room_id=None, session_id=None):
        """Bulk delete the E2E room keys for a given backup, optionally filtered to a given
        room or a given session.
        See EndToEndRoomKeyStore.delete_e2e_room_keys for full details.

        Args:
            user_id(str): the user whose backup we're deleting
            version(str): the version ID of the backup we're deleting
            room_id(string): room ID to delete keys for, or None to delete keys for all
                rooms
            session_id(string): session ID to delete keys for, or None to delete keys
                for all sessions
        Returns:
            A deferred of the deletion transaction
        """

        # lock for consistency with uploading
        with (yield self._upload_linearizer.queue(user_id)):
            yield self.store.delete_e2e_room_keys(user_id, version, room_id, session_id)

    @defer.inlineCallbacks
    def upload_room_keys(self, user_id, version, room_keys):
        """Bulk upload a list of room keys into a given backup version, asserting
        that the given version is the current backup version.  room_keys are merged
        into the current backup as described in RoomKeysServlet.on_PUT().

        Args:
            user_id(str): the user whose backup we're setting
            version(str): the version ID of the backup we're updating
            room_keys(dict): a nested dict describing the room_keys we're setting:

        {
            "rooms": {
                "!abc:matrix.org": {
                    "sessions": {
                        "c0ff33": {
                            "first_message_index": 1,
                            "forwarded_count": 1,
                            "is_verified": false,
                            "session_data": "SSBBTSBBIEZJU0gK"
                        }
                    }
                }
            }
#.........some code omitted here.........
Developer: matrix-org, Project: synapse, Lines: 103, Source: e2e_room_keys.py

Example 15: RegistrationHandler

# Required import: from synapse.util.async_helpers import Linearizer [as alias]
# Or: from synapse.util.async_helpers.Linearizer import queue [as alias]

#.........some code omitted here.........
        # Now we have a matrix ID, bind it to the threepids we were given
        for c in threepidCreds:
            identity_handler = self.hs.get_handlers().identity_handler
            # XXX: This should be a deferred list, shouldn't it?
            yield identity_handler.bind_threepid(c, user_id)

    def check_user_id_not_appservice_exclusive(self, user_id, allowed_appservice=None):
        # don't allow people to register the server notices mxid
        if self._server_notices_mxid is not None:
            if user_id == self._server_notices_mxid:
                raise SynapseError(
                    400, "This user ID is reserved.",
                    errcode=Codes.EXCLUSIVE
                )

        # valid user IDs must not clash with any user ID namespaces claimed by
        # application services.
        services = self.store.get_app_services()
        interested_services = [
            s for s in services
            if s.is_interested_in_user(user_id)
            and s != allowed_appservice
        ]
        for service in interested_services:
            if service.is_exclusive_user(user_id):
                raise SynapseError(
                    400, "This user ID is reserved by an application service.",
                    errcode=Codes.EXCLUSIVE
                )

    @defer.inlineCallbacks
    def _generate_user_id(self, reseed=False):
        if reseed or self._next_generated_user_id is None:
            with (yield self._generate_user_id_linearizer.queue(())):
                if reseed or self._next_generated_user_id is None:
                    self._next_generated_user_id = (
                        yield self.store.find_next_generated_user_id_localpart()
                    )

        id = self._next_generated_user_id
        self._next_generated_user_id += 1
        defer.returnValue(str(id))

    @defer.inlineCallbacks
    def _validate_captcha(self, ip_addr, private_key, challenge, response):
        """Validates the captcha provided.

        Used only by c/s api v1

        Returns:
            dict: Containing 'valid'(bool) and 'error_url'(str) if invalid.

        """
        response = yield self._submit_captcha(ip_addr, private_key, challenge,
                                              response)
        # parse Google's response. Lovely format..
        lines = response.split('\n')
        json = {
            "valid": lines[0] == 'true',
            "error_url": "http://www.google.com/recaptcha/api/challenge?" +
                         "error=%s" % lines[1]
        }
        defer.returnValue(json)

    @defer.inlineCallbacks
    def _submit_captcha(self, ip_addr, private_key, challenge, response):
Developer: DoubleMalt, Project: synapse, Lines: 70, Source: register.py


Note: The synapse.util.async_helpers.Linearizer.queue method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.