

Python StreamChangeCache.entity_has_changed method code examples

This page collects typical usage examples of the Python method synapse.util.caches.stream_change_cache.StreamChangeCache.entity_has_changed. If you are wondering how StreamChangeCache.entity_has_changed works, how to call it, or what real-world uses look like, the selected examples below should help. You can also explore further usage examples of the containing class, synapse.util.caches.stream_change_cache.StreamChangeCache.


The sections below show 15 code examples of the StreamChangeCache.entity_has_changed method, sorted by popularity by default.
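Before turning to the examples, here is a minimal, self-contained sketch of the call pattern around entity_has_changed: a writer records "entity X changed at stream position N", and readers later ask which entities may have changed since a given position. The cache name and entity IDs below are illustrative only and are not taken from the examples that follow.

from synapse.util.caches.stream_change_cache import StreamChangeCache

# Create a cache whose earliest known stream position is 1.
cache = StreamChangeCache("ExampleChangeCache", 1)

# Write side: record changes at their stream positions (the method this page covers).
cache.entity_has_changed("@alice:example.com", 2)
cache.entity_has_changed("!room:example.com", 3)

# Read side: has this entity possibly changed since a given position?
print(cache.has_entity_changed("@alice:example.com", 1))  # True  (changed at 2, which is after 1)
print(cache.has_entity_changed("@alice:example.com", 3))  # False (no change after 3)

# Read side: narrow a candidate list down to the entities changed after a position.
print(cache.get_entities_changed(
    ["@alice:example.com", "!room:example.com"], stream_pos=2
))  # {"!room:example.com"}

This is the pattern most of the slaved stores below appear to follow: their replication handlers call entity_has_changed as rows arrive, and read paths consult the same cache to skip work for entities that have not changed.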

Example 1: SlavedGroupServerStore

# Required import: from synapse.util.caches.stream_change_cache import StreamChangeCache [as alias]
# Or: from synapse.util.caches.stream_change_cache.StreamChangeCache import entity_has_changed [as alias]
class SlavedGroupServerStore(BaseSlavedStore):
    def __init__(self, db_conn, hs):
        super(SlavedGroupServerStore, self).__init__(db_conn, hs)

        self.hs = hs

        self._group_updates_id_gen = SlavedIdTracker(
            db_conn, "local_group_updates", "stream_id",
        )
        self._group_updates_stream_cache = StreamChangeCache(
            "_group_updates_stream_cache", self._group_updates_id_gen.get_current_token(),
        )

    get_groups_changes_for_user = DataStore.get_groups_changes_for_user.__func__
    get_group_stream_token = DataStore.get_group_stream_token.__func__
    get_all_groups_for_user = DataStore.get_all_groups_for_user.__func__

    def stream_positions(self):
        result = super(SlavedGroupServerStore, self).stream_positions()
        result["groups"] = self._group_updates_id_gen.get_current_token()
        return result

    def process_replication_rows(self, stream_name, token, rows):
        if stream_name == "groups":
            self._group_updates_id_gen.advance(token)
            for row in rows:
                self._group_updates_stream_cache.entity_has_changed(
                    row.user_id, token
                )

        return super(SlavedGroupServerStore, self).process_replication_rows(
            stream_name, token, rows
        )
Author: rubo77, Project: synapse, Lines: 35, Source: groups.py

Example 2: test_has_any_entity_changed

# Required import: from synapse.util.caches.stream_change_cache import StreamChangeCache [as alias]
# Or: from synapse.util.caches.stream_change_cache.StreamChangeCache import entity_has_changed [as alias]
    def test_has_any_entity_changed(self):
        """
        StreamChangeCache.has_any_entity_changed will return True if any
        entities have been changed since the provided stream position, and
        False if they have not.  If the cache has entries and the provided
        stream position is before it, it will return True, otherwise False if
        the cache has no entries.
        """
        cache = StreamChangeCache("#test", 1)

        # With no entities, it returns False for the past, present, and future.
        self.assertFalse(cache.has_any_entity_changed(0))
        self.assertFalse(cache.has_any_entity_changed(1))
        self.assertFalse(cache.has_any_entity_changed(2))

        # We add an entity
        cache.entity_has_changed("user@foo.com", 2)

        # With an entity, it returns True for the past, the stream start
        # position, and False for the stream position the entity was changed
        # on and ones after it.
        self.assertTrue(cache.has_any_entity_changed(0))
        self.assertTrue(cache.has_any_entity_changed(1))
        self.assertFalse(cache.has_any_entity_changed(2))
        self.assertFalse(cache.has_any_entity_changed(3))
Author: DoubleMalt, Project: synapse, Lines: 27, Source: test_stream_change_cache.py

Example 3: test_has_entity_changed

# Required import: from synapse.util.caches.stream_change_cache import StreamChangeCache [as alias]
# Or: from synapse.util.caches.stream_change_cache.StreamChangeCache import entity_has_changed [as alias]
    def test_has_entity_changed(self):
        """
        StreamChangeCache.entity_has_changed will mark entities as changed, and
        has_entity_changed will observe the changed entities.
        """
        cache = StreamChangeCache("#test", 3)

        cache.entity_has_changed("user@foo.com", 6)
        cache.entity_has_changed("bar@baz.net", 7)

        # If it's been changed after that stream position, return True
        self.assertTrue(cache.has_entity_changed("user@foo.com", 4))
        self.assertTrue(cache.has_entity_changed("bar@baz.net", 4))

        # If it's been changed at that stream position, return False
        self.assertFalse(cache.has_entity_changed("user@foo.com", 6))

        # If there's no changes after that stream position, return False
        self.assertFalse(cache.has_entity_changed("user@foo.com", 7))

        # If the entity does not exist, return False.
        self.assertFalse(cache.has_entity_changed("not@here.website", 7))

        # If we request before the stream cache's earliest known position,
        # return True, whether it's a known entity or not.
        self.assertTrue(cache.has_entity_changed("user@foo.com", 0))
        self.assertTrue(cache.has_entity_changed("not@here.website", 0))
Author: DoubleMalt, Project: synapse, Lines: 29, Source: test_stream_change_cache.py

Example 4: SlavedReceiptsStore

# Required import: from synapse.util.caches.stream_change_cache import StreamChangeCache [as alias]
# Or: from synapse.util.caches.stream_change_cache.StreamChangeCache import entity_has_changed [as alias]
class SlavedReceiptsStore(BaseSlavedStore):

    def __init__(self, db_conn, hs):
        super(SlavedReceiptsStore, self).__init__(db_conn, hs)

        self._receipts_id_gen = SlavedIdTracker(
            db_conn, "receipts_linearized", "stream_id"
        )

        self._receipts_stream_cache = StreamChangeCache(
            "ReceiptsRoomChangeCache", self._receipts_id_gen.get_current_token()
        )

    get_receipts_for_user = ReceiptsStore.__dict__["get_receipts_for_user"]
    get_linearized_receipts_for_room = (
        ReceiptsStore.__dict__["get_linearized_receipts_for_room"]
    )
    _get_linearized_receipts_for_rooms = (
        ReceiptsStore.__dict__["_get_linearized_receipts_for_rooms"]
    )
    get_last_receipt_event_id_for_user = (
        ReceiptsStore.__dict__["get_last_receipt_event_id_for_user"]
    )

    get_max_receipt_stream_id = DataStore.get_max_receipt_stream_id.__func__
    get_all_updated_receipts = DataStore.get_all_updated_receipts.__func__

    get_linearized_receipts_for_rooms = (
        DataStore.get_linearized_receipts_for_rooms.__func__
    )

    def stream_positions(self):
        result = super(SlavedReceiptsStore, self).stream_positions()
        result["receipts"] = self._receipts_id_gen.get_current_token()
        return result

    def process_replication(self, result):
        stream = result.get("receipts")
        if stream:
            self._receipts_id_gen.advance(int(stream["position"]))
            for row in stream["rows"]:
                position, room_id, receipt_type, user_id = row[:4]
                self.invalidate_caches_for_receipt(room_id, receipt_type, user_id)
                self._receipts_stream_cache.entity_has_changed(room_id, position)

        return super(SlavedReceiptsStore, self).process_replication(result)

    def invalidate_caches_for_receipt(self, room_id, receipt_type, user_id):
        self.get_receipts_for_user.invalidate((user_id, receipt_type))
        self.get_linearized_receipts_for_room.invalidate_many((room_id,))
        self.get_last_receipt_event_id_for_user.invalidate(
            (user_id, room_id, receipt_type)
        )
Author: 0-T-0, Project: synapse, Lines: 55, Source: receipts.py

Example 5: SlavedDeviceStore

# Required import: from synapse.util.caches.stream_change_cache import StreamChangeCache [as alias]
# Or: from synapse.util.caches.stream_change_cache.StreamChangeCache import entity_has_changed [as alias]
class SlavedDeviceStore(BaseSlavedStore):
    def __init__(self, db_conn, hs):
        super(SlavedDeviceStore, self).__init__(db_conn, hs)

        self.hs = hs

        self._device_list_id_gen = SlavedIdTracker(
            db_conn, "device_lists_stream", "stream_id",
        )
        device_list_max = self._device_list_id_gen.get_current_token()
        self._device_list_stream_cache = StreamChangeCache(
            "DeviceListStreamChangeCache", device_list_max,
        )
        self._device_list_federation_stream_cache = StreamChangeCache(
            "DeviceListFederationStreamChangeCache", device_list_max,
        )

    get_device_stream_token = __func__(DataStore.get_device_stream_token)
    get_user_whose_devices_changed = __func__(DataStore.get_user_whose_devices_changed)
    get_devices_by_remote = __func__(DataStore.get_devices_by_remote)
    _get_devices_by_remote_txn = __func__(DataStore._get_devices_by_remote_txn)
    _get_e2e_device_keys_txn = __func__(DataStore._get_e2e_device_keys_txn)
    mark_as_sent_devices_by_remote = __func__(DataStore.mark_as_sent_devices_by_remote)
    _mark_as_sent_devices_by_remote_txn = (
        __func__(DataStore._mark_as_sent_devices_by_remote_txn)
    )
    count_e2e_one_time_keys = EndToEndKeyStore.__dict__["count_e2e_one_time_keys"]

    def stream_positions(self):
        result = super(SlavedDeviceStore, self).stream_positions()
        result["device_lists"] = self._device_list_id_gen.get_current_token()
        return result

    def process_replication_rows(self, stream_name, token, rows):
        if stream_name == "device_lists":
            self._device_list_id_gen.advance(token)
            for row in rows:
                self._device_list_stream_cache.entity_has_changed(
                    row.user_id, token
                )

                if row.destination:
                    self._device_list_federation_stream_cache.entity_has_changed(
                        row.destination, token
                    )
        return super(SlavedDeviceStore, self).process_replication_rows(
            stream_name, token, rows
        )
Author: DoubleMalt, Project: synapse, Lines: 50, Source: devices.py

Example 6: SlavedDeviceInboxStore

# Required import: from synapse.util.caches.stream_change_cache import StreamChangeCache [as alias]
# Or: from synapse.util.caches.stream_change_cache.StreamChangeCache import entity_has_changed [as alias]
class SlavedDeviceInboxStore(BaseSlavedStore):
    def __init__(self, db_conn, hs):
        super(SlavedDeviceInboxStore, self).__init__(db_conn, hs)
        self._device_inbox_id_gen = SlavedIdTracker(
            db_conn, "device_max_stream_id", "stream_id",
        )
        self._device_inbox_stream_cache = StreamChangeCache(
            "DeviceInboxStreamChangeCache",
            self._device_inbox_id_gen.get_current_token()
        )
        self._device_federation_outbox_stream_cache = StreamChangeCache(
            "DeviceFederationOutboxStreamChangeCache",
            self._device_inbox_id_gen.get_current_token()
        )

        self._last_device_delete_cache = ExpiringCache(
            cache_name="last_device_delete_cache",
            clock=self._clock,
            max_len=10000,
            expiry_ms=30 * 60 * 1000,
        )

    get_to_device_stream_token = DataStore.get_to_device_stream_token.__func__
    get_new_messages_for_device = DataStore.get_new_messages_for_device.__func__
    get_new_device_msgs_for_remote = DataStore.get_new_device_msgs_for_remote.__func__
    delete_messages_for_device = DataStore.delete_messages_for_device.__func__
    delete_device_msgs_for_remote = DataStore.delete_device_msgs_for_remote.__func__

    def stream_positions(self):
        result = super(SlavedDeviceInboxStore, self).stream_positions()
        result["to_device"] = self._device_inbox_id_gen.get_current_token()
        return result

    def process_replication_rows(self, stream_name, token, rows):
        if stream_name == "to_device":
            self._device_inbox_id_gen.advance(token)
            for row in rows:
                if row.entity.startswith("@"):
                    self._device_inbox_stream_cache.entity_has_changed(
                        row.entity, token
                    )
                else:
                    self._device_federation_outbox_stream_cache.entity_has_changed(
                        row.entity, token
                    )
        return super(SlavedDeviceInboxStore, self).process_replication_rows(
            stream_name, token, rows
        )
Author: rubo77, Project: synapse, Lines: 50, Source: deviceinbox.py

Example 7: SlavedDeviceStore

# Required import: from synapse.util.caches.stream_change_cache import StreamChangeCache [as alias]
# Or: from synapse.util.caches.stream_change_cache.StreamChangeCache import entity_has_changed [as alias]
class SlavedDeviceStore(EndToEndKeyWorkerStore, DeviceWorkerStore, BaseSlavedStore):
    def __init__(self, db_conn, hs):
        super(SlavedDeviceStore, self).__init__(db_conn, hs)

        self.hs = hs

        self._device_list_id_gen = SlavedIdTracker(
            db_conn, "device_lists_stream", "stream_id",
        )
        device_list_max = self._device_list_id_gen.get_current_token()
        self._device_list_stream_cache = StreamChangeCache(
            "DeviceListStreamChangeCache", device_list_max,
        )
        self._device_list_federation_stream_cache = StreamChangeCache(
            "DeviceListFederationStreamChangeCache", device_list_max,
        )

    def stream_positions(self):
        result = super(SlavedDeviceStore, self).stream_positions()
        result["device_lists"] = self._device_list_id_gen.get_current_token()
        return result

    def process_replication_rows(self, stream_name, token, rows):
        if stream_name == "device_lists":
            self._device_list_id_gen.advance(token)
            for row in rows:
                self._invalidate_caches_for_devices(
                    token, row.user_id, row.destination,
                )
        return super(SlavedDeviceStore, self).process_replication_rows(
            stream_name, token, rows
        )

    def _invalidate_caches_for_devices(self, token, user_id, destination):
        self._device_list_stream_cache.entity_has_changed(
            user_id, token
        )

        if destination:
            self._device_list_federation_stream_cache.entity_has_changed(
                destination, token
            )

        self._get_cached_devices_for_user.invalidate((user_id,))
        self._get_cached_user_device.invalidate_many((user_id,))
        self.get_device_list_last_stream_id_for_remote.invalidate((user_id,))
Author: matrix-org, Project: synapse, Lines: 48, Source: devices.py

Example 8: SlavedPushRuleStore

# Required import: from synapse.util.caches.stream_change_cache import StreamChangeCache [as alias]
# Or: from synapse.util.caches.stream_change_cache.StreamChangeCache import entity_has_changed [as alias]
class SlavedPushRuleStore(SlavedEventStore):
    def __init__(self, db_conn, hs):
        super(SlavedPushRuleStore, self).__init__(db_conn, hs)
        self._push_rules_stream_id_gen = SlavedIdTracker(
            db_conn, "push_rules_stream", "stream_id",
        )
        self.push_rules_stream_cache = StreamChangeCache(
            "PushRulesStreamChangeCache",
            self._push_rules_stream_id_gen.get_current_token(),
        )

    get_push_rules_for_user = PushRuleStore.__dict__["get_push_rules_for_user"]
    get_push_rules_enabled_for_user = (
        PushRuleStore.__dict__["get_push_rules_enabled_for_user"]
    )
    have_push_rules_changed_for_user = (
        DataStore.have_push_rules_changed_for_user.__func__
    )

    def get_push_rules_stream_token(self):
        return (
            self._push_rules_stream_id_gen.get_current_token(),
            self._stream_id_gen.get_current_token(),
        )

    def stream_positions(self):
        result = super(SlavedPushRuleStore, self).stream_positions()
        result["push_rules"] = self._push_rules_stream_id_gen.get_current_token()
        return result

    def process_replication(self, result):
        stream = result.get("push_rules")
        if stream:
            for row in stream["rows"]:
                position = row[0]
                user_id = row[2]
                self.get_push_rules_for_user.invalidate((user_id,))
                self.get_push_rules_enabled_for_user.invalidate((user_id,))
                self.push_rules_stream_cache.entity_has_changed(
                    user_id, position
                )

            self._push_rules_stream_id_gen.advance(int(stream["position"]))

        return super(SlavedPushRuleStore, self).process_replication(result)
Author: 0-T-0, Project: synapse, Lines: 47, Source: push_rule.py

Example 9: UserDirectorySlaveStore

# Required import: from synapse.util.caches.stream_change_cache import StreamChangeCache [as alias]
# Or: from synapse.util.caches.stream_change_cache.StreamChangeCache import entity_has_changed [as alias]
class UserDirectorySlaveStore(
    SlavedEventStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    SlavedClientIpStore,
    UserDirectoryStore,
    BaseSlavedStore,
):
    def __init__(self, db_conn, hs):
        super(UserDirectorySlaveStore, self).__init__(db_conn, hs)

        events_max = self._stream_id_gen.get_current_token()
        curr_state_delta_prefill, min_curr_state_delta_id = self._get_cache_dict(
            db_conn, "current_state_delta_stream",
            entity_column="room_id",
            stream_column="stream_id",
            max_value=events_max,  # As we share the stream id with events token
            limit=1000,
        )
        self._curr_state_delta_stream_cache = StreamChangeCache(
            "_curr_state_delta_stream_cache", min_curr_state_delta_id,
            prefilled_cache=curr_state_delta_prefill,
        )

        self._current_state_delta_pos = events_max

    def stream_positions(self):
        result = super(UserDirectorySlaveStore, self).stream_positions()
        result["current_state_deltas"] = self._current_state_delta_pos
        return result

    def process_replication_rows(self, stream_name, token, rows):
        if stream_name == "current_state_deltas":
            self._current_state_delta_pos = token
            for row in rows:
                self._curr_state_delta_stream_cache.entity_has_changed(
                    row.room_id, token
                )
        return super(UserDirectorySlaveStore, self).process_replication_rows(
            stream_name, token, rows
        )
Author: DoubleMalt, Project: synapse, Lines: 43, Source: user_dir.py

Example 10: test_has_entity_changed_pops_off_start

# Required import: from synapse.util.caches.stream_change_cache import StreamChangeCache [as alias]
# Or: from synapse.util.caches.stream_change_cache.StreamChangeCache import entity_has_changed [as alias]
    def test_has_entity_changed_pops_off_start(self):
        """
        StreamChangeCache.entity_has_changed will respect the max size and
        purge the oldest items upon reaching that max size.
        """
        cache = StreamChangeCache("#test", 1, max_size=2)

        cache.entity_has_changed("user@foo.com", 2)
        cache.entity_has_changed("bar@baz.net", 3)
        cache.entity_has_changed("user@elsewhere.org", 4)

        # The cache is at the max size, 2
        self.assertEqual(len(cache._cache), 2)

        # The oldest item has been popped off
        self.assertTrue("user@foo.com" not in cache._entity_to_key)

        # If we update an existing entity, it keeps the two existing entities
        cache.entity_has_changed("bar@baz.net", 5)
        self.assertEqual(
            set(["bar@baz.net", "user@elsewhere.org"]), set(cache._entity_to_key)
        )
Author: DoubleMalt, Project: synapse, Lines: 24, Source: test_stream_change_cache.py

Example 11: test_max_pos

# Required import: from synapse.util.caches.stream_change_cache import StreamChangeCache [as alias]
# Or: from synapse.util.caches.stream_change_cache.StreamChangeCache import entity_has_changed [as alias]
    def test_max_pos(self):
        """
        StreamChangeCache.get_max_pos_of_last_change will return the most
        recent point where the entity could have changed.  If the entity is not
        known, the stream start is provided instead.
        """
        cache = StreamChangeCache("#test", 1)

        cache.entity_has_changed("user@foo.com", 2)
        cache.entity_has_changed("bar@baz.net", 3)
        cache.entity_has_changed("user@elsewhere.org", 4)

        # Known entities will return the point where they were changed.
        self.assertEqual(cache.get_max_pos_of_last_change("user@foo.com"), 2)
        self.assertEqual(cache.get_max_pos_of_last_change("bar@baz.net"), 3)
        self.assertEqual(cache.get_max_pos_of_last_change("user@elsewhere.org"), 4)

        # Unknown entities will return the stream start position.
        self.assertEqual(cache.get_max_pos_of_last_change("not@here.website"), 1)
Author: DoubleMalt, Project: synapse, Lines: 21, Source: test_stream_change_cache.py

Example 12: test_get_all_entities_changed

# Required import: from synapse.util.caches.stream_change_cache import StreamChangeCache [as alias]
# Or: from synapse.util.caches.stream_change_cache.StreamChangeCache import entity_has_changed [as alias]
    def test_get_all_entities_changed(self):
        """
        StreamChangeCache.get_all_entities_changed will return all changed
        entities since the given position.  If the position is before the start
        of the known stream, it returns None instead.
        """
        cache = StreamChangeCache("#test", 1)

        cache.entity_has_changed("user@foo.com", 2)
        cache.entity_has_changed("bar@baz.net", 3)
        cache.entity_has_changed("user@elsewhere.org", 4)

        self.assertEqual(
            cache.get_all_entities_changed(1),
            ["user@foo.com", "bar@baz.net", "user@elsewhere.org"],
        )
        self.assertEqual(
            cache.get_all_entities_changed(2), ["bar@baz.net", "user@elsewhere.org"]
        )
        self.assertEqual(cache.get_all_entities_changed(3), ["user@elsewhere.org"])
        self.assertEqual(cache.get_all_entities_changed(0), None)
Author: DoubleMalt, Project: synapse, Lines: 23, Source: test_stream_change_cache.py

Example 13: SlavedEventStore

# Required import: from synapse.util.caches.stream_change_cache import StreamChangeCache [as alias]
# Or: from synapse.util.caches.stream_change_cache.StreamChangeCache import entity_has_changed [as alias]

#......... part of the code is omitted here .........

    def stream_positions(self):
        result = super(SlavedEventStore, self).stream_positions()
        result["events"] = self._stream_id_gen.get_current_token()
        result["backfill"] = -self._backfill_id_gen.get_current_token()
        return result

    def process_replication(self, result):
        state_resets = set(
            r[0] for r in result.get("state_resets", {"rows": []})["rows"]
        )

        stream = result.get("events")
        if stream:
            self._stream_id_gen.advance(int(stream["position"]))
            for row in stream["rows"]:
                self._process_replication_row(
                    row, backfilled=False, state_resets=state_resets
                )

        stream = result.get("backfill")
        if stream:
            self._backfill_id_gen.advance(-int(stream["position"]))
            for row in stream["rows"]:
                self._process_replication_row(
                    row, backfilled=True, state_resets=state_resets
                )

        stream = result.get("forward_ex_outliers")
        if stream:
            self._stream_id_gen.advance(int(stream["position"]))
            for row in stream["rows"]:
                event_id = row[1]
                self._invalidate_get_event_cache(event_id)

        stream = result.get("backward_ex_outliers")
        if stream:
            self._backfill_id_gen.advance(-int(stream["position"]))
            for row in stream["rows"]:
                event_id = row[1]
                self._invalidate_get_event_cache(event_id)

        return super(SlavedEventStore, self).process_replication(result)

    def _process_replication_row(self, row, backfilled, state_resets):
        position = row[0]
        internal = json.loads(row[1])
        event_json = json.loads(row[2])
        event = FrozenEvent(event_json, internal_metadata_dict=internal)
        self.invalidate_caches_for_event(
            event, backfilled, reset_state=position in state_resets
        )

    def invalidate_caches_for_event(self, event, backfilled, reset_state):
        if reset_state:
            self._get_current_state_for_key.invalidate_all()
            self.get_rooms_for_user.invalidate_all()
            self.get_users_in_room.invalidate((event.room_id,))

        self._invalidate_get_event_cache(event.event_id)

        self.get_latest_event_ids_in_room.invalidate((event.room_id,))

        self.get_unread_event_push_actions_by_room_for_user.invalidate_many(
            (event.room_id,)
        )

        if not backfilled:
            self._events_stream_cache.entity_has_changed(
                event.room_id, event.internal_metadata.stream_ordering
            )

        # self.get_unread_event_push_actions_by_room_for_user.invalidate_many(
        #     (event.room_id,)
        # )

        if event.type == EventTypes.Redaction:
            self._invalidate_get_event_cache(event.redacts)

        if event.type == EventTypes.Member:
            self.get_rooms_for_user.invalidate((event.state_key,))
            self.get_users_in_room.invalidate((event.room_id,))
            self._membership_stream_cache.entity_has_changed(
                event.state_key, event.internal_metadata.stream_ordering
            )
            self.get_invited_rooms_for_user.invalidate((event.state_key,))

        if not event.is_state():
            return

        if backfilled:
            return

        if (not event.internal_metadata.is_invite_from_remote()
                and event.internal_metadata.is_outlier()):
            return

        self._get_current_state_for_key.invalidate((
            event.room_id, event.type, event.state_key
        ))
Author: mebjas, Project: synapse, Lines: 104, Source: events.py

Example 14: SlavedAccountDataStore

# Required import: from synapse.util.caches.stream_change_cache import StreamChangeCache [as alias]
# Or: from synapse.util.caches.stream_change_cache.StreamChangeCache import entity_has_changed [as alias]
class SlavedAccountDataStore(BaseSlavedStore):

    def __init__(self, db_conn, hs):
        super(SlavedAccountDataStore, self).__init__(db_conn, hs)
        self._account_data_id_gen = SlavedIdTracker(
            db_conn, "account_data_max_stream_id", "stream_id",
        )
        self._account_data_stream_cache = StreamChangeCache(
            "AccountDataAndTagsChangeCache",
            self._account_data_id_gen.get_current_token(),
        )

    get_account_data_for_user = (
        AccountDataStore.__dict__["get_account_data_for_user"]
    )

    get_global_account_data_by_type_for_users = (
        AccountDataStore.__dict__["get_global_account_data_by_type_for_users"]
    )

    get_global_account_data_by_type_for_user = (
        AccountDataStore.__dict__["get_global_account_data_by_type_for_user"]
    )

    get_tags_for_user = TagsStore.__dict__["get_tags_for_user"]

    get_updated_tags = DataStore.get_updated_tags.__func__
    get_updated_account_data_for_user = (
        DataStore.get_updated_account_data_for_user.__func__
    )

    def get_max_account_data_stream_id(self):
        return self._account_data_id_gen.get_current_token()

    def stream_positions(self):
        result = super(SlavedAccountDataStore, self).stream_positions()
        position = self._account_data_id_gen.get_current_token()
        result["user_account_data"] = position
        result["room_account_data"] = position
        result["tag_account_data"] = position
        return result

    def process_replication(self, result):
        stream = result.get("user_account_data")
        if stream:
            self._account_data_id_gen.advance(int(stream["position"]))
            for row in stream["rows"]:
                position, user_id, data_type = row[:3]
                self.get_global_account_data_by_type_for_user.invalidate(
                    (data_type, user_id,)
                )
                self.get_account_data_for_user.invalidate((user_id,))
                self._account_data_stream_cache.entity_has_changed(
                    user_id, position
                )

        stream = result.get("room_account_data")
        if stream:
            self._account_data_id_gen.advance(int(stream["position"]))
            for row in stream["rows"]:
                position, user_id = row[:2]
                self.get_account_data_for_user.invalidate((user_id,))
                self._account_data_stream_cache.entity_has_changed(
                    user_id, position
                )

        stream = result.get("tag_account_data")
        if stream:
            self._account_data_id_gen.advance(int(stream["position"]))
            for row in stream["rows"]:
                position, user_id = row[:2]
                self.get_tags_for_user.invalidate((user_id,))
                self._account_data_stream_cache.entity_has_changed(
                    user_id, position
                )

        return super(SlavedAccountDataStore, self).process_replication(result)
Author: 0-T-0, Project: synapse, Lines: 79, Source: account_data.py

Example 15: test_get_entities_changed

# Required import: from synapse.util.caches.stream_change_cache import StreamChangeCache [as alias]
# Or: from synapse.util.caches.stream_change_cache.StreamChangeCache import entity_has_changed [as alias]
    def test_get_entities_changed(self):
        """
        StreamChangeCache.get_entities_changed will return the entities in the
        given list that have changed since the provided stream ID.  If the
        stream position is earlier than the earliest known position, it will
        return all of the entities queried for.
        """
        cache = StreamChangeCache("#test", 1)

        cache.entity_has_changed("user@foo.com", 2)
        cache.entity_has_changed("bar@baz.net", 3)
        cache.entity_has_changed("user@elsewhere.org", 4)

        # Query all the entries, but mid-way through the stream. We should only
        # get the ones after that point.
        self.assertEqual(
            cache.get_entities_changed(
                ["user@foo.com", "bar@baz.net", "user@elsewhere.org"], stream_pos=2
            ),
            set(["bar@baz.net", "user@elsewhere.org"]),
        )

        # Query all the entries mid-way through the stream, but include one
        # that doesn't exist in it. We shouldn't get back the one that doesn't
        # exist.
        self.assertEqual(
            cache.get_entities_changed(
                [
                    "user@foo.com",
                    "bar@baz.net",
                    "user@elsewhere.org",
                    "not@here.website",
                ],
                stream_pos=2,
            ),
            set(["bar@baz.net", "user@elsewhere.org"]),
        )

        # Query all the entries, but before the first known point. We will get
        # all the entries we queried for, including ones that don't exist.
        self.assertEqual(
            cache.get_entities_changed(
                [
                    "user@foo.com",
                    "bar@baz.net",
                    "user@elsewhere.org",
                    "not@here.website",
                ],
                stream_pos=0,
            ),
            set(
                [
                    "user@foo.com",
                    "bar@baz.net",
                    "user@elsewhere.org",
                    "not@here.website",
                ]
            ),
        )

        # Query a subset of the entries mid-way through the stream. We should
        # only get back the subset.
        self.assertEqual(
            cache.get_entities_changed(["bar@baz.net"], stream_pos=2),
            set(["bar@baz.net"]),
        )
Author: DoubleMalt, Project: synapse, Lines: 68, Source: test_stream_change_cache.py


Note: the synapse.util.caches.stream_change_cache.StreamChangeCache.entity_has_changed examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors, and copyright remains with those authors. Please follow each project's license when distributing or using the code; do not republish without permission.