This article collects typical usage examples of the Python function synapse.util.logcontext.run_in_background. If you are unsure what run_in_background does, or how to call it, the curated examples below may help.
15 code examples of run_in_background are shown below, ordered by popularity by default.
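Before the individual examples, here is a minimal sketch of the pattern they all share: run_in_background starts a Deferred-returning callable without waiting for it, while make_deferred_yieldable is used when the caller does want to wait on the result from its own logcontext. The schedule_work / schedule_and_wait functions and the handler.do_work callable below are hypothetical names used purely for illustration; they are not taken from the examples that follow.

from synapse.util.logcontext import make_deferred_yieldable, run_in_background


def schedule_work(handler, arg):
    # Fire-and-forget: start handler.do_work(arg) in its own logcontext and
    # return immediately; the Deferred returned by run_in_background is ignored.
    run_in_background(handler.do_work, arg)


def schedule_and_wait(handler, arg):
    # Start the work in the background, keep the Deferred, and hand it back
    # wrapped so the caller can yield/wait on it without leaking its current
    # logcontext into the reactor.
    d = run_in_background(handler.do_work, arg)
    return make_deferred_yieldable(d)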
Example 1: start_purge_history
def start_purge_history(self, room_id, token,
                        delete_local_events=False):
    """Start off a history purge on a room.

    Args:
        room_id (str): The room to purge from
        token (str): topological token to delete events before
        delete_local_events (bool): True to delete local events as well as
            remote ones

    Returns:
        str: unique ID for this purge transaction.
    """
    if room_id in self._purges_in_progress_by_room:
        raise SynapseError(
            400,
            "History purge already in progress for %s" % (room_id, ),
        )

    purge_id = random_string(16)

    # we log the purge_id here so that it can be tied back to the
    # request id in the log lines.
    logger.info("[purge] starting purge_id %s", purge_id)

    self._purges_by_id[purge_id] = PurgeStatus()
    run_in_background(
        self._purge_history,
        purge_id, room_id, token, delete_local_events,
    )
    return purge_id
Example 2: verify_json_objects_for_server
def verify_json_objects_for_server(self, server_and_json):
    """Bulk verifies signatures of json objects, bulk fetching keys as
    necessary.

    Args:
        server_and_json (list): List of pairs of (server_name, json_object)

    Returns:
        List<Deferred>: for each input pair, a deferred indicating success
            or failure to verify each json object's signature for the given
            server_name. The deferreds run their callbacks in the sentinel
            logcontext.
    """
    # a list of VerifyKeyRequests
    verify_requests = []
    handle = preserve_fn(_handle_key_deferred)

    def process(server_name, json_object):
        """Process an entry in the request list

        Given a (server_name, json_object) pair from the request list,
        adds a key request to verify_requests, and returns a deferred which will
        complete or fail (in the sentinel context) when verification completes.
        """
        key_ids = signature_ids(json_object, server_name)
        if not key_ids:
            return defer.fail(
                SynapseError(
                    400,
                    "Not signed by %s" % (server_name,),
                    Codes.UNAUTHORIZED,
                )
            )

        logger.debug("Verifying for %s with key_ids %s",
                     server_name, key_ids)

        # add the key request to the queue, but don't start it off yet.
        verify_request = VerifyKeyRequest(
            server_name, key_ids, json_object, defer.Deferred(),
        )
        verify_requests.append(verify_request)

        # now run _handle_key_deferred, which will wait for the key request
        # to complete and then do the verification.
        #
        # We want _handle_key_request to log to the right context, so we
        # wrap it with preserve_fn (aka run_in_background)
        return handle(verify_request)

    results = [
        process(server_name, json_object)
        for server_name, json_object in server_and_json
    ]

    if verify_requests:
        run_in_background(self._start_key_lookups, verify_requests)

    return results
Example 3: _push_update
def _push_update(self, member, typing):
    if self.hs.is_mine_id(member.user_id):
        # Only send updates for changes to our own users.
        run_in_background(self._push_remote, member, typing)

    self._push_update_local(
        member=member,
        typing=typing
    )
Example 4: _start_user_parting
def _start_user_parting(self):
    """
    Start the process that goes through the table of users
    pending deactivation, if it isn't already running.

    Returns:
        None
    """
    if not self._user_parter_running:
        run_in_background(self._user_parter_loop)
Example 5: process_replication_rows
def process_replication_rows(self, stream_name, token, rows):
    # The federation stream contains things that we want to send out, e.g.
    # presence, typing, etc.
    if stream_name == "federation":
        send_queue.process_rows_for_federation(self.federation_sender, rows)
        run_in_background(self.update_token, token)

    # We also need to poke the federation sender when new events happen
    elif stream_name == "events":
        self.federation_sender.notify_new_events(token)
Example 6: authenticate_request
@defer.inlineCallbacks
def authenticate_request(self, request, content):
    json_request = {
        "method": request.method.decode('ascii'),
        "uri": request.uri.decode('ascii'),
        "destination": self.server_name,
        "signatures": {},
    }

    if content is not None:
        json_request["content"] = content

    origin = None

    auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")

    if not auth_headers:
        raise NoAuthenticationError(
            401, "Missing Authorization headers", Codes.UNAUTHORIZED,
        )

    for auth in auth_headers:
        if auth.startswith(b"X-Matrix"):
            (origin, key, sig) = _parse_auth_header(auth)
            json_request["origin"] = origin
            json_request["signatures"].setdefault(origin, {})[key] = sig

    if (
        self.federation_domain_whitelist is not None and
        origin not in self.federation_domain_whitelist
    ):
        raise FederationDeniedError(origin)

    if not json_request["signatures"]:
        raise NoAuthenticationError(
            401, "Missing Authorization headers", Codes.UNAUTHORIZED,
        )

    yield self.keyring.verify_json_for_server(origin, json_request)

    logger.info("Request from %s", origin)
    request.authenticated_entity = origin

    # If we get a valid signed request from the other side, it's probably
    # alive
    retry_timings = yield self.store.get_destination_retry_timings(origin)
    if retry_timings and retry_timings["retry_last_ts"]:
        run_in_background(self._reset_retry_timings, origin)

    defer.returnValue(origin)
Example 7: _renew_attestations
@defer.inlineCallbacks
def _renew_attestations(self):
    """Called periodically to check if we need to update any of our attestations
    """
    now = self.clock.time_msec()

    rows = yield self.store.get_attestations_need_renewals(
        now + UPDATE_ATTESTATION_TIME_MS
    )

    @defer.inlineCallbacks
    def _renew_attestation(group_id, user_id):
        try:
            if not self.is_mine_id(group_id):
                destination = get_domain_from_id(group_id)
            elif not self.is_mine_id(user_id):
                destination = get_domain_from_id(user_id)
            else:
                logger.warn(
                    "Incorrectly trying to do attestations for user: %r in %r",
                    user_id, group_id,
                )
                yield self.store.remove_attestation_renewal(group_id, user_id)
                return

            attestation = self.attestations.create_attestation(group_id, user_id)

            yield self.transport_client.renew_group_attestation(
                destination, group_id, user_id,
                content={"attestation": attestation},
            )

            yield self.store.update_attestation_renewal(
                group_id, user_id, attestation
            )
        except RequestSendFailed as e:
            logger.warning(
                "Failed to renew attestation of %r in %r: %s",
                user_id, group_id, e,
            )
        except Exception:
            logger.exception("Error renewing attestation of %r in %r",
                             user_id, group_id)

    for row in rows:
        group_id = row["group_id"]
        user_id = row["user_id"]

        run_in_background(_renew_attestation, group_id, user_id)
Example 8: get_keys_from_perspectives
@defer.inlineCallbacks
def get_keys_from_perspectives(self, server_name_and_key_ids):
    @defer.inlineCallbacks
    def get_key(perspective_name, perspective_keys):
        try:
            result = yield self.get_server_verify_key_v2_indirect(
                server_name_and_key_ids, perspective_name, perspective_keys
            )
            defer.returnValue(result)
        except KeyLookupError as e:
            logger.warning(
                "Key lookup failed from %r: %s", perspective_name, e,
            )
        except Exception as e:
            logger.exception(
                "Unable to get key from %r: %s %s",
                perspective_name,
                type(e).__name__, str(e),
            )

        defer.returnValue({})

    results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
        [
            run_in_background(get_key, p_name, p_keys)
            for p_name, p_keys in self.perspective_servers.items()
        ],
        consumeErrors=True,
    ).addErrback(unwrapFirstError))

    union_of_keys = {}
    for result in results:
        for server_name, keys in result.items():
            union_of_keys.setdefault(server_name, {}).update(keys)

    defer.returnValue(union_of_keys)
Example 9: fire
def fire(self, *args, **kwargs):
    """Invokes every callable in the observer list, passing in the args and
    kwargs. Exceptions thrown by observers are logged but ignored. It is
    not an error to fire a signal with no observers.

    Returns a Deferred that will complete when all the observers have
    completed."""
    def do(observer):
        def eb(failure):
            logger.warning(
                "%s signal observer %s failed: %r",
                self.name, observer, failure,
                exc_info=(
                    failure.type,
                    failure.value,
                    failure.getTracebackObject()))
        return defer.maybeDeferred(observer, *args, **kwargs).addErrback(eb)

    deferreds = [
        run_in_background(do, o)
        for o in self.observers
    ]

    return make_deferred_yieldable(defer.gatherResults(
        deferreds, consumeErrors=True,
    ))
Example 10: fetch_or_execute
def fetch_or_execute(self, txn_key, fn, *args, **kwargs):
    """Fetches the response for this transaction, or executes the given function
    to produce a response for this transaction.

    Args:
        txn_key (str): A key to ensure idempotency should fetch_or_execute be
            called again at a later point in time.
        fn (function): A function which returns a tuple of
            (response_code, response_dict).
        *args: Arguments to pass to fn.
        **kwargs: Keyword arguments to pass to fn.

    Returns:
        Deferred which resolves to a tuple of (response_code, response_dict).
    """
    if txn_key in self.transactions:
        observable = self.transactions[txn_key][0]
    else:
        # execute the function instead.
        deferred = run_in_background(fn, *args, **kwargs)

        observable = ObservableDeferred(deferred)
        self.transactions[txn_key] = (observable, self.clock.time_msec())

        # if the request fails with an exception, remove it
        # from the transaction map. This is done to ensure that we don't
        # cache transient errors like rate-limiting errors, etc.
        def remove_from_map(err):
            self.transactions.pop(txn_key, None)
            # we deliberately do not propagate the error any further, as we
            # expect the observers to have reported it.

        deferred.addErrback(remove_from_map)

    return make_deferred_yieldable(observable.observe())
Example 11: _on_new_room_event
def _on_new_room_event(self, event, room_stream_id, extra_users=[]):
    """Notify any user streams that are interested in this room event"""
    # poke any interested application service.
    run_in_background(self._notify_app_services, room_stream_id)

    if self.federation_sender:
        self.federation_sender.notify_new_events(room_stream_id)

    if event.type == EventTypes.Member and event.membership == Membership.JOIN:
        self._user_joined_room(event.state_key, event.room_id)

    self.on_new_event(
        "room_key", room_stream_id,
        users=extra_users,
        rooms=[event.room_id],
    )
Example 12: send
@defer.inlineCallbacks
def send(self, service, events):
    try:
        txn = yield self.store.create_appservice_txn(
            service=service,
            events=events
        )
        service_is_up = yield self._is_service_up(service)
        if service_is_up:
            sent = yield txn.send(self.as_api)
            if sent:
                yield txn.complete(self.store)
            else:
                run_in_background(self._start_recoverer, service)
    except Exception:
        logger.exception("Error creating appservice transaction")
        run_in_background(self._start_recoverer, service)
Example 13: on_new_receipts
@defer.inlineCallbacks
def on_new_receipts(self, min_stream_id, max_stream_id, affected_room_ids):
    yield run_on_reactor()
    try:
        # Need to subtract 1 from the minimum because the lower bound here
        # is not inclusive
        updated_receipts = yield self.store.get_all_updated_receipts(
            min_stream_id - 1, max_stream_id
        )
        # This returns a tuple, user_id is at index 3
        users_affected = set([r[3] for r in updated_receipts])

        deferreds = []

        for u in users_affected:
            if u in self.pushers:
                for p in self.pushers[u].values():
                    deferreds.append(
                        run_in_background(
                            p.on_new_receipts,
                            min_stream_id, max_stream_id,
                        )
                    )
        yield make_deferred_yieldable(
            defer.gatherResults(deferreds, consumeErrors=True),
        )
    except Exception:
        logger.exception("Exception in pusher on_new_receipts")
Example 14: _test_run_in_background
def _test_run_in_background(self, function):
    sentinel_context = LoggingContext.current_context()

    callback_completed = [False]

    def test():
        context_one.request = "one"
        d = function()

        def cb(res):
            self._check_test_key("one")
            callback_completed[0] = True
            return res
        d.addCallback(cb)

        return d

    with LoggingContext() as context_one:
        context_one.request = "one"

        # fire off function, but don't wait on it.
        logcontext.run_in_background(test)

        self._check_test_key("one")

    # now wait for the function under test to have run, and check that
    # the logcontext is left in a sane state.
    d2 = defer.Deferred()

    def check_logcontext():
        if not callback_completed[0]:
            reactor.callLater(0.01, check_logcontext)
            return

        # make sure that the context was reset before it got thrown back
        # into the reactor
        try:
            self.assertIs(LoggingContext.current_context(),
                          sentinel_context)
            d2.callback(None)
        except BaseException:
            d2.errback(twisted.python.failure.Failure())

    reactor.callLater(0.01, check_logcontext)

    # test is done once d2 finishes
    return d2
Example 15: verify_json_objects_for_server
def verify_json_objects_for_server(self, server_and_json):
    """Bulk verifies signatures of json objects, bulk fetching keys as
    necessary.

    Args:
        server_and_json (list): List of pairs of (server_name, json_object)

    Returns:
        List<Deferred>: for each input pair, a deferred indicating success
            or failure to verify each json object's signature for the given
            server_name. The deferreds run their callbacks in the sentinel
            logcontext.
    """
    verify_requests = []

    for server_name, json_object in server_and_json:
        key_ids = signature_ids(json_object, server_name)
        if not key_ids:
            logger.warn("Request from %s: no supported signature keys",
                        server_name)
            deferred = defer.fail(SynapseError(
                400,
                "Not signed with a supported algorithm",
                Codes.UNAUTHORIZED,
            ))
        else:
            deferred = defer.Deferred()

        logger.debug("Verifying for %s with key_ids %s",
                     server_name, key_ids)

        verify_request = VerifyKeyRequest(
            server_name, key_ids, json_object, deferred
        )

        verify_requests.append(verify_request)

    run_in_background(self._start_key_lookups, verify_requests)

    # Pass those keys to handle_key_deferred so that the json object
    # signatures can be verified
    handle = preserve_fn(_handle_key_deferred)
    return [
        handle(rq) for rq in verify_requests
    ]