This article collects typical usage examples of the Python method models.Response.urls_to_activity. If you are wondering how Response.urls_to_activity is used in practice, how to call it, or what working examples look like, the curated code samples here may help. You can also explore further usage examples of its containing class, models.Response.
The following shows 2 code examples of the Response.urls_to_activity method, sorted by popularity by default.
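Before diving into the examples, here is a minimal standalone sketch (not taken from the Bridgy source) of the data shape both examples build up: urls_to_activity maps each discovered webmention target URL to the index of the activity it belongs to, and is persisted on the Response entity as a JSON string. The URLs below are hypothetical placeholders.

import json

# hypothetical target URLs -> index into the response's list of activities
urls_to_activity = {
    "https://example.com/post/1": 0,
    "https://example.com/reply-target": 1,
}

# in the examples below, the dict is only stored when there is more than one
# activity, serialized as a JSON string on the Response entity
serialized = json.dumps(urls_to_activity)
assert json.loads(serialized)["https://example.com/post/1"] == 0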
Example 1: poll
# Required import: from models import Response [as alias]
# Or: from models.Response import urls_to_activity [as alias]
#......... part of the code omitted here .........
    # since it probably came from the user's activity, so prefer this one.
    # background: https://github.com/snarfed/bridgy/issues/533
    existing = responses.get(id)
    if existing:
        if source.gr_source.activity_changed(resp, existing, log=True):
            logging.warning("Got two different versions of same response!\n%s\n%s", existing, resp)
        resp["activities"].extend(existing.get("activities", []))

    responses[id] = resp

#
# Step 3: filter out responses we've already seen
#
# seen responses (JSON objects) for each source are stored in its entity.
unchanged_responses = []
if source.seen_responses_cache_json:
    for seen in json.loads(source.seen_responses_cache_json):
        id = seen["id"]
        resp = responses.get(id)
        if resp and not source.gr_source.activity_changed(seen, resp, log=True):
            unchanged_responses.append(seen)
            del responses[id]

#
# Step 4: store new responses and enqueue propagate tasks
#
pruned_responses = []
for id, resp in responses.items():
    resp_type = Response.get_type(resp)
    activities = resp.pop("activities", [])
    if not activities and resp_type == "post":
        activities = [resp]

    too_long = set()
    urls_to_activity = {}
    for i, activity in enumerate(activities):
        # we'll usually have multiple responses for the same activity, and the
        # objects in resp['activities'] are shared, so cache each activity's
        # discovered webmention targets inside its object.
        if "originals" not in activity or "mentions" not in activity:
            activity["originals"], activity["mentions"] = original_post_discovery.discover(
                source,
                activity,
                fetch_hfeed=True,
                include_redirect_sources=False,
                already_fetched_hfeeds=fetched_hfeeds,
            )

        targets = original_post_discovery.targets_for_response(
            resp, originals=activity["originals"], mentions=activity["mentions"]
        )
        if targets:
            logging.info(
                "%s has %d webmention target(s): %s", activity.get("url"), len(targets), " ".join(targets)
            )
        for t in targets:
            if len(t) <= _MAX_STRING_LENGTH:
                urls_to_activity[t] = i
            else:
                logging.warning("Giving up on target URL over %s chars! %s", _MAX_STRING_LENGTH, t)
                too_long.add(t[: _MAX_STRING_LENGTH - 4] + "...")

    # store/update response entity. the prune_*() calls are important to
    # remove circular references in link responses, which are their own
    # activities. details in the step 2 comment above.
    pruned_response = util.prune_response(resp)
    pruned_responses.append(pruned_response)
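Example 1 cuts off after building urls_to_activity; the omitted remainder, shown in full in Example 2 below, attaches the dict to the Response entity. As a side note, here is a small self-contained sketch of the length guard applied to each target URL above; the value 500 for _MAX_STRING_LENGTH is only an illustrative placeholder, not necessarily Bridgy's actual constant, and the URLs are hypothetical.

_MAX_STRING_LENGTH = 500  # placeholder value for illustration only

urls_to_activity = {}
too_long = set()
i = 0  # index of the activity these targets were discovered for
targets = ["https://example.com/ok", "https://example.com/" + "x" * 600]

for t in targets:
    if len(t) <= _MAX_STRING_LENGTH:
        urls_to_activity[t] = i
    else:
        # record a truncated marker so the failure can still be stored on the entity
        too_long.add(t[:_MAX_STRING_LENGTH - 4] + "...")

assert list(urls_to_activity.values()) == [i] and len(too_long) == 1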
Example 2: backfeed
# Required import: from models import Response [as alias]
# Or: from models.Response import urls_to_activity [as alias]
#......... part of the code omitted here .........
  rsvps = Source.get_rsvps_from_event(obj)

  # coalesce responses. drop any without ids
  for resp in replies + likes + reactions + reposts + rsvps:
    id = resp.get('id')
    if not id:
      logging.error('Skipping response without id: %s', json.dumps(resp, indent=2))
      continue

    if source.is_blocked(resp):
      logging.info('Skipping response by blocked user: %s',
                   json.dumps(resp.get('author') or resp.get('actor'), indent=2))
      continue

    resp.setdefault('activities', []).append(activity)

    # when we find two responses with the same id, the earlier one may have
    # come from a link post or user mention, and this one is probably better
    # since it probably came from the user's activity, so prefer this one.
    # background: https://github.com/snarfed/bridgy/issues/533
    existing = responses.get(id)
    if existing:
      if source.gr_source.activity_changed(resp, existing, log=True):
        logging.warning('Got two different versions of same response!\n%s\n%s',
                        existing, resp)
      resp['activities'].extend(existing.get('activities', []))

    responses[id] = resp

#
# Step 3: filter out responses we've already seen
#
# seen responses (JSON objects) for each source are stored in its entity.
unchanged_responses = []
if source.seen_responses_cache_json:
  for seen in json.loads(source.seen_responses_cache_json):
    id = seen['id']
    resp = responses.get(id)
    if resp and not source.gr_source.activity_changed(seen, resp, log=True):
      unchanged_responses.append(seen)
      del responses[id]

#
# Step 4: store new responses and enqueue propagate tasks
#
pruned_responses = []
for id, resp in responses.items():
  resp_type = Response.get_type(resp)
  activities = resp.pop('activities', [])
  if not activities and resp_type == 'post':
    activities = [resp]

  too_long = set()
  urls_to_activity = {}
  for i, activity in enumerate(activities):
    # we'll usually have multiple responses for the same activity, and the
    # objects in resp['activities'] are shared, so cache each activity's
    # discovered webmention targets inside its object.
    if 'originals' not in activity or 'mentions' not in activity:
      activity['originals'], activity['mentions'] = \
        original_post_discovery.discover(
          source, activity, fetch_hfeed=True,
          include_redirect_sources=False,
          already_fetched_hfeeds=fetched_hfeeds)

    targets = original_post_discovery.targets_for_response(
      resp, originals=activity['originals'], mentions=activity['mentions'])
    if targets:
      logging.info('%s has %d webmention target(s): %s', activity.get('url'),
                   len(targets), ' '.join(targets))
    for t in targets:
      if len(t) <= _MAX_STRING_LENGTH:
        urls_to_activity[t] = i
      else:
        logging.info('Giving up on target URL over %s chars! %s',
                     _MAX_STRING_LENGTH, t)
        too_long.add(t[:_MAX_STRING_LENGTH - 4] + '...')

  # store/update response entity. the prune_*() calls are important to
  # remove circular references in link responses, which are their own
  # activities. details in the step 2 comment above.
  pruned_response = util.prune_response(resp)
  pruned_responses.append(pruned_response)
  resp_entity = Response(
    id=id,
    source=source.key,
    activities_json=[json.dumps(util.prune_activity(a, source))
                     for a in activities],
    response_json=json.dumps(pruned_response),
    type=resp_type,
    unsent=list(urls_to_activity.keys()),
    failed=list(too_long),
    original_posts=resp.get('originals', []))
  if urls_to_activity and len(activities) > 1:
    resp_entity.urls_to_activity = json.dumps(urls_to_activity)
  resp_entity.get_or_save(source, restart=self.RESTART_EXISTING_TASKS)

# update cache
if pruned_responses:
  source.updates['seen_responses_cache_json'] = json.dumps(
    pruned_responses + unchanged_responses)
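Neither example shows the read side. The following is a hypothetical consumer sketch (not Bridgy's actual propagate task): given a Response entity saved as above, it looks up which stored activity a given webmention target URL belongs to, falling back to the first activity when no mapping was stored (the single-activity case). The helper name activity_for_target is made up for illustration.

import json

def activity_for_target(resp_entity, target_url):
    # urls_to_activity is only set when there was more than one activity
    index = 0
    if getattr(resp_entity, "urls_to_activity", None):
        index = json.loads(resp_entity.urls_to_activity).get(target_url, 0)
    # activities_json holds one JSON-encoded activity per entry
    return json.loads(resp_entity.activities_json[index])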