This page collects typical usage examples of the Python function vendor.timezones.utilities.localtime_for_timezone. If you are unsure what localtime_for_timezone does, how to call it, or what it looks like in real code, the curated examples below should help.
15 code examples of localtime_for_timezone are shown below, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Python code samples.
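Before diving into the examples, here is a minimal standalone sketch of the behavior these call sites rely on. The function body below is an assumption reconstructed from the call sites and from the unit test in Example 2 (a naive 18:00 datetime in, 12:00 America/Denver out); the vendored implementation itself is not shown on this page.

import datetime
import pytz

def localtime_for_timezone_sketch(value, timezone):
    # Accept either a timezone name or a tzinfo object, since the examples
    # pass both (a string in the test, user.profile.timezone elsewhere).
    if isinstance(timezone, basestring):
        timezone = pytz.timezone(timezone)
    # Treat the naive input datetime as UTC and convert it to the requested zone.
    return pytz.utc.localize(value).astimezone(timezone)

print(localtime_for_timezone_sketch(
    datetime.datetime(2008, 6, 25, 18, 0, 0), "America/Denver"
).strftime("%m/%d/%Y %H:%M:%S"))   # 06/25/2008 12:00:00 (MDT is UTC-6)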
Example 1: load_starred_stories
def load_starred_stories(request):
    user = get_user(request)
    offset = int(request.REQUEST.get('offset', 0))
    limit = int(request.REQUEST.get('limit', 10))
    page = int(request.REQUEST.get('page', 0))
    if page: offset = limit * (page - 1)

    mstories = MStarredStory.objects(user_id=user.pk).order_by('-starred_date')[offset:offset+limit]
    stories = Feed.format_stories(mstories)

    for story in stories:
        story_date = localtime_for_timezone(story['story_date'], user.profile.timezone)
        now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
        story['short_parsed_date'] = format_story_link_date__short(story_date, now)
        story['long_parsed_date'] = format_story_link_date__long(story_date, now)
        starred_date = localtime_for_timezone(story['starred_date'], user.profile.timezone)
        story['starred_date'] = format_story_link_date__long(starred_date, now)
        story['read_status'] = 1
        story['starred'] = True
        story['intelligence'] = {
            'feed': 0,
            'author': 0,
            'tags': 0,
            'title': 0,
        }

    logging.user(request, "~FCLoading starred stories: ~SB%s stories" % (len(stories)))

    return dict(stories=stories)
Example 2: test_localtime_for_timezone
def test_localtime_for_timezone(self):
    self.assertEqual(
        localtime_for_timezone(
            datetime(2008, 6, 25, 18, 0, 0), "America/Denver"
        ).strftime("%m/%d/%Y %H:%M:%S"),
        "06/25/2008 12:00:00"
    )
Example 3: load_features
def load_features(request):
    user = get_user(request)
    page = int(request.REQUEST.get('page', 0))
    logging.user(request, "~FBBrowse features: ~SBPage #%s" % (page+1))
    features = Feature.objects.all()[page*3:(page+1)*3+1].values()
    features = [{
        'description': f['description'],
        'date': localtime_for_timezone(f['date'], user.profile.timezone).strftime("%b %d, %Y")
    } for f in features]
    return features
Example 4: localtime
def localtime(value, timezone):
    return localtime_for_timezone(value, timezone)
Example 5: localdatetime
def localdatetime(context, date, date_format):
    user = get_user(context['user'])
    date = localtime_for_timezone(date, user.profile.timezone).strftime(date_format)
    return date
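Examples 4 and 5 are the bodies of Django template helpers. For context, this is how they would plausibly be registered and used; the Library boilerplate, the decorators, and the template snippets are assumptions (the page only shows the function bodies), and get_user comes from the surrounding codebase as in the other examples.

from django import template
from vendor.timezones.utilities import localtime_for_timezone

register = template.Library()

@register.filter
def localtime(value, timezone):
    # Filter form: {{ value|localtime:tz }}
    return localtime_for_timezone(value, timezone)

@register.simple_tag(takes_context=True)
def localdatetime(context, date, date_format):
    # Tag form: receives the template context first, so it can look up the user.
    user = get_user(context['user'])
    return localtime_for_timezone(date, user.profile.timezone).strftime(date_format)

# Hypothetical template usage:
#   {{ story.story_date|localtime:user.profile.timezone }}
#   {% localdatetime story.story_date "%b %d, %Y" %}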
Example 6: load_river_stories
# ... (the beginning of this function is omitted) ...
    feed_ids = [f[0] for f in feed_counts]
    feed_last_reads = dict([(str(feed_id), feed_last_reads[feed_id]) for feed_id in feed_ids
                            if feed_id in feed_last_reads])
    feed_counts = dict(feed_counts)

    # After excluding read stories, all that's left are stories
    # past the mark_read_date. Everything returned is guaranteed to be unread.
    mstories = MStory.objects(
        story_guid__nin=read_stories,
        story_feed_id__in=feed_ids,
        # story_date__gte=start - bottom_delta
    ).map_reduce("""function() {
            var d = feed_last_reads[this[~story_feed_id]];
            if (this[~story_date].getTime()/1000 > d) {
                emit(this[~id], this);
            }
        }""",
        """function(key, values) {
            return values[0];
        }""",
        output='inline',
        scope={
            'feed_last_reads': feed_last_reads
        }
    )
    mstories = [story.value for story in mstories if story and story.value]

    mstories = sorted(mstories, cmp=lambda x, y: cmp(story_score(y, bottom_delta), story_score(x, bottom_delta)))

    # story_feed_counts = defaultdict(int)
    # mstories_pruned = []
    # for story in mstories:
    #     print story['story_title'], story_feed_counts[story['story_feed_id']]
    #     if story_feed_counts[story['story_feed_id']] >= 3: continue
    #     mstories_pruned.append(story)
    #     story_feed_counts[story['story_feed_id']] += 1

    stories = []
    for i, story in enumerate(mstories):
        if i < offset: continue
        if i >= offset + limit: break
        stories.append(bunch(story))
    stories = Feed.format_stories(stories)
    found_feed_ids = list(set([story['story_feed_id'] for story in stories]))

    # Find starred stories
    starred_stories = MStarredStory.objects(
        user_id=user.pk,
        story_feed_id__in=found_feed_ids
    ).only('story_guid', 'starred_date')
    starred_stories = dict([(story.story_guid, story.starred_date)
                            for story in starred_stories])

    # Intelligence classifiers for all feeds involved
    def sort_by_feed(classifiers):
        feed_classifiers = defaultdict(list)
        for classifier in classifiers:
            feed_classifiers[classifier.feed_id].append(classifier)
        return feed_classifiers
    classifier_feeds = sort_by_feed(MClassifierFeed.objects(user_id=user.pk, feed_id__in=found_feed_ids))
    classifier_authors = sort_by_feed(MClassifierAuthor.objects(user_id=user.pk, feed_id__in=found_feed_ids))
    classifier_titles = sort_by_feed(MClassifierTitle.objects(user_id=user.pk, feed_id__in=found_feed_ids))
    classifier_tags = sort_by_feed(MClassifierTag.objects(user_id=user.pk, feed_id__in=found_feed_ids))

    classifiers = {}
    for feed_id in found_feed_ids:
        classifiers[feed_id] = get_classifiers_for_user(user, feed_id, classifier_feeds[feed_id],
                                                        classifier_authors[feed_id],
                                                        classifier_titles[feed_id],
                                                        classifier_tags[feed_id])

    # Just need to format stories
    for story in stories:
        story_date = localtime_for_timezone(story['story_date'], user.profile.timezone)
        now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
        story['short_parsed_date'] = format_story_link_date__short(story_date, now)
        story['long_parsed_date'] = format_story_link_date__long(story_date, now)
        story['read_status'] = 0
        if story['id'] in starred_stories:
            story['starred'] = True
            starred_date = localtime_for_timezone(starred_stories[story['id']], user.profile.timezone)
            story['starred_date'] = format_story_link_date__long(starred_date, now)
        story['intelligence'] = {
            'feed': apply_classifier_feeds(classifier_feeds[story['story_feed_id']], story['story_feed_id']),
            'author': apply_classifier_authors(classifier_authors[story['story_feed_id']], story),
            'tags': apply_classifier_tags(classifier_tags[story['story_feed_id']], story),
            'title': apply_classifier_titles(classifier_titles[story['story_feed_id']], story),
        }

    diff = datetime.datetime.utcnow() - start
    timediff = float("%s.%.2s" % (diff.seconds, (diff.microseconds / 1000)))
    logging.user(request, "~FCLoading river stories: page %s - ~SB%s/%s "
                 "stories ~SN(%s/%s/%s feeds) ~FB(%s seconds)" %
                 (page, len(stories), len(mstories), len(found_feed_ids),
                  len(feed_ids), len(original_feed_ids), timediff))

    if new_flag:
        return dict(stories=stories, classifiers=classifiers)
    else:
        logging.user(request, "~BR~FCNo new flag on river")
        return dict(stories=stories)
Example 7: load_single_feed
def load_single_feed(request, feed_id):
    start = time.time()
    user = get_user(request)
    offset = int(request.REQUEST.get('offset', 0))
    limit = int(request.REQUEST.get('limit', 12))
    page = int(request.REQUEST.get('page', 1))
    dupe_feed_id = None
    userstories_db = None

    if page: offset = limit * (page-1)
    if not feed_id: raise Http404

    try:
        feed = Feed.objects.get(id=feed_id)
    except Feed.DoesNotExist:
        feed_address = request.REQUEST.get('feed_address')
        dupe_feed = DuplicateFeed.objects.filter(duplicate_address=feed_address)
        if dupe_feed:
            feed = dupe_feed[0].feed
            dupe_feed_id = feed_id
        else:
            raise Http404

    stories = feed.get_stories(offset, limit)

    # Get intelligence classifier for user
    classifier_feeds = list(MClassifierFeed.objects(user_id=user.pk, feed_id=feed_id))
    classifier_authors = list(MClassifierAuthor.objects(user_id=user.pk, feed_id=feed_id))
    classifier_titles = list(MClassifierTitle.objects(user_id=user.pk, feed_id=feed_id))
    classifier_tags = list(MClassifierTag.objects(user_id=user.pk, feed_id=feed_id))

    checkpoint1 = time.time()

    usersub = UserSubscription.objects.get(user=user, feed=feed)
    userstories = []
    if usersub and stories:
        story_ids = [story['id'] for story in stories]
        userstories_db = MUserStory.objects(user_id=user.pk,
                                            feed_id=feed.pk,
                                            story_id__in=story_ids).only('story_id')
        starred_stories = MStarredStory.objects(user_id=user.pk,
                                                story_feed_id=feed_id,
                                                story_guid__in=story_ids).only('story_guid', 'starred_date')
        starred_stories = dict([(story.story_guid, story.starred_date) for story in starred_stories])
        userstories = set(us.story_id for us in userstories_db)

    checkpoint2 = time.time()

    for story in stories:
        story_date = localtime_for_timezone(story['story_date'], user.profile.timezone)
        now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
        story['short_parsed_date'] = format_story_link_date__short(story_date, now)
        story['long_parsed_date'] = format_story_link_date__long(story_date, now)
        if usersub:
            if story['id'] in userstories:
                story['read_status'] = 1
            elif not story.get('read_status') and story['story_date'] < usersub.mark_read_date:
                story['read_status'] = 1
            elif not story.get('read_status') and story['story_date'] > usersub.last_read_date:
                story['read_status'] = 0
            if story['id'] in starred_stories:
                story['starred'] = True
                starred_date = localtime_for_timezone(starred_stories[story['id']], user.profile.timezone)
                story['starred_date'] = format_story_link_date__long(starred_date, now)
        else:
            story['read_status'] = 1
        story['intelligence'] = {
            'feed': apply_classifier_feeds(classifier_feeds, feed),
            'author': apply_classifier_authors(classifier_authors, story),
            'tags': apply_classifier_tags(classifier_tags, story),
            'title': apply_classifier_titles(classifier_titles, story),
        }

    checkpoint3 = time.time()

    # Intelligence
    feed_tags = json.decode(feed.data.popular_tags) if feed.data.popular_tags else []
    feed_authors = json.decode(feed.data.popular_authors) if feed.data.popular_authors else []
    classifiers = get_classifiers_for_user(user, feed_id, classifier_feeds,
                                           classifier_authors, classifier_titles, classifier_tags)

    if usersub:
        usersub.feed_opens += 1
        usersub.save()

    diff1 = checkpoint1-start
    diff2 = checkpoint2-start
    diff3 = checkpoint3-start
    timediff = time.time()-start
    last_update = relative_timesince(feed.last_update)
    logging.user(request, "~FYLoading feed: ~SB%s%s ~SN(%.4s seconds, ~SB%.4s/%.4s(%s)/%.4s~SN)" % (
        feed.feed_title[:32], ('~SN/p%s' % page) if page > 1 else '', timediff,
        diff1, diff2, userstories_db and userstories_db.count() or '~SN0~SB', diff3))

    FeedLoadtime.objects.create(feed=feed, loadtime=timediff)

    data = dict(stories=stories,
                feed_tags=feed_tags,
                feed_authors=feed_authors,
                classifiers=classifiers,
                last_update=last_update,
                feed_id=feed.pk)
    # ... (the rest of this function is omitted) ...
Example 8: sort_by_feed
        return feed_classifiers
    classifier_feeds = sort_by_feed(MClassifierFeed.objects(user_id=user.pk, feed_id__in=found_feed_ids))
    classifier_authors = sort_by_feed(MClassifierAuthor.objects(user_id=user.pk, feed_id__in=found_feed_ids))
    classifier_titles = sort_by_feed(MClassifierTitle.objects(user_id=user.pk, feed_id__in=found_feed_ids))
    classifier_tags = sort_by_feed(MClassifierTag.objects(user_id=user.pk, feed_id__in=found_feed_ids))

    classifiers = {}
    for feed_id in found_feed_ids:
        classifiers[feed_id] = get_classifiers_for_user(user, feed_id, classifier_feeds[feed_id],
                                                        classifier_authors[feed_id],
                                                        classifier_titles[feed_id],
                                                        classifier_tags[feed_id])

    # Just need to format stories
    for story in stories:
        story_date = localtime_for_timezone(story['story_date'], user.profile.timezone)
        now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
        story['short_parsed_date'] = format_story_link_date__short(story_date, now)
        story['long_parsed_date'] = format_story_link_date__long(story_date, now)
        story['read_status'] = 0
        if story['id'] in starred_stories:
            story['starred'] = True
            starred_date = localtime_for_timezone(starred_stories[story['id']], user.profile.timezone)
            story['starred_date'] = format_story_link_date__long(starred_date, now)
        story['intelligence'] = {
            'feed': apply_classifier_feeds(classifier_feeds[story['story_feed_id']], story['story_feed_id']),
            'author': apply_classifier_authors(classifier_authors[story['story_feed_id']], story),
            'tags': apply_classifier_tags(classifier_tags[story['story_feed_id']], story),
            'title': apply_classifier_titles(classifier_titles[story['story_feed_id']], story),
        }
Example 9: load_feed_statistics
def load_feed_statistics(request, feed_id):
    user = get_user(request)
    timezone = user.profile.timezone
    stats = dict()
    feed = get_object_or_404(Feed, pk=feed_id)
    feed.update_all_statistics()
    feed.set_next_scheduled_update(verbose=True, skip_scheduling=True)
    feed.save_feed_story_history_statistics()
    feed.save_classifier_counts()

    # Dates of last and next update
    stats['active'] = feed.active
    stats['last_update'] = relative_timesince(feed.last_update)
    stats['next_update'] = relative_timeuntil(feed.next_scheduled_update)
    stats['push'] = feed.is_push
    if feed.is_push:
        try:
            stats['push_expires'] = localtime_for_timezone(feed.push.lease_expires,
                                                           timezone).strftime("%Y-%m-%d %H:%M:%S")
        except PushSubscription.DoesNotExist:
            stats['push_expires'] = 'Missing push'
            feed.is_push = False
            feed.save()

    # Minutes between updates
    update_interval_minutes = feed.get_next_scheduled_update(force=True, verbose=False)
    stats['update_interval_minutes'] = update_interval_minutes
    original_active_premium_subscribers = feed.active_premium_subscribers
    original_premium_subscribers = feed.premium_subscribers
    feed.active_premium_subscribers = max(feed.active_premium_subscribers+1, 1)
    feed.premium_subscribers += 1
    premium_update_interval_minutes = feed.get_next_scheduled_update(force=True, verbose=False,
                                                                     premium_speed=True)
    feed.active_premium_subscribers = original_active_premium_subscribers
    feed.premium_subscribers = original_premium_subscribers
    stats['premium_update_interval_minutes'] = premium_update_interval_minutes
    stats['errors_since_good'] = feed.errors_since_good

    # Stories per month - average and month-by-month breakout
    average_stories_per_month, story_count_history = feed.average_stories_per_month, feed.data.story_count_history
    stats['average_stories_per_month'] = average_stories_per_month
    story_count_history = story_count_history and json.decode(story_count_history)
    if story_count_history and isinstance(story_count_history, dict):
        stats['story_count_history'] = story_count_history['months']
        stats['story_days_history'] = story_count_history['days']
        stats['story_hours_history'] = story_count_history['hours']
    else:
        stats['story_count_history'] = story_count_history

    # Rotate hours to match user's timezone offset
    localoffset = timezone.utcoffset(datetime.datetime.utcnow())
    hours_offset = int(localoffset.total_seconds() / 3600)
    rotated_hours = {}
    for hour, value in stats['story_hours_history'].items():
        rotated_hours[str((int(hour) + hours_offset) % 24)] = value  # wrap shifted hours back into 0-23
    stats['story_hours_history'] = rotated_hours

    # Subscribers
    stats['subscriber_count'] = feed.num_subscribers
    stats['num_subscribers'] = feed.num_subscribers
    stats['stories_last_month'] = feed.stories_last_month
    stats['last_load_time'] = feed.last_load_time
    stats['premium_subscribers'] = feed.premium_subscribers
    stats['active_subscribers'] = feed.active_subscribers
    stats['active_premium_subscribers'] = feed.active_premium_subscribers

    # Classifier counts
    stats['classifier_counts'] = json.decode(feed.data.feed_classifier_counts)

    # Fetch histories
    fetch_history = MFetchHistory.feed(feed_id, timezone=timezone)
    stats['feed_fetch_history'] = fetch_history['feed_fetch_history']
    stats['page_fetch_history'] = fetch_history['page_fetch_history']
    stats['feed_push_history'] = fetch_history['push_history']

    logging.user(request, "~FBStatistics: ~SB%s" % (feed))

    return stats
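A quick illustration of the hour-rotation step in Example 9, since the offset arithmetic is easy to misread. This is a sketch under the assumption that timezone is a pytz tzinfo object, which is what user.profile.timezone appears to return throughout these examples:

import datetime
import pytz

timezone = pytz.timezone("America/New_York")
localoffset = timezone.utcoffset(datetime.datetime.utcnow())
hours_offset = int(localoffset.total_seconds() / 3600)   # -4 during DST, -5 otherwise

# A count bucketed under UTC hour 14 belongs under local hour (14 - 4) % 24 == 10,
# which is why the rotation wraps the shifted hour back into the 0-23 range.
print((14 + hours_offset) % 24)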
Example 10: load_feed_statistics
def load_feed_statistics(request, feed_id):
    user = get_user(request)
    timezone = user.profile.timezone
    stats = dict()
    feed = get_object_or_404(Feed, pk=feed_id)
    feed.update_all_statistics()
    feed.set_next_scheduled_update(verbose=True, skip_scheduling=True)
    feed.save_feed_story_history_statistics()
    feed.save_classifier_counts()

    # Dates of last and next update
    stats["active"] = feed.active
    stats["last_update"] = relative_timesince(feed.last_update)
    stats["next_update"] = relative_timeuntil(feed.next_scheduled_update)
    stats["push"] = feed.is_push
    if feed.is_push:
        try:
            stats["push_expires"] = localtime_for_timezone(feed.push.lease_expires, timezone).strftime(
                "%Y-%m-%d %H:%M:%S"
            )
        except PushSubscription.DoesNotExist:
            stats["push_expires"] = "Missing push"
            feed.is_push = False
            feed.save()

    # Minutes between updates
    update_interval_minutes = feed.get_next_scheduled_update(force=True, verbose=False)
    stats["update_interval_minutes"] = update_interval_minutes
    original_active_premium_subscribers = feed.active_premium_subscribers
    original_premium_subscribers = feed.premium_subscribers
    feed.active_premium_subscribers = max(feed.active_premium_subscribers + 1, 1)
    feed.premium_subscribers += 1
    premium_update_interval_minutes = feed.get_next_scheduled_update(force=True, verbose=False, premium_speed=True)
    feed.active_premium_subscribers = original_active_premium_subscribers
    feed.premium_subscribers = original_premium_subscribers
    stats["premium_update_interval_minutes"] = premium_update_interval_minutes
    stats["errors_since_good"] = feed.errors_since_good

    # Stories per month - average and month-by-month breakout
    average_stories_per_month, story_count_history = feed.average_stories_per_month, feed.data.story_count_history
    stats["average_stories_per_month"] = average_stories_per_month
    stats["story_count_history"] = story_count_history and json.decode(story_count_history)

    # Subscribers
    stats["subscriber_count"] = feed.num_subscribers
    stats["num_subscribers"] = feed.num_subscribers
    stats["stories_last_month"] = feed.stories_last_month
    stats["last_load_time"] = feed.last_load_time
    stats["premium_subscribers"] = feed.premium_subscribers
    stats["active_subscribers"] = feed.active_subscribers
    stats["active_premium_subscribers"] = feed.active_premium_subscribers

    # Classifier counts
    stats["classifier_counts"] = json.decode(feed.data.feed_classifier_counts)

    # Fetch histories
    fetch_history = MFetchHistory.feed(feed_id, timezone=timezone)
    stats["feed_fetch_history"] = fetch_history["feed_fetch_history"]
    stats["page_fetch_history"] = fetch_history["page_fetch_history"]
    stats["feed_push_history"] = fetch_history["push_history"]

    logging.user(request, "~FBStatistics: ~SB%s" % (feed))

    return stats
Example 11: load_social_stories
def load_social_stories(request, user_id, username=None):
    start = time.time()
    user = get_user(request)
    social_user_id = int(user_id)
    social_user = get_object_or_404(User, pk=social_user_id)
    offset = int(request.REQUEST.get('offset', 0))
    limit = int(request.REQUEST.get('limit', 6))
    page = request.REQUEST.get('page')
    order = request.REQUEST.get('order', 'newest')
    read_filter = request.REQUEST.get('read_filter', 'all')
    stories = []

    if page: offset = limit * (int(page) - 1)
    now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
    UNREAD_CUTOFF = datetime.datetime.utcnow() - datetime.timedelta(days=settings.DAYS_OF_UNREAD)

    social_profile = MSocialProfile.get_user(social_user.pk)
    try:
        socialsub = MSocialSubscription.objects.get(user_id=user.pk, subscription_user_id=social_user_id)
    except MSocialSubscription.DoesNotExist:
        socialsub = None

    mstories = MSharedStory.objects(user_id=social_user.pk).order_by('-shared_date')[offset:offset+limit]
    stories = Feed.format_stories(mstories)

    if socialsub and (read_filter == 'unread' or order == 'oldest'):
        story_ids = socialsub.get_stories(order=order, read_filter=read_filter, offset=offset, limit=limit)
        story_date_order = "%sshared_date" % ('' if order == 'oldest' else '-')
        if story_ids:
            mstories = MSharedStory.objects(user_id=social_user.pk,
                                            story_db_id__in=story_ids).order_by(story_date_order)
            stories = Feed.format_stories(mstories)
    else:
        mstories = MSharedStory.objects(user_id=social_user.pk).order_by('-shared_date')[offset:offset+limit]
        stories = Feed.format_stories(mstories)

    if not stories:
        return dict(stories=[])

    checkpoint1 = time.time()

    stories, user_profiles = MSharedStory.stories_with_comments_and_profiles(stories, user.pk, check_all=True)

    story_feed_ids = list(set(s['story_feed_id'] for s in stories))
    usersubs = UserSubscription.objects.filter(user__pk=user.pk, feed__pk__in=story_feed_ids)
    usersubs_map = dict((sub.feed_id, sub) for sub in usersubs)
    unsub_feed_ids = list(set(story_feed_ids).difference(set(usersubs_map.keys())))
    unsub_feeds = Feed.objects.filter(pk__in=unsub_feed_ids)
    unsub_feeds = [feed.canonical(include_favicon=False) for feed in unsub_feeds]

    date_delta = UNREAD_CUTOFF
    if socialsub and date_delta < socialsub.mark_read_date:
        date_delta = socialsub.mark_read_date

    # Get intelligence classifier for user
    classifier_feeds = list(MClassifierFeed.objects(user_id=user.pk, social_user_id=social_user_id))
    classifier_authors = list(MClassifierAuthor.objects(user_id=user.pk, social_user_id=social_user_id))
    classifier_titles = list(MClassifierTitle.objects(user_id=user.pk, social_user_id=social_user_id))
    classifier_tags = list(MClassifierTag.objects(user_id=user.pk, social_user_id=social_user_id))
    # Merge with feed specific classifiers
    classifier_feeds = classifier_feeds + list(MClassifierFeed.objects(user_id=user.pk, feed_id__in=story_feed_ids))
    classifier_authors = classifier_authors + list(MClassifierAuthor.objects(user_id=user.pk, feed_id__in=story_feed_ids))
    classifier_titles = classifier_titles + list(MClassifierTitle.objects(user_id=user.pk, feed_id__in=story_feed_ids))
    classifier_tags = classifier_tags + list(MClassifierTag.objects(user_id=user.pk, feed_id__in=story_feed_ids))

    checkpoint2 = time.time()

    story_ids = [story['id'] for story in stories]
    userstories_db = MUserStory.objects(user_id=user.pk,
                                        feed_id__in=story_feed_ids,
                                        story_id__in=story_ids).only('story_id')
    userstories = set(us.story_id for us in userstories_db)
    starred_stories = MStarredStory.objects(user_id=user.pk,
                                            story_feed_id__in=story_feed_ids,
                                            story_guid__in=story_ids).only('story_guid', 'starred_date')
    shared_stories = MSharedStory.objects(user_id=user.pk,
                                          story_feed_id__in=story_feed_ids,
                                          story_guid__in=story_ids)\
                                 .only('story_guid', 'shared_date', 'comments')
    starred_stories = dict([(story.story_guid, story.starred_date) for story in starred_stories])
    shared_stories = dict([(story.story_guid, dict(shared_date=story.shared_date, comments=story.comments))
                           for story in shared_stories])

    for story in stories:
        story['social_user_id'] = social_user_id
        story_feed_id = story['story_feed_id']
        # story_date = localtime_for_timezone(story['story_date'], user.profile.timezone)
        shared_date = localtime_for_timezone(story['shared_date'], user.profile.timezone)
        story['short_parsed_date'] = format_story_link_date__short(shared_date, now)
        story['long_parsed_date'] = format_story_link_date__long(shared_date, now)

        if not socialsub:
            story['read_status'] = 1
        elif story['id'] in userstories:
            story['read_status'] = 1
        elif story['shared_date'] < date_delta:
            story['read_status'] = 1
        elif not usersubs_map.get(story_feed_id):
            story['read_status'] = 0
        elif not story.get('read_status') and story['story_date'] < usersubs_map[story_feed_id].mark_read_date:
            story['read_status'] = 1
    # ... (the rest of this function is omitted) ...
Example 12: load_social_page
def load_social_page(request, user_id, username=None, **kwargs):
    start = time.time()
    user = request.user
    social_user_id = int(user_id)
    social_user = get_object_or_404(User, pk=social_user_id)
    offset = int(request.REQUEST.get('offset', 0))
    limit = int(request.REQUEST.get('limit', 6))
    page = request.REQUEST.get('page')
    format = request.REQUEST.get('format', None)
    has_next_page = False
    feed_id = kwargs.get('feed_id') or request.REQUEST.get('feed_id')
    if page: offset = limit * (int(page) - 1)

    user_social_profile = None
    if user.is_authenticated():
        user_social_profile = MSocialProfile.get_user(user.pk)
    social_profile = MSocialProfile.get_user(social_user_id)

    params = dict(user_id=social_user.pk)
    if feed_id:
        params['story_feed_id'] = feed_id
    mstories = MSharedStory.objects(**params).order_by('-shared_date')[offset:offset+limit+1]
    stories = Feed.format_stories(mstories)
    if len(stories) > limit:
        has_next_page = True
        stories = stories[:-1]

    checkpoint1 = time.time()

    if not stories:
        params = {
            "user": user,
            "stories": [],
            "feeds": {},
            "social_user": social_user,
            "social_profile": social_profile,
            'user_social_profile': json.encode(user_social_profile and user_social_profile.page()),
        }
        template = 'social/social_page.xhtml'
        return render_to_response(template, params, context_instance=RequestContext(request))

    story_feed_ids = list(set(s['story_feed_id'] for s in stories))
    feeds = Feed.objects.filter(pk__in=story_feed_ids)
    feeds = dict((feed.pk, feed.canonical(include_favicon=False)) for feed in feeds)

    for story in stories:
        if story['story_feed_id'] in feeds:
            # Feed could have been deleted.
            story['feed'] = feeds[story['story_feed_id']]
        shared_date = localtime_for_timezone(story['shared_date'], social_user.profile.timezone)
        story['shared_date'] = shared_date

    stories, profiles = MSharedStory.stories_with_comments_and_profiles(stories, social_user.pk,
                                                                        check_all=True)

    checkpoint2 = time.time()

    if user.is_authenticated():
        for story in stories:
            if user.pk in story['shared_by_friends'] or user.pk in story['shared_by_public']:
                story['shared_by_user'] = True
                shared_story = MSharedStory.objects.get(user_id=user.pk,
                                                        story_feed_id=story['story_feed_id'],
                                                        story_guid=story['id'])
                story['user_comments'] = shared_story.comments

    stories = MSharedStory.attach_users_to_stories(stories, profiles)

    params = {
        'social_user': social_user,
        'stories': stories,
        'user_social_profile': json.encode(user_social_profile and user_social_profile.page()),
        'social_profile': social_profile,
        'feeds': feeds,
        'user_profile': hasattr(user, 'profile') and user.profile,
        'has_next_page': has_next_page,
        'holzer_truism': random.choice(jennyholzer.TRUISMS)  # if not has_next_page else None
    }

    diff1 = checkpoint1-start
    diff2 = checkpoint2-start
    timediff = time.time()-start
    logging.user(request, "~FYLoading ~FMsocial page~FY: ~SB%s%s ~SN(%.4s seconds, ~SB%.4s/%.4s~SN)" % (
        social_profile.title[:22], ('~SN/p%s' % page) if page > 1 else '', timediff,
        diff1, diff2))

    if format == 'html':
        template = 'social/social_stories.xhtml'
    else:
        template = 'social/social_page.xhtml'

    return render_to_response(template, params, context_instance=RequestContext(request))
Example 13: load_social_page
def load_social_page(request, user_id, username=None, **kwargs):
    start = time.time()
    user = request.user
    social_user_id = int(user_id)
    social_user = get_object_or_404(User, pk=social_user_id)
    offset = int(request.REQUEST.get("offset", 0))
    limit = int(request.REQUEST.get("limit", 6))
    page = request.REQUEST.get("page")
    format = request.REQUEST.get("format", None)
    has_next_page = False
    feed_id = kwargs.get("feed_id") or request.REQUEST.get("feed_id")
    if page:
        offset = limit * (int(page) - 1)

    user_social_profile = None
    user_social_services = None
    if user.is_authenticated():
        user_social_profile = MSocialProfile.get_user(user.pk)
        user_social_services = MSocialServices.get_user(user.pk)
    social_profile = MSocialProfile.get_user(social_user_id)

    params = dict(user_id=social_user.pk)
    if feed_id:
        params["story_feed_id"] = feed_id
    mstories = MSharedStory.objects(**params).order_by("-shared_date")[offset : offset + limit + 1]
    stories = Feed.format_stories(mstories)
    if len(stories) > limit:
        has_next_page = True
        stories = stories[:-1]

    checkpoint1 = time.time()

    if not stories:
        params = {
            "user": user,
            "stories": [],
            "feeds": {},
            "social_user": social_user,
            "social_profile": social_profile,
            "user_social_services": user_social_services,
            "user_social_profile": json.encode(user_social_profile and user_social_profile.page()),
        }
        template = "social/social_page.xhtml"
        return render_to_response(template, params, context_instance=RequestContext(request))

    story_feed_ids = list(set(s["story_feed_id"] for s in stories))
    feeds = Feed.objects.filter(pk__in=story_feed_ids)
    feeds = dict((feed.pk, feed.canonical(include_favicon=False)) for feed in feeds)

    for story in stories:
        if story["story_feed_id"] in feeds:
            # Feed could have been deleted.
            story["feed"] = feeds[story["story_feed_id"]]
        shared_date = localtime_for_timezone(story["shared_date"], social_user.profile.timezone)
        story["shared_date"] = shared_date

    stories, profiles = MSharedStory.stories_with_comments_and_profiles(stories, social_user.pk, check_all=True)

    checkpoint2 = time.time()

    if user.is_authenticated():
        for story in stories:
            if user.pk in story["share_user_ids"]:
                story["shared_by_user"] = True
                shared_story = MSharedStory.objects.get(
                    user_id=user.pk, story_feed_id=story["story_feed_id"], story_guid=story["id"]
                )
                story["user_comments"] = shared_story.comments

    stories = MSharedStory.attach_users_to_stories(stories, profiles)

    params = {
        "social_user": social_user,
        "stories": stories,
        "user_social_profile": user_social_profile,
        "user_social_profile_page": json.encode(user_social_profile and user_social_profile.page()),
        "user_social_services": user_social_services,
        "user_social_services_page": json.encode(user_social_services and user_social_services.to_json()),
        "social_profile": social_profile,
        "feeds": feeds,
        "user_profile": hasattr(user, "profile") and user.profile,
        "has_next_page": has_next_page,
        "holzer_truism": random.choice(jennyholzer.TRUISMS),  # if not has_next_page else None
    }

    diff1 = checkpoint1 - start
    diff2 = checkpoint2 - start
    timediff = time.time() - start
    logging.user(
        request,
        "~FYLoading ~FMsocial page~FY: ~SB%s%s ~SN(%.4s seconds, ~SB%.4s/%.4s~SN)"
        % (social_profile.title[:22], ("~SN/p%s" % page) if page > 1 else "", timediff, diff1, diff2),
    )

    if format == "html":
        template = "social/social_stories.xhtml"
    else:
        template = "social/social_page.xhtml"

    return render_to_response(template, params, context_instance=RequestContext(request))
Example 14: load_river_blurblog
def load_river_blurblog(request):
    limit = 10
    start = time.time()
    user = get_user(request)
    social_user_ids = [int(uid) for uid in request.REQUEST.getlist("social_user_ids") if uid]
    original_user_ids = list(social_user_ids)
    page = int(request.REQUEST.get("page", 1))
    order = request.REQUEST.get("order", "newest")
    read_filter = request.REQUEST.get("read_filter", "unread")
    relative_user_id = request.REQUEST.get("relative_user_id", None)
    now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
    UNREAD_CUTOFF = datetime.datetime.utcnow() - datetime.timedelta(days=settings.DAYS_OF_UNREAD)

    if not relative_user_id:
        relative_user_id = get_user(request).pk

    if not social_user_ids:
        socialsubs = MSocialSubscription.objects.filter(user_id=user.pk)
        social_user_ids = [s.subscription_user_id for s in socialsubs]

    offset = (page - 1) * limit
    limit = page * limit - 1

    story_ids, story_dates = MSocialSubscription.feed_stories(
        user.pk, social_user_ids, offset=offset, limit=limit, order=order, read_filter=read_filter
    )
    mstories = MStory.objects(id__in=story_ids)
    story_id_to_dates = dict(zip(story_ids, story_dates))

    def sort_stories_by_id(a, b):
        return int(story_id_to_dates[str(b.id)]) - int(story_id_to_dates[str(a.id)])

    sorted_mstories = sorted(mstories, cmp=sort_stories_by_id)
    stories = Feed.format_stories(sorted_mstories)
    for s, story in enumerate(stories):
        story["story_date"] = datetime.datetime.fromtimestamp(story_dates[s])
    stories, user_profiles = MSharedStory.stories_with_comments_and_profiles(stories, relative_user_id, check_all=True)

    story_feed_ids = list(set(s["story_feed_id"] for s in stories))
    usersubs = UserSubscription.objects.filter(user__pk=user.pk, feed__pk__in=story_feed_ids)
    usersubs_map = dict((sub.feed_id, sub) for sub in usersubs)
    unsub_feed_ids = list(set(story_feed_ids).difference(set(usersubs_map.keys())))
    unsub_feeds = Feed.objects.filter(pk__in=unsub_feed_ids)
    unsub_feeds = [feed.canonical(include_favicon=False) for feed in unsub_feeds]

    # Find starred stories
    if story_feed_ids:
        story_ids = [story["id"] for story in stories]
        starred_stories = MStarredStory.objects(user_id=user.pk, story_guid__in=story_ids).only(
            "story_guid", "starred_date"
        )
        starred_stories = dict([(story.story_guid, story.starred_date) for story in starred_stories])
        shared_stories = MSharedStory.objects(user_id=user.pk, story_guid__in=story_ids).only(
            "story_guid", "shared_date", "comments"
        )
        shared_stories = dict(
            [
                (story.story_guid, dict(shared_date=story.shared_date, comments=story.comments))
                for story in shared_stories
            ]
        )
        userstories_db = MUserStory.objects(user_id=user.pk, feed_id__in=story_feed_ids, story_id__in=story_ids).only(
            "story_id"
        )
        userstories = set(us.story_id for us in userstories_db)
    else:
        starred_stories = {}
        shared_stories = {}
        userstories = []

    # Intelligence classifiers for all feeds involved
    if story_feed_ids:
        classifier_feeds = list(MClassifierFeed.objects(user_id=user.pk, feed_id__in=story_feed_ids))
        classifier_authors = list(MClassifierAuthor.objects(user_id=user.pk, feed_id__in=story_feed_ids))
        classifier_titles = list(MClassifierTitle.objects(user_id=user.pk, feed_id__in=story_feed_ids))
        classifier_tags = list(MClassifierTag.objects(user_id=user.pk, feed_id__in=story_feed_ids))
    else:
        classifier_feeds = []
        classifier_authors = []
        classifier_titles = []
        classifier_tags = []
    classifiers = sort_classifiers_by_feed(
        user=user,
        feed_ids=story_feed_ids,
        classifier_feeds=classifier_feeds,
        classifier_authors=classifier_authors,
        classifier_titles=classifier_titles,
        classifier_tags=classifier_tags,
    )

    # Just need to format stories
    for story in stories:
        if story["id"] in userstories:
            story["read_status"] = 1
        elif story["story_date"] < UNREAD_CUTOFF:
            story["read_status"] = 1
        else:
            story["read_status"] = 0
    # ... (the rest of this function is omitted) ...
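A side note on Example 14: the cmp= argument to sorted() is a Python 2-only idiom. The same newest-first ordering can be expressed with a key function, which also works on Python 3 (a sketch reusing the same story_id_to_dates mapping):

sorted_mstories = sorted(mstories,
                         key=lambda s: int(story_id_to_dates[str(s.id)]),
                         reverse=True)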
Example 15: load_single_feed
def load_single_feed(request, feed_id):
    start = datetime.datetime.utcnow()
    user = get_user(request)
    offset = int(request.REQUEST.get('offset', 0))
    limit = int(request.REQUEST.get('limit', 12))
    page = int(request.REQUEST.get('page', 1))
    if page:
        offset = limit * (page-1)
    dupe_feed_id = None
    if not feed_id:
        raise Http404

    try:
        feed = Feed.objects.get(id=feed_id)
    except Feed.DoesNotExist:
        feed_address = request.REQUEST.get('feed_address')
        dupe_feed = DuplicateFeed.objects.filter(duplicate_address=feed_address)
        if dupe_feed:
            feed = dupe_feed[0].feed
            dupe_feed_id = feed_id
        else:
            raise Http404

    stories = feed.get_stories(offset, limit)

    # Get intelligence classifier for user
    classifier_feeds = MClassifierFeed.objects(user_id=user.pk, feed_id=feed_id)
    classifier_authors = MClassifierAuthor.objects(user_id=user.pk, feed_id=feed_id)
    classifier_titles = MClassifierTitle.objects(user_id=user.pk, feed_id=feed_id)
    classifier_tags = MClassifierTag.objects(user_id=user.pk, feed_id=feed_id)

    usersub = UserSubscription.objects.get(user=user, feed=feed)
    userstories = []
    if usersub:
        userstories_db = MUserStory.objects(user_id=user.pk,
                                            feed_id=feed.pk,
                                            read_date__gte=usersub.mark_read_date)
        starred_stories = MStarredStory.objects(user_id=user.pk, story_feed_id=feed_id).only('story_guid', 'starred_date')
        starred_stories = dict([(story.story_guid, story.starred_date) for story in starred_stories])

        for us in userstories_db:
            if hasattr(us.story, 'story_guid') and isinstance(us.story.story_guid, unicode):
                userstories.append(us.story.story_guid)
            elif hasattr(us.story, 'id') and isinstance(us.story.id, unicode):
                userstories.append(us.story.id)  # TODO: Remove me after migration from story.id->guid

    for story in stories:
        [x.rewind() for x in [classifier_feeds, classifier_authors, classifier_tags, classifier_titles]]
        story_date = localtime_for_timezone(story['story_date'], user.profile.timezone)
        now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
        story['short_parsed_date'] = format_story_link_date__short(story_date, now)
        story['long_parsed_date'] = format_story_link_date__long(story_date, now)
        if usersub:
            if story['id'] in userstories:
                story['read_status'] = 1
            elif not story.get('read_status') and story['story_date'] < usersub.mark_read_date:
                story['read_status'] = 1
            elif not story.get('read_status') and story['story_date'] > usersub.last_read_date:
                story['read_status'] = 0
            if story['id'] in starred_stories:
                story['starred'] = True
                starred_date = localtime_for_timezone(starred_stories[story['id']], user.profile.timezone)
                story['starred_date'] = format_story_link_date__long(starred_date, now)
        else:
            story['read_status'] = 1
        story['intelligence'] = {
            'feed': apply_classifier_feeds(classifier_feeds, feed),
            'author': apply_classifier_authors(classifier_authors, story),
            'tags': apply_classifier_tags(classifier_tags, story),
            'title': apply_classifier_titles(classifier_titles, story),
        }

    # Intelligence
    feed_tags = json.decode(feed.data.popular_tags) if feed.data.popular_tags else []
    feed_authors = json.decode(feed.data.popular_authors) if feed.data.popular_authors else []
    classifiers = get_classifiers_for_user(user, feed_id, classifier_feeds,
                                           classifier_authors, classifier_titles, classifier_tags)

    if usersub:
        usersub.feed_opens += 1
        usersub.save()

    diff = datetime.datetime.utcnow()-start
    timediff = float("%s.%.2s" % (diff.seconds, (diff.microseconds / 1000)))
    last_update = relative_timesince(feed.last_update)
    logging.user(request.user, "~FYLoading feed: ~SB%s%s ~SN(%s seconds)" % (
        feed, ('~SN/p%s' % page) if page > 1 else '', timediff))
    FeedLoadtime.objects.create(feed=feed, loadtime=timediff)

    data = dict(stories=stories,
                feed_tags=feed_tags,
                feed_authors=feed_authors,
                classifiers=classifiers,
                last_update=last_update,
                feed_id=feed.pk)

    if dupe_feed_id: data['dupe_feed_id'] = dupe_feed_id
    if not usersub:
        data.update(feed.canonical())
    # ... (the rest of this function is omitted) ...