This article collects typical usage examples of the Python class apps.rss_feeds.models.MStarredStory. If you have been wondering how to use MStarredStory in Python, what it is for, or what real calls against it look like, the curated class examples below should help.
Fifteen code examples of the MStarredStory class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
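All fifteen examples lean on the same small slice of the MongoEngine query API: filter with keyword arguments, sort with order_by, slice for pagination, and count. A minimal, self-contained sketch of that pattern, with the document class and connect() call as illustrative stand-ins (field names are taken from the examples below; this is not NewsBlur's actual configuration):

import mongoengine

class MStarredStory(mongoengine.Document):
    # Fields mirror those referenced throughout the examples.
    user_id       = mongoengine.IntField()
    story_feed_id = mongoengine.IntField()
    story_guid    = mongoengine.StringField()
    story_title   = mongoengine.StringField()
    user_tags     = mongoengine.ListField(mongoengine.StringField())
    starred_date  = mongoengine.DateTimeField()

    meta = {'collection': 'starred_stories'}

mongoengine.connect('newsblur')  # database name assumed

# Count a user's saved stories (as in the load_feeds examples):
starred_count = MStarredStory.objects(user_id=1).count()

# Page through them, newest first (as in load_starred_stories):
page, limit = 2, 10
offset = limit * (page - 1)
mstories = MStarredStory.objects(user_id=1).order_by('-starred_date')[offset:offset + limit]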
Example 1: forwards
def forwards(self, orm):
    from apps.rss_feeds.models import MStarredStory
    from apps.social.models import MSharedStory
    db = settings.MONGODB

    starred_count = MStarredStory.objects.count()
    print " ---> Saving %s starred stories..." % starred_count
    shared_count = MSharedStory.objects.count()
    print " ---> Saving %s shared stories..." % shared_count

    start = 0
    user_count = User.objects.latest('pk').pk
    for user_id in xrange(start, user_count):
        if user_id % 1000 == 0:
            print " ---> %s/%s" % (user_id, user_count)
        stories = MStarredStory.objects(user_id=user_id, story_hash__exists=False)\
                               .only('id', 'story_feed_id', 'story_guid')\
                               .read_preference(pymongo.ReadPreference.SECONDARY)
        for i, story in enumerate(stories):
            db.newsblur.starred_stories.update({"_id": story.id}, {"$set": {
                "story_hash": story.feed_guid_hash
            }})

        stories = MSharedStory.objects(user_id=user_id, story_hash__exists=False)\
                              .only('id', 'user_id', 'story_feed_id', 'story_guid')\
                              .read_preference(pymongo.ReadPreference.SECONDARY)
        for i, story in enumerate(stories):
            db.newsblur.shared_stories.update({"_id": story.id}, {"$set": {
                "story_hash": story.feed_guid_hash
            }})
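Example 1 is a data migration: it backfills the denormalized story_hash field by writing feed_guid_hash through raw pymongo updates rather than per-document MongoEngine saves, which is far cheaper across millions of rows. As a rough sketch of what such a property might compute (the digest choice and truncation length below are assumptions, not confirmed from the source):

import hashlib

class StoryHashSketch(object):
    # Hypothetical illustration of a feed_guid_hash property.
    story_feed_id = 42
    story_guid = "http://example.com/post/1"

    @property
    def guid_hash(self):
        # Digest of the story guid; sha1 and the 6-char truncation are assumptions.
        return hashlib.sha1(self.story_guid).hexdigest()[:6]

    @property
    def feed_guid_hash(self):
        # "<feed id>:<guid digest>" yields a compact key, unique per feed.
        return "%s:%s" % (self.story_feed_id, self.guid_hash)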
Example 2: load_starred_stories
def load_starred_stories(request):
    user = get_user(request)
    offset = int(request.REQUEST.get('offset', 0))
    limit = int(request.REQUEST.get('limit', 10))
    page = int(request.REQUEST.get('page', 0))
    if page: offset = limit * (page - 1)

    mstories = MStarredStory.objects(user_id=user.pk).order_by('-starred_date')[offset:offset+limit]
    stories = Feed.format_stories(mstories)

    for story in stories:
        story_date = localtime_for_timezone(story['story_date'], user.profile.timezone)
        now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
        story['short_parsed_date'] = format_story_link_date__short(story_date, now)
        story['long_parsed_date'] = format_story_link_date__long(story_date, now)
        starred_date = localtime_for_timezone(story['starred_date'], user.profile.timezone)
        story['starred_date'] = format_story_link_date__long(starred_date, now)
        story['read_status'] = 1
        story['starred'] = True
        story['intelligence'] = {
            'feed': 0,
            'author': 0,
            'tags': 0,
            'title': 0,
        }

    logging.user(request, "~FCLoading starred stories: ~SB%s stories" % (len(stories)))
    return dict(stories=stories)
Example 3: load_feeds
def load_feeds(request):
    user = get_user(request)
    feeds = {}
    not_yet_fetched = False

    try:
        folders = UserSubscriptionFolders.objects.get(user=user)
    except UserSubscriptionFolders.DoesNotExist:
        data = dict(feeds=[], folders=[])
        return data
    except UserSubscriptionFolders.MultipleObjectsReturned:
        UserSubscriptionFolders.objects.filter(user=user)[1:].delete()
        folders = UserSubscriptionFolders.objects.get(user=user)

    user_subs = UserSubscription.objects.select_related('feed', 'feed__feed_icon').filter(user=user)

    for sub in user_subs:
        feeds[sub.feed.pk] = {
            'id': sub.feed.pk,
            'feed_title': sub.user_title or sub.feed.feed_title,
            'feed_address': sub.feed.feed_address,
            'feed_link': sub.feed.feed_link,
            'ps': sub.unread_count_positive,
            'nt': sub.unread_count_neutral,
            'ng': sub.unread_count_negative,
            'updated': relative_timesince(sub.feed.last_update),
            'subs': sub.feed.num_subscribers,
            'active': sub.active,
            'favicon': sub.feed.icon.data,
            'favicon_color': sub.feed.icon.color,
            'favicon_fetching': bool(not (sub.feed.icon.not_found or sub.feed.icon.data))
        }

        if not sub.feed.fetched_once:
            not_yet_fetched = True
            feeds[sub.feed.pk]['not_yet_fetched'] = True
        if sub.feed.has_page_exception or sub.feed.has_feed_exception:
            feeds[sub.feed.pk]['has_exception'] = True
            feeds[sub.feed.pk]['exception_type'] = 'feed' if sub.feed.has_feed_exception else 'page'
            feeds[sub.feed.pk]['feed_address'] = sub.feed.feed_address
            feeds[sub.feed.pk]['exception_code'] = sub.feed.exception_code
        if not sub.feed.active and not sub.feed.has_feed_exception and not sub.feed.has_page_exception:
            sub.feed.count_subscribers()
            sub.feed.schedule_feed_fetch_immediately()

    if not_yet_fetched:
        for f in feeds:
            if 'not_yet_fetched' not in feeds[f]:
                feeds[f]['not_yet_fetched'] = False

    starred_count = MStarredStory.objects(user_id=user.pk).count()

    data = {
        'feeds': feeds,
        'folders': json.decode(folders.folders),
        'starred_count': starred_count,
    }
    return data
Example 4: mark_story_as_unstarred
def mark_story_as_unstarred(request):
    code = 1
    story_id = request.POST["story_id"]
    starred_story = MStarredStory.objects(user_id=request.user.pk, story_guid=story_id)
    if starred_story:
        logging.info(" ---> [%s] ~FCUnstarring: ~SB%s" % (request.user, starred_story[0].story_title[:50]))
        starred_story.delete()
    else:
        code = -1
    return {"code": code}
Example 5: api_saved_story
def api_saved_story(request):
    user = request.user
    body = request.body_json
    after = body.get('after', None)
    before = body.get('before', None)
    limit = body.get('limit', 50)
    fields = body.get('triggerFields')
    story_tag = fields['story_tag']
    entries = []

    if story_tag == "all":
        story_tag = ""

    params = dict(user_id=user.pk)
    if story_tag:
        params.update(dict(user_tags__contains=story_tag))
    mstories = MStarredStory.objects(**params).order_by('-starred_date')[:limit]
    stories = Feed.format_stories(mstories)

    found_feed_ids = list(set([story['story_feed_id'] for story in stories]))
    feeds = dict([(f.pk, {
        "title": f.feed_title,
        "website": f.feed_link,
        "address": f.feed_address,
    }) for f in Feed.objects.filter(pk__in=found_feed_ids)])

    for story in stories:
        if before and int(story['story_date'].strftime("%s")) > before: continue
        if after and int(story['story_date'].strftime("%s")) < after: continue
        feed = feeds.get(story['story_feed_id'], None)
        entries.append({
            "StoryTitle": story['story_title'],
            "StoryContent": story['story_content'],
            "StoryURL": story['story_permalink'],
            "StoryAuthor": story['story_authors'],
            "PublishedAt": story['story_date'].strftime("%Y-%m-%dT%H:%M:%SZ"),
            "SavedAt": story['starred_date'].strftime("%Y-%m-%dT%H:%M:%SZ"),
            "Tags": ', '.join(story['user_tags']),
            "Site": feed and feed['title'],
            "SiteURL": feed and feed['website'],
            "SiteRSS": feed and feed['address'],
            "ifttt": {
                "id": story['story_hash'],
                "timestamp": int(story['starred_date'].strftime("%s"))
            },
        })

    if after:
        entries = sorted(entries, key=lambda s: s['ifttt']['timestamp'])

    logging.user(request, "~FCChecking saved stories from ~SBIFTTT~SB: ~SB%s~SN - ~SB%s~SN stories" % (story_tag if story_tag else "[All stories]", len(entries)))

    return {"data": entries}
Example 6: mark_story_as_unstarred
def mark_story_as_unstarred(request):
    code = 1
    story_id = request.POST['story_id']
    starred_story = MStarredStory.objects(user_id=request.user.pk, story_guid=story_id)
    if starred_story:
        logging.user(request, "~FCUnstarring: ~SB%s" % (starred_story[0].story_title[:50]))
        starred_story.delete()
    else:
        code = -1
    return {'code': code}
Example 7: load_feeds
def load_feeds(request):
    user = get_user(request)
    feeds = {}
    not_yet_fetched = False
    include_favicons = request.REQUEST.get('include_favicons', False)
    flat = request.REQUEST.get('flat', False)
    update_counts = request.REQUEST.get('update_counts', False)

    if include_favicons == 'false': include_favicons = False
    if update_counts == 'false': update_counts = False
    if flat == 'false': flat = False

    if flat: return load_feeds_flat(request)

    try:
        folders = UserSubscriptionFolders.objects.get(user=user)
    except UserSubscriptionFolders.DoesNotExist:
        data = dict(feeds=[], folders=[])
        return data
    except UserSubscriptionFolders.MultipleObjectsReturned:
        UserSubscriptionFolders.objects.filter(user=user)[1:].delete()
        folders = UserSubscriptionFolders.objects.get(user=user)

    user_subs = UserSubscription.objects.select_related('feed').filter(user=user)

    for sub in user_subs:
        pk = sub.feed.pk
        if update_counts:
            sub.calculate_feed_scores(silent=True)
        feeds[pk] = sub.canonical(include_favicon=include_favicons)
        if feeds[pk].get('not_yet_fetched'):
            not_yet_fetched = True
        if not sub.feed.active and not sub.feed.has_feed_exception and not sub.feed.has_page_exception:
            sub.feed.count_subscribers()
            sub.feed.schedule_feed_fetch_immediately()
        if sub.active and sub.feed.active_subscribers <= 0:
            sub.feed.count_subscribers()
            sub.feed.schedule_feed_fetch_immediately()

    if not_yet_fetched:
        for f in feeds:
            if 'not_yet_fetched' not in feeds[f]:
                feeds[f]['not_yet_fetched'] = False

    starred_count = MStarredStory.objects(user_id=user.pk).count()

    data = {
        'feeds': feeds,
        'folders': json.decode(folders.folders),
        'starred_count': starred_count,
    }
    return data
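Note the string comparisons against 'false': query parameters always arrive as strings, so request.REQUEST.get('flat', False) returns the literal text 'false' when a client sends flat=false, which is truthy. A hypothetical helper that folds the three checks into one (not part of NewsBlur's API):

def param_is_true(value):
    # Treat a missing param, '', 'false', '0', and 'none' as False; anything else as True.
    return str(value).lower() not in ('false', '0', '', 'none')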
Example 8: load_feeds
def load_feeds(request):
    user = get_user(request)
    feeds = {}
    not_yet_fetched = False

    try:
        folders = UserSubscriptionFolders.objects.get(user=user)
    except UserSubscriptionFolders.DoesNotExist:
        data = dict(feeds=[], folders=[])
        return data
    except UserSubscriptionFolders.MultipleObjectsReturned:
        UserSubscriptionFolders.objects.filter(user=user)[1:].delete()
        folders = UserSubscriptionFolders.objects.get(user=user)

    user_subs = UserSubscription.objects.select_related("feed").filter(user=user)

    for sub in user_subs:
        feeds[sub.feed.pk] = {
            "id": sub.feed.pk,
            "feed_title": sub.user_title or sub.feed.feed_title,
            "feed_address": sub.feed.feed_address,
            "feed_link": sub.feed.feed_link,
            "ps": sub.unread_count_positive,
            "nt": sub.unread_count_neutral,
            "ng": sub.unread_count_negative,
            "updated": relative_timesince(sub.feed.last_update),
            "subs": sub.feed.num_subscribers,
            "active": sub.active,
        }

        if not sub.feed.fetched_once:
            not_yet_fetched = True
            feeds[sub.feed.pk]["not_yet_fetched"] = True
        if sub.feed.has_page_exception or sub.feed.has_feed_exception:
            feeds[sub.feed.pk]["has_exception"] = True
            feeds[sub.feed.pk]["exception_type"] = "feed" if sub.feed.has_feed_exception else "page"
            feeds[sub.feed.pk]["feed_address"] = sub.feed.feed_address
            feeds[sub.feed.pk]["exception_code"] = sub.feed.exception_code
        if not sub.feed.active and not sub.feed.has_feed_exception and not sub.feed.has_page_exception:
            sub.feed.count_subscribers()
            sub.feed.schedule_feed_fetch_immediately()

    if not_yet_fetched:
        for f in feeds:
            if "not_yet_fetched" not in feeds[f]:
                feeds[f]["not_yet_fetched"] = False

    starred_count = MStarredStory.objects(user_id=user.pk).count()

    data = {"feeds": feeds, "folders": json.decode(folders.folders), "starred_count": starred_count}
    return data
Example 9: api_saved_story
def api_saved_story(request):
    user = request.user
    body = json.decode(request.body)
    after = body.get('after', None)
    before = body.get('before', None)
    limit = body.get('limit', 50)
    fields = body.get('triggerFields')
    story_tag = fields['story_tag']
    entries = []

    if story_tag == "all":
        story_tag = ""

    mstories = MStarredStory.objects(
        user_id=user.pk,
        user_tags__contains=story_tag
    ).order_by('-starred_date')[:limit]
    stories = Feed.format_stories(mstories)

    found_feed_ids = list(set([story['story_feed_id'] for story in stories]))
    feeds = dict([(f.pk, {
        "title": f.feed_title,
        "website": f.feed_link,
        "address": f.feed_address,
    }) for f in Feed.objects.filter(pk__in=found_feed_ids)])

    for story in stories:
        if before and int(story['story_date'].strftime("%s")) > before: continue
        if after and int(story['story_date'].strftime("%s")) < after: continue
        feed = feeds.get(story['story_feed_id'], None)
        entries.append({
            "StoryTitle": story['story_title'],
            "StoryContent": story['story_content'],
            "StoryUrl": story['story_permalink'],
            "StoryAuthor": story['story_authors'],
            "StoryDate": story['story_date'].isoformat(),
            "SavedDate": story['starred_date'].isoformat(),
            "SavedTags": ', '.join(story['user_tags']),
            "SiteTitle": feed and feed['title'],
            "SiteWebsite": feed and feed['website'],
            "SiteFeedAddress": feed and feed['address'],
            "ifttt": {
                "id": story['story_hash'],
                "timestamp": int(story['starred_date'].strftime("%s"))
            },
        })

    logging.user(request, "~FCChecking saved stories from ~SBIFTTT~SB: ~SB%s~SN - ~SB%s~SN stories" % (story_tag if story_tag else "[All stories]", len(entries)))

    return {"data": entries}
Example 10: load_river_stories
def load_river_stories(request):
    user = get_user(request)
    feed_ids = [int(feed_id) for feed_id in request.POST.getlist("feeds")]
    offset = int(request.REQUEST.get("offset", 0))
    limit = int(request.REQUEST.get("limit", 25))
    page = int(request.REQUEST.get("page", 0)) + 1
    read_stories = int(request.REQUEST.get("read_stories", 0))

    # if page: offset = limit * page
    if page:
        limit = limit * page - read_stories

    def feed_qvalues(feed_id):
        feed = UserSubscription.objects.get(feed__pk=feed_id, user=user)
        return Q(story_feed_id=feed_id) & Q(story_date__gte=feed.mark_read_date)

    feed_last_reads = map(feed_qvalues, feed_ids)
    qs = reduce(lambda q1, q2: q1 | q2, feed_last_reads)

    read_stories = MUserStory.objects(user_id=user.pk, feed_id__in=feed_ids).only("story")
    read_stories = [rs.story.id for rs in read_stories]

    mstories = MStory.objects(Q(id__nin=read_stories) & qs)[offset:offset + limit]
    stories = Feed.format_stories(mstories)

    starred_stories = MStarredStory.objects(user_id=user.pk, story_feed_id__in=feed_ids).only(
        "story_guid", "starred_date"
    )
    starred_stories = dict([(story.story_guid, story.starred_date) for story in starred_stories])

    for story in stories:
        story_date = localtime_for_timezone(story["story_date"], user.profile.timezone)
        story["short_parsed_date"] = format_story_link_date__short(story_date)
        story["long_parsed_date"] = format_story_link_date__long(story_date)
        story["read_status"] = 0
        if story["id"] in starred_stories:
            story["starred"] = True
            starred_date = localtime_for_timezone(starred_stories[story["id"]], user.profile.timezone)
            story["starred_date"] = format_story_link_date__long(starred_date)
        story["intelligence"] = {"feed": 0, "author": 0, "tags": 0, "title": 0}

    logging.info(
        " ---> [%s] ~FCLoading river stories: ~SB%s stories ~SN(%s feeds)" % (request.user, len(stories), len(feed_ids))
    )
    return dict(stories=stories)
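The feed_qvalues/reduce pair builds one Q clause per feed and ORs them together, so a single Mongo query can respect every feed's own mark-read cutoff. A standalone sketch of that composition (operator.or_ stands in for the lambda; the cutoff dates are made up):

import datetime
import operator
from mongoengine import Q

cutoffs = {42: datetime.datetime(2014, 1, 1), 43: datetime.datetime(2014, 2, 1)}

# (feed == 42 AND date >= cutoff_42) OR (feed == 43 AND date >= cutoff_43) ...
qs = reduce(operator.or_, [Q(story_feed_id=fid) & Q(story_date__gte=dt)
                           for fid, dt in cutoffs.items()])
# qs can then be combined with other Q objects and passed to MStory.objects(qs)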
Example 11: load_feeds
def load_feeds(request):
    user = get_user(request)
    feeds = {}
    not_yet_fetched = False

    try:
        folders = UserSubscriptionFolders.objects.get(user=user)
    except UserSubscriptionFolders.DoesNotExist:
        data = dict(feeds=[], folders=[])
        return data
    except UserSubscriptionFolders.MultipleObjectsReturned:
        UserSubscriptionFolders.objects.filter(user=user)[1:].delete()
        folders = UserSubscriptionFolders.objects.get(user=user)

    user_subs = UserSubscription.objects.select_related('feed', 'feed__feed_icon').filter(user=user)

    for sub in user_subs:
        feeds[sub.feed.pk] = sub.canonical()
        if feeds[sub.feed.pk].get('not_yet_fetched'):
            not_yet_fetched = True
        if not sub.feed.active and not sub.feed.has_feed_exception and not sub.feed.has_page_exception:
            sub.feed.count_subscribers()
            sub.feed.schedule_feed_fetch_immediately()
        if sub.active and sub.feed.active_subscribers <= 0:
            sub.feed.count_subscribers()
            sub.feed.schedule_feed_fetch_immediately()

    if not_yet_fetched:
        for f in feeds:
            if 'not_yet_fetched' not in feeds[f]:
                feeds[f]['not_yet_fetched'] = False

    starred_count = MStarredStory.objects(user_id=user.pk).count()

    data = {
        'feeds': feeds,
        'folders': json.decode(folders.folders),
        'starred_count': starred_count,
    }
    return data
Example 12: load_starred_stories
def load_starred_stories(request):
    user = get_user(request)
    offset = int(request.REQUEST.get("offset", 0))
    limit = int(request.REQUEST.get("limit", 10))
    page = int(request.REQUEST.get("page", 0))
    if page:
        offset = limit * page

    mstories = MStarredStory.objects(user_id=user.pk).order_by("-starred_date")[offset:offset + limit]
    stories = Feed.format_stories(mstories)

    for story in stories:
        story_date = localtime_for_timezone(story["story_date"], user.profile.timezone)
        story["short_parsed_date"] = format_story_link_date__short(story_date)
        story["long_parsed_date"] = format_story_link_date__long(story_date)
        starred_date = localtime_for_timezone(story["starred_date"], user.profile.timezone)
        story["starred_date"] = format_story_link_date__long(starred_date)
        story["read_status"] = 1
        story["starred"] = True
        story["intelligence"] = {"feed": 0, "author": 0, "tags": 0, "title": 0}

    logging.info(" ---> [%s] ~FCLoading starred stories: ~SB%s stories" % (request.user, len(stories)))
    return dict(stories=stories)
Example 13: load_river_stories
def load_river_stories(request):
    limit = 18
    offset = 0
    start = datetime.datetime.utcnow()
    user = get_user(request)
    feed_ids = [int(feed_id) for feed_id in request.REQUEST.getlist('feeds') if feed_id]
    original_feed_ids = list(feed_ids)
    page = int(request.REQUEST.get('page', 1))
    read_stories_count = int(request.REQUEST.get('read_stories_count', 0))
    new_flag = request.REQUEST.get('new_flag', False)
    bottom_delta = datetime.timedelta(days=settings.DAYS_OF_UNREAD)

    if not feed_ids:
        logging.user(request, "~FCLoading empty river stories: page %s" % (page))
        return dict(stories=[])

    # Fetch all stories at and before the page number.
    # Not a single page, because reading stories can move them up in the unread order.
    # `read_stories_count` is an optimization, works best when all 25 stories before have been read.
    limit = limit * page - read_stories_count

    # Read stories to exclude
    read_stories = MUserStory.objects(user_id=user.pk, feed_id__in=feed_ids).only('story_id')
    read_stories = [rs.story_id for rs in read_stories]

    # Determine mark_as_read dates for all feeds to ignore all stories before this date.
    # max_feed_count = 0
    feed_counts = {}
    feed_last_reads = {}
    for feed_id in feed_ids:
        try:
            usersub = UserSubscription.objects.get(feed__pk=feed_id, user=user)
        except UserSubscription.DoesNotExist:
            continue
        if not usersub: continue
        feed_counts[feed_id] = (usersub.unread_count_negative * 1 +
                                usersub.unread_count_neutral * 10 +
                                usersub.unread_count_positive * 20)
        # if feed_counts[feed_id] > max_feed_count:
        #     max_feed_count = feed_counts[feed_id]
        feed_last_reads[feed_id] = int(time.mktime(usersub.mark_read_date.timetuple()))

    feed_counts = sorted(feed_counts.items(), key=itemgetter(1))[:50]
    feed_ids = [f[0] for f in feed_counts]
    feed_last_reads = dict([(str(feed_id), feed_last_reads[feed_id]) for feed_id in feed_ids
                            if feed_id in feed_last_reads])
    feed_counts = dict(feed_counts)

    # After excluding read stories, all that's left are stories
    # past the mark_read_date. Everything returned is guaranteed to be unread.
    mstories = MStory.objects(
        story_guid__nin=read_stories,
        story_feed_id__in=feed_ids,
        # story_date__gte=start - bottom_delta
    ).map_reduce("""function() {
            var d = feed_last_reads[this[~story_feed_id]];
            if (this[~story_date].getTime()/1000 > d) {
                emit(this[~id], this);
            }
        }""",
        """function(key, values) {
            return values[0];
        }""",
        output='inline',
        scope={
            'feed_last_reads': feed_last_reads
        }
    )
    mstories = [story.value for story in mstories if story and story.value]

    mstories = sorted(mstories, cmp=lambda x, y: cmp(story_score(y, bottom_delta), story_score(x, bottom_delta)))

    # story_feed_counts = defaultdict(int)
    # mstories_pruned = []
    # for story in mstories:
    #     print story['story_title'], story_feed_counts[story['story_feed_id']]
    #     if story_feed_counts[story['story_feed_id']] >= 3: continue
    #     mstories_pruned.append(story)
    #     story_feed_counts[story['story_feed_id']] += 1

    stories = []
    for i, story in enumerate(mstories):
        if i < offset: continue
        if i >= offset + limit: break
        stories.append(bunch(story))
    stories = Feed.format_stories(stories)
    found_feed_ids = list(set([story['story_feed_id'] for story in stories]))

    # Find starred stories
    starred_stories = MStarredStory.objects(
        user_id=user.pk,
        story_feed_id__in=found_feed_ids
    ).only('story_guid', 'starred_date')
    starred_stories = dict([(story.story_guid, story.starred_date)
                            for story in starred_stories])

    # Intelligence classifiers for all feeds involved
    def sort_by_feed(classifiers):
        feed_classifiers = defaultdict(list)
        for classifier in classifiers:
            feed_classifiers[classifier.feed_id].append(classifier)
        return feed_classifiers
#......... (remainder of this example omitted) .........
Example 14: load_single_feed
def load_single_feed(request, feed_id):
    start = time.time()
    user = get_user(request)
    offset = int(request.REQUEST.get('offset', 0))
    limit = int(request.REQUEST.get('limit', 12))
    page = int(request.REQUEST.get('page', 1))
    dupe_feed_id = None
    userstories_db = None

    if page: offset = limit * (page-1)
    if not feed_id: raise Http404

    try:
        feed = Feed.objects.get(id=feed_id)
    except Feed.DoesNotExist:
        feed_address = request.REQUEST.get('feed_address')
        dupe_feed = DuplicateFeed.objects.filter(duplicate_address=feed_address)
        if dupe_feed:
            feed = dupe_feed[0].feed
            dupe_feed_id = feed_id
        else:
            raise Http404

    stories = feed.get_stories(offset, limit)

    # Get intelligence classifier for user
    classifier_feeds = list(MClassifierFeed.objects(user_id=user.pk, feed_id=feed_id))
    classifier_authors = list(MClassifierAuthor.objects(user_id=user.pk, feed_id=feed_id))
    classifier_titles = list(MClassifierTitle.objects(user_id=user.pk, feed_id=feed_id))
    classifier_tags = list(MClassifierTag.objects(user_id=user.pk, feed_id=feed_id))

    checkpoint1 = time.time()

    usersub = UserSubscription.objects.get(user=user, feed=feed)
    userstories = []
    if usersub and stories:
        story_ids = [story['id'] for story in stories]
        userstories_db = MUserStory.objects(user_id=user.pk,
                                            feed_id=feed.pk,
                                            story_id__in=story_ids).only('story_id')
        starred_stories = MStarredStory.objects(user_id=user.pk,
                                                story_feed_id=feed_id,
                                                story_guid__in=story_ids).only('story_guid', 'starred_date')
        starred_stories = dict([(story.story_guid, story.starred_date) for story in starred_stories])
        userstories = set(us.story_id for us in userstories_db)

    checkpoint2 = time.time()

    for story in stories:
        story_date = localtime_for_timezone(story['story_date'], user.profile.timezone)
        now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
        story['short_parsed_date'] = format_story_link_date__short(story_date, now)
        story['long_parsed_date'] = format_story_link_date__long(story_date, now)
        if usersub:
            if story['id'] in userstories:
                story['read_status'] = 1
            elif not story.get('read_status') and story['story_date'] < usersub.mark_read_date:
                story['read_status'] = 1
            elif not story.get('read_status') and story['story_date'] > usersub.last_read_date:
                story['read_status'] = 0
            if story['id'] in starred_stories:
                story['starred'] = True
                starred_date = localtime_for_timezone(starred_stories[story['id']], user.profile.timezone)
                story['starred_date'] = format_story_link_date__long(starred_date, now)
        else:
            story['read_status'] = 1
        story['intelligence'] = {
            'feed': apply_classifier_feeds(classifier_feeds, feed),
            'author': apply_classifier_authors(classifier_authors, story),
            'tags': apply_classifier_tags(classifier_tags, story),
            'title': apply_classifier_titles(classifier_titles, story),
        }

    checkpoint3 = time.time()

    # Intelligence
    feed_tags = json.decode(feed.data.popular_tags) if feed.data.popular_tags else []
    feed_authors = json.decode(feed.data.popular_authors) if feed.data.popular_authors else []
    classifiers = get_classifiers_for_user(user, feed_id, classifier_feeds,
                                           classifier_authors, classifier_titles, classifier_tags)

    if usersub:
        usersub.feed_opens += 1
        usersub.save()

    diff1 = checkpoint1-start
    diff2 = checkpoint2-start
    diff3 = checkpoint3-start
    timediff = time.time()-start
    last_update = relative_timesince(feed.last_update)
    logging.user(request, "~FYLoading feed: ~SB%s%s ~SN(%.4s seconds, ~SB%.4s/%.4s(%s)/%.4s~SN)" % (
        feed.feed_title[:32], ('~SN/p%s' % page) if page > 1 else '', timediff,
        diff1, diff2, userstories_db and userstories_db.count() or '~SN0~SB', diff3))
    FeedLoadtime.objects.create(feed=feed, loadtime=timediff)

    data = dict(stories=stories,
                feed_tags=feed_tags,
                feed_authors=feed_authors,
                classifiers=classifiers,
                last_update=last_update,
                feed_id=feed.pk)
#......... (remainder of this example omitted) .........
Example 15: load_single_feed
def load_single_feed(request, feed_id):
    start = datetime.datetime.utcnow()
    user = get_user(request)
    offset = int(request.REQUEST.get('offset', 0))
    limit = int(request.REQUEST.get('limit', 12))
    page = int(request.REQUEST.get('page', 1))
    if page:
        offset = limit * (page-1)
    dupe_feed_id = None
    if not feed_id:
        raise Http404

    try:
        feed = Feed.objects.get(id=feed_id)
    except Feed.DoesNotExist:
        feed_address = request.REQUEST.get('feed_address')
        dupe_feed = DuplicateFeed.objects.filter(duplicate_address=feed_address)
        if dupe_feed:
            feed = dupe_feed[0].feed
            dupe_feed_id = feed_id
        else:
            raise Http404

    stories = feed.get_stories(offset, limit)

    # Get intelligence classifier for user
    classifier_feeds = MClassifierFeed.objects(user_id=user.pk, feed_id=feed_id)
    classifier_authors = MClassifierAuthor.objects(user_id=user.pk, feed_id=feed_id)
    classifier_titles = MClassifierTitle.objects(user_id=user.pk, feed_id=feed_id)
    classifier_tags = MClassifierTag.objects(user_id=user.pk, feed_id=feed_id)

    usersub = UserSubscription.objects.get(user=user, feed=feed)
    userstories = []
    if usersub:
        userstories_db = MUserStory.objects(user_id=user.pk,
                                            feed_id=feed.pk,
                                            read_date__gte=usersub.mark_read_date)
        starred_stories = MStarredStory.objects(user_id=user.pk, story_feed_id=feed_id).only('story_guid', 'starred_date')
        starred_stories = dict([(story.story_guid, story.starred_date) for story in starred_stories])

        for us in userstories_db:
            if hasattr(us.story, 'story_guid') and isinstance(us.story.story_guid, unicode):
                userstories.append(us.story.story_guid)
            elif hasattr(us.story, 'id') and isinstance(us.story.id, unicode):
                userstories.append(us.story.id)  # TODO: Remove me after migration from story.id->guid

    for story in stories:
        [x.rewind() for x in [classifier_feeds, classifier_authors, classifier_tags, classifier_titles]]
        story_date = localtime_for_timezone(story['story_date'], user.profile.timezone)
        now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
        story['short_parsed_date'] = format_story_link_date__short(story_date, now)
        story['long_parsed_date'] = format_story_link_date__long(story_date, now)
        if usersub:
            if story['id'] in userstories:
                story['read_status'] = 1
            elif not story.get('read_status') and story['story_date'] < usersub.mark_read_date:
                story['read_status'] = 1
            elif not story.get('read_status') and story['story_date'] > usersub.last_read_date:
                story['read_status'] = 0
            if story['id'] in starred_stories:
                story['starred'] = True
                starred_date = localtime_for_timezone(starred_stories[story['id']], user.profile.timezone)
                story['starred_date'] = format_story_link_date__long(starred_date, now)
        else:
            story['read_status'] = 1
        story['intelligence'] = {
            'feed': apply_classifier_feeds(classifier_feeds, feed),
            'author': apply_classifier_authors(classifier_authors, story),
            'tags': apply_classifier_tags(classifier_tags, story),
            'title': apply_classifier_titles(classifier_titles, story),
        }

    # Intelligence
    feed_tags = json.decode(feed.data.popular_tags) if feed.data.popular_tags else []
    feed_authors = json.decode(feed.data.popular_authors) if feed.data.popular_authors else []
    classifiers = get_classifiers_for_user(user, feed_id, classifier_feeds,
                                           classifier_authors, classifier_titles, classifier_tags)

    if usersub:
        usersub.feed_opens += 1
        usersub.save()

    diff = datetime.datetime.utcnow()-start
    timediff = float("%s.%.2s" % (diff.seconds, (diff.microseconds / 1000)))
    last_update = relative_timesince(feed.last_update)
    logging.user(request.user, "~FYLoading feed: ~SB%s%s ~SN(%s seconds)" % (
        feed, ('~SN/p%s' % page) if page > 1 else '', timediff))
    FeedLoadtime.objects.create(feed=feed, loadtime=timediff)

    data = dict(stories=stories,
                feed_tags=feed_tags,
                feed_authors=feed_authors,
                classifiers=classifiers,
                last_update=last_update,
                feed_id=feed.pk)
    if dupe_feed_id: data['dupe_feed_id'] = dupe_feed_id
    if not usersub:
        data.update(feed.canonical())
#......... (remainder of this example omitted) .........