This page collects typical usage examples of the Python method models.Article.key. If you have been wondering how exactly Article.key is used in Python, or what real Article.key calls look like, the curated examples below may help. You can also explore further usage examples of models.Article, the class in which this method is defined.
The following presents 2 code examples of the Article.key method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
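Both examples run on Google App Engine and use the classic db datastore API, in which every saved entity exposes a key() method. The models module itself is not shown on this page; the sketch below is a hypothetical reconstruction, with field names inferred from the two examples (the store() calls in Example 1 suggest a project-specific wrapper around db.Model.put()):

from google.appengine.ext import db

class Article(db.Model):
    # Fields inferred from the examples below; the real model may differ.
    title = db.StringProperty()
    content = db.TextProperty()
    tags = db.StringListProperty()
    image = db.BlobProperty()
    html = db.TextProperty()
    url = db.StringProperty()
    company = db.ReferenceProperty()

class ArticleTag(db.Model):
    # One entity per tag: key_name is the tag itself and articles
    # holds the keys of every Article carrying that tag.
    articles = db.ListProperty(db.Key)

Article.key() is inherited from db.Model and returns the entity's datastore key once the entity has been saved.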
Example 1: post
# Required import: from models import Article  [as alias]
# Or: from models.Article import key  [as alias]
# Also needed here: import re; from google.appengine.api import users
def post(self):
    user = users.get_current_user()
    if user and users.is_current_user_admin():
        params = self.get_params_dict(['title', 'pubDate', 'content', 'tags', 'image'])
        # Replace every non-word character so the title can be split into tag words.
        title_for_tags = re.sub(r'[^\w]', ' ', params['title'])
        raw_tags = params['tags'].split(',')
        tags = minifyTags(raw_tags)
        article = Article(
            title=force_unicode(params['title']),
            content=force_unicode(params['content']),
            tags=tags,
            image=params['image']
        )
        article.store()
        # Tags are stored in the article as provided by the publisher.
        # However, when the actual tag-to-article mapping is created,
        # words from the title are taken into account as well.
        tags = minifyTags(tags + title_for_tags.split(' '))
        for tag in tags:
            article_tag = ArticleTag.get(tag)
            if not article_tag:
                # First article for this tag: create the mapping entity.
                article_tag = ArticleTag(
                    key_name=tag,
                    articles=[article.key()]
                )
            else:
                # Tag already exists: append this article's key to it.
                article_tag.articles.append(article.key())
            article_tag.store()
        self.render('blogAdmin.html', {
            'pageTitle': 'BLOG ADMIN',
            'logoutUrl': users.create_logout_url('/blog'),
            'message': 'success'
        })
    else:
        self.write("Unauthorized.")
Example 2: process_links
# Required import: from models import Article  [as alias]
# Or: from models.Article import key  [as alias]
def process_links(company):
    links = linkz(company.ticker, company.exchange)
    yahoo_links = yahoo_linkz(company.ticker)  # fetched here but not used below
    old_titles = company.titles
    if links is not None:
        new_titles = []
        article_keys = []
        for [title, link] in links:
            # Skip titles that were already scraped for this company.
            if title not in old_titles:
                if link is not None and link != "":
                    html = fetch(link)
                    if html is not None:
                        article = Article()
                        article.title = title
                        article.html = html
                        article.url = link
                        article.company = company
                        article.put()
                        article_keys.append(article.key())
                        new_titles.append(title)
        # Remember every scraped title so the next run can skip it.
        # This list should be shortened every now and then, but not
        # if it is also used for display!
        company.titles = old_titles + new_titles
        company.put()
        return article_keys
    else:
        return None
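linkz, yahoo_linkz and fetch are likewise defined elsewhere in that project. Given the App Engine context (article.put(), company.put()), fetch is most likely a thin wrapper over the urlfetch service; a minimal sketch under that assumption:

from google.appengine.api import urlfetch

def fetch(link):
    # Hypothetical wrapper; the real fetch() may add retries or encoding fixes.
    try:
        result = urlfetch.fetch(link, deadline=10)
    except urlfetch.Error:
        return None
    # Return the page body only on a successful response, matching the
    # "if html is not None" check in process_links above.
    return result.content if result.status_code == 200 else None

Because process_links returns the new article keys (and None when no links are found), a caller can tell at a glance whether a run scraped anything:

import logging
keys = process_links(company)  # assumes a company entity is already loaded
if keys:
    logging.info("scraped %d new articles", len(keys))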