本文整理汇总了Python中models.Post.link方法的典型用法代码示例。如果您正苦于以下问题:Python Post.link方法的具体用法?Python Post.link怎么用?Python Post.link使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类models.Post
的用法示例。
在下文中一共展示了Post.link方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: tweet_data
# 需要导入模块: from models import Post [as 别名]
# 或者: from models.Post import link [as 别名]
def tweet_data(tweet):
    """Convert a Twitter API tweet dict into a saved Post model instance.

    Retweets (text containing "RT @") are skipped. Returns the saved Post
    on success, or None if the tweet was a retweet or saving failed.

    Args:
        tweet: dict as returned by the Twitter API, with at least the
            'text', 'user', 'created_at', 'retweet_count',
            'favorite_count' and 'id' keys.
    """
    if "RT @" in tweet['text']:
        # Ignore retweets entirely (original returned None implicitly).
        return None
    try:
        print('@%s tweeted: %s\nFavorites: %d Retweets: %d' % (
            tweet['user']['screen_name'], tweet['text'],
            tweet['favorite_count'], tweet['retweet_count']))
        post = Post()
        post.title = tweet['text']
        post.posted_by = tweet['user']['screen_name']
        # Twitter's created_at carries a fixed "+0000" UTC offset; drop it.
        post.date_time = tweet["created_at"].replace(" +0000", "")
        post.retweets = tweet['retweet_count']
        post.favorites = tweet['favorite_count']
        post.ups = None  # reddit-only field, not applicable to tweets
        post.from_twitter = True
        post.post_link = "http://www.twitter.com/%s/status/%s" % (
            tweet['user']['screen_name'], tweet['id'])
        # Extract any URL embedded in the tweet text/entities.
        post.link = regex_tweet_link(tweet)
        post.save()
        return post
    except Exception:
        # Keep the original best-effort behavior (log and return None)
        # but no longer swallow SystemExit/KeyboardInterrupt via bare except.
        import sys
        print("saving failed!")
        print(sys.exc_info())
示例2: reddit_data
# 需要导入模块: from models import Post [as 别名]
# 或者: from models.Post import link [as 别名]
def reddit_data(x):
    """Build and persist a Post from a Reddit submission object.

    Args:
        x: a submission exposing title, url, fullname, created_utc,
            ups and id attributes (e.g. a praw submission).

    Returns:
        The saved Post instance.
    """
    post = Post()
    post.title = x.title
    post.link = x.url
    post.posted_by = x.fullname
    # Reddit supplies a UTC epoch timestamp; store a locale-formatted string.
    created = datetime.datetime.fromtimestamp(int(x.created_utc))
    post.date_time = created.strftime('%c')
    post.ups = x.ups
    # Twitter-only fields are explicitly nulled for Reddit posts.
    post.retweets = None
    post.favorites = None
    post.from_twitter = False
    post.post_link = "http://www.reddit.com/" + str(x.id)
    post.save()
    return post
示例3: rss_worker
# 需要导入模块: from models import Post [as 别名]
# 或者: from models.Post import link [as 别名]
def rss_worker(f):
"""RSS gevent worker function"""
logging.info("Starting reader process for feed %s", f.id)
id = f.id
error_count = f.errors
# Check ETag, Modified: Attempt Conditional HTTP retrieval
# to reduce excessive polling
if f.etag:
d = feedparser.parse(f.url, etag=f.etag)
elif f.last_modified:
d = feedparser.parse(f.url, modified=f.last_modified)
else:
d = feedparser.parse(f.url)
# Check returned HTTP status code
if 'status' in d and d.status < 400:
# Site appears to be UP
logging.info("Feed %s is UP, status %s", f.url, str(d.status))
# Reset error counter on successful connect
if error_count > 0:
q = Feed.update(errors=0).where(Feed.id == id)
q.execute()
# Get RSS/ATOM version number
logging.info("Feed version: %s", d.version)
# Catch status 301 Moved Permanently, update feed address
if d.status == 301:
q = Feed.update(url=d.href).where(Feed.id == id)
q.execute()
# Conditional HTTP:
# Check for Etag in result and write to DB
if 'etag' in d:
logging.info("Etag: %s", d.etag)
q = Feed.update(etag=d.etag).where(Feed.id == id)
q.execute()
# Conditional HTTP
# Check for Last-Modified in result and write to DB
if 'modified' in d:
logging.info("Modified %s", d.modified)
q = Feed.update(last_modified=d.modified).where(Feed.id == id)
q.execute()
# Check for feed modification date, write to DB
if 'published' in d:
logging.info("Published: %s", d.published)
if 'updated' in d:
logging.info("Updated: %s", d.updated)
# Check for 'not-modified' status code, skip updates if found
if d.status == 304:
logging.info("Feed %s -- no updates found, skipping", f.url)
return
# If post entries exist, process them
for post in d.entries:
post_content = ""
post_title = post.get('title', 'No title')
h = html.parser.HTMLParser()
desc = post.get('description', '')
desc = h.unescape(desc) # unescape HTML entities
post_description = re.sub(r'<[^>]*?>', '', desc) # crudely strip HTML tags in description
post_published = arrow.get(post.get('published_parsed')) or arrow.now()
if 'content' in post:
post_content = post.content[0].value
post_link = post.get('link', '')
# Get post checksum (title + description + link url)
check_string = (post_title + post_description + post_link).encode('utf8')
post_checksum = hashlib.sha224(check_string).hexdigest()
# If post checksum not found in DB, add post
if Post.select().where(Post.md5 == post_checksum).count() == 0:
p = Post()
p.title = post_title
p.description = post_description
p.published = post_published.datetime # convert from Arrow to datetime for DB
p.content = post_content
p.link = post_link
p.feed = id
p.md5 = post_checksum
p.save()
# TODO: Filter text for dangerous content (e.g. XSRF?)
# Feedparser already does this to some extent
# TODO: Spawn websocket message with new posts for web client
else:
# Site appears to be down
#.........这里部分代码省略.........