This article collects typical usage examples of the pyatom.AtomFeed class in Python. If you have been wondering what the AtomFeed class is for and how to use it in practice, the curated class examples below should help.
The following presents 15 code examples of the AtomFeed class, sorted by popularity by default.
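Before diving in, here is a minimal sketch of the pattern every example below shares: construct an AtomFeed with feed-level metadata, call add() once per entry, and serialize with to_string(). All titles, URLs, and author names in this sketch are placeholders, not taken from any of the projects quoted below.

from datetime import datetime
from pyatom import AtomFeed

# Feed-level metadata; every value here is a placeholder.
feed = AtomFeed(title="Example Feed",
                feed_url="http://example.com/feed.atom",
                url="http://example.com/",
                author="Example Author")
# One add() call per entry; 'updated' expects a datetime.
feed.add(title="First entry",
         content="<p>Entry body</p>",
         content_type="html",
         author="Example Author",
         url="http://example.com/first-entry",
         updated=datetime.utcnow())
xml = feed.to_string()  # the serialized Atom XML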
Example 1: serialize_category_atom
def serialize_category_atom(category, url, user, event_filter):
    """Export the events in a category to Atom.

    :param category: The category to export
    :param url: The URL of the feed
    :param user: The user who needs to be able to access the events
    :param event_filter: A SQLAlchemy criterion to restrict which
                         events will be returned. Usually something
                         involving the start/end date of the event.
    """
    query = (Event.query
             .filter(Event.category_chain.contains([int(category.getId())]),
                     ~Event.is_deleted,
                     event_filter)
             .options(load_only('id', 'start_dt', 'title', 'description', 'protection_mode'),
                      subqueryload('acl_entries'))
             .order_by(Event.start_dt))
    events = [e for e in query if e.can_access(user)]
    feed = AtomFeed(feed_url=url, title='Indico Feed [{}]'.format(to_unicode(category.getTitle())))
    for event in events:
        feed.add(title=event.title,
                 summary=unicode(event.description),  # get rid of RichMarkup
                 url=url_for('event.conferenceDisplay', confId=event.id, _external=True),
                 updated=event.start_dt)
    return BytesIO(feed.to_string().encode('utf-8'))
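Two design points worth noting in this example: access control is enforced in Python via e.can_access(user) rather than in the SQL query itself, and the serialized feed is returned as UTF-8 bytes wrapped in a BytesIO, presumably so the caller can serve it as a file-like download.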
Example 2: gen_feed
def gen_feed(runtime):
    print "Generate feed.atom.."
    posts = runtime.posts
    conf = runtime.conf
    charset = runtime.charset
    feed = AtomFeed(
        title=conf["blog"]["name"],
        subtitle=conf["blog"]["description"],
        feed_url=conf["blog"]["url"] + "/feed.atom",
        url=conf["blog"]["url"],
        author=conf["author"]["name"]
    )
    # gen the first 10 posts
    for post in posts[:10]:
        feed.add(
            title=post.title,
            content=post.html,
            content_type="html",
            author=conf["author"]["name"],
            url=conf["blog"]["url"] + "/" + post.out,
            updated=post.update_at
        )
    open("feed.atom", "w").write(feed.to_string().encode(charset))
Example 3: get_sp_index
def get_sp_index(self, user, head):
    restype = self._get_restype()
    if restype == "default":
        pages = WikiPage.get_index(user)
        page_group = groupby(pages, lambda p: title_grouper(p.title))
        html = self._template("wiki_sp_index.html", {"page_group": page_group})
        self.response.headers["Content-Type"] = "text/html; charset=utf-8"
        self._set_response_body(html, head)
    elif restype == "atom":
        pages = WikiPage.get_index(None)
        config = WikiPage.yaml_by_title(".config")
        host = self.request.host_url
        url = "%s/sp.index?_type=atom" % host
        feed = AtomFeed(
            title="%s: title index" % config["service"]["title"],
            feed_url=url,
            url="%s/" % host,
            author=config["admin"]["email"],
        )
        for page in pages:
            feed.add(
                title=page.title,
                content_type="html",
                author=page.modifier,
                url="%s%s" % (host, page.absolute_url),
                updated=page.updated_at,
            )
        self.response.headers["Content-Type"] = "text/xml; charset=utf-8"
        self._set_response_body(feed.to_string(), head)
    else:
        self.abort(400, "Unknown type: %s" % restype)
Example 4: get_changes
def get_changes(self, user, head):
    restype = get_restype(self.request)
    rendered = None
    if restype == 'default':
        if rendered is None:
            pages = WikiPage.get_changes(user)
            rendered = template(self.request, 'wiki_sp_changes.html',
                                {'pages': pages})
        self.response.headers['Content-Type'] = 'text/html; charset=utf-8'
        set_response_body(self.response, rendered, head)
    elif restype == 'atom':
        if rendered is None:
            pages = WikiPage.get_changes(None, 3, include_body=True)
            config = WikiPage.get_config()
            host = self.request.host_url
            url = "%s/sp.changes?_type=atom" % host
            feed = AtomFeed(title="%s: changes" % config['service']['title'],
                            feed_url=url,
                            url="%s/" % host,
                            author=config['admin']['email'])
            for page in pages:
                feed.add(title=page.title,
                         content_type="html",
                         content=page.rendered_body,
                         author=page.modifier,
                         url='%s%s' % (host, page.absolute_url),
                         updated=page.updated_at)
            rendered = feed.to_string()
        self.response.headers['Content-Type'] = 'text/xml; charset=utf-8'
        set_response_body(self.response, rendered, head)
    else:
        self.abort(400, 'Unknown type: %s' % restype)
Example 5: get_index
def get_index(self, user, head):
    restype = get_restype(self.request)
    if restype == 'default':
        pages = WikiPage.get_index(user)
        page_group = groupby(pages,
                             lambda p: title_grouper(p.title))
        html = template(self.request, 'wiki_sp_index.html',
                        {'page_group': page_group})
        self.response.headers['Content-Type'] = 'text/html; charset=utf-8'
        set_response_body(self.response, html, head)
    elif restype == 'atom':
        pages = WikiPage.get_index(None)
        config = WikiPage.get_config()
        host = self.request.host_url
        url = "%s/sp.index?_type=atom" % host
        feed = AtomFeed(title="%s: title index" % config['service']['title'],
                        feed_url=url,
                        url="%s/" % host,
                        author=config['admin']['email'])
        for page in pages:
            feed.add(title=page.title,
                     content_type="html",
                     author=page.modifier,
                     url='%s%s' % (host, page.absolute_url),
                     updated=page.updated_at)
        self.response.headers['Content-Type'] = 'text/xml; charset=utf-8'
        set_response_body(self.response, feed.to_string(), head)
    else:
        self.abort(400, 'Unknown type: %s' % restype)
Example 6: publish
def publish(self):
    feed = AtomFeed(title=self.title,
                    id=self.link,
                    url=self.link,
                    icon=self.icon,
                    updated=self.updated)
    for date, summary in sorted(self.new_entries, reverse=True):
        entry_link = self.link + '#' + date.strftime('%Y-%m-%d')
        updated = date + datetime.timedelta(hours=23, minutes=59, seconds=59)
        title = "Digest for " + date.strftime('%Y-%m-%d')
        if self.prefix:
            title = '[' + self.prefix + '] ' + title
        if self.suffix:
            title = title + ' [' + self.suffix + ']'
        feed.add(title=title,
                 id=entry_link,
                 content=summary,
                 content_type='html',
                 url=entry_link,
                 updated=updated)
    self.xml = feed.to_string()
Example 7: _feed
def _feed(self, *args, **kwargs):
    host = cherrypy.request.base
    atom = AtomFeed(title=self.blog_title, url=host,
                    feed_url=cherrypy.url(),
                    author=self.author)
    for post in self.listing():
        atom.add(title=post["title"],
                 url=host + post["path"],
                 author=self.author,
                 content_type="html",
                 content=post["html"],
                 updated=post["date"])
    return atom.to_string()
Example 8: render_atom
def render_atom(req, title, path, pages, include_content=False, use_published_date=False):
    config = WikiPage.get_config()
    host = req.get_host()
    title = '%s: %s' % (config['service']['title'], title)
    url = "%s/%s?_type=atom" % (host, path)
    feed = AtomFeed(title=title, feed_url=url, url="%s/" % host, author=config['admin']['email'])
    for page in pages:
        feed.add(title=page.title,
                 content_type="html",
                 content=(page.rendered_body if include_content else ""),
                 author=page.modifier,
                 url='%s%s' % (host, page.absolute_url),
                 updated=(page.published_at if use_published_date else page.updated_at))
    return feed.to_string()
Example 9: _execute
def _execute(self, fossils):
    results = fossils["results"]
    if type(results) != list:
        results = [results]
    feed = AtomFeed(title="Indico Feed", feed_url=fossils["url"])
    for fossil in results:
        feed.add(
            title=unicodeOrNone(fossil["title"]),
            summary=unicodeOrNone(fossil["description"]),
            url=fossil["url"],
            updated=fossil["startDate"],  # ugh, but that's better than creationDate
        )
    return feed.to_string()
Example 10: spider_opened
def spider_opened(self, spider):
    self.feed = AtomFeed(
        title="Hacker News >100",
        subtitle="Hacker News over 100 points",
        feed_url="http://feeds.dannysu.com/hackernews100.atom",
        url="http://news.ycombinator.com/over?points=100"
    )
Example 11: output
def output():
    print('\n [::: Writing output :::]\n')
    out_dir = site.root['output']
    for post in site.posts.values():
        path = os.path.join(out_dir, post.Url)
        os.makedirs(os.path.dirname(path), exist_ok=True)
        with open(path, 'w') as f:
            f.write(post.content)
        print(path)
    for f in site.files:
        path_split = f.split(site.root['content'])
        if len(path_split) == 1:  # Doesn't split
            continue
        file_path = path_split[-1]
        file_path = file_path[1:]  # Cut first '/'
        path = os.path.join(out_dir, file_path)
        print(path)
        os.makedirs(os.path.dirname(path), exist_ok=True)
        if os.path.exists(path):
            os.remove(path)
        shutil.copy(f, path)
    # Generate feed
    feed = AtomFeed(title=site.info['title'],
                    feed_url=site.info['url'] + '/feed',
                    url=site.info['url'],
                    author=site.info['author'])
    for post in site.posts.values():
        if post.Layout == 'post':
            feed.add(title=post.Title,
                     content=post.html,
                     content_type="html",
                     author=post.Author,
                     url=post.Url,
                     updated=post.Date)
    with open(os.path.join(site.root['output'], 'feed'), 'w') as feed_file:
        feed_file.write(feed.to_string())
    # Update cache
    with open(os.path.join(site.root['output'], site.cache_name), 'wb') as cache_file:
        pickle.dump(site.posts, cache_file)
Example 12: _execute
def _execute(self, fossils):
    results = fossils['results']
    if type(results) != list:
        results = [results]
    feed = AtomFeed(
        title='Indico Feed',
        feed_url=fossils['url']
    )
    for fossil in results:
        feed.add(
            title=unicodeOrNone(fossil['title']),
            summary=unicodeOrNone(fossil['description']),
            url=fossil['url'],
            updated=fossil['startDate']  # ugh, but that's better than creationDate
        )
    return feed.to_string()
Example 13: viewCategoriesATOM
def viewCategoriesATOM():
    try:
        feed = AtomFeed('Categories',
                        feed_url=request.url, url=request.url_root)
        categories = session.query(Categories).order_by('name')
        for category in categories:
            feed.add(category.name, unicode(category.name),
                     content_type='html',
                     author=category.name,
                     url='',
                     updated=category.date_modified,
                     published=category.date_created)
        return feed.get_response()
    except:
        flash('Error')
        return redirect(url_for('showHomepage'))
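Unlike the other examples, which serialize with to_string(), this view returns feed.get_response() directly; assuming the werkzeug-derived variant of AtomFeed is in use here, that wraps the XML in a ready-made HTTP response with the appropriate mimetype.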
Example 14: index
def index(req):
    forumurl = "writeme"  # replace with e.g. "https://forums.factorio.com/viewforum.php?f=3"
    if forumurl == "writeme":
        req.status = mod_python.apache.HTTP_INTERNAL_SERVER_ERROR
        return "The admin for this script needs to manually set the 'forumurl' parameter in the source code"
    baseurl = get_baseurl(forumurl)
    soup = get_soup(forumurl)
    forumtitle = get_forumtitle(soup)
    # Generate the feed
    feed = AtomFeed(title=forumtitle,
                    url=forumurl,
                    icon=get_favicon(soup, baseurl))
    # Add forum topics
    for a in soup.findAll("a", {"class": "topictitle"}):
        datestring = a.parent.contents[-1]
        datematch = re.match(r'^ » (.*?)\s*$', datestring, re.M)
        datestring_trimmed = datematch.group(1)
        published = updated = dateutil.parser.parse(datestring_trimmed)
        author_a = a.parent.find("a", {"class": "username-coloured"})
        if author_a:
            author = author_a.string
        else:
            author = "(author not found)"
        # phpBB generates a unique session id (sid) for each download and
        # appends it to every URL, which makes feed readers treat each link
        # as new on every poll, so the sid=... parameter must be stripped.
        url = baseurl + "/" + a["href"]
        url = re.sub('&sid=[0-9a-f]+', '', url)
        feed.add(title=a.string,
                 url=url,
                 published=published,
                 updated=updated,
                 author=author)
    return feed.to_string()
Example 15: HackernewsPipeline
class HackernewsPipeline(object):
    output_filename = "hackernews100.atom"

    def __init__(self):
        dispatcher.connect(self.spider_opened, signal=signals.spider_opened)
        dispatcher.connect(self.spider_closed, signal=signals.spider_closed)
        self.existing_feed = feedparser.parse(self.output_filename)

    def spider_opened(self, spider):
        self.feed = AtomFeed(
            title="Hacker News >100",
            subtitle="Hacker News over 100 points",
            feed_url="http://feeds.dannysu.com/hackernews100.atom",
            url="http://news.ycombinator.com/over?points=100"
        )

    def spider_closed(self, spider):
        f = codecs.open(self.output_filename, 'w', 'utf-8')
        f.write(self.feed.to_string())

    def process_item(self, item, spider):
        found = False
        for entry in self.existing_feed['entries']:
            if entry.link == item['link']:
                item['body'] = entry.content[0].value
                found = True
        if not found:
            body = ""
            if not item['link'].endswith('.pdf'):
                html = urllib.urlopen(item['link']).read()
                body = Document(html).summary()
            item['body'] = '<a href="' + item['comment'] + '">HN Comments</a><br>' + body
        self.feed.add(
            url=item['link'],
            title=item['title'],
            content=item['body'],
            content_type="html",
            updated=datetime.datetime.utcnow()
        )
        return item