本文整理汇总了Python中feedgen.feed.FeedGenerator.rss_file方法的典型用法代码示例。如果您正苦于以下问题:Python FeedGenerator.rss_file方法的具体用法?Python FeedGenerator.rss_file怎么用?Python FeedGenerator.rss_file使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类feedgen.feed.FeedGenerator
的用法示例。
在下文中一共展示了FeedGenerator.rss_file方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: write_podcast
# 需要导入模块: from feedgen.feed import FeedGenerator [as 别名]
# 或者: from feedgen.feed.FeedGenerator import rss_file [as 别名]
def write_podcast(show, podcast_dir, base_public_url, showlocal_tz):
    """Create the podcast RSS file for *show*.

    Collects every ``<show.id>_*.mp3`` in *podcast_dir*, adds one RSS
    entry per file, and writes ``<show.id>.xml`` into *podcast_dir*.

    :param show: object with ``id``, ``name``, ``image_url`` and
        ``description`` attributes.
    :param podcast_dir: directory holding the mp3 files and the output xml.
    :param base_public_url: public URL prefix for the xml and mp3 files.
    :param showlocal_tz: timezone used to interpret the mp3 timestamps.
    """
    fg = FeedGenerator()
    fg.load_extension('podcast')

    url = "{}{}.xml".format(base_public_url, show.id)
    fg.id(url.split('.')[0])
    fg.title(show.name)
    fg.image(show.image_url)
    fg.description(show.description)
    fg.link(href=url, rel='self')

    # collect all mp3s for the given show
    all_mp3s = glob.glob(os.path.join(podcast_dir, "{}_*.mp3".format(show.id)))

    for filepath in all_mp3s:
        filename = os.path.basename(filepath)
        mp3_date = _get_date_from_mp3_path(filepath, showlocal_tz)
        mp3_size = os.stat(filepath).st_size
        mp3_url = base_public_url + filename
        mp3_id = filename.split('.')[0]
        title = "Programa del {0:%d}/{0:%m}/{0:%Y}".format(mp3_date)

        # build the rss entry
        fe = fg.add_entry()
        fe.id(mp3_id)
        fe.pubdate(mp3_date)
        fe.title(title)
        fe.enclosure(mp3_url, str(mp3_size), 'audio/mpeg')

    # Fix: the original also called fg.rss_str(pretty=True) here and
    # discarded the result — a dead call, now removed.
    fg.rss_file(os.path.join(podcast_dir, '{}.xml'.format(show.id)))
示例2: gen_feed
# 需要导入模块: from feedgen.feed import FeedGenerator [as 别名]
# 或者: from feedgen.feed.FeedGenerator import rss_file [as 别名]
def gen_feed(user, base_url, path, debug=False):
    """Regenerate the RSS feed of *user*'s Snapchat story.

    Scans *path* for ``<user>~...`` media files (.mp4/.jpg), newest
    first, and writes ``<user>.xml`` into the same directory.
    """
    # Create feed
    feed = FeedGenerator()
    feed.id(urlparse.urljoin(base_url, user + '.xml'))
    feed.title('Snapchat story for ' + user)
    feed.link(href=urlparse.urljoin(base_url, user + '.xml'), rel='self')
    feed.language('en')
    feed.description('Snapchat media')

    # Iterate through files in path, sort by unix timestamp (newest first),
    # then add to feed.
    for filename in sorted(os.listdir(path), reverse=True):
        if filename.split('~')[0] != user:
            continue
        if os.path.splitext(filename)[1] not in ('.mp4', '.jpg'):
            continue
        entry = feed.add_entry()
        entry.id(urlparse.urljoin(base_url, filename))
        entry.link(href=urlparse.urljoin(base_url, filename))
        entry.title(filename)

    # Write feed to disk
    feed.rss_file(os.path.join(path, user + '.xml'))
    date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
    if debug:
        print('{0} Regenerated {1}'.format(date, urlparse.urljoin(base_url,
                                                                  user + '.xml')))
示例3: generate_feed
# 需要导入模块: from feedgen.feed import FeedGenerator [as 别名]
# 或者: from feedgen.feed.FeedGenerator import rss_file [as 别名]
def generate_feed(output_file, exclude_highlights=True):
    """Rebuild the Le Batard Show podcast RSS and write it to *output_file*.

    Copies channel/iTunes metadata from the upstream ESPN feed and adds one
    entry per episode. When *exclude_highlights* is true, episodes whose
    duration exceeds 3600 seconds are skipped.
    """
    # Parse RSS feed
    d = feedparser.parse(ESPN_RSS_FEED)
    IMAGE_URL = d.feed.image["href"]

    # RSS feed generation
    fg = FeedGenerator()
    fg.load_extension("podcast", rss=True)

    ## RSS tags
    # Required
    fg.title(d.feed.title)
    fg.link(href="https://github.com/aaearon/lebatard-show-rss")
    fg.description(d.feed.description)
    # Optional
    fg.language(d.feed.language)
    fg.image(IMAGE_URL)
    fg.subtitle(d.feed.subtitle)
    # iTunes
    fg.podcast.itunes_author(d.feed.author)
    fg.podcast.itunes_category(itunes_category=d.feed.category)
    fg.podcast.itunes_image(itunes_image=IMAGE_URL)
    fg.podcast.itunes_explicit(itunes_explicit="clean")
    fg.podcast.itunes_owner(name=CONTACT["name"], email=CONTACT["email"])

    tz = pytz.timezone("America/Los_Angeles")

    for e in d.entries:
        # Guard clause replaces the original empty `pass`/`else` branch.
        if exclude_highlights and episode_duration_string_to_int(e["itunes_duration"]) > 3600:
            continue

        fe = fg.add_entry()
        fe.id(e.id)
        fe.title(e.title)
        fe.description(e.description)
        fe.enclosure(url=e.enclosures[0]["href"], length=e.enclosures[0]["length"],
                     type=e.enclosures[0]["type"])
        fe.podcast.itunes_summary(e.description)
        fe.podcast.itunes_subtitle(e.description)
        fe.podcast.itunes_duration(e["itunes_duration"])

        dt = datetime.fromtimestamp(time.mktime(e.published_parsed))
        date = tz.localize(dt)

        # Stagger published times per segment so hour parts sort in order.
        if "Show: " in e.title:
            fe.published(date)
        elif "Hour 1" in e.title:
            fe.published(date + timedelta(hours=1))
        elif "Hour 2" in e.title:
            fe.published(date + timedelta(hours=2))
        elif "Hour 3" in e.title:
            fe.published(date + timedelta(hours=3))
        else:
            fe.published(date + timedelta(hours=-1))

    # Fix: the original called fg.rss_str(pretty=True) and discarded the
    # result — a dead call, now removed.
    fg.rss_file(output_file)
示例4: build
# 需要导入模块: from feedgen.feed import FeedGenerator [as 别名]
# 或者: from feedgen.feed.FeedGenerator import rss_file [as 别名]
def build():
    """Rebuild the global RSS feed from the cached stories and publish it.

    Rebinds the module-level ``fg``, caps the story cache, serializes the
    cache to disk, and writes/uploads the feed depending on configuration.
    """
    global fg
    fg = FeedGenerator()
    fg.title(parser.get('technowatch', 'name'))
    fg.language('en')
    fg.description(parser.get('technowatch', 'name'))
    fg.link(href=parser.get('technowatch', 'link'), rel='alternate')

    # Cleaning stories if too much
    if len(known_stories) > int(parser.get('technowatch', 'cache_max')):
        clean()

    # Sorting stories by crawled date
    newest_first = sorted(known_stories.values(),
                          key=operator.itemgetter('crawledDate'),
                          reverse=True)
    for story in newest_first:
        entry = fg.add_entry()
        entry.link(href=story['url'], rel='alternate')
        entry.title("[" + story['type'] + "] " + story['title'])
        entry.category({
            'label': story['type'],
            'term': story['type']
        })
        entry.author({'name': story['by']})
        entry.description(story['desc'])
        entry.pubdate(story['crawledDate'])

    # Caching RSS building
    pickle.dump(known_stories, open(cust_path + "/technowatch.data", "wb"))

    if parser.get('wsgi', 'activated') == "True":
        fg.rss_file(cust_path + '/static/rss.xml')
    if parser.get('ftp', 'activated') == "True":
        upload()
示例5: run
# 需要导入模块: from feedgen.feed import FeedGenerator [as 别名]
# 或者: from feedgen.feed.FeedGenerator import rss_file [as 别名]
def run(folder, url):
    """Generate ``rss.xml`` for an audiobook *folder* of mp3 files.

    Each mp3 becomes one feed entry whose enclosure link points at
    ``<url>/<file>``.
    """
    from feedgen.feed import FeedGenerator

    fg = FeedGenerator()
    _, tail = os.path.split(folder)
    # NOTE(review): .decode() implies a Python 2 bytes path — confirm.
    title = tail.decode("utf-8")
    fg.id(str(uuid.uuid4()))
    fg.title(title)
    fg.link(href="{0}/rss.xml".format(url), rel="self")
    fg.description(u"Audiobook `{0}` generated with rssbook".format(title))
    fg.load_extension("podcast")

    for item in sorted(os.listdir(folder)):
        if os.path.splitext(item)[1] != ".mp3":
            continue
        get_node(os.path.join(folder, item))
        fullpath = os.path.join(folder, item)
        entry = fg.add_entry()
        entry.id(str(uuid.uuid4()))
        entry.title(title)
        entry.description(item)
        entry.link(
            href="{0}/{1}".format(url, item),
            rel="enclosure",
            type="audio/mpeg",
            length=str(os.stat(fullpath).st_size),
        )

    fg.rss_file(os.path.join(folder, "rss.xml"))
示例6: main
# 需要导入模块: from feedgen.feed import FeedGenerator [as 别名]
# 或者: from feedgen.feed.FeedGenerator import rss_file [as 别名]
def main():
    """Build ``rss.xml`` from cached VK group wall posts.

    Fetches group metadata live, reads the previously pickled wall dump
    from the local file ``out``, and emits one entry per post.
    """
    session = vk.Session()
    api = vk.API(session)

    group_id = '96469126'
    group_info = api.groups.getById(group_ids=group_id,
                                    fields=['description', 'site', 'name', 'photo', 'gid'])
    assert len(group_info) == 1
    group_info = group_info[0]

    url = 'http://vk.com/club{}'.format(group_info['gid'])

    # The wall dump in 'out' was produced earlier by api.wall.get(...) and
    # pickled. NOTE(review): pickle.loads must only ever see this trusted
    # local cache — never untrusted data.
    with open('out', 'rb') as fio:
        data = pickle.loads(fio.read())
    assert len(data) > 1

    fg = FeedGenerator()
    fg.id(url)
    fg.title(_(group_info['name']))
    fg.description(_(group_info['description']))
    fg.logo(group_info['photo'])
    site_url = group_info.get('site', url) if group_info.get('site', url) else url
    fg.link(href=_(site_url))
    fg.link(href=_(site_url), rel='self')
    fg.link(href=_(site_url), rel='alternate')
    fg.author({'name': 'Alexander Sapronov', 'email': '[email protected]'})
    fg.webMaster('[email protected] (Alexander Sapronov)')

    pat = re.compile(r"#(\w+)")

    # data[0] is skipped — presumably the VK total-count element; verify.
    for x in data[1:]:
        post_link = "{}?w=wall-{}_{}".format(url, group_info['gid'], x['id'])
        e = fg.add_entry()
        text = x.get('text', '')
        e.description(_(text))
        e.author({'name': _(get_author_name(api, x.get('from_id')))})
        e.id(post_link)
        e.link(href=_(post_link))
        e.link(href=_(post_link), rel='alternate')

        # Hashtags become categories and are stripped from the title.
        tags = pat.findall(text)
        title = x.get('text', '')
        for tag in tags:
            e.category(term=_(tag))
            title = title.replace('#{}'.format(tag), '')
        title = re.sub('<[^<]+?>', ' ', title)
        # Bug fix: textwrap.wrap() returns [] for empty/whitespace-only
        # text, so the original unconditional [0] raised IndexError.
        wrapped = textwrap.wrap(title, width=80)
        title = wrapped[0] if wrapped else ''
        e.title(_(title.strip()))

    fg.rss_file('rss.xml')
示例7: __init__
# 需要导入模块: from feedgen.feed import FeedGenerator [as 别名]
# 或者: from feedgen.feed.FeedGenerator import rss_file [as 别名]
class YoutubeFeed:
    """Download YouTube videos as mp3 and expose them as a podcast feed."""

    # youtube-dl options: extract best audio and convert it to 192k mp3,
    # writing files named <video id>.<ext>.
    ydl_opts = {
        'format': 'bestaudio/best',
        'outtmpl': '%(id)s.%(ext)s',
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'mp3',
            'preferredquality': '192',
        }]
    }

    def __init__(self, name):
        """Create an empty feed titled *name*; mp3s land in the CWD."""
        self.name = name
        self.ydl = youtube_dl.YoutubeDL(self.ydl_opts)
        self.fg = FeedGenerator()
        self.fg.title(name)
        self.fg.author({"name": "Youtube Audio Feed", "email": ""})
        self.fg.link(href="http://www.foo.bar.baz.com", rel="alternate")
        self.fg.description("Personalized Youtube audio feed")
        self.fg.generator("")
        self.fg.docs("")

    def add_video(self, url):
        """Download the video at *url* as mp3 and append a feed entry for it."""
        info = self.ydl.extract_info(url, download=True)
        entry = self.fg.add_entry()
        entry.id(info['id'])
        entry.title(info['title'])
        entry.description(info['description'])
        entry.enclosure(info['id'] + ".mp3", str(info['duration']), 'audio/mpeg')

    def save(self):
        """Write the feed to ``<name>.xml``.

        Bug fix: the original referenced the bare name ``name`` (a
        NameError at runtime); the intended value is ``self.name``.
        """
        self.fg.rss_file(self.name + '.xml')
示例8: export_feed
# 需要导入模块: from feedgen.feed import FeedGenerator [as 别名]
# 或者: from feedgen.feed.FeedGenerator import rss_file [as 别名]
def export_feed(self, output):
    """Write the combined JW.ORG magazines podcast feed to ``CACHE_DIR/output``.

    Flattens the language/mnemonic/issue manifest returned by
    ``self._load()`` into one entry per issue, newest issue first.
    """
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.podcast.itunes_category('Religion & Spirituality', 'Christianity')
    fg.podcast.itunes_image("%s/icon.png" % URL_BASE)
    fg.title('JW.ORG Magazines')
    fg.description('Combined Feed of Watchtower (public), Watchtower (study), and Awake! in English from jw.org.')
    fg.link(href="%s/%s" % (URL_BASE, output), rel='self')

    manifest = self._load()
    entries = []
    for lang, mnemonics in manifest.items():
        for mnemonic, issues in mnemonics.items():
            for issue, data in issues.items():
                entries.append((issue, data))

    # Newest issue first.
    for issue, entry in sorted(entries, key=lambda i: i[0], reverse=True):
        fe = fg.add_entry()
        fe.id(entry['hash'])
        fe.title(entry['title'])
        fe.description(entry['title'])
        fe.published(pytz.utc.localize(entry['created_on']))
        url = "%s/%s" % (URL_BASE, os.path.basename(entry['file']))
        mime = 'audio/mpeg'
        fe.enclosure(url, str(entry['duration']), mime)
        fe.link(href=url, type=mime)

    # Fix: the original called fg.rss_str(pretty=True) and discarded the
    # result — a dead call, now removed.
    fg.rss_file(os.path.join(CACHE_DIR, output))
示例9: rss
# 需要导入模块: from feedgen.feed import FeedGenerator [as 别名]
# 或者: from feedgen.feed.FeedGenerator import rss_file [as 别名]
def rss(conversation,
        url,
        author_name,
        author_email,
        title,
        subtitle,
        language,
        output_path):
    """Export all the links of the conversation in a simple RSS feed"""
    from feedgen.feed import FeedGenerator
    fg = FeedGenerator()
    # Channel-level metadata for the exported feed.
    fg.id(url)
    fg.title(title)
    fg.author(
        {
            'name': author_name,
            'email': author_email,
        }
    )
    fg.link(
        href=url,
        rel='alternate'
    )
    if subtitle:
        fg.subtitle(subtitle)
    fg.language(language)
    for message in conversation.history():
        # Slack-style link markup: <url|optional title>; only messages
        # containing such a link become feed entries.
        match = re.search(
            "^.*<(?P<url>[^>|]+)\|?(?P<title>[^>]+)?>.*$",
            message.data["text"],
            flags=re.MULTILINE
        )
        if match is not None:
            fe = fg.add_entry()
            link = match.group("url")
            # Fall back to the URL itself when no title was given.
            title = match.group("title") or link
            # "ts" is a unix-epoch string; convert to a local datetime.
            date = naive_to_local(datetime.datetime.fromtimestamp(float(message.data["ts"])))
            description = message.data["text"]
            if "attachments" in message.data:
                # NOTE(review): assumes at least one attachment has
                # title_link == link; [0] raises IndexError otherwise —
                # confirm upstream guarantees this.
                attachment = [a for a in message.data["attachments"] if
                              a["title_link"] == link][0]
                title += " | " + attachment["title"]
                description += """
""" + attachment["text"]
            fe.id(link)
            fe.title(title)
            fe.link(href=link)
            fe.published(date)
            user = config.slack.get_user(message.data["user"])
            author = {
                "name": message.data["username"],
                "email": user.email or "noemail",
            }
            fe.author(author)
            fe.description(description)
    fg.rss_file(output_path, pretty=True)
示例10: generate_feed
# 需要导入模块: from feedgen.feed import FeedGenerator [as 别名]
# 或者: from feedgen.feed.FeedGenerator import rss_file [as 别名]
def generate_feed(location, events):
    """Write an RSS feed of upcoming concerts for *location* to html/feeds/."""
    fg = FeedGenerator()
    fg.title('Upcoming Concerts in {}'.format(location.capitalize()))
    fg.link(href='http://example.com', rel='alternate')
    fg.description('Upcoming rockin\' concerts')

    for concert in events.values():
        entry = fg.add_entry()
        entry.id(concert['link'])
        entry.title(concert['groups'])
        entry.description(u'{} / {} / {}'.format(concert['date'],
                                                 concert['city_venue'],
                                                 concert['price']))
        entry.link(href=concert['link'])

    fg.rss_file('html/feeds/{}.rss'.format(location))
示例11: listen_for_urls
# 需要导入模块: from feedgen.feed import FeedGenerator [as 别名]
# 或者: from feedgen.feed.FeedGenerator import rss_file [as 别名]
def listen_for_urls(self, msg, match):
url = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[[email protected]&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', str(msg))
p = re.compile('\/(.*)')
user = re.search(p, str(msg.getFrom())).group()[1:]
if len(url) == 1:
url = str(url[0])
filename = '/mnt/extern1/SYSTEM/www/foorss/' + user + '.xml'
fg = FeedGenerator()
# Some pages block urllib2 so we need a fake user agent
header = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
req = urllib2.Request(url, headers=header)
try:
soup = BeautifulSoup(urllib2.urlopen(req))
except urllib2.HTTPError, e:
print e.fp.read()
yield "Error while parsing the website..."
if os.path.isfile(filename):
fg.from_rss(filename)
else:
fg.id(user)
fg.title('Some Testfeed')
fg.link( href='http://nix.da', rel='alternate' )
fg.description('This is a cool feed!')
if soup.title != None:
title = soup.title.string
else:
title = url
fe = fg.add_entry()
fe.id(url)
fe.title(title)
fe.description('Description')
fe.link([{'href': url}])
fg.rss_file(filename)
yield title + ' from ' + user + ' (rss updated)'
示例12: make_rss
# 需要导入模块: from feedgen.feed import FeedGenerator [as 别名]
# 或者: from feedgen.feed.FeedGenerator import rss_file [as 别名]
def make_rss(user, link=False):
    """Render a twitter user's timeline as an RSS file and return the XML.

    With ``link=True`` only tweets containing URLs are kept and the
    entries include an HTML list of the expanded links. Returns the RSS
    string, or an ``(error, status)`` tuple on failure.
    """
    api = twitter.Api(**secrets)

    if link:
        filename = os.path.join(os.path.dirname(__file__), 'rss', user + '_links.rss')
        try:
            timeline = api.GetUserTimeline(None, user, count=50)
        except twitter.TwitterError as e:
            return str(e), 404
        statuses = [s for s in timeline if len(s.urls) > 0]
    else:
        filename = os.path.join(os.path.dirname(__file__), 'rss', user + '.rss')
        try:
            statuses = api.GetUserTimeline(None, user)
        except twitter.TwitterError as e:
            return str(e), 404

    if not statuses:
        return "No Tweets", 416

    fg = FeedGenerator()
    fg.title(user + ' on twitter')
    fg.description('RSS feed from a twitter stream')
    fg.link(href='http://twitter.com/' + statuses[0].GetUser().screen_name, rel='self')

    for status in statuses:
        fe = fg.add_entry()
        fe.title(status.GetUser().screen_name + ': ' + status.GetText())
        statusurl = ('http://twitter.com/' + statuses[0].GetUser().screen_name +
                     '/status/' + status.GetIdStr())
        fe.guid(statusurl)
        fe.pubdate(status.created_at)
        fe.link(href=statusurl, rel='alternate')
        if link:
            pieces = ['<br/> <ul>']
            for u in status.urls:
                pieces.append('<a href="{0}">{0}</a> <br/>'.format(u.expanded_url))
            pieces.append('</ul>')
            fe.summary(status.GetText() + '\n' + ''.join(pieces))
        else:
            fe.summary(status.GetText())

    fg.rss_file(filename)
    return fg.rss_str()
示例13: generate_rss
# 需要导入模块: from feedgen.feed import FeedGenerator [as 别名]
# 或者: from feedgen.feed.FeedGenerator import rss_file [as 别名]
def generate_rss(pages_info=None):
    """Build the blog RSS feed from *pages_info*, write it to the build
    directory, and return the XML string.

    :param pages_info: iterable of post dicts with 'url', 'title' and
        'content' keys.
    :return: the pretty-printed RSS XML as bytes/str (as rss_str returns).
    """
    fg = FeedGenerator()
    fg.id(conf['base_url'])
    fg.title(conf['title'])
    fg.author({'name': conf['author'], 'email': conf['email']})
    fg.link(href=conf['base_url'], rel='alternate')
    fg.subtitle(conf['description'])
    fg.link(href=conf['base_url'] + '/rss.xml', rel='self')
    fg.language('en')

    for post in pages_info:
        fe = fg.add_entry()
        # Consistency fix: the original hard-coded
        # 'http://blog.elijahcaine.me/' here while every other URL in this
        # function is built from conf['base_url']; build the id the same
        # way as the entry link so the feed follows the configured domain.
        fe.id(conf['base_url'] + post['url'])
        fe.title(post['title'])
        fe.author({'name': conf['author'], 'email': conf['email']})
        fe.link(href=conf['base_url'] + post['url'], rel='alternate')
        fe.description(post['content']['fragment'])

    rssfeed = fg.rss_str(pretty=True)
    fg.rss_file('build/' + conf['rss_feed'])
    return rssfeed
示例14: generate_feed
# 需要导入模块: from feedgen.feed import FeedGenerator [as 别名]
# 或者: from feedgen.feed.FeedGenerator import rss_file [as 别名]
def generate_feed(posts):
    """Build the site RSS feed from the ten newest *posts*, write
    ``rss.xml`` to the site path, and return the XML string.

    Fix: the original assigned ``rssfeed = fg.rss_str(pretty=True)`` and
    never used it; returning it makes the value available to callers
    (matching the sibling ``generate_rss`` example) and is backward
    compatible with callers that ignore the return value.
    """
    author = {'name': config['default']['author'], 'email': config['default']['email']}

    fg = FeedGenerator()
    fg.id('http://%s/rss.xml' % config['default']['domain'])
    fg.title('%s RSS Feed' % config['default']['domain'])
    fg.author(author)
    fg.link(href='http://%s' % config['default']['domain'], rel='alternate')
    fg.language('en')
    fg.description('%s RSS Feed' % config['default']['domain'])

    # Only the ten most recent posts go into the feed.
    for post in posts[:10]:
        fe = fg.add_entry()
        fe.id('http://%s/posts/%s.html' % (config['default']['domain'], post.slug))
        fe.title(post.title)
        fe.content(content=post.body, type='html')
        fe.author(author)

    rssfeed = fg.rss_str(pretty=True)
    fg.rss_file(os.path.join(config['default']['site_path'], 'rss.xml'))
    return rssfeed
示例15: export_rss
# 需要导入模块: from feedgen.feed import FeedGenerator [as 别名]
# 或者: from feedgen.feed.FeedGenerator import rss_file [as 别名]
def export_rss(list_of_updated_groups):
print "\nBeginning RSS feed generation."
print "Updated groups are:"
print list_of_updated_groups
for updated_group in list_of_updated_groups:
rss_file_path = os.getcwd() + "/static/rss/" + updated_group + ".xml"
fg = FeedGenerator()
fg.title("VTS Raspored - Grupa " + updated_group)
fg.author( {'name':'Veselin Romic','email':'[email protected]'} )
fg.language('sr')
fg.description("Automatski se salje notifikacija kad se promeni grupni raspored.")
fg.link(href='https://eref.vts.su.ac.rs/')
new_entry = fg.add_entry()
new_entry.title("RASPORED PROMENJEN - " + str(datetime.datetime.now()))
new_entry.description("Proverite eref da biste videli promene.")
new_entry.link(href='https://eref.vts.su.ac.rs/')
if os.path.isfile(rss_file_path):
current_feed = feedparser.parse(rss_file_path)
items_transferred = 0
for existing_list_item in current_feed["items"]:
if items_transferred >= 9:
break
transferred_entry = fg.add_entry()
transferred_entry.title(existing_list_item["title"])
transferred_entry.description("Proverite eref da biste videli promene.")
transferred_entry.link(href='https://eref.vts.su.ac.rs/')
items_transferred += 1
fg.rss_file(rss_file_path, pretty=True)
print "Updated RSS feed /static/rss/%s.xml" % updated_group