This page collects typical usage examples of the Python method pyopenmensa.feed.LazyBuilder.toXMLFeed. If you are unsure how LazyBuilder.toXMLFeed is used or what it looks like in real code, the curated examples below should help. You may also want to look further into the containing class, pyopenmensa.feed.LazyBuilder.
The following presents 15 code examples of LazyBuilder.toXMLFeed, sorted by popularity by default.
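Before working through the examples, here is a minimal sketch of the typical toXMLFeed workflow: fill a LazyBuilder with meal data, then serialize it to an OpenMensa XML feed. The dates, meal, and prices below are invented for illustration; the sketch assumes only the LazyBuilder methods that also appear in the examples (addMeal, setDayClosed, toXMLFeed).

from pyopenmensa.feed import LazyBuilder

canteen = LazyBuilder()
# addMeal() takes a date, a category, a meal name, plus optional notes and
# per-role prices; LazyBuilder parses date and price strings lazily.
canteen.addMeal('2024-10-02', 'Hauptgerichte', 'Spaghetti Bolognese',
                notes=['contains gluten'],
                prices={'student': '2,50 €', 'employee': '3,80 €'})
# Days without service can be marked explicitly.
canteen.setDayClosed('2024-10-03')
# toXMLFeed() returns the collected data as an OpenMensa XML feed string.
print(canteen.toXMLFeed())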
Example 1: parse_url
# Required imports: from pyopenmensa.feed import LazyBuilder [as alias]
# Or alternatively: from pyopenmensa.feed.LazyBuilder import toXMLFeed [as alias]
def parse_url(url, today=False, canteentype="Mittagsmensa", this_week="", next_week=True, legend_url=None):
    canteen = LazyBuilder()
    canteen.legendKeyFunc = lambda v: v.lower()
    if not legend_url:
        legend_url = url[: url.find("essen/") + 6] + "wissenswertes/lebensmittelkennzeichnung"
    legend_doc = parse(urlopen(legend_url)).find(id="artikel")
    allergene = buildLegend(
        text=legend_doc.text.replace("\xa0", " "), regex=r"(?P<name>[A-Z]+) {3,}enthält (?P<value>\w+( |\t|\w)*)"
    )
    allergene["EI"] = "Ei"
    zusatzstoffe = buildLegend(
        text=legend_doc.text.replace("\xa0", " "), regex=r"(?P<name>\d+) {3,} (enthält )?(?P<value>\w+( |\t|\w)*)"
    )
    for tr in legend_doc.find_all("tr"):
        tds = tr.find_all("td")
        if len(tds) != 2:
            continue
        title = tds[0].find("strong")
        if title is None:
            continue
        else:
            title = title.text
        text = tds[1].text.replace("enthält", "").strip()
        if title.isdigit():
            zusatzstoffe[title] = text
        else:
            allergene[title] = text
    parse_week(url + this_week, canteen, canteentype, allergene=allergene, zusatzstoffe=zusatzstoffe)
    if not today and next_week is True:
        parse_week(url + "-kommende-woche", canteen, canteentype, allergene=allergene, zusatzstoffe=zusatzstoffe)
    if not today and type(next_week) is str:
        parse_week(url + next_week, canteen, canteentype, allergene=allergene, zusatzstoffe=zusatzstoffe)
    print(canteen.toXMLFeed())
    return canteen.toXMLFeed()
Example 2: parse_url
# Required imports: from pyopenmensa.feed import LazyBuilder [as alias]
# Or alternatively: from pyopenmensa.feed.LazyBuilder import toXMLFeed [as alias]
def parse_url(url, today=False):
    canteen = LazyBuilder()
    legend = {'f': 'fleischloses Gericht', 'v': 'veganes Gericht'}
    document = parse(urlopen(base + '/speiseplan/zusatzstoffe-de.html').read())
    for td in document.find_all('td', 'beschreibung'):
        legend[td.previous_sibling.previous_sibling.text] = td.text
    document = parse(urlopen(base + '/unsere-preise/').read())
    prices = {}
    for tr in document.find('table', 'essenspreise').find_all('tr'):
        meal = tr.find('th')
        if not meal or not meal.text.strip():
            continue
        if len(tr.find_all('td', 'betrag')) < 3:
            continue
        if 'titel' in meal.attrs.get('class', []) or 'zeilentitel' in meal.attrs.get('class', []):
            continue
        meal = meal.text.strip()
        prices[meal] = {}
        for role, _id in [('student', 0), ('employee', 1), ('other', 2)]:
            price_html = tr.find_all('td', 'betrag')[_id].text
            price_search = price_regex.search(price_html)
            if price_search:
                prices[meal][role] = price_search.group('price')
    errorCount = 0
    date = datetime.date.today()
    while errorCount < 7:
        try:
            document = parse(urlopen(url.format(date)).read())
        except HTTPError as e:
            if e.code == 404:
                errorCount += 1
                date += datetime.date.resolution
                continue
            else:
                raise e
        else:
            errorCount = 0
        for tr in document.find('table', 'zusatzstoffe').find_all('tr'):
            identifier = tr.find_all('td')[0].text \
                .replace('(', '').replace(')', '')
            legend[identifier] = tr.find_all('td')[1].text.strip()
        canteen.setLegendData(legend)
        mensa_data = document.find('table', 'menu')
        category = None
        for menu_tr in mensa_data.find_all('tr'):
            if menu_tr.find('td', 'headline'):
                continue
            if menu_tr.find('td', 'gericht').text:
                category = menu_tr.find('td', 'gericht').text
            data = menu_tr.find('td', 'beschreibung')
            name = data.find('span').text.strip()
            notes = [span['title'] for span in data.find_all('span', title=True)]
            canteen.addMeal(
                date, category, name, notes,
                prices.get(category.replace('Aktionsessen', 'Bio-/Aktionsgericht'), {})
            )
        date += datetime.date.resolution
        if today:
            break
    return canteen.toXMLFeed()
Example 3: parse_url
# Required imports: from pyopenmensa.feed import LazyBuilder [as alias]
# Or alternatively: from pyopenmensa.feed.LazyBuilder import toXMLFeed [as alias]
def parse_url(url, today=False):
    canteen = LazyBuilder()
    parse_week(url + '.html', canteen)
    if not today:
        parse_week(url + '-w1.html', canteen)
        parse_week(url + '-w2.html', canteen)
    return canteen.toXMLFeed()
Example 4: metadata
# Required imports: from pyopenmensa.feed import LazyBuilder [as alias]
# Or alternatively: from pyopenmensa.feed.LazyBuilder import toXMLFeed [as alias]
def metadata(self, request):
    meta = LazyBuilder(version=self.parser.version)
    meta.feeds.append(Feed(
        name='today',
        hour='8-14',
        url='/'.join([request.host, self.parser.name, self.name, 'today.xml']),
        priority=0,
        source=None,
        dayOfMonth='*',
        dayOfWeek='*',
        minute='0',
        retry=None
    ))
    meta.feeds.append(Feed(
        name='full',
        hour='8',
        url='/'.join([request.host, self.parser.name, self.name, 'full.xml']),
        priority=0,
        source=None,
        dayOfMonth='*',
        dayOfWeek='*',
        minute='0',
        retry=None
    ))
    return meta.toXMLFeed()
Example 5: feed_all
# Required imports: from pyopenmensa.feed import LazyBuilder [as alias]
# Or alternatively: from pyopenmensa.feed.LazyBuilder import toXMLFeed [as alias]
def feed_all(self, name):
    canteen = LazyBuilder()
    date = self.__now()
    # Get this week
    lastWeekday = -1
    while self.handler(canteen, self.xml2locId[name], date.date()):
        date += datetime.timedelta(days=1)
        if lastWeekday > date.weekday():
            break
        lastWeekday = date.weekday()
    # Skip over weekend
    if date.weekday() > 4:
        date += datetime.timedelta(days=7-date.weekday())
    # Get next week
    lastWeekday = -1
    while self.handler(canteen, self.xml2locId[name], date.date()):
        date += datetime.timedelta(days=1)
        if lastWeekday > date.weekday():
            break
        lastWeekday = date.weekday()
    return canteen.toXMLFeed()
Example 6: parse_url
# Required imports: from pyopenmensa.feed import LazyBuilder [as alias]
# Or alternatively: from pyopenmensa.feed.LazyBuilder import toXMLFeed [as alias]
def parse_url(url, mensa, *weeks, today):
    canteen = LazyBuilder()
    for week in weeks:
        parse_week(url + week, canteen, mensa)
        if today:
            break
    return canteen.toXMLFeed()
Example 7: parse_url
# Required imports: from pyopenmensa.feed import LazyBuilder [as alias]
# Or alternatively: from pyopenmensa.feed.LazyBuilder import toXMLFeed [as alias]
def parse_url(url, today=False):
    canteen = LazyBuilder()
    parse_week(url + (datetime.date.today()
                      + datetime.date.resolution * 7).strftime('/%Y/%W/'), canteen)
    if not today:
        parse_week(url + (datetime.date.today()
                          + datetime.date.resolution * 14).strftime('/%Y/%W/'), canteen)
    return canteen.toXMLFeed()
Example 8: parse_url
# Required imports: from pyopenmensa.feed import LazyBuilder [as alias]
# Or alternatively: from pyopenmensa.feed.LazyBuilder import toXMLFeed [as alias]
def parse_url(url, today=False):
    canteen = LazyBuilder()
    day = datetime.date.today()
    for _ in range(21):
        parse_day(canteen, '{}&date={}'.format(url, day.strftime('%Y-%m-%d')))
        if today:
            break
        day += datetime.timedelta(days=1)
    return canteen.toXMLFeed()
Example 9: feed
# Required imports: from pyopenmensa.feed import LazyBuilder [as alias]
# Or alternatively: from pyopenmensa.feed.LazyBuilder import toXMLFeed [as alias]
def feed(self, name):
    canteen = LazyBuilder()
    if name in self.xmlnames:
        parse_url(canteen, name)  # all categories
    else:
        xmlname_enty = [x for x in self.xmlnames if x[0] == name][0]
        parse_url(canteen, *xmlname_enty)  # only certain categories
    return canteen.toXMLFeed()
Example 10: parse_url
# Required imports: from pyopenmensa.feed import LazyBuilder [as alias]
# Or alternatively: from pyopenmensa.feed.LazyBuilder import toXMLFeed [as alias]
def parse_url(url, today):
    canteen = LazyBuilder()
    canteen.setAdditionalCharges('student', {})
    if today:
        parse_week(url, canteen)  # base url only contains current day
    else:
        parse_week(url + 'week', canteen)
        parse_week(url + 'nextweek', canteen)
    return canteen.toXMLFeed()
Example 11: parsePlan
# Required imports: from pyopenmensa.feed import LazyBuilder [as alias]
# Or alternatively: from pyopenmensa.feed.LazyBuilder import toXMLFeed [as alias]
def parsePlan(url, internalMensaId, today):
    canteen = LazyBuilder()
    end = False
    while (url != None):
        dom = BeautifulSoup(urlopen(url).read(), 'lxml')
        date = dom.select('#mensa_date > p')[0].contents[0]
        menuDefinition = dom.find(id=internalMensaId)
        menuDescription = menuDefinition.parent.find('dd')
        tables = menuDescription.select('table')
        legend = {}
        legend = buildLegend(legend, str(dom), regex=r'<strong>(?P<name>\w+)\s*</strong>\s*-\s*(?P<value>[\w\s)(]+)')
        if tables != None and len(tables) == 1:
            table = tables[0]
            rows = table.find_all('tr')
            for row in rows:
                menuNameElement = row.select('td[class="mensa_col_55"] > b')
                if menuNameElement != None and menuNameElement[0].contents != None:
                    menuName = menuNameElement[0].contents[0]
                    category = 'Gericht'
                    # get notes
                    notes = {}
                    notesElement = row.select('td[class="mensa_col_55"] > span')
                    if notesElement != None and len(notesElement) > 0 and notesElement[0].text != None:
                        notes = [legend.get(n, n) for n in notesElement[0].text.split(' ') if n]
                    # get prices
                    prices = {}
                    for td in row.select('td[class="mensa_col_15"]'):
                        priceElement = td.find('b')
                        groupElement = td.find('span')
                        if priceElement != None and groupElement != None and groupElement.contents != None and len(groupElement.contents) > 0 and priceElement.contents != None and len(priceElement.contents) > 0:
                            group = str(groupElement.contents[0])
                            price = str(priceElement.contents[0])
                            if group == 'Stud.:':
                                prices['student'] = price
                            elif group == 'Bed.:':
                                prices['employee'] = price
                            elif group == 'Gast:':
                                prices['other'] = price
                    canteen.addMeal(date, category, menuName, notes, prices)
        else:
            canteen.setDayClosed(date)
        # check for further pages
        nextPageLink = dom.find(id='next_day_link')
        if nextPageLink == None or today:
            url = None
        else:
            url = 'https://www.studentenwerk-rostock.de/' + nextPageLink['href']
    return canteen.toXMLFeed()
Example 12: parse_url
# Required imports: from pyopenmensa.feed import LazyBuilder [as alias]
# Or alternatively: from pyopenmensa.feed.LazyBuilder import toXMLFeed [as alias]
def parse_url(url, today=False):
    base_data = load_base_data()
    canteen = LazyBuilder()
    with urlopen(url) as response:
        data = json.loads(response.read().decode())
    for day in data['days']:
        date = datetime.datetime.strptime(day['date'], UTC_DATE_STRING).date()
        if today and (datetime.date.today() != date):
            continue
        for counter in day['counters']:
            counter_name = counter['displayName']
            counter_description = counter['description']
            counter_hours = counter.get('openingHours')
            for meal in counter['meals']:
                if 'knownMealId' in meal:
                    # This is meant to allow recognizing recurring meals,
                    # for features like marking meals as favorites.
                    # Up to now, not really used in the mensaar.de API,
                    # nor functional in this API parser.
                    # The meal will still be recognized as every other meal.
                    print('knownMealId: %s' % meal['knownMealId'], file=sys.stderr)
                meal_name = meal['name']
                if 'category' in meal:
                    meal_name = '%s: %s' % (meal['category'], meal_name)
                meal_notes = (
                    # The description is typically the location
                    # (but not required to be by the API specification).
                    build_location(counter_description) +
                    build_hours(counter_hours) +
                    build_notes(base_data, meal['notices'], meal['components']))
                meal_prices = {}
                if 'prices' in meal:
                    prices = meal['prices']
                    for role in prices:
                        if role in ROLES:
                            meal_prices[base_data['roles'][role]] = prices[role]
                if 'pricingNotice' in meal:
                    meal_notes.append(meal['pricingNotice'])
                canteen.addMeal(date, counter_name,
                                meal_name, meal_notes, meal_prices)
    return canteen.toXMLFeed()
Example 13: parse_url
# Required imports: from pyopenmensa.feed import LazyBuilder [as alias]
# Or alternatively: from pyopenmensa.feed.LazyBuilder import toXMLFeed [as alias]
def parse_url(url, today=False):
    canteen = LazyBuilder()
    day = datetime.date.today()
    emptyCount = 0
    while emptyCount < 7:
        if not parse_day(canteen, '{}&day={}&month={}&year={}&limit=25'
                         .format(url, day.day, day.month, day.year),
                         day.strftime('%Y-%m-%d')):
            emptyCount += 1
        else:
            emptyCount = 0
        if today:
            break
        day += datetime.date.resolution
    return canteen.toXMLFeed()
Example 14: parse_url
# Required imports: from pyopenmensa.feed import LazyBuilder [as alias]
# Or alternatively: from pyopenmensa.feed.LazyBuilder import toXMLFeed [as alias]
def parse_url(url, today=False):
    content = urlopen(url).read()
    document = parse(content)
    legends = document.find_all('div', {'class': 'legende'})
    if len(legends) > 0:
        extraLegend = {int(v[0]): v[1] for v in reversed(legend_regex.findall(legends[0].text))}
    else:
        extraLegend = {}
    canteen = LazyBuilder()
    for day_td in document.find_all('td', text=day_regex):
        date = day_regex.search(day_td.string).group('date')
        table = None
        for element in day_td.parents:
            if element.name == 'table':
                table = element
                break
        if not table:
            continue
        for tr in table.tbody.find_all('tr'):
            if 'geschlossen' in tr.text or 'Feiertage' in tr.text:
                match = day_range_regex.search(tr.text)
                if not match:
                    canteen.setDayClosed(date)
                else:
                    fromDate = datetime.datetime.strptime(match.group('from'), '%d.%m.%Y')
                    toDate = datetime.datetime.strptime(match.group('to'), '%d.%m.%Y')
                    while fromDate <= toDate:
                        canteen.setDayClosed(fromDate.strftime('%Y-%m-%d'))
                        fromDate += datetime.date.resolution
                continue
            if len(tr) != 3:
                continue  # no meal
            strings = list(tr.contents[0].strings)
            name = strings[0]
            # prices:
            prices = strings[-1].split('|')
            print(prices)
            if '-' in map(lambda v: v.strip(), prices):
                prices = {}
            # notes:
            notes = []
            for img in tr.contents[1].find_all('img'):
                notes.append(img['alt'].replace('Symbol', '').strip())
            for extra in list(set(map(lambda v: int(v), extra_regex.findall(tr.text)))):
                if extra in extraLegend:
                    notes.append(extraLegend[extra])
            canteen.addMeal(date, 'Hauptgerichte', name, notes, prices, roles if prices else None)
    return canteen.toXMLFeed()
Example 15: parse_url
# Required imports: from pyopenmensa.feed import LazyBuilder [as alias]
# Or alternatively: from pyopenmensa.feed.LazyBuilder import toXMLFeed [as alias]
def parse_url(url, today=False, canteentype='Mittagsmensa', this_week='', next_week=True, legend_url=None):
    canteen = LazyBuilder()
    canteen.legendKeyFunc = lambda v: v.lower()
    if not legend_url:
        legend_url = url[:url.find('essen/') + 6] + 'lebensmittelkennzeichnung'
    legend_doc = parse(urlopen(legend_url))
    canteen.setLegendData(
        text=legend_doc.find(id='artikel').text,
        regex=r'(?P<name>(\d+|[A-Z]+))\s+=\s+(?P<value>\w+( |\t|\w)*)'
    )
    parse_week(url + this_week, canteen, canteentype)
    if not today and next_week is True:
        parse_week(url + '-kommende-woche', canteen, canteentype)
    if not today and type(next_week) is str:
        parse_week(url + next_week, canteen, canteentype)
    return canteen.toXMLFeed()