本文整理汇总了Python中pyutils.legislation.Bill.add_version方法的典型用法代码示例。如果您正苦于以下问题:Python Bill.add_version方法的具体用法?Python Bill.add_version怎么用?Python Bill.add_version使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pyutils.legislation.Bill
的用法示例。
在下文中一共展示了Bill.add_version方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: scrape2009
# 需要导入模块: from pyutils.legislation import Bill [as 别名]
# 或者: from pyutils.legislation.Bill import add_version [as 别名]
def scrape2009(self, url, year, chamberName, session, number):
    """Scrape a Georgia bill summary page in the 2009-10 site layout.

    e.g. http://www.legis.ga.gov/legis/2009_10/sum/sb1.htm
    """
    page = parse(url).getroot()

    # Bill title; fall back to a placeholder when the heading is absent.
    try:
        name = page.cssselect("#legislation h1")[0].text_content().strip()
    except IndexError:
        name = "Unknown"
    bill = Bill(session, chamberName, number, name)

    # Sponsorships (chamber of each sponsor is not known here).
    for a in page.cssselect("#sponsors a"):
        bill.add_sponsor("", a.text_content().strip())

    self.parse_votes(url, page, chamberName, bill)

    # Actions: skip the header row and any row without a date.
    for row in page.cssselect("#history tr")[1:]:
        date = row[0].text_content().strip()
        action_text = row[1].text_content().strip()
        if "/" not in date:
            continue
        if action_text.startswith("Senate"):
            bill.add_action("upper", action_text, date)
        elif action_text.startswith("House"):
            bill.add_action("lower", action_text, date)

    # Versions. BUG FIX: the original iterated with `row` but read the
    # stale `a` left over from the sponsor loop, so every version got the
    # last sponsor's text and href.
    for a in page.cssselect("#versions a"):
        bill.add_version(a.text_content(), urlparse.urljoin(url, a.get("href")))

    self.add_bill(bill)
示例2: scrape2009
# 需要导入模块: from pyutils.legislation import Bill [as 别名]
# 或者: from pyutils.legislation.Bill import add_version [as 别名]
def scrape2009(self, url, year, chamberName, session, number):
    """Scrape a Georgia bill summary page in the 2009-10 site layout.

    e.g. http://www.legis.ga.gov/legis/2009_10/sum/sum/sb1.htm
    """
    page = parse(url).getroot()

    # Bill title from the page heading.
    name = page.cssselect('#legislation h1')[0].text_content().strip()
    bill = Bill(session, chamberName, number, name)

    # Sponsorships (sponsor chamber is not known here).
    for a in page.cssselect("#sponsors a"):
        bill.add_sponsor('', a.text_content().strip())

    # Actions: skip the header row and any row without a date.
    for row in page.cssselect('#history tr')[1:]:
        date = row[0].text_content().strip()
        action_text = row[1].text_content().strip()
        if '/' not in date:
            continue
        if action_text.startswith('Senate'):
            bill.add_action('upper', action_text, date)
        elif action_text.startswith('House'):
            bill.add_action('lower', action_text, date)

    # Versions. BUG FIX: the original loop bound `row` but read the stale
    # `a` from the sponsor loop, attaching the wrong text/href to every
    # version.
    for a in page.cssselect('#versions a'):
        bill.add_version(a.text_content(),
                         urlparse.urljoin(url, a.get('href')))

    self.add_bill(bill)
示例3: scrape1999
# 需要导入模块: from pyutils.legislation import Bill [as 别名]
# 或者: from pyutils.legislation.Bill import add_version [as 别名]
def scrape1999(self, url, year, chamberName, session, number):
    """Scrape a Georgia bill summary page in the 1999-00 site layout.

    e.g. http://www.legis.ga.gov/legis/1999_00/leg/sum/sb1.htm
    """
    page = parse(url).getroot()

    # Grab the interesting tables on the page.
    tables = page.cssselect("table")

    # Bill title follows the "SB 1 -" prefix in the first link.
    name = tables[1].cssselect("a")[0].text_content().split("-", 1)[1]
    bill = Bill(session, chamberName, number, name)

    # Versions: only the current full text is linked from the summary URL.
    bill.add_version("Current", url.replace("/sum/", "/fulltext/"))

    # Sponsorships (sponsor chamber is not known here).
    for a in tables[2].cssselect("a"):
        bill.add_sponsor("", a.text_content().strip())

    self.parse_votes_1999(url, page, chamberName, bill)

    # Actions table has a Senate date column and a House date column;
    # skip rows where neither column holds a date.
    for row in tables[-1].cssselect("tr"):
        senate_date = row[0].text_content().strip()
        action_text = row[1].text_content().strip()
        house_date = row[2].text_content().strip()
        if "/" not in senate_date and "/" not in house_date:
            continue
        if senate_date:
            bill.add_action("upper", action_text, senate_date)
        if house_date:
            bill.add_action("lower", action_text, house_date)

    self.add_bill(bill)
示例4: scrape2003
# 需要导入模块: from pyutils.legislation import Bill [as 别名]
# 或者: from pyutils.legislation.Bill import add_version [as 别名]
def scrape2003(self, url, year, chamberName, session, number):
    """Scrape a Georgia bill summary page in the 2003-04 site layout.

    e.g. http://www.legis.ga.gov/legis/2003_04/sum/sum/sb1.htm
    """
    page = parse(url).getroot()

    # Grab the interesting tables on the page.
    tables = page.cssselect('center table')

    # Bill title follows the "SB 1 -" prefix.
    name = tables[0].text_content().split('-', 1)[1]
    bill = Bill(session, chamberName, number, name)

    # Sponsorships (sponsor chamber is not known here).
    for a in tables[1].cssselect('a'):
        bill.add_sponsor('', a.text_content().strip())

    # Actions live in the second-to-last table of the centered block;
    # the first two rows are headers.
    center = page.cssselect('center table center')[0]
    for row in center.cssselect('table')[-2].cssselect('tr')[2:]:
        date = row[0].text_content().strip()
        action_text = row[1].text_content().strip()
        if '/' not in date:
            continue
        if action_text.startswith('Senate'):
            bill.add_action('upper', action_text, date)
        elif action_text.startswith('House'):
            bill.add_action('lower', action_text, date)

    # Versions. BUG FIX: the original bound `row` but read the stale `a`
    # from the sponsor loop, so every version got the wrong link.
    for a in center.cssselect('table')[-1].cssselect('a'):
        bill.add_version(a.text_content(),
                         urlparse.urljoin(url, a.get('href')))

    self.add_bill(bill)
示例5: scrape2003
# 需要导入模块: from pyutils.legislation import Bill [as 别名]
# 或者: from pyutils.legislation.Bill import add_version [as 别名]
def scrape2003(self, url, year, chamberName, session, number):
    """Scrape a Georgia bill summary page in the 2003-04 site layout.

    e.g. http://www.legis.ga.gov/legis/2003_04/sum/sb1.htm
    """
    page = parse(url).getroot()

    # Grab the interesting tables on the page.
    tables = page.cssselect("center table")

    # Bill title follows the "SB 1 -" prefix.
    name = tables[0].text_content().split("-", 1)[1]
    bill = Bill(session, chamberName, number, name)

    # Sponsorships (sponsor chamber is not known here).
    for a in tables[1].cssselect("a"):
        bill.add_sponsor("", a.text_content().strip())

    self.parse_votes_2001_2004(url, page, chamberName, bill)

    # Actions live in the second-to-last table of the centered block;
    # the first two rows are headers.
    center = page.cssselect("center table center")[0]
    for row in center.cssselect("table")[-2].cssselect("tr")[2:]:
        date = row[0].text_content().strip()
        action_text = row[1].text_content().strip()
        if "/" not in date:
            continue
        if action_text.startswith("Senate"):
            bill.add_action("upper", action_text, date)
        elif action_text.startswith("House"):
            bill.add_action("lower", action_text, date)

    # Versions. BUG FIX: the original bound `row` but read the stale `a`
    # from the sponsor loop, so every version got the wrong link.
    for a in center.cssselect("table")[-1].cssselect("a"):
        bill.add_version(a.text_content(), urlparse.urljoin(url, a.get("href")))

    self.add_bill(bill)
示例6: scrape1999
# 需要导入模块: from pyutils.legislation import Bill [as 别名]
# 或者: from pyutils.legislation.Bill import add_version [as 别名]
def scrape1999(self, url, year, chamberName, session, number):
    """Scrape a Georgia bill summary page in the 1999-00 site layout.

    e.g. http://www.legis.ga.gov/legis/1999_00/leg/sum/sb1.htm
    """
    page = parse(url).getroot()

    # Grab the interesting tables on the page.
    tables = page.cssselect('table')

    # Bill title follows the "SB 1 -" prefix in the first link.
    name = tables[1].cssselect('a')[0].text_content().split('-', 1)[1]
    bill = Bill(session, chamberName, number, name)

    # Versions: only the current full text is linked from the summary URL.
    bill.add_version('Current', url.replace('/sum/', '/fulltext/'))

    # Sponsorships (sponsor chamber is not known here).
    for a in tables[2].cssselect('a'):
        bill.add_sponsor('', a.text_content().strip())

    # Actions table has a Senate date column and a House date column;
    # skip rows where neither column holds a date.
    for row in tables[-1].cssselect('tr'):
        senate_date = row[0].text_content().strip()
        action_text = row[1].text_content().strip()
        house_date = row[2].text_content().strip()
        if '/' not in senate_date and '/' not in house_date:
            continue
        if senate_date:
            bill.add_action('upper', action_text, senate_date)
        if house_date:
            bill.add_action('lower', action_text, house_date)

    self.add_bill(bill)
示例7: get_bill_info
# 需要导入模块: from pyutils.legislation import Bill [as 别名]
# 或者: from pyutils.legislation.Bill import add_version [as 别名]
def get_bill_info(self, chamber, session, bill_detail_url):
    """Extract all the requested info for a given Minnesota bill.

    Calls the parent's methods to enter the results into CSV files.
    """
    bill_detail_url_base = 'https://www.revisor.leg.state.mn.us/revisor/pages/search_status/'
    bill_detail_url = urlparse.urljoin(bill_detail_url_base, bill_detail_url)

    # Normalize the chamber name to the fiftystates convention.
    if chamber == "House":
        chamber = 'lower'
    else:
        chamber = 'upper'

    with self.soup_context(bill_detail_url) as bill_soup:
        bill_id = self.extract_bill_id(bill_soup)
        bill_title = self.extract_bill_title(bill_soup)
        bill = Bill(session, chamber, bill_id, bill_title)

        # Get all versions of the bill. Versions live on a separate page,
        # linked from the bill details page in a link titled "Bill Text".
        version_url_base = 'https://www.revisor.leg.state.mn.us'
        bill_version_link = self.extract_bill_version_link(bill_soup)
        version_detail_url = urlparse.urljoin(version_url_base, bill_version_link)

        with self.soup_context(version_detail_url) as version_soup:
            # MN bills can have multiple versions; add each one.
            bill_versions = self.extract_bill_versions(version_soup)
            for version in bill_versions:
                version_name = version['name']
                version_url = urlparse.urljoin(version_url_base, version['url'])
                bill.add_version(version_name, version_url)

        # Grab primary and cosponsors. MN uses "Primary Author" to name a
        # bill's primary sponsor; everyone else is a 'cosponsor'.
        sponsors = self.extract_bill_sponsors(bill_soup)
        primary_sponsor = sponsors[0]
        cosponsors = sponsors[1:]
        bill.add_sponsor('primary', primary_sponsor)
        for leg in cosponsors:
            bill.add_sponsor('cosponsor', leg)

        # Add actions performed on the bill.
        bill_actions = self.extract_bill_actions(bill_soup, chamber)
        for action in bill_actions:
            action_chamber = action['action_chamber']
            action_date = action['action_date']
            action_text = action['action_text']
            bill.add_action(action_chamber, action_text, action_date)

    self.add_bill(bill)
示例8: scrape_bill
# 需要导入模块: from pyutils.legislation import Bill [as 别名]
# 或者: from pyutils.legislation.Bill import add_version [as 别名]
def scrape_bill(self, chamber, session, billid, histurl, year):
    """Scrape one bill's title, sponsor, versions and actions from its
    history page at ``histurl`` and record it via ``self.add_bill``.
    """
    # NOTE(review): when year starts with 'R', int(year[0]) below raises
    # ValueError — the two branches look swapped relative to the guard.
    # Preserved as-is; confirm against the original scraper.
    if year[0] != 'R':
        session = year
    else:
        session = self.metadata['session_details'][year][
            'sub_sessions'][int(year[0]) - 1]

    with self.urlopen_context(histurl) as data:
        soup = BS(cleansource(data))
        basicinfo = soup.findAll('div', id='bhistleft')[0]
        hist = basicinfo.table

        # Title and sponsor are labelled by <b> tags in the summary block.
        sponsor = None
        title = None
        for b in basicinfo.findAll('b'):
            if b.next.startswith('SUMMARY'):
                title = b.findNextSiblings(text=True)[0].strip()
            elif b.next.startswith('SPONSOR'):
                for a in b.findNextSiblings('a'):
                    if not issponsorlink(a):
                        break
                    sponsor = cleansponsor(a.contents[0])

        bill = Bill(session, chamber, billid, title)
        if sponsor:
            bill.add_sponsor('primary', sponsor)

        # Each row of the summary table links one version of the bill.
        for row in hist.findAll('tr'):
            link = row.td.a
            vlink = urlbase % link['href']
            vname = link.contents[0].strip()
            bill.add_version(vname, vlink)

        # History table: first row is a header.
        history = soup.findAll('div', id='bhisttab')[0].table
        rows = history.findAll('tr')[1:]
        for row in rows:
            tds = row.findAll('td')
            if len(tds) < 2:
                # This is not actually an action.
                continue
            date, action = tds[:2]
            date = dt.datetime.strptime(date.contents[0], '%m/%d/%y')
            action = action.contents[0].strip()
            if 'House' in action:
                actor = 'lower'
            elif 'Senate' in action:
                actor = 'upper'
            else:  # for lack of a better guess, attribute to the bill's chamber
                actor = chamber
            bill.add_action(actor, action, date)

        self.add_bill(bill)
示例9: parse_bill
# 需要导入模块: from pyutils.legislation import Bill [as 别名]
# 或者: from pyutils.legislation.Bill import add_version [as 别名]
def parse_bill(self, chamber, session, bill_id, bill_info_url):
    """Parse one Kentucky bill page and record it via ``self.add_bill``.

    Returns early (recording nothing) when the bill was withdrawn.
    """
    with self.urlopen_context(bill_info_url) as bill_info_data:
        bill_info = self.soup_parser(bill_info_data)

        version_url = '%s/bill.doc' % bill_id
        version_link = bill_info.find(href=version_url)
        if not version_link:
            # This bill was withdrawn.
            return

        bill_title = version_link.findNext('p').contents[0].strip()
        bill = Bill(session, chamber, bill_id, bill_title)
        bill.add_version("Most Recent Version",
                         session_url(session) + version_url)
        bill.add_source(bill_info_url)

        # Every legislator link on the page is treated as a primary sponsor.
        sponsor_links = bill_info.findAll(href=re.compile(
            r'legislator/[SH]\d+\.htm'))
        for sponsor_link in sponsor_links:
            bill.add_sponsor('primary', sponsor_link.contents[0].strip())

        # Actions are the text nodes of the last <p> after the version link.
        action_p = version_link.findAllNext('p')[-1]
        for action in action_p.findAll(text=True):
            action = action.strip()
            if (not action or action == 'last action' or
                    'Prefiled' in action):
                continue

            action_date = action.split('-')[0]
            action_date = dt.datetime.strptime(action_date, '%b %d')
            # The page omits the year; recover it from the session code.
            action_date = action_date.replace(
                year=int('20' + session[2:4]))

            action = '-'.join(action.split('-')[1:])
            if action.endswith('House') or action.endswith('(H)'):
                actor = 'lower'
            elif action.endswith('Senate') or action.endswith('(S)'):
                actor = 'upper'
            else:
                actor = chamber
            bill.add_action(actor, action, action_date)

        vote_link = bill_info.find(href=re.compile(r'.*/vote_history.pdf'))
        if vote_link:
            bill.add_document(
                'vote_history.pdf',
                bill_info_url.replace('.htm', '') + "/vote_history.pdf")

        self.add_bill(bill)
示例10: scrape_session
# 需要导入模块: from pyutils.legislation import Bill [as 别名]
# 或者: from pyutils.legislation.Bill import add_version [as 别名]
def scrape_session(self, chamber, session):
    """Scrape every bill in one Utah session for the given chamber."""
    if chamber == "lower":
        bill_abbr = "HB"
    else:
        bill_abbr = "SB"

    bill_list_url = "http://www.le.state.ut.us/~%s/bills.htm" % (
        session.replace(' ', ''))
    self.log("Getting bill list for %s, %s" % (session, chamber))

    try:
        base_bill_list = self.soup_parser(self.urlopen(bill_list_url))
    except Exception:
        # This session doesn't exist for this year (best-effort fetch);
        # narrowed from a bare `except:` so SystemExit etc. still propagate.
        return

    bill_list_link_re = re.compile(r'.*%s\d+ht.htm$' % bill_abbr)
    for link in base_bill_list.findAll('a', href=bill_list_link_re):
        bill_list = self.soup_parser(self.urlopen(link['href']))
        bill_link_re = re.compile(r'.*billhtm/%s.*.htm' % bill_abbr)

        for bill_link in bill_list.findAll('a', href=bill_link_re):
            bill_id = bill_link.find(text=True).strip()
            bill_info_url = bill_link['href']
            bill_info = self.soup_parser(self.urlopen(bill_info_url))

            # The <h3> holds "TITLE -- SPONSOR" (with non-breaking spaces).
            bill_title, primary_sponsor = bill_info.h3.contents[2].replace(
                '&nbsp;', ' ').strip().split(' -- ')

            bill = Bill(session, chamber, bill_id, bill_title)
            bill.add_source(bill_info_url)
            bill.add_sponsor('primary', primary_sponsor)

            status_re = re.compile(r'.*billsta/%s.*.htm' %
                                   bill_abbr.lower())
            status_link = bill_info.find('a', href=status_re)
            if status_link:
                self.parse_status(bill, status_link['href'])

            # Version links follow the "Bill Text" marker; the first link
            # is skipped (it is not a version).
            text_find = bill_info.find(
                text="Bill Text (If you are having trouble viewing")
            if text_find:
                text_link_re = re.compile(r'.*\.htm')
                for text_link in text_find.parent.parent.findAll(
                        'a', href=text_link_re)[1:]:
                    version_name = text_link.previous.strip()
                    bill.add_version(version_name, text_link['href'])

            self.add_bill(bill)
示例11: get_bill_info
# 需要导入模块: from pyutils.legislation import Bill [as 别名]
# 或者: from pyutils.legislation.Bill import add_version [as 别名]
def get_bill_info(self, chamber, session, bill_detail_url, version_list_url):
    """Extract all the requested info for a given Minnesota bill.

    Calls the parent's methods to enter the results into JSON files.
    """
    # Normalize the chamber name to the fiftystates convention.
    if chamber == "House":
        chamber = 'lower'
    else:
        chamber = 'upper'

    with self.soup_context(bill_detail_url) as bill_soup:
        bill_id = self.extract_bill_id(bill_soup)
        bill_title = self.extract_bill_title(bill_soup)
        bill = Bill(session, chamber, bill_id, bill_title)

    # Get all versions of the bill. Versions live on a separate page,
    # linked from the "Bill Text" column of the search results page.
    with self.soup_context(version_list_url) as version_soup:
        # MN bills can have multiple versions; add each one.
        self.debug("Extracting bill versions from: " + version_list_url)
        bill_versions = self.extract_bill_versions(version_soup)
        for version in bill_versions:
            version_name = version['name']
            version_url = urlparse.urljoin(VERSION_URL_BASE, version['url'])
            bill.add_version(version_name, version_url)

    # Grab primary and cosponsors. MN uses "Primary Author" to name a
    # bill's primary sponsor; everyone else is a 'cosponsor'.
    sponsors = self.extract_bill_sponsors(bill_soup)
    primary_sponsor = sponsors[0]
    cosponsors = sponsors[1:]
    bill.add_sponsor('primary', primary_sponsor)
    for leg in cosponsors:
        bill.add_sponsor('cosponsor', leg)

    # Add actions performed on the bill.
    bill_actions = self.extract_bill_actions(bill_soup, chamber)
    for action in bill_actions:
        action_chamber = action['action_chamber']
        action_date = action['action_date']
        action_text = action['action_text']
        bill.add_action(action_chamber, action_text, action_date)

    self.add_bill(bill)
示例12: scrape1995
# 需要导入模块: from pyutils.legislation import Bill [as 别名]
# 或者: from pyutils.legislation.Bill import add_version [as 别名]
def scrape1995(self, url, year, chamberName, session, number):
    """Scrape a Georgia bill summary page in the 1995-96 site layout.

    e.g. http://www.legis.ga.gov/legis/1995_96/leg/sum/sb1.htm
    """
    page = parse(url).getroot()

    # Bill title follows the "SB 1 -" prefix after the <br> in the heading.
    name = page.cssselect('h3 br')[0].tail.split('-', 1)[1].strip()
    bill = Bill(session, chamberName, number, name)

    # Versions: only the current full text is linked from the summary URL.
    bill.add_version('Current', url.replace('/sum/', '/fulltext/'))

    # Sponsorships: rows between the "Sponsor and CoSponsors" header and
    # the "Links / Committees / Status" section.
    rows = page.cssselect('center table tr')
    for row in rows:
        if row.text_content().strip() == 'Sponsor and CoSponsors':
            continue
        if row.text_content().strip() == 'Links / Committees / Status':
            break
        for a in row.cssselect('a'):
            bill.add_sponsor('', a.text_content().strip())

    # Actions are in a <pre> table with fixed-width columns, e.g.:
    #          SENATE                    HOUSE
    #   -------------------------------------
    #   1/13/95   Read 1st time          2/6/95
    #   1/31/95   Favorably Reported
    #   2/1/95    Read 2nd Time          2/7/95
    #   2/3/95    Read 3rd Time
    #   2/3/95    Passed/Adopted
    # Columns: Senate date [0:22], action text [23:46], House date [46:].
    actions = page.cssselect('pre')[0].text_content().split('\n')
    actions = actions[2:]  # drop the two header lines
    for action in actions:
        senate_date = action[:22].strip()
        action_text = action[23:46].strip()
        house_date = action[46:].strip()
        if '/' not in senate_date and '/' not in house_date:
            continue
        if senate_date:
            bill.add_action('upper', action_text, senate_date)
        if house_date:
            bill.add_action('lower', action_text, house_date)

    self.add_bill(bill)
示例13: scrape1995
# 需要导入模块: from pyutils.legislation import Bill [as 别名]
# 或者: from pyutils.legislation.Bill import add_version [as 别名]
def scrape1995(self, url, year, chamberName, session, number):
    """Scrape a Georgia bill summary page in the 1995-96 site layout.

    e.g. http://www.legis.ga.gov/legis/1995_96/leg/sum/sb1.htm
    """
    page = parse(url).getroot()

    # Bill title follows the "SB 1 -" prefix after the <br> in the heading.
    name = page.cssselect("h3 br")[0].tail.split("-", 1)[1].strip()
    bill = Bill(session, chamberName, number, name)

    # Versions: only the current full text is linked from the summary URL.
    bill.add_version("Current", url.replace("/sum/", "/fulltext/"))

    # Sponsorships: rows between the "Sponsor and CoSponsors" header and
    # the "Links / Committees / Status" section.
    rows = page.cssselect("center table tr")
    for row in rows:
        if row.text_content().strip() == "Sponsor and CoSponsors":
            continue
        if row.text_content().strip() == "Links / Committees / Status":
            break
        for a in row.cssselect("a"):
            bill.add_sponsor("", a.text_content().strip())

    # Actions are in a <pre> table with fixed-width columns, e.g.:
    #          SENATE                    HOUSE
    #   -------------------------------------
    #   1/13/95   Read 1st time          2/6/95
    #   1/31/95   Favorably Reported
    #   2/1/95    Read 2nd Time          2/7/95
    #   2/3/95    Read 3rd Time
    #   2/3/95    Passed/Adopted
    # Columns: Senate date [0:22], action text [23:46], House date [46:].
    actions = page.cssselect("pre")[0].text_content().split("\n")
    actions = actions[2:]  # drop the two header lines
    for action in actions:
        senate_date = action[:22].strip()
        action_text = action[23:46].strip()
        house_date = action[46:].strip()
        if "/" not in senate_date and "/" not in house_date:
            continue
        if senate_date:
            bill.add_action("upper", action_text, senate_date)
        if house_date:
            bill.add_action("lower", action_text, house_date)

    self.add_bill(bill)
示例14: parse_bill
# 需要导入模块: from pyutils.legislation import Bill [as 别名]
# 或者: from pyutils.legislation.Bill import add_version [as 别名]
def parse_bill(self, chamber, session, bill_id, bill_info_url):
    """Parse one Kentucky bill page and record it via ``self.save_bill``.

    Returns early (recording nothing) when the bill was withdrawn.
    """
    with self.urlopen_context(bill_info_url) as bill_info_data:
        bill_info = self.soup_parser(bill_info_data)

        version_url = "%s/bill.doc" % bill_id
        version_link = bill_info.find(href=version_url)
        if not version_link:
            # This bill was withdrawn.
            return

        bill_title = version_link.findNext("p").contents[0].strip()
        bill = Bill(session, chamber, bill_id, bill_title)
        bill.add_version("Most Recent Version", session_url(session) + version_url)
        bill.add_source(bill_info_url)

        # Every legislator link on the page is treated as a primary sponsor.
        sponsor_links = bill_info.findAll(href=re.compile(r"legislator/[SH]\d+\.htm"))
        for sponsor_link in sponsor_links:
            bill.add_sponsor("primary", sponsor_link.contents[0].strip())

        # Actions are the text nodes of the last <p> after the version link.
        action_p = version_link.findAllNext("p")[-1]
        for action in action_p.findAll(text=True):
            action = action.strip()
            if not action or action == "last action" or "Prefiled" in action:
                continue

            action_date = action.split("-")[0]
            action_date = dt.datetime.strptime(action_date, "%b %d")
            # The page omits the year; recover it from the session code.
            action_date = action_date.replace(year=int("20" + session[2:4]))

            action = "-".join(action.split("-")[1:])
            if action.endswith("House") or action.endswith("(H)"):
                actor = "lower"
            elif action.endswith("Senate") or action.endswith("(S)"):
                actor = "upper"
            else:
                actor = chamber
            bill.add_action(actor, action, action_date)

        vote_link = bill_info.find(href=re.compile(r".*/vote_history.pdf"))
        if vote_link:
            bill.add_document("vote_history.pdf", bill_info_url.replace(".htm", "") + "/vote_history.pdf")

        self.save_bill(bill)
示例15: scrape_bills
# 需要导入模块: from pyutils.legislation import Bill [as 别名]
# 或者: from pyutils.legislation.Bill import add_version [as 别名]
def scrape_bills(self, chamber, year):
    """Scrape all Michigan bills for one chamber, starting from the first
    bill number and walking forward until a bill page is missing.

    Raises NoDataForYear for even years (sessions span odd->even years).
    """
    if int(year) % 2 == 0:
        raise NoDataForYear(year)

    year = int(year)
    oyear = year  # save off the original year naming the session

    # Senate bills start at 1, House bills at 4001.
    if chamber == 'upper':
        bill_no = 1
        abbr = 'SB'
    else:
        bill_no = 4001
        abbr = 'HB'

    while True:
        (bill_page, year) = self.scrape_bill(year, abbr, bill_no)
        # If we can't find a page, we must be done. This is a healthy thing.
        if bill_page is None:  # idiom fix: `is None`, not `== None`
            return

        title = ''.join(self.flatten(
            bill_page.findAll(id='frg_billstatus_ObjectSubject')[0]))
        title = title.replace('\n', '').replace('\r', '')
        bill_id = "%s %d" % (abbr, bill_no)
        the_bill = Bill("Regular Session %d" % oyear, chamber, bill_id, title)

        # Sponsors: the first link is the primary sponsor, the rest cosponsors.
        first = 0
        for name in bill_page.findAll(id='frg_billstatus_SponsorList')[0].findAll('a'):
            the_bill.add_sponsor(['primary', 'cosponsor'][first], name.string)
            first = 1

        # Versions.
        for doc in bill_page.findAll(id='frg_billstatus_DocumentGridTable')[0].findAll('tr'):
            r = self.parse_doc(the_bill, doc)
            if r:
                the_bill.add_version(*r)

        # Documents: House and Senate fiscal-agency sections, when present.
        if 'frg_billstatus_HlaTable' in str(bill_page):
            for doc in bill_page.findAll(id='frg_billstatus_HlaTable')[0].findAll('tr'):
                r = self.parse_doc(the_bill, doc)
                if r:
                    the_bill.add_document(*r)
        if 'frg_billstatus_SfaSection' in str(bill_page):
            for doc in bill_page.findAll(id='frg_billstatus_SfaSection')[0].findAll('tr'):
                r = self.parse_doc(the_bill, doc)
                if r:
                    the_bill.add_document(*r)

        the_bill.add_source('http://legislature.mi.gov/doc.aspx?%d-%s-%04d'
                            % (year, abbr, bill_no))
        self.parse_actions(the_bill,
                           bill_page.findAll(id='frg_billstatus_HistoriesGridView')[0])
        self.add_bill(the_bill)
        bill_no = bill_no + 1