本文整理汇总了Python中fiftystates.scrape.votes.Vote类的典型用法代码示例。如果您正苦于以下问题:Python Vote类的具体用法?Python Vote怎么用?Python Vote使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Vote类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: scrape_votes
def scrape_votes(self, link, chamber, bill):
    """Scrape the vote table at *link* and attach Vote objects to *bill*.

    Each vote row is a group of five <td> cells:
    (actor, date, name_and_text, name, text).
    """
    with self.lxml_context(link) as votes_page:
        page_tables = votes_page.cssselect("table")
        votes_table = page_tables[0]
        votes_elements = votes_table.cssselect("td")
        # Eliminate table headings and unnecessary element
        votes_elements = votes_elements[3:]
        ve = self.grouper(5, votes_elements)
        for actor, date, name_and_text, name, text in ve:
            content = text.text_content()
            # Committee-of-the-Whole rows are not recorded floor votes; skip.
            if "cow" in content or "COW" in content:
                continue
            vote_date = dt.datetime.strptime(date.text_content(), "%m/%d/%Y")
            # BUG FIX: str.lstrip() removes a *character set*, not a prefix,
            # so lstrip("FINAL VOTE - ") could also eat leading letters of
            # the motion itself.  Strip the literal prefix instead.
            motion_and_votes = content
            prefix = "FINAL VOTE - "
            if motion_and_votes.startswith(prefix):
                motion_and_votes = motion_and_votes[len(prefix):]
            motion, sep, votes = motion_and_votes.partition(".")
            passed = "passed" in votes
            # Tally format: "<yes>-<no>[-<other>]".
            votes_match = re.search("([0-9]+)-([0-9]+)-?([0-9]+)?", votes)
            yes_count = int(votes_match.group(1))
            no_count = int(votes_match.group(2))
            other_count = votes_match.group(3)
            # The third tally may be absent; treat it as zero.
            # (Also use int consistently — original mixed str counts with int 0.)
            other_count = int(other_count) if other_count is not None else 0
            vote = Vote(chamber, vote_date, motion, passed,
                        yes_count, no_count, other_count)
            vote.add_source(link)
            bill.add_vote(vote)
示例2: scrape
def scrape(self, chamber, session):
    """Build and save a fixed example bill with sponsors, versions,
    documents, votes and actions for the given chamber/session.
    """
    self.validate_session(session)
    # Pick the bill id for this chamber and remember the opposite chamber.
    if chamber == 'upper':
        bill_id, other_chamber = 'SB 1', 'lower'
    else:
        bill_id, other_chamber = 'HB 1', 'upper'

    bill = Bill(session, chamber, bill_id, 'A super bill')
    bill.add_source('http://example.com/')
    bill.add_version('As Introduced', 'http://example.com/SB1.html')
    bill.add_document('Google', 'http://google.com')
    bill.add_sponsor('primary', 'Bob Smith')
    bill.add_sponsor('secondary', 'Johnson, Sally')

    first_date = datetime.datetime.strptime('1/29/2010', '%m/%d/%Y')
    second_date = datetime.datetime.strptime('1/30/2010', '%m/%d/%Y')

    # A passing upper-chamber vote and a failing lower-chamber vote.
    passage = Vote('upper', first_date, 'Final passage', True, 2, 0, 0)
    passage.yes('Smith')
    passage.yes('Johnson')

    failure = Vote('lower', second_date, 'Final passage', False, 0, 1, 1)
    failure.no('Bob Smith')
    failure.other('S. Johnson')

    bill.add_vote(passage)
    bill.add_vote(failure)
    bill.add_action(chamber, 'introduced', first_date)
    bill.add_action(chamber, 'read first time', second_date)
    bill.add_action(other_chamber, 'introduced', second_date)
    self.save_bill(bill)
示例3: add_vote
def add_vote(self, bill, chamber, date, line, text):
    """Parse an 'Ayes X, Noes Y' tally from *text*, classify the motion,
    fetch the linked roll-call document and attach the Vote to *bill*.
    """
    votes = re.findall(r'Ayes (\d+)\, Noes (\d+)', text)
    (yes, no) = int(votes[0][0]), int(votes[0][1])
    # Classify the motion; fall back to 'other' when no pattern matches.
    # (Renamed the loop variable — the original shadowed the builtin 'type'.)
    vtype = 'other'
    for regex, motion_type in motion_classifiers.iteritems():
        if re.match(regex, text):
            vtype = motion_type
            break
    v = Vote(chamber, date, text, yes > no, yes, no, 0, type=vtype)
    # fetch the vote itself
    link = line.xpath('//a[contains(@href, "/votes/")]')
    if link:
        link = link[0].get('href')
        v.add_source(link)
        filename, resp = self.urlretrieve(link)
        # 'av' = assembly (house) roll call, 'sv' = senate roll call.
        if 'av' in link:
            self.add_house_votes(v, filename)
        elif 'sv' in link:
            self.add_senate_votes(v, filename)
    bill.add_vote(v)
示例4: parse_vote
def parse_vote(self, bill, action, act_chamber, act_date, url):
    """Fetch an Alaska (BASIS) roll-call page and build a Vote from it.

    *action* carries the inline tally (e.g. "Y25 N10 E3 A2"); the page at
    *url* carries the motion text and the per-legislator name lists.
    Returns the populated Vote; the caller attaches it to the bill.
    """
    url = "http://www.legis.state.ak.us/basis/%s" % url
    info_page = self.soup_parser(self.urlopen(url))
    # Tally format: "Y<yeas> N<nays>" plus up to three optional
    # letter-prefixed counts; all optional counts are lumped into "other".
    tally = re.findall('Y(\d+) N(\d+)\s*(?:\w(\d+))*\s*(?:\w(\d+))*'
                       '\s*(?:\w(\d+))*', action)[0]
    # Unmatched optional groups come back as '' — treat them as zero.
    yes, no, o1, o2, o3 = map(lambda x: 0 if x == '' else int(x), tally)
    yes, no, other = int(yes), int(no), (int(o1) + int(o2) + int(o3))
    # The name lists live in one <pre> element, blank-line separated.
    votes = info_page.findAll('pre', text=re.compile('Yeas'),
                              limit=1)[0].split('\n\n')
    motion = info_page.findAll(text=re.compile('The question being'))[0]
    motion = re.findall('The question being:\s*"(.*)\?"',
                        motion, re.DOTALL)[0].replace('\n', ' ')
    # Passage is inferred from the tallies (yes > no).
    vote = Vote(act_chamber, act_date, motion, yes > no, yes, no, other)
    for vote_list in votes:
        # vote_type becomes a bound recorder method (yes/no/other),
        # or stays False for unrecognized sections.
        vote_type = False
        if vote_list.startswith('Yeas: '):
            vote_list, vote_type = vote_list[6:], vote.yes
        elif vote_list.startswith('Nays: '):
            vote_list, vote_type = vote_list[6:], vote.no
        elif vote_list.startswith('Excused: '):
            vote_list, vote_type = vote_list[9:], vote.other
        elif vote_list.startswith('Absent: '):
            vote_list, vote_type = vote_list[9:], vote.other
        if vote_type:
            for name in vote_list.split(','):
                vote_type(name.strip())
    vote.add_source(url)
    return vote
示例5: scrape_votes
def scrape_votes(self, link, chamber, bill):
    """Scrape the vote table at *link* and attach Vote objects to *bill*.

    Each vote row is a group of five <td> cells:
    (actor, date, name_and_text, name, text).
    """
    with self.urlopen(link) as votes_page_html:
        votes_page = lxml.html.fromstring(votes_page_html)
        page_tables = votes_page.cssselect('table')
        votes_table = page_tables[0]
        votes_elements = votes_table.cssselect('td')
        # Eliminate table headings and unnecessary element
        votes_elements = votes_elements[3:]
        ve = grouper(5, votes_elements)
        for actor, date, name_and_text, name, text in ve:
            content = text.text_content()
            # Committee-of-the-Whole rows are not recorded floor votes; skip.
            if 'cow' in content or 'COW' in content:
                continue
            vote_date = dt.datetime.strptime(date.text_content(), '%m/%d/%Y')
            # BUG FIX: str.lstrip() removes a *character set*, not a prefix,
            # so lstrip('FINAL VOTE - ') could also eat leading letters of
            # the motion itself.  Strip the literal prefix instead.
            motion_and_votes = content
            prefix = 'FINAL VOTE - '
            if motion_and_votes.startswith(prefix):
                motion_and_votes = motion_and_votes[len(prefix):]
            motion, sep, votes = motion_and_votes.partition('.')
            passed = 'passed' in votes
            # Tally format: "<yes>-<no>[-<other>]".
            votes_match = re.search('([0-9]+)-([0-9]+)-?([0-9]+)?', votes)
            yes_count = int(votes_match.group(1))
            no_count = int(votes_match.group(2))
            other_count = votes_match.group(3)
            # The third tally may be absent; treat it as zero.
            # (Also use int consistently — original mixed str counts with int 0.)
            other_count = int(other_count) if other_count is not None else 0
            vote = Vote(chamber, vote_date, motion, passed,
                        yes_count, no_count, other_count)
            vote.add_source(link)
            bill.add_vote(vote)
示例6: scrape_vote
def scrape_vote(self, bill, name, url):
    """Parse a vote PDF linked from a bill page and attach it to *bill*.

    *name* looks like "Senate Vote on <bill>, <motion>"; names that do
    not match that pattern are silently ignored.
    """
    match = re.match('^(Senate|House) Vote on [^,]*,(.*)$', name)
    if not match:
        return
    chamber = {'Senate': 'upper', 'House': 'lower'}[match.group(1)]
    motion = match.group(2).strip()
    if motion.startswith('FINAL PASSAGE'):
        type = 'passage'
    elif motion.startswith('AMENDMENT'):
        type = 'amendment'
    # NOTE(review): 'READINT' looks like a typo for 'READING', but it may
    # deliberately match a typo in the source data — confirm before changing.
    elif 'ON 3RD READINT' in motion:
        type = 'reading:3'
    else:
        type = 'other'
    # Date, counts and passage are unknown until the PDF is parsed below.
    vote = Vote(chamber, None, motion, None,
                None, None, None)
    vote['type'] = type
    vote.add_source(url)
    # Download the PDF to a temp file and convert it to HTML for parsing.
    with self.urlopen(url) as text:
        (fd, temp_path) = tempfile.mkstemp()
        with os.fdopen(fd, 'wb') as w:
            w.write(text)
        html = pdf_to_lxml(temp_path)
        os.remove(temp_path)
        vote_type = None
        total_re = re.compile('^Total--(\d+)$')
        body = html.xpath('string(/html/body)')
        # The body alternates section headers (YEAS/NAYS/ABSENT),
        # legislator names, and a "Total--N" line closing each section.
        for line in body.replace(u'\xa0', '\n').split('\n'):
            line = line.replace(' ', '').strip()
            if not line:
                continue
            if line in ('YEAS', 'NAYS', 'ABSENT'):
                vote_type = {'YEAS': 'yes', 'NAYS': 'no',
                             'ABSENT': 'other'}[line]
            elif vote_type:
                match = total_re.match(line)
                if match:
                    vote['%s_count' % vote_type] = int(match.group(1))
                elif vote_type == 'yes':
                    vote.yes(line)
                elif vote_type == 'no':
                    vote.no(line)
                elif vote_type == 'other':
                    vote.other(line)
        # The PDFs oddly don't say whether a vote passed or failed.
        # Hopefully passage just requires yes_votes > not_yes_votes
        if vote['yes_count'] > (vote['no_count'] + vote['other_count']):
            vote['passed'] = True
        else:
            vote['passed'] = False
        bill.add_vote(vote)
示例7: scrape_vote
def scrape_vote(self, bill, date, url):
    """Scrape a roll-call page at *url* and attach the Vote to *bill*.

    The page header reads "<bill>, <location>, <motion...>", where
    *location* names the chamber and possibly a committee.
    """
    with self.urlopen(url) as page:
        page = lxml.html.fromstring(page)
        header = page.xpath("string(//h4[contains(@id, 'hdVote')])")
        location = header.split(', ')[1]
        if location.startswith('House'):
            chamber = 'lower'
        elif location.startswith('Senate'):
            chamber = 'upper'
        else:
            # BUG FIX: the original interpolated 'chamber', which is not
            # assigned on this branch — the report raised NameError instead
            # of ScrapeError.  Report the offending location string.
            raise ScrapeError("Bad chamber: %s" % location)
        committee = ' '.join(location.split(' ')[1:]).strip()
        if not committee or committee.startswith('of Representatives'):
            committee = None
        motion = ', '.join(header.split(', ')[2:]).strip()
        yes_count = int(
            page.xpath("string(//td[contains(@id, 'tdAyes')])"))
        no_count = int(
            page.xpath("string(//td[contains(@id, 'tdNays')])"))
        excused_count = int(
            page.xpath("string(//td[contains(@id, 'tdExcused')])"))
        absent_count = int(
            page.xpath("string(//td[contains(@id, 'tdAbsent')])"))
        # Excused and absent members are lumped together as 'other'.
        other_count = excused_count + absent_count
        passed = yes_count > no_count
        # Classify the motion (renamed local — original shadowed builtin 'type').
        if motion.startswith('Do Pass'):
            motion_type = 'passage'
        elif motion == 'Concurred in amendments':
            motion_type = 'amendment'
        elif motion == 'Veto override':
            motion_type = 'veto_override'
        else:
            motion_type = 'other'
        vote = Vote(chamber, date, motion, passed, yes_count, no_count,
                    other_count)
        vote['type'] = motion_type
        if committee:
            vote['committee'] = committee
        vote.add_source(url)
        # Each table row pairs a legislator name cell with a mark cell.
        for td in page.xpath("//table[contains(@id, 'tblVotes')]/tr/td"):
            if td.text == 'Yea':
                vote.yes(td.getprevious().text.strip())
            elif td.text == 'Nay':
                vote.no(td.getprevious().text.strip())
            elif td.text in ('Excused', 'Absent'):
                vote.other(td.getprevious().text.strip())
        bill.add_vote(vote)
示例8: parse_vote_new
def parse_vote_new(self, bill, chamber, url):
    """Parse the new-style vote table at *url* and attach a Vote to *bill*."""
    vote_page = BeautifulSoup(self.urlopen(url))
    table = vote_page.table
    # The second row is the summary: date, motion, tallies, outcome.
    info_row = table.findAll('tr')[1]
    summary = info_row.findAll('td')
    date = dt.datetime.strptime(info_row.td.contents[0], '%m/%d/%Y')
    motion = summary[1].contents[0]
    yes_count = int(summary[2].contents[0])
    no_count = int(summary[3].contents[0])
    abs_count = int(summary[4].contents[0])
    passed = summary[5].contents[0] == 'Pass'
    vote = Vote(chamber, date, motion, passed,
                yes_count, no_count, abs_count)
    vote.add_source(url)
    # Remaining two-cell rows pair a legislator name with a Yea/Nay mark.
    for row in table.findAll('tr')[3:]:
        cells = row.findAll('td')
        if len(cells) != 2:
            continue
        name = row.td.contents[0].split(' of')[0]
        mark = cells[1].contents[0]
        if mark.startswith('Yea'):
            vote.yes(name)
        elif mark.startswith('Nay'):
            vote.no(name)
        else:
            vote.other(name)
    bill.add_vote(vote)
示例9: scrape_votes
def scrape_votes(self, bill, bill_type, number, session):
    """Scrape Ohio journal vote listings for a bill and attach Votes.

    Builds the votes.cfm URL from session/type/number, then walks every
    journal-text link and reads the Yea/Nay name grids next to it.
    """
    vote_url = ('http://www.legislature.state.oh.us/votes.cfm?ID=' +
                session + '_' + bill_type + '_' + str(number))
    with self.urlopen(vote_url) as page:
        page = lxml.etree.fromstring(page, lxml.etree.HTMLParser())
        for jlink in page.xpath("//a[contains(@href, 'JournalText')]"):
            date = datetime.datetime.strptime(jlink.text,
                                              "%m/%d/%Y").date()
            # The details cell reads "<Chamber> - <motion...>".
            details = jlink.xpath("string(../../../td[2])")
            chamber = details.split(" - ")[0]
            if chamber == 'House':
                chamber = 'lower'
            elif chamber == 'Senate':
                chamber = 'upper'
            else:
                raise ScrapeError("Bad chamber: %s" % chamber)
            motion = details.split(" - ")[1].split("\n")[0].strip()
            # The row after this link's row holds the Yea/Nay name grids.
            vote_row = jlink.xpath("../../..")[0].getnext()
            yea_div = vote_row.xpath(
                "td/font/div[contains(@id, 'Yea')]")[0]
            yeas = []
            for td in yea_div.xpath("table/tr/td"):
                name = td.xpath("string()")
                if name:
                    yeas.append(name)
            no_div = vote_row.xpath(
                "td/font/div[contains(@id, 'Nay')]")[0]
            nays = []
            for td in no_div.xpath("table/tr/td"):
                name = td.xpath("string()")
                if name:
                    nays.append(name)
            # Counts come from the name lists; no 'other' votes are listed.
            yes_count = len(yeas)
            no_count = len(nays)
            vote = Vote(chamber, date, motion, yes_count > no_count,
                        yes_count, no_count, 0)
            for yes in yeas:
                vote.yes(yes)
            for no in nays:
                vote.no(no)
            # NOTE(review): unlike the sibling scrapers, no vote.add_source()
            # is called here — confirm whether the URL should be attached.
            bill.add_vote(vote)
示例10: scrape_vote
def scrape_vote(self, bill, name, url):
    """Parse a vote PDF linked from a bill page and attach it to *bill*.

    *name* looks like "Senate Vote on <bill>, <motion>"; names that do
    not match that pattern are silently ignored.
    """
    # Raw strings so regex escapes like \d survive (non-raw '\d' raises
    # invalid-escape warnings on modern Python).
    match = re.match(r"^(Senate|House) Vote on [^,]*,(.*)$", name)
    if not match:
        return
    chamber = {"Senate": "upper", "House": "lower"}[match.group(1)]
    motion = match.group(2).strip()
    # Classify the motion (renamed local — original shadowed builtin 'type').
    if motion.startswith("FINAL PASSAGE"):
        motion_type = "passage"
    elif motion.startswith("AMENDMENT"):
        motion_type = "amendment"
    # NOTE(review): 'READINT' looks like a typo for 'READING', but it may
    # deliberately match a typo in the source data — confirm before changing.
    elif "ON 3RD READINT" in motion:
        motion_type = "reading:3"
    else:
        motion_type = "other"
    # Date, counts and passage are unknown until the PDF is parsed below.
    vote = Vote(chamber, None, motion, None, None, None, None)
    vote["type"] = motion_type
    vote.add_source(url)
    # Download the PDF to a temp file and convert it to HTML for parsing.
    with self.urlopen(url) as text:
        (fd, temp_path) = tempfile.mkstemp()
        with os.fdopen(fd, "wb") as w:
            w.write(text)
        html = pdf_to_lxml(temp_path)
        os.remove(temp_path)
        vote_type = None
        total_re = re.compile(r"^Total--(\d+)$")
        body = html.xpath("string(/html/body)")
        # The body alternates section headers (YEAS/NAYS/ABSENT),
        # legislator names, and a "Total--N" line closing each section.
        for line in body.replace(u"\xa0", "\n").split("\n"):
            line = line.replace(" ", "").strip()
            if not line:
                continue
            if line in ("YEAS", "NAYS", "ABSENT"):
                vote_type = {"YEAS": "yes", "NAYS": "no", "ABSENT": "other"}[line]
            elif vote_type:
                match = total_re.match(line)
                if match:
                    vote["%s_count" % vote_type] = int(match.group(1))
                elif vote_type == "yes":
                    vote.yes(line)
                elif vote_type == "no":
                    vote.no(line)
                elif vote_type == "other":
                    vote.other(line)
        # The PDFs oddly don't say whether a vote passed or failed.
        # Hopefully passage just requires yes_votes > not_yes_votes
        if vote["yes_count"] > (vote["no_count"] + vote["other_count"]):
            vote["passed"] = True
        else:
            vote["passed"] = False
        bill.add_vote(vote)
示例11: get_text_vote_results
def get_text_vote_results(self, bill, vote_date, motion_name, vote_data):
    """Parse a plain-text vote tally into a Vote object.

    Walks *vote_data* line by line as a small state machine: a
    "Yeas:"/"Ayes:" (or "Nays:"/"Noes") header switches which side the
    following comma-separated names are recorded for, "Total ..." lines
    carry the counts, and a blank line closes the current section.
    Returns the populated Vote; the caller attaches it to the bill.
    """
    # passed=None until a qualifying motion line is seen; counts start at 0.
    vote = Vote(bill['chamber'], vote_date, motion_name, None, 0, 0, 0)
    counting_yeas = False
    counting_nays = False
    for line in vote_data.splitlines():
        if line.find("Motion:") == 0:
            # Passage is inferred from the motion wording alone.
            line = line.strip().upper()
            for x in ['DO CONCUR', 'DO PASS', 'DO ADOPT', ]:
                if line.find(x) >= 0:
                    vote['passed'] = True
        elif ((line.find("Yeas:") == 0) or (line.find("Ayes:") == 0)):
            counting_yeas = True
            counting_nays = False
        # NOTE(review): "Noes" is matched without a trailing colon, unlike
        # the other headers — confirm this matches the source text.
        elif ((line.find("Nays:") == 0) or (line.find("Noes") == 0)):
            counting_yeas = False
            counting_nays = True
        elif line.find("Total ") == 0:
            # A Total outside a yea/nay section (excused/absent/etc.)
            # accumulates into the 'other' count.
            if not (counting_yeas or counting_nays):
                vote['other_count'] += int(line.split()[1].strip())
        elif line == '':
            counting_yeas = False
            counting_nays = False
        if counting_yeas:
            # A "Total N" line ends the section with the official count;
            # otherwise strip any "header:" prefix and record the names.
            if line.find("Total ") == 0:
                vote['yes_count'] = int(line.split()[1].strip())
                line = ""
            if line.find(":") != -1:
                line = line[line.find(":")+1:]
            for name in line.split(","):
                name = name.strip()
                if name != '':
                    # Drop a trailing period from abbreviated names.
                    if name[-1] == '.':
                        name = name[0:-1]
                    vote.yes(name)
        if counting_nays:
            if line.find("Total ") == 0:
                vote['no_count'] = int(line.split()[1].strip())
                line = ""
            if line.find(":") != -1:
                line = line[line.find(":")+1:]
            for name in line.split(","):
                name = name.strip()
                if name != '':
                    if name[-1] == '.':
                        name = name[0:-1]
                    vote.no(name)
    return vote
示例12: apply_votes
def apply_votes(self, bill):
    """Given a bill (and assuming it has a status_url in its dict), parse all of the votes
    """
    bill_votes = votes.all_votes_for_url(self, bill['status_url'])
    for (chamber,vote_desc,pdf_url,these_votes) in bill_votes:
        try:
            # The date is the last dash-separated piece of the description.
            # NOTE(review): str.split never raises IndexError, so this
            # warning path looks unreachable as written — confirm intent.
            date = vote_desc.split("-")[-1]
        except IndexError:
            self.warning("[%s] Couldn't get date out of [%s]" % (bill['bill_id'],vote_desc))
            continue
        yes_votes = []
        no_votes = []
        other_votes = []
        # Bucket voters by their recorded mark; anything but Y/N is 'other'.
        for voter,vote in these_votes.iteritems():
            if vote == 'Y':
                yes_votes.append(voter)
            elif vote == 'N':
                no_votes.append(voter)
            else:
                other_votes.append(voter)
        passed = len(yes_votes) > len(no_votes) # not necessarily correct, but not sure where else to get it. maybe from pdf
        # NOTE(review): 'date' is still a raw string here (never parsed to a
        # datetime) — confirm the Vote constructor accepts that.
        vote = Vote(standardize_chamber(chamber),date,vote_desc,passed, len(yes_votes), len(no_votes), len(other_votes),pdf_url=pdf_url)
        for voter in yes_votes:
            vote.yes(voter)
        for voter in no_votes:
            vote.no(voter)
        for voter in other_votes:
            vote.other(voter)
        bill.add_vote(vote)
示例13: scrape_old_vote
def scrape_old_vote(self, url):
    """Parse an old-style vote page at *url* and return the populated Vote."""
    vote_page = self.soup_parser(self.urlopen(url))
    # Header reads "<bill>, <chamber/location>, <motion...>".
    header = vote_page.h3.contents[0]
    chamber_name = header.split(', ')[1]
    chamber = 'lower' if chamber_name.startswith('House') else 'upper'
    location = ' '.join(chamber_name.split(' ')[1:])
    if location.startswith('of Representatives'):
        location = ''
    motion = ', '.join(header.split(', ')[2:])

    def get_count(cell):
        # An empty cell means a zero tally.
        if not cell.contents:
            return 0
        return int(cell.contents[0])

    # Tally table: cells 1/3/5/7 hold ayes/nays/excused/absent counts.
    tally_cells = vote_page.findAll('table')[1].findAll('td')
    yes_count = get_count(tally_cells[1])
    no_count = get_count(tally_cells[3])
    excused_count = get_count(tally_cells[5])
    absent_count = get_count(tally_cells[7])
    other_count = excused_count + absent_count

    vote = Vote(chamber, None, motion, yes_count > no_count,
                yes_count, no_count,
                other_count, excused_count=excused_count,
                absent_count=absent_count,
                location=location)
    vote.add_source(url)

    # First table pairs each name cell with a Yea/Nay/Excused/Absent cell.
    for cell in vote_page.table.findAll('td'):
        mark = cell.contents[0]
        if mark == 'Yea':
            vote.yes(cell.findPrevious().contents[0])
        elif mark == 'Nay':
            vote.no(cell.findPrevious().contents[0])
        elif mark in ['Excused', 'Absent']:
            vote.other(cell.findPrevious().contents[0])
    return vote
示例14: scrape_lower_vote
def scrape_lower_vote(self, url):
    """Parse a lower-chamber floor-vote page at *url* and return the Vote.

    The motion is stitched together from the amendment/reading/floor-action
    fields; per-member votes come from the letter-prefixed name grid.
    """
    with self.urlopen(url) as page:
        page = lxml.html.fromstring(page)
        # The member-vote grid lives at this fixed table position.
        table = page.xpath("/html/body/table/tr[3]/td/table/tr/"
                           "td[3]/table/tr/td/table[3]")[0]
        motion = ""
        for part in ("Amendment Number", "Reading Number",
                     "Floor Actions"):
            motion += page.xpath("string(//*[contains(text(), '%s')])" %
                                 part).strip() + " "
        motion = motion.strip()
        date = page.xpath(
            'string(//*[contains(text(), "Date:")]/following-sibling::*)')
        date = datetime.datetime.strptime(date, "%m/%d/%Y")
        # Tallies appear as e.g. "Yeas - 78".
        yeas = page.xpath('string(//*[contains(text(), "Yeas")])')
        yeas = int(yeas.split(' - ')[1])
        nays = page.xpath('string(//*[contains(text(), "Nays")])')
        nays = int(nays.split(' - ')[1])
        nv = page.xpath('string(//*[contains(text(), "Not Voting")])')
        nv = int(nv.split(' - ')[1])
        # Passage requires yeas to beat nays plus non-voters.
        passed = yeas > (nays + nv)
        vote = Vote('lower', date, motion, passed, yeas, nays, nv)
        vote.add_source(url)
        # Each grid row: a vote mark (Y/N/-/C) followed by the member name.
        for tr in table.xpath("tr/td/table/tr"):
            text = tr.xpath("string()")
            text = re.sub(r"\s+", r" ", text)
            name = " ".join(text.split()[1:])
            if text[0] == "Y":
                vote.yes(name)
            elif text[0] == "N":
                vote.no(name)
            elif text[0] in ("-", "C"):
                vote.other(name)
        return vote
示例15: scrape_new_vote
def scrape_new_vote(self, url):
    """Parse a new-style (ASP.NET) vote page and return the populated Vote.

    Counterpart to scrape_old_vote: same header and tally layout, but
    values are located by control id instead of table position.
    """
    vote_page = self.soup_parser(self.urlopen(url))
    # Header reads "<bill>, <chamber/location>, <motion...>".
    header = vote_page.find(id="ctl00_contentMain_hdVote").contents[0]
    chamber_name = header.split(', ')[1]
    if chamber_name.startswith('House'):
        chamber = 'lower'
    else:
        chamber = 'upper'
    location = ' '.join(chamber_name.split(' ')[1:])
    if location.startswith('of Representatives'):
        location = ''
    motion = ', '.join(header.split(', ')[2:])
    yes_count = int(vote_page.find(
        id="ctl00_contentMain_tdAyes").contents[0])
    no_count = int(vote_page.find(
        id="ctl00_contentMain_tdNays").contents[0])
    excused_count = int(vote_page.find(
        id="ctl00_contentMain_tdExcused").contents[0])
    absent_count = int(vote_page.find(
        id="ctl00_contentMain_tdAbsent").contents[0])
    # Excused and absent members are lumped together as 'other'.
    other_count = excused_count + absent_count
    passed = yes_count > no_count
    # Date is unknown on this page; caller is expected to supply it.
    vote = Vote(chamber, None, motion, passed,
                yes_count, no_count,
                other_count, excused_count=excused_count,
                absent_count=absent_count,
                location=location)
    vote.add_source(url)
    # Votes table pairs each name cell with its Yea/Nay/... mark cell.
    vote_tbl = vote_page.find(id="ctl00_contentMain_tblVotes")
    for td in vote_tbl.findAll('td'):
        if td.contents[0] == 'Yea':
            vote.yes(td.findPrevious().contents[0])
        elif td.contents[0] == 'Nay':
            vote.no(td.findPrevious().contents[0])
        elif td.contents[0] in ['Excused', 'Absent']:
            vote.other(td.findPrevious().contents[0])
    return vote