本文整理汇总了Python中models.Employee.departments方法的典型用法代码示例。如果您正苦于以下问题:Python Employee.departments方法的具体用法?Python Employee.departments怎么用?Python Employee.departments使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类models.Employee
的用法示例。
在下文中一共展示了Employee.departments方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: handler
# 需要导入模块: from models import Employee [as 别名]
# 或者: from models.Employee import departments [as 别名]
def handler(tag):
    """Build an Employee from one table row (<tr>) of a staff listing.

    Returns None for rows without <td> cells, rows with fewer than four
    cells, and the header row (name == u'姓名'); otherwise an Employee
    with name, optional url, title and departments filled in.
    """
    cells = tag.find_all(name='td')
    if not cells:
        print("len(tds) == 0")
        return None
    employee = Employee()
    if len(cells) < 4:
        print("len(tds) = %d" % (len(cells)))
        return None
    name_cell = cells[0]
    employee.name = name_cell.get_text().strip()
    if employee.name == u'姓名':
        # Header row -- not a real employee.
        return None
    anchors = name_cell.find_all('a')
    if anchors:
        employee.url = anchors[0]['href']
    employee.title = cells[2].get_text().strip()
    employee.departments = cells[3].get_text().strip()
    return employee
示例2: profile_handler
# 需要导入模块: from models import Employee [as 别名]
# 或者: from models.Employee import departments [as 别名]
def _collapse_ws(text):
    """Remove all whitespace from *text*; passes falsy values through."""
    return ''.join(text.split()) if text else text


def profile_handler(doc, name, url, path):
    """Parse an employee profile page.

    Saves the profile's main <div> snapshot to <path>/<name>.html (only
    name and homepage are kept on the Employee object; the resume HTML is
    stored on disk), extracts department / title / email / research from
    the 11-cell detail table, then hands the biography text lines to
    ProfileParser.

    doc:  raw HTML of the profile page
    name: employee name (also used as the saved file's basename)
    url:  profile URL, stored on the Employee
    path: directory in which to save the HTML snapshot

    Returns the result of ProfileParser.parse().
    """
    filename = os.path.join(path, name + ".html")
    employee = Employee(name=name, url=url)
    soup = BeautifulSoup(doc, Config.SOUP_PARSER)
    divs = soup.find_all(name="div",
                         attrs={"class": "page_right addpage_right"},
                         limit=1)
    # Fall back to the whole document when the content div is absent.
    div = divs[0] if divs else soup
    if not os.path.exists(filename):
        with open(filename, 'wb') as fp:
            # BUG FIX: prettify() returns str, but the file is opened in
            # binary mode -- the original fp.write(content) raised
            # TypeError on Python 3. Encode explicitly; the redundant
            # fp.close() inside `with` is also removed.
            fp.write(div.prettify().encode('utf-8'))
    tds = div.find_all('td')
    if tds and len(tds) == 11:
        # Fixed cell layout: 2=department, 4=title, 8=email, 10=research.
        department = _collapse_ws(tds[2].get_text())
        if department:
            employee.departments = department
        title = _collapse_ws(tds[4].get_text())
        if title:
            employee.title = title
        email = _collapse_ws(tds[8].get_text())
        if email:
            employee.email = email
        research = _collapse_ws(tds[10].get_text())
        if research:
            employee.research = research
    divs = soup.find_all(name="div", attrs={"class": "text_more"}, limit=1)
    if divs:
        div = divs[0]
    # Process the biography as plain text lines.
    # NOTE(review): indentation was lost in extraction; this assumes the
    # parse always runs (falling back to the outer div when "text_more"
    # is missing) -- confirm against the original source.
    lines = div.stripped_strings
    parser = ProfileParser(lines=lines, employee=employee,
                           set_attr_hook=set_attr_hook)
    return parser.parse()