本文整理汇总了Python中link.Link.source方法的典型用法代码示例。如果您正苦于以下问题:Python Link.source方法的具体用法?Python Link.source怎么用?Python Link.source使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 link.Link 的用法示例。
在下文中一共展示了Link.source方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: getLinks
# 需要导入模块: from link import Link [as 别名]
# 或者: from link.Link import source [as 别名]
def getLinks(self):
    """Scrape the PRNewswire listing page and return a list of Link objects
    for stories mentioning a NASDAQ symbol present in reader.requestArray.

    Returns:
        list[Link]: one Link per matching story; empty list if none match.
    """
    r = requests.get(self.URL1)
    soup = BeautifulSoup(r.content)
    linksArray = []
    index = 0
    links = soup.findAll("ul", {"class": "list-links"})
    dates = soup.findAll("div", {"class": "col-sm-3 col-md-2"})
    # Remove button groups first so their text does not pollute getText().
    buttons = soup.findAll("div", {"class": "btn-group"})
    for button in buttons:
        button.extract()
    # Matches e.g. "(nasdaq: abcd)" — 3- or 4-letter symbol; input is lowercased.
    symbolPattern = re.compile(
        r"[(]\s?nasdaq(:| :|: | :|)\s?(?P<symbol>[a-z][a-z][a-z][a-z]?)\s?.*?[)]")
    for link in links:
        title = link.findAll('a', href=True)
        text = link.findAll('li')
        match = symbolPattern.search(link.getText().lower())
        if match and match.group("symbol"):
            # Skip conference-presentation announcements (not tradeable news).
            if not re.search("to Present at", link.getText()):
                for symbol in reader.requestArray:
                    if symbol[0].lower() == match.group("symbol"):
                        # Accept entries whose date cell has no comma
                        # (time-only, i.e. published today) or whose date
                        # explicitly contains today's date.  The original
                        # code duplicated the whole Link construction in
                        # an if/elif — folded into one condition here.
                        dateStr = str(dates[index])
                        if "," not in dateStr or self.today in dateStr:
                            newLink = Link()
                            newLink.symbol = symbol[0]
                            newLink.url = title[0]['href']
                            newLink.text = text[1].text
                            newLink.linkText = title[0].text
                            newLink.date = dates[index].text.strip()
                            newLink.source = "PrNewswire"
                            linksArray.append(newLink)
        index = index + 1
    return linksArray
示例2: getLinks
# 需要导入模块: from link import Link [as 别名]
# 或者: from link.Link import source [as 别名]
def getLinks(self):
    """Scrape the GlobeNewswire RSS feed and return a list of Link objects
    for items mentioning a NASDAQ symbol present in reader.requestArray.

    Returns:
        list[Link]: one Link per matching feed item; `date` is not set
        (the feed parsing for it was never implemented).
    """
    s = requests.Session()
    r = s.get(self.url)
    soup = BeautifulSoup(r.content)
    linksArray = []
    items = soup.findAll('item')
    # Hoisted out of the loop: these selects query the whole document and
    # return the same lists every iteration — re-running them per item
    # made the original loop quadratic in the number of feed items.
    titles = soup.select('item title')
    descriptions = soup.select('item description')
    itemLinks = soup.select('item link')
    # Matches e.g. "(nasdaq: abcd)" — 3- or 4-letter symbol; input is lowercased.
    symbolPattern = re.compile(
        r"[(]\s?nasdaq(:| :|: | :|)\s?(?P<symbol>[a-z][a-z][a-z][a-z]?)\s?[)]")
    index = 0
    for item in items:
        match = symbolPattern.search(item.getText().lower())
        if match and match.group("symbol"):
            for symbol in reader.requestArray:
                if symbol[0].lower() == match.group("symbol"):
                    newLink = Link()
                    newLink.symbol = symbol[0]
                    newLink.url = itemLinks[index].text
                    newLink.text = descriptions[index].text
                    newLink.linkText = titles[index].text
                    newLink.source = "GlobeNewswire"
                    linksArray.append(newLink)
        index = index + 1
    return linksArray
示例3: getLinks
# 需要导入模块: from link import Link [as 别名]
# 或者: from link.Link import source [as 别名]
def getLinks(self):
    """Scrape BusinessWire search-result pages (one per entry in
    self.pageArray) and return Link objects for today's stories mentioning
    a NASDAQ symbol present in reader.requestArray.

    Returns:
        list[Link]: accumulated across all pages; empty list if none match.
    """
    linksArray = []
    # Matches e.g. "(nasdaq: abcd)" — 3- or 4-letter symbol; input is lowercased.
    symbolPattern = re.compile(
        r"[(]\s?nasdaq(:| :|: | :|)\s?(?P<symbol>[a-z][a-z][a-z][a-z]?)\s?[)]")
    for pageNumber in self.pageArray:
        urlQuery = self.query.format(pageNumber)
        resp = requests.get(self.URL + urlQuery)
        soup = BeautifulSoup(resp.content)
        index = 0
        # Unused queries from the original (ul, h3 headlines, p summaries)
        # were removed; only the list items and <time> tags are consumed.
        links = soup.select("ul.bw-news-list li")
        dates = soup.findAll("time")
        for link in links:
            title = link.findAll('a', href=True)
            text = link.findAll('p')
            match = symbolPattern.search(link.getText().lower())
            if match and match.group("symbol"):
                # Skip conference-presentation announcements.
                if not re.search("to present at", link.getText().lower()):
                    # Only keep items published today.
                    if self.today in str(dates[index]):
                        for symbol in reader.requestArray:
                            if symbol[0].lower() == match.group("symbol"):
                                newLink = Link()
                                newLink.symbol = symbol[0]
                                newLink.url = title[0]['href']
                                newLink.text = text[0].text
                                newLink.linkText = title[0].text
                                newLink.date = dates[index].text.strip()
                                newLink.source = "BusinessWire"
                                linksArray.append(newLink)
            index = index + 1
    return linksArray
示例4: getLinks
# 需要导入模块: from link import Link [as 别名]
# 或者: from link.Link import source [as 别名]
def getLinks(self):
    """Scrape MarketWired search-result pages (one per entry in
    self.pageArray) and return Link objects for today's stories mentioning
    a NASDAQ symbol present in reader.requestArray.

    Returns:
        list[Link]: accumulated across all pages; empty list if none match.
    """
    s = requests.Session()
    # Prime the session with the site's cookies before querying; the
    # response itself is not needed (the original bound it to an unused
    # variable).
    s.get("http://www.marketwired.com/")
    linksArray = []
    # Matches e.g. "(nasdaq: abcd)" — 3- or 4-letter symbol; input is lowercased.
    symbolPattern = re.compile(
        r"[(]\s?nasdaq(:| :|: | :|)\s?(?P<symbol>[a-z][a-z][a-z][a-z]?)\s?[)]")
    for pageNumber in self.pageArray:
        urlQuery = self.query.format(pageNumber)
        # Follow the redirect from URL1 to discover the search base URL,
        # then append the page query to the resolved location.
        resp = s.get(self.URL1)
        queryResp = s.get(resp.url + urlQuery)
        soup = BeautifulSoup(queryResp.content)
        links = soup.findAll("div", {"style": "margin-bottom: 30px;"})
        dates = soup.findAll("span", {"style": "color: #888888; font-size: 9pt"})
        index = 0
        for link in links:
            title = link.findAll('a', href=True)
            text = link.findAll('div', {"class": "search-results-width"})
            match = symbolPattern.search(link.getText().lower())
            if match and match.group("symbol"):
                # Skip conference-presentation announcements.
                if not re.search("to present at", link.getText().lower()):
                    for symbol in reader.requestArray:
                        if symbol[0].lower() == match.group("symbol"):
                            # Only keep items published today.
                            if self.today in str(dates[index]):
                                newLink = Link()
                                newLink.symbol = symbol[0]
                                # Search results carry relative hrefs.
                                newLink.url = "http://www.marketwired.com" + title[0]['href']
                                newLink.text = text[1].text
                                newLink.linkText = title[0].text
                                newLink.date = dates[index].text.strip()
                                newLink.source = "MarketWired"
                                linksArray.append(newLink)
            index = index + 1
    return linksArray
示例5: parse_vis_data
# 需要导入模块: from link import Link [as 别名]
# 或者: from link.Link import source [as 别名]
def parse_vis_data(self,vis_data):
    """Populate self._nodes and self._links from a batman-adv vis_data dump.

    Runs four passes over vis_data:
      1. 'of'/'secondary' entries — register a node's secondary MACs.
      2. 'router' entries — ensure router and neighbour nodes exist; a TT
         entry whose gateway MAC is similar to the router's is folded into
         the router (see is_similar) instead of becoming a link.
      3. 'router' entries — build Link objects between known nodes.
      4. 'primary' entries — set each node's id to its primary MAC.
    """
    for x in vis_data:
        if 'of' in x:
            try:
                node = self.maybe_node_by_mac((x['of'], x['secondary']))
            except KeyError:
                # Unknown node — create it. Narrowed from a bare `except:`
                # to KeyError, matching the sibling parse_vis_data version.
                node = Node()
                node.flags['online'] = True
                if 'legacy' in x:
                    node.flags['legacy'] = True
                self._nodes.append(node)
            node.add_mac(x['of'])
            node.add_mac(x['secondary'])
    for x in vis_data:
        if 'router' in x:
            try:
                node = self.maybe_node_by_mac((x['router'], ))
            except KeyError:
                node = Node()
                node.flags['online'] = True
                if 'legacy' in x:
                    node.flags['legacy'] = True
                node.add_mac(x['router'])
                self._nodes.append(node)
            # If it's a TT link and the MAC is very similar
            # consider this MAC as one of the routers
            # MACs
            if 'gateway' in x and x['label'] == "TT":
                if is_similar(x['router'], x['gateway']):
                    node.add_mac(x['gateway'])
                    # skip processing as regular link
                    continue
            try:
                if 'neighbor' in x:
                    try:
                        # BUGFIX: was maybe_node_by_mac((x['neighbor'])) —
                        # a parenthesized string, not a 1-tuple — so the
                        # lookup iterated single characters and always
                        # failed, skipping every neighbour entry here.
                        node = self.maybe_node_by_mac((x['neighbor'], ))
                    except KeyError:
                        continue
                if 'gateway' in x:
                    x['neighbor'] = x['gateway']
                node = self.maybe_node_by_mac((x['neighbor'], ))
            except KeyError:
                node = Node()
                node.flags['online'] = True
                if x['label'] == 'TT':
                    node.flags['client'] = True
                node.add_mac(x['neighbor'])
                self._nodes.append(node)
    for x in vis_data:
        if 'router' in x:
            try:
                if 'gateway' in x:
                    x['neighbor'] = x['gateway']
                router = self.maybe_node_by_mac((x['router'], ))
                neighbor = self.maybe_node_by_mac((x['neighbor'], ))
            except KeyError:
                continue
            # filter TT links merged in previous step
            if router == neighbor:
                continue
            link = Link()
            link.source = LinkConnector()
            link.source.interface = x['router']
            link.source.id = self._nodes.index(router)
            link.target = LinkConnector()
            link.target.interface = x['neighbor']
            link.target.id = self._nodes.index(neighbor)
            link.quality = x['label']
            # Stable id regardless of link direction.
            link.id = "-".join(sorted((link.source.interface, link.target.interface)))
            if x['label'] == "TT":
                link.type = "client"
            self._links.append(link)
    for x in vis_data:
        if 'primary' in x:
            try:
                node = self.maybe_node_by_mac((x['primary'], ))
            except KeyError:
                continue
            node.id = x['primary']
示例6: parse_vis_data
# 需要导入模块: from link import Link [as 别名]
# 或者: from link.Link import source [as 别名]
def parse_vis_data(self,vis_data):
    """Populate self._nodes and self._links from a batman-adv vis_data dump.

    Later revision of the parser: it stamps firstseen/lastseen from
    self.time, defers TT entries to a dedicated final pass (counting
    clients via node.clientcount), and catches KeyError explicitly.

    NOTE(review): this excerpt is truncated by the source page ("部分代码
    省略" marker follows); the tail of the function is not visible here.
    """
    # Pass 1: register secondary MACs for nodes appearing as 'of'/'secondary'.
    for x in vis_data:
        if 'of' in x:
            try:
                node = self.maybe_node_by_mac((x['of'], x['secondary']))
            except KeyError:
                # Unknown node — create and timestamp it.
                node = Node()
                node.lastseen = self.time
                node.firstseen = self.time
                node.flags['online'] = True
                self._nodes.append(node)
            node.add_mac(x['of'])
            node.add_mac(x['secondary'])
    # Pass 2: ensure router and neighbour nodes exist.
    for x in vis_data:
        if 'router' in x:
            # TTs will be processed later
            if x['label'] == "TT":
                continue
            try:
                node = self.maybe_node_by_mac((x['router'], ))
            except KeyError:
                node = Node()
                node.lastseen = self.time
                node.firstseen = self.time
                node.flags['online'] = True
                node.add_mac(x['router'])
                self._nodes.append(node)
            try:
                if 'neighbor' in x:
                    try:
                        node = self.maybe_node_by_mac((x['neighbor'], ))
                    except KeyError:
                        continue
                # A gateway entry overrides the neighbour MAC.
                if 'gateway' in x:
                    x['neighbor'] = x['gateway']
                node = self.maybe_node_by_mac((x['neighbor'], ))
            except KeyError:
                node = Node()
                node.lastseen = self.time
                node.firstseen = self.time
                node.flags['online'] = True
                node.add_mac(x['neighbor'])
                self._nodes.append(node)
    # Pass 3: build Link objects between known nodes.
    for x in vis_data:
        if 'router' in x:
            # TTs will be processed later
            if x['label'] == "TT":
                continue
            try:
                if 'gateway' in x:
                    x['neighbor'] = x['gateway']
                router = self.maybe_node_by_mac((x['router'], ))
                neighbor = self.maybe_node_by_mac((x['neighbor'], ))
            except KeyError:
                continue
            # filter TT links merged in previous step
            if router == neighbor:
                continue
            link = Link()
            link.source = LinkConnector()
            link.source.interface = x['router']
            link.source.id = self._nodes.index(router)
            link.target = LinkConnector()
            link.target.interface = x['neighbor']
            link.target.id = self._nodes.index(neighbor)
            link.quality = x['label']
            # Stable id regardless of link direction.
            link.id = "-".join(sorted((link.source.interface, link.target.interface)))
            self._links.append(link)
    # Pass 4: set each node's id to its primary MAC.
    for x in vis_data:
        if 'primary' in x:
            try:
                node = self.maybe_node_by_mac((x['primary'], ))
            except KeyError:
                continue
            node.id = x['primary']
    # Pass 5: fold TT (client) entries into their router and count them.
    for x in vis_data:
        if 'router' in x and x['label'] == 'TT':
            try:
                node = self.maybe_node_by_mac((x['router'], ))
                node.add_mac(x['gateway'])
                node.clientcount += 1
            except KeyError:
                pass
#.........这里部分代码省略.........
示例7: import_batman
# 需要导入模块: from link import Link [as 别名]
# 或者: from link.Link import source [as 别名]
def import_batman(self, lines):
    """Populate self._nodes and self._links from batman-adv vis output,
    one JSON object per line.

    Same multi-pass structure as parse_vis_data, but with the
    router/gateway MAC-similarity check inlined instead of delegated
    to a helper.

    NOTE(review): this excerpt is truncated by the source page ("部分代码
    省略" marker follows); the tail of the function is not visible here.
    """
    # Pass 1: register secondary MACs for nodes appearing as 'of'/'secondary'.
    for line in lines:
        x = json.loads(line)
        if 'of' in x:
            try:
                node = self.maybe_node_by_mac((x['of'], x['secondary']))
            except:
                node = Node()
                node.flags['online'] = True
                self._nodes.append(node)
            node.add_mac(x['of'])
            node.add_mac(x['secondary'])
    # Pass 2: ensure router and neighbour nodes exist; fold similar TT MACs.
    for line in lines:
        x = json.loads(line)
        if 'router' in x:
            try:
                node = self.maybe_node_by_mac((x['router'], ))
            except:
                node = Node()
                node.flags['online'] = True
                node.add_mac(x['router'])
                self._nodes.append(node)
            # If it's a TT link and the MAC is very similar
            # consider this MAC as one of the routers
            # MACs
            if 'gateway' in x and x['label'] == "TT":
                # Parse both MACs into lists of byte values.
                router = list(int(i, 16) for i in x['router'].split(":"))
                gateway = list(int(i, 16) for i in x['gateway'].split(":"))
                # first byte must only differ in bit 2
                if router[0] == gateway[0] | 2:
                    # count different bytes
                    # NOTE(review): this comprehension's `x` shadows the
                    # loop's dict `x`; harmless only because `x` is not
                    # used again before being rebound next iteration.
                    a = [x for x in zip(router[1:], gateway[1:]) if x[0] != x[1]]
                    # no more than two additional bytes must differ
                    if len(a) <= 2:
                        delta = 0
                        if len(a) > 0:
                            delta = sum(abs(i[0] -i[1]) for i in a)
                        if delta < 8:
                            # This TT link looks like a mac of the router!
                            node.add_mac(x['gateway'])
                            # skip processing as regular link
                            continue
            try:
                if 'neighbor' in x:
                    try:
                        # NOTE(review): (x['neighbor']) is a parenthesized
                        # string, not a 1-tuple — the sibling parser passes
                        # (x['neighbor'], ). Likely a bug; left unchanged
                        # because this excerpt is truncated.
                        node = self.maybe_node_by_mac((x['neighbor']))
                    except:
                        continue
                # A gateway entry overrides the neighbour MAC.
                if 'gateway' in x:
                    x['neighbor'] = x['gateway']
                node = self.maybe_node_by_mac((x['neighbor'], ))
            except:
                node = Node()
                node.flags['online'] = True
                if x['label'] == 'TT':
                    node.flags['client'] = True
                node.add_mac(x['neighbor'])
                self._nodes.append(node)
    # Pass 3: build Link objects between known nodes.
    for line in lines:
        x = json.loads(line)
        if 'router' in x:
            try:
                if 'gateway' in x:
                    x['neighbor'] = x['gateway']
                router = self.maybe_node_by_mac((x['router'], ))
                neighbor = self.maybe_node_by_mac((x['neighbor'], ))
            except:
                continue
            # filter TT links merged in previous step
            if router == neighbor:
                continue
            link = Link()
            link.source = LinkConnector()
            link.source.interface = x['router']
            link.source.id = self._nodes.index(router)
            link.target = LinkConnector()
            link.target.interface = x['neighbor']
            link.target.id = self._nodes.index(neighbor)
            link.quality = x['label']
            # Stable id regardless of link direction.
            link.id = "-".join(sorted((link.source.interface, link.target.interface)))
#.........这里部分代码省略.........