本文整理汇总了Python中pipeline.Pipeline.stopWorking方法的典型用法代码示例。如果您正苦于以下问题:Python Pipeline.stopWorking方法的具体用法?Python Pipeline.stopWorking怎么用?Python Pipeline.stopWorking使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pipeline.Pipeline
的用法示例。
在下文中一共展示了Pipeline.stopWorking方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: GnipCollectionManager
# 需要导入模块: from pipeline import Pipeline [as 别名]
# 或者: from pipeline.Pipeline import stopWorking [as 别名]
class GnipCollectionManager(object):
    """Drives a Gnip data-collector stream through a processing Pipeline.

    Owns a Pipeline built from the registered collection stage classes, a
    stream receiver (created lazily in startWorking), and a
    GnipCollectionRules helper for managing rules on the Gnip side.
    """

    def __init__(self, accountname, username, password):
        self.accountname = accountname
        self.username = username
        self.password = password
        # Build the processing pipeline from every registered collection stage.
        self.pipeline = Pipeline()
        for stage_cls in pipelinestages.getPipelineCollectionStageClasses():
            self.pipeline.appendStage(stage_cls())
        # Stream receiver is created on startWorking(); None while idle.
        self.extractor = None
        self.gniprules = gnip_rules.GnipCollectionRules(accountname, username, password)

    def startWorking(self):
        """Start the pipeline and begin receiving the Gnip stream."""
        self.pipeline.startWorking()
        stream_url = "https://%s.gnip.com/data_collectors/1/stream.xml" % (self.accountname)
        self.extractor = GnipDataCollectionStreamReceiver(
            stream_url, self.username, self.password, self.pipeline.getSourceQueue())
        self.extractor.start()

    def stopWorking(self):
        """Stop the stream receiver (if running), then the pipeline."""
        if self.extractor is not None:
            self.extractor.stopWorking()
            self.extractor.join()
            self.extractor = None
        self.pipeline.stopWorking()

    def getRules(self):
        """Return the list of rules currently registered with Gnip."""
        return self.gniprules.getRules()['rules']

    def _stageLocalRules(self, rules):
        # Shared by addRules/deleteRules: reset the local rule set and load
        # each {'value': ..., 'tag': ...} dict into it (tag defaults to None).
        self.gniprules.initLocalRules()
        for rule in rules:
            self.gniprules.appendLocalRule(rule['value'], rule.get('tag', None))

    def addRules(self, rules):
        """Register the given rule dicts with Gnip."""
        self._stageLocalRules(rules)
        self.gniprules.createGnipRules()

    def deleteRules(self, rules):
        """Remove the given rule dicts from Gnip."""
        self._stageLocalRules(rules)
        self.gniprules.deleteGnipRules()

    def getStats(self):
        """Return a stats dict keyed by component name."""
        return {'Pipeline': self.pipeline.getStats()}
示例2: FeedManager
# 需要导入模块: from pipeline import Pipeline [as 别名]
# 或者: from pipeline.Pipeline import stopWorking [as 别名]
class FeedManager(object):
    """Coordinates feed fetchers (live and history) with their pipelines.

    Maintains two Pipelines: one for live comment feeds and one for
    first-time "history" fetches of forums not yet backfilled. Fetcher
    threads feed each pipeline's source queue.
    """

    def __init__(self):
        self.pipeline = Pipeline()
        self.history_pipeline = Pipeline()
        # Initialize fetcher lists here so stopWorking() is safe even when
        # startWorking() was never called (original raised AttributeError).
        self.extractors = []
        self.history_extractors = []
        for plsc in pipelinestages.getPipelineFeedStageClasses():
            self.pipeline.appendStage(plsc())
        for plsc in pipelinestages.getPipelineHistoryFeedStageClasses():
            self.history_pipeline.appendStage(plsc())

    def startWorking(self):
        """Spawn one fetcher per feed URL and start the pipelines in use."""
        self.extractors = []
        self.history_extractors = []
        for acc, camp, url in self.getAllHistoryFeedURLs():
            extractor = HistoryFeedFetcher(acc, camp, url, self.history_pipeline.getSourceQueue())
            extractor.start()
            # BUG FIX: was self.extractors.append(extractor), which left
            # history_extractors empty so history_pipeline never started.
            self.history_extractors.append(extractor)
        if self.history_extractors:
            self.history_pipeline.startWorking()
        for acc, camp, url in self.getAllFeedURLs():
            url += "/comments/feed"
            extractor = FeedFetcher(acc, camp, url, self.pipeline.getSourceQueue())
            extractor.start()
            self.extractors.append(extractor)
        if self.extractors:
            self.pipeline.startWorking()

    def stopWorking(self):
        """Signal all fetchers to stop, wait for them, then stop pipelines."""
        for extractor in self.extractors:
            extractor.stopWorking()
        for extractor in self.history_extractors:
            extractor.stopWorking()
        # Poll with short joins until every fetcher has exited. Rebuild the
        # lists instead of calling remove() inside the for-loop (the
        # original mutated the list being iterated, which skips elements).
        while self.extractors or self.history_extractors:
            for extractor in self.extractors:
                extractor.join(1)
            self.extractors = [e for e in self.extractors if e.isAlive()]
            for extractor in self.history_extractors:
                extractor.join(1)
            self.history_extractors = [e for e in self.history_extractors if e.isAlive()]
        self.pipeline.stopWorking()
        self.history_pipeline.stopWorking()

    def getStats(self):
        """Return a stats dict keyed by component name."""
        res = {}
        res['Pipeline'] = self.pipeline.getStats()
        return res

    def getAllFeedURLs(self):
        """Return (account, campaign, forum_url) triples for all active campaigns."""
        res = []
        accs = MongoManager.getActiveAccounts()
        for acc in accs:
            for camp in acc.getActiveCampaigns():
                for url in camp.getForums():
                    res.append((acc, camp, url))
        return res

    def getAllHistoryFeedURLs(self):
        """Like getAllFeedURLs, but only forums whose history was never fetched."""
        res = []
        accs = MongoManager.getActiveAccounts()
        for acc in accs:
            for camp in acc.getActiveCampaigns():
                hff = camp.getHistoryFetchedForums()
                for url in camp.getForums():
                    if url not in hff:
                        res.append((acc, camp, url))
        return res