This page collects typical code examples of the Python method scraper.Scraper.set_callbacks. If you are wondering what Scraper.set_callbacks does, how to call it, or where to find usage examples, the hand-picked samples below may help. You can also read more about the enclosing class, scraper.Scraper.
2 code examples of Scraper.set_callbacks are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
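Before the full examples, here is a minimal sketch of the basic call pattern. This is not the project's official usage: my_handle_doc is a hypothetical handler, the keyword name found_doc_callback is taken from the examples below, and the Scraper(DEBUG=...) signature follows Example 2.

from scraper import Scraper

# Hypothetical callback: invoked once for each document the scraper finds.
def my_handle_doc(doc):
    print(doc)

scraper = Scraper(DEBUG=False)
scraper.set_callbacks(
    found_doc_callback=my_handle_doc,
)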
Example 1: int
# Required import: from scraper import Scraper [as alias]
# or: from scraper.Scraper import set_callbacks [as alias]
# NOTE: this example is an excerpt; it opens inside a parameter dictionary,
# and datetime, options, url, _DEBUG, and handle_doc are all defined earlier
# in the original script.
    'max_link_level': int(options.max_link_level),
    'creation_datetime': str(datetime.datetime.now()),
    'allowed_domains': [
    ],
    'sleep_time': 0,  # do not sleep between URL fetches
}
scraper = Scraper(
    check_type=options.tracking_method,
    check_type_uri=options.uri,
    DEBUG=_DEBUG,
)
scraper.set_callbacks(
    found_doc_callback=handle_doc,
)
scraper.set_url_data(url)
if _DEBUG:
    print("\nStarting Scraper on {0} ...\n\n".format(options.target_url))
data = scraper.start()
if _DEBUG:
    print("\n\nScraper complete.\n")
    print("BarkingOwl Scraper found {0} documents on {1}.\n\n".format(
        len(data['documents']),
        options.target_url,
    ))
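The handle_doc callback referenced above is not shown in the excerpt. A minimal sketch of what such a handler might look like, assuming the scraper passes an object describing each found document (the payload shape is an assumption, not part of the excerpt):

# Hypothetical handler for found_doc_callback; the structure of 'doc'
# is assumed rather than taken from the excerpt above.
def handle_doc(doc):
    print("Found document: {0}".format(doc))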
Example 2: ScraperWrapper
# Required import: from scraper import Scraper [as alias]
# or: from scraper.Scraper import set_callbacks [as alias]
# (this excerpt also relies on uuid, datetime, and BusAccess being imported
# elsewhere in the original module)
class ScraperWrapper(object):  # originally subclassed threading.Thread

    def __init__(self,
                 address='localhost',
                 exchange='barkingowl',
                 heartbeat_interval=30,
                 url_parameters=None,
                 broadcast_interval=5,
                 uid=str(uuid.uuid4()),  # caution: evaluated once at definition
                                         # time, so all instances built with the
                                         # default share the same uid
                 DEBUG=False):
        self.address = address
        self.exchange = exchange
        self.heartbeat_interval = heartbeat_interval
        self.url_parameters = url_parameters
        self.broadcast_interval = broadcast_interval
        self.uid = uid
        self._DEBUG = DEBUG

        print("ScraperWrapper().__init__(): Creating scraper ...")
        self.scraper = Scraper(
            DEBUG=self._DEBUG,
        )
        self.scraping = False
        self.scraper_thread = None
        print("ScraperWrapper().__init__(): Scraper Created.")

        self.stopped = False
        self.bus_access = BusAccess(
            uid=self.uid,
            address=self.address,
            exchange=self.exchange,
            heartbeat_interval=self.heartbeat_interval,
            url_parameters=self.url_parameters,
            DEBUG=self._DEBUG,
        )
        self.bus_access.set_callback(
            callback=self._reqcallback,
        )
    def start(self):
        self.scraper.set_callbacks(
            start_callback=self.scraper_started_callback,
            finished_callback=self.scraper_finished_callback,
            found_doc_callback=self.scraper_broadcast_document_callback,
            new_url_callback=None,
            bandwidth_limit_callback=None,
            memory_limit_callback=None,
            error_callback=None,
        )
        self.broadcast_status()

    def stop(self):
        self.bus_access.stop_listening()
        self.scraper.stop()
        self.stopped = True
    def reset_scraper(self):
        self.scraper.reset()

    def broadcast_status(self):
        if self._DEBUG:
            print("ScraperWrapper().broadcast_status(): Entering status loop.")
        while not self.scraping and not self.stopped:
            if self._DEBUG:
                print("ScraperWrapper.broadcast_status() sending status pulse ...")
            if not self.scraping and not self.scraper._data['working']:
                packet = {
                    'available_datetime': str(datetime.datetime.now())
                }
                self.bus_access.send_message(
                    command='scraper_available',
                    destination_id='broadcast',
                    message=packet,
                )
            '''
            packet = {
                'working': self.scraper._data['working'],
                'seen_url_count': len(self.scraper._data['seen_urls']),
# ......... the rest of this example is omitted .........
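A hypothetical way to drive the wrapper, assuming a message bus is reachable on localhost and that start() blocks inside the broadcast_status() loop until scraping begins or stop() is called:

import threading

# Usage sketch only: ScraperWrapper and its constructor arguments are as
# defined above; running start() on a worker thread is an assumption based
# on the blocking broadcast loop.
wrapper = ScraperWrapper(
    address='localhost',
    exchange='barkingowl',
    DEBUG=True,
)
worker = threading.Thread(target=wrapper.start)
worker.start()
# ... later, shut the wrapper down:
wrapper.stop()
worker.join()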