This article collects typical usage examples of the Spider.start method from the Python module spider. If you have been wondering what Spider.start does, or how exactly to use it, the hand-picked code examples below may help. You can also look further into the usage of the class this method belongs to, spider.Spider.
The following presents 4 code examples of Spider.start, listed by popularity by default.
Example 1: main
# Required import: from spider import Spider [as alias]
# Or: from spider.Spider import start [as alias]
def main():
    """Program entry point.

    Parse and validate the command-line arguments, configure the logger
    from those arguments, then create the thread pool and the spider:
    worker threads added to the pool process the tasks that the spider
    puts into it.
    """
    # Parse and validate the command-line arguments
    args = base.get_arg()
    if not base.check_args(args):
        print 'Args error!'
        sys.exit()
    base.handle_args(args)
    # Configure the logger
    if not base.set_logger(args.log_file, args.log_level):
        print 'Set logger error'
        sys.exit()
    logger.debug('Get args :%s' % args)
    # Program self-test
    if args.test_self:
        base.test_self()
        sys.exit()
    database = Sqlite3DB(args.db_file)
    # Create the spider and the thread pool. thread_num worker threads are
    # added to the pool; the spider builds tasks and puts them into the pool.
    spider = Spider(args.url, args.depth, args.thread_num, args.key_word,
                    args.down_file, database)
    main_thread = MainThread(spider)
    main_thread.start()
    spider.start()
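
The Spider class itself is not included in this excerpt. As a rough illustration only, here is a minimal, hypothetical Python 3 sketch of what a start() method driving a pool of worker threads might look like; the attribute names (task_list, visited_list, status, curr_depth) are assumptions borrowed from Example 4 below, not the actual implementation.

import threading
import queue


class Spider(object):
    """Hypothetical sketch only -- not the spider.Spider shown on this page."""

    def __init__(self, url, depth, thread_num):
        self.task_list = queue.Queue()   # pending (url, depth) tasks
        self.visited_list = set()        # URLs already crawled
        self.curr_depth = 0
        self.status = True               # True while the crawl is running
        self._seed = (url, 0)
        self._max_depth = depth
        self._thread_num = thread_num

    def _worker(self):
        while True:
            url, depth = self.task_list.get()
            try:
                if url not in self.visited_list and depth <= self._max_depth:
                    self.visited_list.add(url)
                    self.curr_depth = max(self.curr_depth, depth)
                    # A real crawler would fetch the page here and enqueue
                    # newly discovered links as (link, depth + 1) tasks.
            finally:
                self.task_list.task_done()

    def start(self):
        # Spawn the worker threads, seed the task queue, then block until
        # every queued task has been processed.
        for _ in range(self._thread_num):
            threading.Thread(target=self._worker, daemon=True).start()
        self.task_list.put(self._seed)
        self.task_list.join()
        self.status = False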
Example 2: main
# Required import: from spider import Spider [as alias]
# Or: from spider.Spider import start [as alias]
def main():
    # Parse the command-line arguments and pull out the individual options.
    args = command_parser()
    target_url = args.target_url[0]
    depth = int(args.depth[0])
    log_level = int(args.log_level)
    log_file = args.log_file
    thread_number = int(args.thread_number)
    key = args.key
    db_file = args.db_file
    test_self = args.test_self
    # Build the crawler for the target URL and start crawling.
    spider = Spider(target_url, depth=depth, thread_number=thread_number)
    spider.start()
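
command_parser() is not part of the excerpt above. A minimal argparse-based stand-in consistent with the attributes the example reads (target_url, depth, log_level, and so on) might look like the sketch below; all option names and defaults here are assumptions for illustration.

import argparse


def command_parser():
    """Hypothetical stand-in for the command_parser() used in Example 2."""
    parser = argparse.ArgumentParser(description='simple web spider')
    parser.add_argument('target_url', nargs=1, help='seed URL to crawl')
    parser.add_argument('-d', '--depth', nargs=1, default=['2'],
                        help='maximum crawl depth')
    parser.add_argument('--log-level', dest='log_level', default='1')
    parser.add_argument('--log-file', dest='log_file', default='spider.log')
    parser.add_argument('--thread-number', dest='thread_number', default='10')
    parser.add_argument('--key', default='', help='keyword to search for')
    parser.add_argument('--db-file', dest='db_file', default='spider.db')
    parser.add_argument('--test-self', dest='test_self', action='store_true')
    return parser.parse_args()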
Example 3: TestSpider
# Required import: from spider import Spider [as alias]
# Or: from spider.Spider import start [as alias]
class TestSpider(unittest.TestCase):
    """Test case for the Spider class"""
    def setUp(self):
        """Set up the queues, worker spiders and parsed test pages"""
        self.spider_q = Queue()
        self.db_q = Queue()
        self.url_q = Queue()
        self.pages = ['http://exchanges.state.gov/heritage/index.html',
                      'http://exchanges.state.gov/heritage/iraq.html',
                      'http://exchanges.state.gov/heritage/special.html',
                      'http://exchanges.state.gov/heritage/culprop.html',
                      'http://exchanges.state.gov/heritage/afcp.html']
        self.start = self.pages[0]
        for i in range(5):
            self.spider = Spider(self.spider_q, self.db_q, self.url_q,
                                 self.start, blacklist=(os.path.abspath(
                                     'blacklist.txt')))
            self.spider.setDaemon(True)
            self.spider.start()
        self.soups = [BeautifulSoup(requests.get(page).text) for page in
                      self.pages]
        for soup in self.soups:
            self.spider_q.put(soup)
        self.spider_q.join()
        self.soup = self.soups[0]

    def test_get_links(self):
        """Tests only links to web pages are being collected"""
        actual = self.spider.get_links(self.soup)
        expected = set([
            'http://exchanges.state.gov/scho-pro.html',
            'http://www.state.gov/misc/87529.htm#privacy',
            'http://www.state.gov/m/a/ips/',
            'http://exchanges.state.gov/alumni/index.html',
            'http://exchanges.state.gov/student.html',
            'http://exchanges.state.gov/programs/professionals.html',
            'http://exchanges.state.gov/about/assistant-secretary-stock.html',
            'http://exchanges.state.gov/news/index.html',
            'http://exchanges.state.gov/heritage/index.html',
            'http://exchanges.state.gov/heritage/1sindex.html',
            'http://exchanges.state.gov/heritage/culprop.html',
            'http://exchanges.state.gov/mobile/index.html',
            'http://j1visa.state.gov/',
            'http://www.state.gov/misc/415.htm',
            'http://exchanges.state.gov/index.html',
            'http://exchanges.state.gov/sports/index.html',
            'http://exchanges.state.gov/grants/preparing_payment.html',
            'http://state.gov/',
            'http://exchanges.state.gov/grants/faqs.html',
            'http://exchanges.state.gov/heritage/whatsnew.html',
            'http://exchanges.state.gov/',
            'http://exchanges.state.gov/about/program_offices.html',
            'http://exchanges.state.gov/englishteaching/forum-journal.html',
            'http://www.state.gov/misc/60289.htm',
            'http://exchanges.state.gov/heritage/iraq.html',
            'http://exchanges.state.gov/grants/terminology.html',
            'http://exchanges.state.gov/heritage/sindex.html',
            'http://exchanges.state.gov/heritage/special.html',
            'http://exchanges.state.gov/grants/preparing_reports.html',
            'http://exchanges.state.gov/programevaluations/index.html',
            'http://exchanges.state.gov/programs/scholars.html',
            'http://exchanges.state.gov/programs/cultural.html',
            'http://exchanges.state.gov/programs/secondary-school.html',
            'http://www.usa.gov/',
            'http://exchanges.state.gov/about/contact-us.html',
            'http://exchanges.state.gov/programs/university.html',
            'http://www.state.gov/misc/87529.htm#copyright',
            'http://exchanges.state.gov/grants/open2.html',
            'http://exchanges.state.gov/programs/english-language.html',
            'http://exchanges.state.gov/jexchanges/ppp.html',
            'http://exchanges.state.gov/pro-admin.html',
            'http://exchanges.state.gov/search.html',
            'http://exchanges.state.gov/grants/cfda.html',
            'http://www.iawg.gov/',
            'http://exchanges.state.gov/englishteaching/resources-et.html',
            ('http://exchanges.state.gov/heritage/culprop/index/'
             'pdfs/unesco01.pdf'),
            'http://exchanges.state.gov/heritage/afcp.html',
            'http://exchanges.state.gov/features/index.html',
            'http://exchanges.state.gov/host/index.html',
            'http://exchanges.state.gov/about/employment.html',
            'http://exchanges.state.gov/programs/educators.html',
            'http://exchanges.state.gov/a-z.html',
            'http://exchanges.state.gov/about.html',
            ('http://exchanges.state.gov/programevaluations/'
             'program-evaluations.html'),
        ])
        self.assertEqual(actual, expected)

    def test_get_pdfs(self):
        """Tests that pdfs are being found on page"""
        actual = self.spider.get_pdfs(self.soup)
        expected = set([('http://exchanges.state.gov/heritage/culprop/index/'
                         'pdfs/unesco01.pdf')])
        self.assertEqual(actual, expected)

    def test_black_list(self):
        """Tests black list is being pulled in"""
        actual = self.spider.black_list()
        expected = ['http://exchanges.state.gov/heritage/iraq.html',
#.........the rest of this code has been omitted.........
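
The get_links() and get_pdfs() methods exercised by these tests are not shown in the excerpt. As a hypothetical sketch of what such BeautifulSoup-based helpers could look like (the signatures and behaviour are assumptions, not the tested code):

from urllib.parse import urljoin


def get_links(soup, base_url):
    """Collect absolute http(s) links found in the parsed page (sketch only)."""
    links = set()
    for anchor in soup.find_all('a', href=True):
        url = urljoin(base_url, anchor['href'])
        if url.startswith('http'):
            links.add(url)
    return links


def get_pdfs(soup, base_url):
    """Collect the subset of links that point at PDF documents (sketch only)."""
    return {url for url in get_links(soup, base_url)
            if url.lower().endswith('.pdf')}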
Example 4: run
# Required import: from spider import Spider [as alias]
# Or: from spider.Spider import start [as alias]
    def run(self):
        while 1:
            if self.spider.status:
                print '\n-------------------------------------------'
                print 'CurrentDepth : %d' % self.spider.curr_depth
                print 'Already visited %d Links' % len(self.spider.visited_list)
                print '%d tasks remaining in task_list.' % self.spider.task_list.qsize()
                print '%d urls extended.' % len(self.spider.extend_list)
                print '-------------------------------------------\n'
            time.sleep(self.interval)

    def print_total_time(self):
        self.end_time = datetime.now()
        print('Begin time: %s' % self.begin_time)
        print('End time: %s' % self.end_time)
        print('Total time spent: %s \n' % (self.end_time - self.begin_time))


if __name__ == '__main__':
    args = parser.parse_args()
    if not logconf(args.log_file, args.log_level):
        print('logger configure failed!')
    else:
        spider = Spider(args)
        printpro = printProgress(spider, args.interval)
        printpro.start()
        spider.start()
        print("Mission complete")
        printpro.print_total_time()
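
The printProgress class that owns run() and print_total_time() is not shown above. A minimal threading.Thread-based skeleton consistent with how it is constructed and started in the __main__ block might look like this (the attribute names are assumptions):

import threading
from datetime import datetime


class printProgress(threading.Thread):
    """Hypothetical skeleton of the progress reporter used in Example 4."""

    def __init__(self, spider, interval):
        threading.Thread.__init__(self)
        self.daemon = True            # do not block interpreter shutdown
        self.spider = spider          # crawler whose counters get reported
        self.interval = interval      # seconds between progress reports
        self.begin_time = datetime.now()
        self.end_time = None

    # run() and print_total_time() would be the two methods shown above.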