

Python Scheduler.get Method Code Examples

This article collects typical usage examples of the Python method scheduler.Scheduler.get, taken from open-source projects. If you are wondering what Scheduler.get does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples of the containing class, scheduler.Scheduler.


Five code examples of the Scheduler.get method are shown below, sorted by popularity by default.
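Note that get() plays two different roles in these examples. In the TACTIC snippets (Examples 1 to 3), Scheduler.get() is a class-level accessor that hands back a shared scheduler instance; in Examples 4 and 5, get() is an instance method that pops the next pending item off a work queue. As a rough mental model of the first role only, here is a minimal hypothetical sketch of a singleton-style accessor, not the actual implementation used by any of the projects below:

import threading

class Scheduler(object):
    # Hypothetical sketch, not the TACTIC implementation: a class-level
    # accessor that always returns one shared scheduler instance.
    _instance = None
    _lock = threading.Lock()

    @classmethod
    def get(cls):
        # Create the shared instance on first use, then keep returning it
        with cls._lock:
            if cls._instance is None:
                cls._instance = cls()
        return cls._instance

Because get() always returns the same object, tasks registered anywhere in the process end up on the same scheduler, which is what lets the snippets below fetch it by class without passing a reference around.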

Example 1: start

# Required import: from scheduler import Scheduler [as alias]
# Or: from scheduler.Scheduler import get [as alias]
    def start():

        scheduler = Scheduler.get()
        scheduler.start_thread()

        task = TransactionQueueManager()
        task.cleanup_db_jobs()

        scheduler.add_single_task(task, mode='threaded', delay=1)
Developer: funic, Project: TACTIC, Lines: 11, Source: run_transaction_cmd.py

Example 2: start

# Required import: from scheduler import Scheduler [as alias]
# Or: from scheduler.Scheduler import get [as alias]
    def start():

        scheduler = Scheduler.get()
        scheduler.start_thread()
        task = TransactionQueueManager(
            #check_interval=0.1,
            max_jobs_completed=20
        )
        task.cleanup_db_jobs()
        scheduler.add_single_task(task, mode='threaded', delay=1)
        # important to close connection after adding tasks
        DbContainer.close_all()
Developer: mincau, Project: TACTIC, Lines: 14, Source: run_transaction_cmd.py

Example 3: start

# Required import: from scheduler import Scheduler [as alias]
# Or: from scheduler.Scheduler import get [as alias]
    def start(cls):
        
        task = WatchServerFolderTask()

        scheduler = Scheduler.get()
        scheduler.add_single_task(task, 3)
        #scheduler.add_interval_task(task, 1)

        scheduler.start_thread()
        # important to close connection after adding tasks
        DbContainer.close_all()

        return scheduler
Developer: 0-T-0, Project: TACTIC, Lines: 15, Source: watch_handoff_folder.py
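Examples 1 through 3 follow the same TACTIC startup pattern: fetch the shared scheduler, register a one-shot task, start the worker thread, and then release any database handles opened while registering (DbContainer.close_all()) so the scheduler thread does not inherit stale connections. Condensed into a single sketch, with identifiers taken from the examples above and parameter values that are illustrative rather than prescriptive:

def start_queue_manager():
    # Obtain the shared scheduler and make sure its worker thread is running
    scheduler = Scheduler.get()
    scheduler.start_thread()

    # Register a one-shot, threaded task that fires after a short delay
    task = TransactionQueueManager(max_jobs_completed=20)
    task.cleanup_db_jobs()
    scheduler.add_single_task(task, mode='threaded', delay=1)

    # Close connections opened during registration; the comments in the
    # original snippets stress doing this after the tasks are added
    DbContainer.close_all()
    return scheduler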

Example 4: run_calc

# Required import: from scheduler import Scheduler [as alias]
# Or: from scheduler.Scheduler import get [as alias]
def run_calc(args):
    try:
        with utils.TemporaryDirectory() as tmpdir:
            logging.getLogger().setLevel(logging.INFO)
            logging.basicConfig(
                format="%(asctime)s [%(levelname)s]: %(message)s"
            )
            pypi = PyPi()
            extractor = Extractor(
                virtualenv=args.virtualenv,
                tmpdir=tmpdir,
                pypi=pypi
            )
            db = Database(
                host=args.redis_host,
                port=args.redis_port,
                db=args.redis_db
            )
            scheduler = Scheduler(
                db=db,
                extractor=extractor,
                pypi=pypi
            )

            # start with given paths
            # also remember what we have got here,
            # because it is important for the PBO part later
            must_satisfy = []
            for p in args.paths:
                splitted = p.split(':')
                cwd = splitted[0]
                if len(splitted) > 1:
                    extras = splitted[1].split(',')
                else:
                    extras = []

                data = extractor.from_path(cwd, db)

                must_satisfy.append(
                    (
                        utils.normalize(data['name']),
                        utils.normalize(data['version'])
                    )
                )
                scheduler.add_todos_from_db(data['name'], data['version'], '')
                scheduler.done_with_all_versions(data['name'], '')
                for e in itertools.chain([''], extras):
                    scheduler.add_todos_from_db(
                        data['name'],
                        data['version'],
                        e
                    )
                    scheduler.done_with_all_versions(data['name'], e)

            # run until no tasks left
            todo = scheduler.get()
            while todo:
                (name, extra) = todo

                if args.cached:
                    scheduler.process_cached(name, extra)
                else:
                    scheduler.process_extract(name, extra)
                todo = scheduler.get()

            # finally solve our problem
            solver.solve(
                scheduler,
                db,
                must_satisfy,
                tmpdir,
                args.solver,
                args.outfile,
                args.include_starting_points
            )

    except utils.HandledError as e:
        logging.error(e.message)
Developer: crepererum, Project: eprc, Lines: 80, Source: __main__.py
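In this example scheduler.get() behaves like a work-queue pop: it returns the next (name, extra) pair to process, or a falsy value once everything has been handled, which is what terminates the while loop. A self-contained sketch of that get-until-empty contract, using a hypothetical stand-in class rather than the eprc Scheduler:

class TinyScheduler(object):
    # Hypothetical stand-in illustrating the drain loop used above
    def __init__(self, items):
        self._todo = list(items)

    def get(self):
        # Next pending item, or None once the queue is drained
        return self._todo.pop(0) if self._todo else None

scheduler = TinyScheduler([('requests', ''), ('flask', 'async')])
todo = scheduler.get()
while todo:
    name, extra = todo
    print('processing %s extra=%r' % (name, extra))
    todo = scheduler.get()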

Example 5: crawl_spider

# Required import: from scheduler import Scheduler [as alias]
# Or: from scheduler.Scheduler import get [as alias]
def crawl_spider(spider):
    ############### TRAIN SPIDER ##############
    if spider == "train":
        # initialize the scheduling queue
        q = Scheduler()              
        # initialize all of the pipelines
        pipeline = []
        for pipe in settings.PIPELINES:
            try:
                pipeline.append( getattr( pipelines, pipe )() )
            except: 
                print "Error: Unable to initialize %s pipe" % pipe
                quit()
        # initialize the spider
        # try:
        #     s = getattr(spiders, spider)()    
        # except:
        #     print "Error: It's likely that the input spider does not exist in spiders.py"
        #     quit()
        s = spiders.Train()
        #print s.__doc__
        # add all of the start links and known links to the top level of the queue
        for url in list(s.start_urls) + list(s.known_urls):
            q.add_link(url, 0)
        q.print_queue()
        # request urls while scheduler not empty and pass them to the spider
        # add returned links to the queue
        # send returned items down the pipeline
        visits = 0
        while not q.is_empty():
            wait_between_requests() # wait a random small amount of time so we're less detectable
            url, level = q.get_next_link(what_level=True)
            print "Visit #%i, Q level %i, Q volume %i" % (visits, level, q.queue_volume())
            response = get_request(url)
            if response: 
                items, extracted_links = s.parse(response, level=level) # items and extracted_links are both lists
                #print "extracted links:", extracted_links
                add_to_queue(q, extracted_links) # manage the returned links
                send_down_pipeline(pipeline, items, s) # manage the returned items
                if settings.ASK_BETWEEN_REQUESTS: raw_input("Press ENTER to continue?")
                visits += 1 

        if q.is_empty(): print "CRAWL IS FINISHED: Queue is empty"
        #if visits >= settings.MAX_CRAWLS: print "CRAWL IS FINISHED: Crawled max number of urls (%i total)" % visits

    ################ TEST SPIDER ##############
    elif spider == "test":
        print "Test case"
        q = PriorityQueue()              
        queued_links = set()
        # initialize all of the pipelines
        pipeline = []
        for pipe in settings.PIPELINES:
            try:
                pipeline.append( getattr( pipelines, pipe )() )
            except: 
                print "Error: Unable to initialize %s pipe" % pipe
                quit()
        # initialize the spider
        # try:
        #     s = spiders.Test()    
        # except:
        #     print "Error: It's likely that the input spider does not exist in spiders.py"
        #     quit()
        #print s.__doc__
        s = spiders.Test()    
        # add all of the start links and known links to the top level of the queue
        q.put((-.1, s.start_urls[0]))
        queued_links.add(s.start_urls[0])
        # request urls while scheduler not empty and pass them to the spider
        # add returned links to the queue
        # send returned items down the pipeline
        visits = 0
        while not q.empty():
            wait_between_requests() # wait a random small amount of time so we're less detectable
            priority, url = q.get()
            print "Q get:", -priority, url
            print "Visit #%i, Q volume %i" % (visits, q.qsize())
            response = get_request(url)
            if response: 
                items, extracted_links = s.parse(response, level=-priority) # items and extracted_links are both lists
                # print "Extracted item: ", items
                #print "extracted links:", extracted_links
                for link in extracted_links:
                    if link[1] not in queued_links:
                        # print link
                        q.put((-link[0], link[1]))
                        queued_links.add(link[1])
                    # else:
                        # print "We already queued %s" % link[1]
                send_down_pipeline(pipeline, items, s) # manage the returned items
                if settings.ASK_BETWEEN_REQUESTS: raw_input("Press ENTER to continue?")
                visits += 1 

        if q.empty(): print "CRAWL IS FINISHED: Queue is empty"
        #if visits >= settings.MAX_CRAWLS: print "CRAWL IS FINISHED: Crawled max number of urls (%i total)" % visits

    else:
        quit()
Developer: teffland, Project: FindIt, Lines: 102, Source: findit.py
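The test branch stores entries as (-priority, url) because PriorityQueue is a min-heap: get() always returns the smallest tuple first, so negating the level makes the link with the largest level value come out first, and negating again on retrieval recovers the original level. A standalone illustration of that trick (standard library only; the URLs are placeholders), written to import the module under both its Python 2 and Python 3 names:

try:
    from queue import PriorityQueue   # Python 3
except ImportError:
    from Queue import PriorityQueue   # Python 2, as in the example above

q = PriorityQueue()
for level, url in [(0.1, 'http://example.com/a'),
                   (2.0, 'http://example.com/b'),
                   (1.0, 'http://example.com/c')]:
    # Negate the level so the largest level value is returned first
    q.put((-level, url))

while not q.empty():
    priority, url = q.get()
    print('visiting %s at level %s' % (url, -priority))
# Visits b (2.0), then c (1.0), then a (0.1)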


Note: The scheduler.Scheduler.get examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors. Refer to each project's License for terms of distribution and use, and do not republish without permission.