

Python LifoQueue.join method code examples

This article collects typical usage examples of the Python Queue.LifoQueue.join method from open-source projects. If you are unsure what LifoQueue.join does, how to use it, or want to see it in real code, the curated examples below should help. You can also explore other usage examples of Queue.LifoQueue, the class this method belongs to.


The following presents 12 code examples of the LifoQueue.join method, ordered by popularity by default.
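All twelve examples share one pattern: daemon worker threads drain a LifoQueue, call task_done() for every item taken, and a producer blocks on join() until everything has been processed. As a warm-up, here is a minimal, self-contained sketch of that pattern (the worker function and item values are illustrative, not taken from any project below):

from Queue import LifoQueue
from threading import Thread

q = LifoQueue(maxsize=100)

def worker():
    while True:
        item = q.get()  # blocks until an item is available
        try:
            print 'processing %s' % item  # stand-in for real work
        finally:
            q.task_done()  # must be called exactly once per get()

for i in range(4):
    t = Thread(target=worker)
    t.daemon = True  # allow interpreter exit while workers block on get()
    t.start()

for item in range(10):
    q.put(item)

q.join()  # returns once task_done() has been called for every put()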

Example 1: copy_job

# Required import: from Queue import LifoQueue [as alias]
# Or: from Queue.LifoQueue import join [as alias]
def copy_job(self, max_keys=1000):
    logging.info('start copy_bucket')
    src = self.job['source']
    tgt = self.job['target']

    conn = self.get_conn(tgt['owner'])
    srcBucket = conn.get_bucket(src['bucket'])
    tgtBucket = conn.get_bucket(tgt['bucket'])

    if self.job['options']['allow-acl-change']:
        ownerBucketView = self.get_conn(src['owner']).get_bucket(src['bucket'])
        ownerID = self.users[tgt['owner']]['canonical-id']
    else:
        ownerBucketView = None
        ownerID = None
    resultMarker = ''
    q = LifoQueue(maxsize=5000)
    for i in range(self.parallel):
        logging.info('adding worker %d' % i)
        t = BucketCopyWorker(q, srcBucket, tgtBucket, src['key-prefix'], tgt['key-prefix'], ownerBucketView, ownerID)
        t.daemon = True
        t.start()
    while True:
        logging.info('fetch next %d, backlog currently at %i' % (max_keys, q.qsize()))
        keys = srcBucket.get_all_keys(prefix=src['key-prefix'], max_keys=max_keys, marker=resultMarker)
        for k in keys:
            q.put(k.key)
        if len(keys) < max_keys:
            print 'Done'
            break
        resultMarker = keys[max_keys - 1].key
    q.join()
    logging.info('done copy_bucket')
Author: uhjish, Project: bucketbrigade, Lines: 35, Source: bucketbrigade.py

Example 2: copyBucket

# Required import: from Queue import LifoQueue [as alias]
# Or: from Queue.LifoQueue import join [as alias]
def copyBucket(maxKeys=1000):
    print 'start'

    s_conn = S3Connection(source_aws_key, source_aws_secret_key)
    srcBucket = s_conn.get_bucket(srcBucketName)

    resultMarker = ''
    q = LifoQueue(maxsize=5000)

    for i in range(10):
        print 'adding worker'
        t = Worker(q)
        t.daemon = True
        t.start()

    while True:
        print 'fetch next 1000, backlog currently at %i' % q.qsize()
        keys = srcBucket.get_all_keys(max_keys=maxKeys, marker=resultMarker)
        for k in keys:
            q.put(k.key)
        if len(keys) < maxKeys:
            print 'Done'
            break
        resultMarker = keys[maxKeys - 1].key

    q.join()
    print 'done'
Author: yerachw, Project: S3Copy, Lines: 29, Source: s3copy.py

Example 3: copy_bucket

# Required import: from Queue import LifoQueue [as alias]
# Or: from Queue.LifoQueue import join [as alias]
def copy_bucket(aws_key, aws_secret_key, src_bucket_name, dst_bucket_name):
    print
    print 'Start copy of %s to %s' % (src_bucket_name, dst_bucket_name)
    print
    max_keys = 1000

    conn = S3Connection(aws_key, aws_secret_key)
    srcBucket = conn.get_bucket(src_bucket_name)

    result_marker = ''
    q = LifoQueue(maxsize=5000)

    for i in range(20):
        print 'Adding worker thread %s for queue processing' % i
        t = Worker(q, i, aws_key, aws_secret_key, src_bucket_name, dst_bucket_name)
        t.daemon = True
        t.start()

    i = 0

    while True:
        print 'Fetch next %s, backlog currently at %s, have done %s' % (max_keys, q.qsize(), i)
        try:
            keys = srcBucket.get_all_keys(max_keys=max_keys, marker=result_marker)
            if len(keys) == 0:
                break
            for k in keys:
                i += 1
                q.put(k.key)
            print 'Added %s keys to queue' % len(keys)
            if len(keys) < max_keys:
                print 'All items now in queue'
                break
            result_marker = keys[max_keys - 1].key
            while q.qsize() > (q.maxsize - max_keys):
                time.sleep(1)  # sleep if our queue is getting too big for the next set of keys
        except BaseException:
            logging.exception('error during fetch, quitting')
            break

    print 'Waiting for queue to be completed'
    q.join()
    print
    print 'Done'
    print
Author: WorkFu, Project: s3_bucket_to_bucket_copy_py, Lines: 47, Source: s3_bucket_to_bucket_copy.py
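Example 3 above and examples 7 and 10 below are variants of the same listing idiom: fetch at most max_keys keys per request, remember the last key as the marker for the next page, and throttle the producer while the bounded queue is nearly full. Stripped of the copying logic, the idiom looks roughly like this (enqueue_all_keys is an illustrative name; bucket is an already-opened boto bucket, q a bounded LifoQueue as in the examples):

import time

def enqueue_all_keys(bucket, q, max_keys=1000):
    result_marker = ''
    while True:
        keys = bucket.get_all_keys(max_keys=max_keys, marker=result_marker)
        if not keys:
            break
        for k in keys:
            q.put(k.key)
        if len(keys) < max_keys:
            break  # short page: the listing is exhausted
        result_marker = keys[-1].key  # resume after the last key seen
        while q.qsize() > q.maxsize - max_keys:
            time.sleep(1)  # backpressure: let the workers drain the queue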

Example 4: TileProvider

# Required import: from Queue import LifoQueue [as alias]
# Or: from Queue.LifoQueue import join [as alias]
class TileProvider( QObject ):
    THREAD_HEARTBEAT = 0.2

    Tile = collections.namedtuple('Tile', 'id qimg rectF progress tiling') 
    changed = pyqtSignal( QRectF )


    '''TileProvider __init__
    
    Keyword Arguments:
    cache_size                -- maximal number of encountered stacks 
                                 to cache, i.e. slices if the imagesources  
                                 draw from slicesources (default 10)
    request_queue_size        -- maximal number of requests to queue up (default 100000)
    n_threads                 -- maximal number of request threads; this determines the
                                 maximal number of simultaneously running requests 
                                 to the pixelpipeline (default: 2)
    layerIdChange_means_dirty -- layerId changes invalidate the cache; by default only 
                                 stackId changes do that (default False)
    parent                    -- QObject
    
    '''
    def __init__( self, tiling,
                  stackedImageSources,
                  cache_size = 10,
                  request_queue_size = 100000,
                  n_threads = 2,
                  layerIdChange_means_dirty=False,
                  parent=None ):
        QObject.__init__( self, parent = parent )

        self.tiling = tiling
        self._sims = stackedImageSources
        self._cache_size = cache_size
        self._request_queue_size = request_queue_size
        self._n_threads = n_threads
        self._layerIdChange_means_dirty = layerIdChange_means_dirty

        self._current_stack_id = self._sims.stackId
        self._cache = _TilesCache(self._current_stack_id, self._sims, maxstacks=self._cache_size)

        self._dirtyLayerQueue = LifoQueue(self._request_queue_size)

        self._sims.layerDirty.connect(self._onLayerDirty)
        self._sims.visibleChanged.connect(self._onVisibleChanged)
        self._sims.opacityChanged.connect(self._onOpacityChanged)
        self._sims.sizeChanged.connect(self._onSizeChanged)
        self._sims.orderChanged.connect(self._onOrderChanged)
        self._sims.stackIdChanged.connect(self._onStackIdChanged)
        if self._layerIdChange_means_dirty:
            self._sims.layerIdChanged.connect(self._onLayerIdChanged)

        self._keepRendering = True
        
        self._dirtyLayerThreads = [Thread(target=self._dirtyLayersWorker) for i in range(self._n_threads)]
        for thread in self._dirtyLayerThreads:
            thread.daemon = True
        [ thread.start() for thread in self._dirtyLayerThreads ]

    def getTiles( self, rectF ):
        '''Get tiles in rect and request a refresh.

        Returns tiles intersecting with rectF immediately and requests a refresh
        of these tiles. Next time you call this function the tiles may be already
        (partially) updated. If you want to wait until the rendering is fully complete,
        call join().

        '''
        self.requestRefresh( rectF )
        tile_nos = self.tiling.intersectedF( rectF )
        stack_id = self._current_stack_id
        for tile_no in tile_nos:
            qimg, progress = self._cache.tile(stack_id, tile_no)
            t = TileProvider.Tile(tile_no,
                     qimg,
                     QRectF(self.tiling.imageRects[tile_no]),
                     progress,
                     self.tiling)
            yield t

    def requestRefresh( self, rectF ):
        '''Requests tiles to be refreshed.

        Returns immediately. Call join() to wait for
        the end of the rendering.

        '''
        tile_nos = self.tiling.intersectedF( rectF )
        for tile_no in tile_nos:
            stack_id = self._current_stack_id
            self._refreshTile( stack_id, tile_no )

    def join( self ):
        '''Wait until all refresh requests are processed.

        Blocks until no refresh requests are pending and all rendering
        finished.

        '''
        return self._dirtyLayerQueue.join()
#......... part of the code omitted here .........
Author: LimpingTwerp, Project: volumina, Lines: 103, Source: tiling.py
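The _dirtyLayersWorker body falls in the omitted part of this file. Based on the attributes that are shown (THREAD_HEARTBEAT, _keepRendering, _dirtyLayerQueue), a plausible skeleton is a heartbeat loop: poll the LIFO queue with a timeout so the thread can periodically re-check its shutdown flag. This is a reconstruction for illustration, not the project's actual code:

from Queue import Empty

def _dirtyLayersWorker(self):
    while self._keepRendering:
        try:
            tile_job = self._dirtyLayerQueue.get(True, self.THREAD_HEARTBEAT)
        except Empty:
            continue  # wake up periodically to re-check _keepRendering
        try:
            pass  # render the tile described by tile_job (omitted)
        finally:
            self._dirtyLayerQueue.task_done()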

Example 5: SaveManager

# Required import: from Queue import LifoQueue [as alias]
# Or: from Queue.LifoQueue import join [as alias]
class SaveManager(QObject):

    start_save = pyqtSignal()
    report_error = pyqtSignal(object)
    save_done = pyqtSignal()

    def __init__(self, parent):
        QObject.__init__(self, parent)
        self.count = 0
        self.last_saved = -1
        self.requests = LifoQueue()
        t = Thread(name='save-thread', target=self.run)
        t.daemon = True
        t.start()
        self.status_widget = w = SaveWidget(parent)
        self.start_save.connect(w.start, type=Qt.QueuedConnection)
        self.save_done.connect(w.stop, type=Qt.QueuedConnection)

    def schedule(self, tdir, container):
        self.count += 1
        self.requests.put((self.count, tdir, container))

    def run(self):
        while True:
            x = self.requests.get()
            if x is None:
                self.requests.task_done()
                self.__empty_queue()
                break
            try:
                count, tdir, container = x
                self.process_save(count, tdir, container)
            except:
                import traceback
                traceback.print_exc()
            finally:
                self.requests.task_done()

    def __empty_queue(self):
        ' Only to be used during shutdown '
        while True:
            try:
                self.requests.get_nowait()
            except Empty:
                break
            else:
                self.requests.task_done()

    def process_save(self, count, tdir, container):
        if count <= self.last_saved:
            shutil.rmtree(tdir, ignore_errors=True)
            return
        self.last_saved = count
        self.start_save.emit()
        try:
            self.do_save(tdir, container)
        except:
            import traceback
            self.report_error.emit(traceback.format_exc())
        self.save_done.emit()

    def do_save(self, tdir, container):
        try:
            save_container(container, container.path_to_ebook)
        finally:
            shutil.rmtree(tdir, ignore_errors=True)

    @property
    def has_tasks(self):
        return bool(self.requests.unfinished_tasks)

    def wait(self, timeout=30):
        if timeout is None:
            self.requests.join()
        else:
            try:
                join_with_timeout(self.requests, timeout)
            except RuntimeError:
                return False
        return True

    def shutdown(self):
        self.requests.put(None)
Author: kutanari, Project: calibre, Lines: 85, Source: save.py
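The wait() method above relies on a join_with_timeout helper because Queue.join() in Python 2 accepts no timeout argument. A minimal sketch of such a helper follows; it waits on the queue's internal all_tasks_done condition variable, which is a CPython implementation detail, so treat this as an illustration rather than a public API (calibre ships its own version):

import time

def join_with_timeout(q, timeout):
    # Like q.join(), but raise RuntimeError if the tasks are not all
    # done within `timeout` seconds.
    q.all_tasks_done.acquire()
    try:
        endtime = time.time() + timeout
        while q.unfinished_tasks:
            remaining = endtime - time.time()
            if remaining <= 0.0:
                raise RuntimeError('Waiting for queue timed out')
            q.all_tasks_done.wait(remaining)
    finally:
        q.all_tasks_done.release()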

Example 6: ThreadPool

# Required import: from Queue import LifoQueue [as alias]
# Or: from Queue.LifoQueue import join [as alias]
class ThreadPool(object):
    def __init__(self, threadNum, max_tasks_per_period=10, seconds_per_period=30):
        self.pool = []  # thread pool
        self.threadNum = threadNum  # number of threads
        self.runningLock = Lock()  # lock protecting the running counter
        self.taskLock = Lock()  # lock for the getTask function
        self.running = 0  # number of threads currently running

        # Use a LIFO queue: after the first post's page has been crawled, all of
        # its comment pages are enqueued next; LIFO ordering ensures the first
        # post's comments are fetched and stored as soon as possible
        self.taskQueue = LifoQueue()  # task queue

        # maximum number of visits allowed per period
        self.max_tasks_per_period = max_tasks_per_period
        # number of seconds in one period
        self.seconds_per_period = seconds_per_period
        # number of pages already visited in the current period
        self.currentPeriodVisits = 0
        # treat one minute as one visiting period; record when the current period started
        self.periodStart = time.time()  # initialize with the current time

    def startThreads(self):
        """Create a certain number of threads and started to run 
        All Workers share the same ThreadPool
        """
        # start the current crawling period
        self.periodStart = time.time()
        for i in range(self.threadNum):
            self.pool.append(Worker(self, i))

    def stopThreads(self):
        for thread in self.pool:
            thread.stop()
            thread.join()
        del self.pool[:]

    def putTask(self, func, *args, **kargs):
        self.taskQueue.put((func, args, kargs))

    def getTask(self, *args, **kargs):
        # Access control: check whether the number of pages visited in the
        # current period has reached the maximum
        if self.currentPeriodVisits >= self.max_tasks_per_period - 2:
            timeNow = time.time()
            seconds = timeNow - self.periodStart
            if seconds < self.seconds_per_period:  # if the period has not elapsed yet, sleep
                remain = self.seconds_per_period - seconds
                print "ThreadPool Waiting for " + str(remain) + " seconds."
                time.sleep(int(remain + 1))

            self.periodStart = time.time()  # reset the period start time
            self.currentPeriodVisits = 0

        try:
            # task = self.taskQueue.get(*args, **kargs)
            task = self.taskQueue.get_nowait()
        except Empty:
            return (None, None, None)

        self.currentPeriodVisits += 1

        return task

    def taskJoin(self, *args, **kargs):
        """Queue.join: Blocks until all items in the queue have been gotten and processed.
        """
        self.taskQueue.join()

    def taskDone(self, *args, **kargs):
        self.taskQueue.task_done()

    def increaseRunsNum(self):
        self.runningLock.acquire()
        self.running += 1  # one more thread is running
        self.runningLock.release()

    def decreaseRunsNum(self):
        self.runningLock.acquire()
        self.running -= 1
        self.runningLock.release()

    def getTaskLeft(self):
        # Outstanding work in the pool: tasks still waiting in taskQueue
        # plus tasks currently being executed by workers
        return self.taskQueue.qsize() + self.running
Author: hitalex, Project: tianya-forum-crawler, Lines: 87, Source: threadPool.py
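The comment in __init__ motivates the LIFO choice: the most recently pushed pages (a post's comment pages) are popped first, giving depth-first behaviour so each post is finished quickly. A two-queue comparison makes the ordering difference concrete:

from Queue import Queue, LifoQueue

fifo, lifo = Queue(), LifoQueue()
for page in ['post', 'comments-1', 'comments-2']:
    fifo.put(page)
    lifo.put(page)

print fifo.get()  # 'post'        -- oldest first (FIFO)
print lifo.get()  # 'comments-2'  -- newest first (LIFO): finish the current post ASAP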

Example 7: copy_bucket

# Required import: from Queue import LifoQueue [as alias]
# Or: from Queue.LifoQueue import join [as alias]
def copy_bucket(aws_key, aws_secret_key, args):
    max_keys = 1000

    src = args.src_bucket
    dst = args.dest_bucket

    conn = S3Connection(aws_key, aws_secret_key)
    try:
        (src_bucket_name, src_path) = src.split('/', 1)
    except ValueError:
        src_bucket_name = src
        src_path = None
    try:
        (dst_bucket_name, dst_path) = dst.split('/', 1)
    except ValueError:
        dst_bucket_name = dst
        dst_path = None
    src_bucket = conn.get_bucket(src_bucket_name)

    if args.verbose:
        print
        print 'Start copy of %s to %s' % (src, dst)
        print

    result_marker = ''
    q = LifoQueue(maxsize=5000)

    for i in xrange(args.threads_no):
        if args.verbose:
            print 'Adding worker thread %s for queue processing' % i
        t = Worker(q, i, aws_key, aws_secret_key,
                   src_bucket_name, dst_bucket_name,
                   src_path, dst_path, args)
        t.daemon = True
        t.start()

    i = 0

    while True:
        if args.verbose:
            print 'Fetch next %s, backlog currently at %s, have done %s' % \
                (max_keys, q.qsize(), i)
        try:
            keys = src_bucket.get_all_keys(max_keys=max_keys,
                                           marker=result_marker,
                                           prefix=src_path or '')
            if len(keys) == 0:
                break
            for k in keys:
                i += 1
                q.put(k.key)
            if args.verbose:
                print 'Added %s keys to queue' % len(keys)
            if len(keys) < max_keys:
                if args.verbose:
                    print 'All items now in queue'
                break
            result_marker = keys[max_keys - 1].key
            while q.qsize() > (q.maxsize - max_keys):
                time.sleep(1)  # sleep if our queue is getting too big for the next set of keys
        except BaseException:
            logging.exception('error during fetch, quitting')
            break

    if args.verbose:
        print 'Waiting for queue to be completed'
    q.join()

    if args.verbose:
        print
        print 'Done'
        print
Author: ChronusAdmin, Project: s3_bucket_to_bucket_copy_py, Lines: 74, Source: s3_bucket_to_bucket_copy.py

Example 8: str

# Required import: from Queue import LifoQueue [as alias]
# Or: from Queue.LifoQueue import join [as alias]
  q.put('http://www.proxy4free.com/list/webproxy'+ str(i+1) +'.html')

def downloader(queue,proxies):
  while True:
    url = queue.get()
    r = requests.get(url)
    if r.status_code == 200:
      parsed_html = BeautifulSoup(r.content)
      table = parsed_html.body.find('table',attrs={'class':'table table-striped proxy-list'})
      table_body = table.find('tbody')

      rows = table_body.find_all('tr')
      print url + ' was fetched\n'
      for row in rows:
        proxy_tmp = {}
        ele_td = row.find("td",class_="first nsb")
        proxy_tmp['http'] = 'http://' + ele_td.text + ':80'
        if test_proxy(proxy_tmp):
            proxies.insert(proxy_tmp)
        else:
            print proxy_tmp['http'] + ' does not work, ignore it'
    queue.task_done()

if __name__ == '__main__':
    for i in range(max_threads):
      worker = Thread(target=downloader, args=(q,proxies))
      worker.setDaemon(True)
      worker.start()
    q.join()
    client.close()
Author: codingo, Project: pyreal, Lines: 32, Source: find_proxy.py
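The snippet calls a test_proxy helper that is not shown on this page. A plausible minimal version, assuming it simply probes a known URL through the candidate proxy (the test URL and timeout are illustrative, not from the project):

import requests

def test_proxy(proxy, test_url='http://httpbin.org/ip', timeout=5):
    # Return True if the proxy answers a simple GET within the timeout.
    try:
        r = requests.get(test_url, proxies=proxy, timeout=timeout)
        return r.status_code == 200
    except requests.RequestException:
        return False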

Example 9: TileProvider

# Required import: from Queue import LifoQueue [as alias]
# Or: from Queue.LifoQueue import join [as alias]
class TileProvider( QObject ):
    THREAD_HEARTBEAT = 0.2

    Tile = collections.namedtuple('Tile', 'id qimg rectF progress tiling')
    sceneRectChanged = pyqtSignal( QRectF )


    '''TileProvider __init__

    Keyword Arguments:
    cache_size                -- maximal number of encountered stacks
                                 to cache, i.e. slices if the imagesources
                                 draw from slicesources (default 10)
    request_queue_size        -- maximal number of requests to queue up (default 100000)
    n_threads                 -- maximal number of request threads; this determines the
                                 maximal number of simultaneously running requests
                                 to the pixelpipeline (default: 2)
    layerIdChange_means_dirty -- layerId changes invalidate the cache; by default only
                                 stackId changes do that (default False)
    parent                    -- QObject

    '''

    @property
    def axesSwapped(self):
        return self._axesSwapped

    @axesSwapped.setter
    def axesSwapped(self, value):
        self._axesSwapped = value

    def __init__( self, tiling, stackedImageSources, cache_size=100,
                  request_queue_size=100000, n_threads=2,
                  layerIdChange_means_dirty=False, parent=None ):
        QObject.__init__( self, parent = parent )

        self.tiling = tiling
        self.axesSwapped = False
        self._sims = stackedImageSources
        self._cache_size = cache_size
        self._request_queue_size = request_queue_size
        self._n_threads = n_threads
        self._layerIdChange_means_dirty = layerIdChange_means_dirty

        self._current_stack_id = self._sims.stackId
        self._cache = _TilesCache(self._current_stack_id, self._sims,
                                  maxstacks=self._cache_size)

        self._dirtyLayerQueue = LifoQueue(self._request_queue_size)
        self._prefetchQueue = Queue(self._request_queue_size)

        self._sims.layerDirty.connect(self._onLayerDirty)
        self._sims.visibleChanged.connect(self._onVisibleChanged)
        self._sims.opacityChanged.connect(self._onOpacityChanged)
        self._sims.sizeChanged.connect(self._onSizeChanged)
        self._sims.orderChanged.connect(self._onOrderChanged)
        self._sims.stackIdChanged.connect(self._onStackIdChanged)
        if self._layerIdChange_means_dirty:
            self._sims.layerIdChanged.connect(self._onLayerIdChanged)

        self._keepRendering = True

        self._dirtyLayerThreads = [Thread(target=self._dirtyLayersWorker)
                                   for i in range(self._n_threads)]
        for thread in self._dirtyLayerThreads:
            thread.daemon = True
        [ thread.start() for thread in self._dirtyLayerThreads ]

    def getTiles( self, rectF ):
        '''Get tiles in rect and request a refresh.

        Returns tiles intersecting with rectF immediately and requests
        a refresh of these tiles. Next time you call this function the
        tiles may be already (partially) updated. If you want to wait
        until the rendering is fully complete, call join().

        '''
        self.requestRefresh( rectF )
        tile_nos = self.tiling.intersected( rectF )
        stack_id = self._current_stack_id
        for tile_no in tile_nos:
            qimg, progress = self._cache.tile(stack_id, tile_no)
            yield TileProvider.Tile(
                tile_no,
                qimg,
                QRectF(self.tiling.imageRects[tile_no]),
                progress,
                self.tiling)

    def requestRefresh( self, rectF ):
        '''Requests tiles to be refreshed.

        Returns immediately. Call join() to wait for
        the end of the rendering.

        '''
        tile_nos = self.tiling.intersected( rectF )
        for tile_no in tile_nos:
            stack_id = self._current_stack_id
            self._refreshTile( stack_id, tile_no )
#......... part of the code omitted here .........
Author: buotex, Project: volumina, Lines: 103, Source: tiling.py

Example 10: copy_bucket

# Required import: from Queue import LifoQueue [as alias]
# Or: from Queue.LifoQueue import join [as alias]
def copy_bucket(aws_key, aws_secret_key, src, dst):
    max_keys = 1000

    conn = S3Connection(aws_key, aws_secret_key)
    try:
        (src_bucket_name, src_path) = src.split('/', 1)
    except ValueError:
        src_bucket_name = src
        src_path = None
    try:
        (dst_bucket_name, dst_path) = dst.split('/', 1)
    except ValueError:
        dst_bucket_name = dst
        dst_path = None
    if dst_path is not None:
        raise ValueError("not currently implemented to set dest path; must use default, which will mirror the source")
    src_bucket = conn.get_bucket(src_bucket_name)

    print
    print 'Start copy of %s to %s' % (src, dst)
    print

    result_marker = ''
    q = LifoQueue(maxsize=5000)

    for i in range(20):
        print 'Adding worker thread %s for queue processing' % i
        t = Worker(q, i, aws_key, aws_secret_key,
                   src_bucket_name, dst_bucket_name,
                   src_path, dst_path)
        t.daemon = True
        t.start()

    i = 0

    while True:
        print 'm (%s): Fetch next %s, backlog currently at %s, have done %s' % (src_path, max_keys, q.qsize(), i)
        try:
            keys = src_bucket.get_all_keys(max_keys=max_keys,
                                           marker=result_marker,
                                           prefix=src_path or '')
            if len(keys) == 0:
                break
            for k in keys:
                i += 1
                q.put(k.key)
            # print 'Added %s keys to queue' % len(keys)
            if len(keys) < max_keys:
                print 'All items now in queue'
                break
            result_marker = keys[max_keys - 1].key
            while q.qsize() > (q.maxsize - max_keys):
                time.sleep(1)  # sleep if our queue is getting too big for the next set of keys
        except BaseException:
            logging.exception('error during fetch, quitting')
            break

    print 'm (%s): Waiting for queue to be completed' % (src_path)
    q.join()
    print
    print 'm (%s): Done' % (src_path)
    print
Author: neelakanta, Project: s3_bucket_to_bucket_copy_py, Lines: 64, Source: s3_bucket_to_bucket_copy.py

Example 11: queueMsg

# Required import: from Queue import LifoQueue [as alias]
# Or: from Queue.LifoQueue import join [as alias]

        if args.debug:
            queueMsg("\"max\", \"file\", \"dir\", \"results\"")
        # lets just hang back and wait for the queues to empty
        print "If you need to pause this job, press Ctrl-C once"
        time.sleep(1)
        while not terminateThreads:
            if args.debug:
                queueMsg("\"%s\", \"%s\", \"%s\", \"%s\"\n"%(args.queueParams['max'], fileQueue.qsize(), dirQueue.qsize(), resultsQueue.qsize()))
            time.sleep(.1)
            
            if fileQueue.empty() and dirQueue.empty():
                queueMsg("\"%s\", \"%s\", \"%s\", \"%s\"\n"%(args.queueParams['max'], fileQueue.qsize(), dirQueue.qsize(), resultsQueue.qsize()))
                print "waiting for directory queue to clear..."
                dirQueue.join()
                print "waiting for file queue to clear..."
                fileQueue.join()
                print "waiting for worker processes to complete..."
                terminateThreads = True
                print "waiting for results queue to clear..."
                resultsQueue.join()
                print "exporting statistics..."
                exportStats()
                print "closing files..."
                for file in fileHandles:
                    fileHandles[file].close()
                print "cleaning up process files..."
                cleanup()
                exit(1)
    except KeyboardInterrupt:
Author: Arkivum, Project: ArkCargo, Lines: 32, Source: mkcargo.py

Example 12: B2BucketThreadedLocal

# Required import: from Queue import LifoQueue [as alias]
# Or: from Queue.LifoQueue import join [as alias]
class B2BucketThreadedLocal(B2Bucket): 
    def __init__(self, *args):
        super(B2BucketThreadedLocal, self).__init__(*args)
        
        num_threads=50
        self.queue = LifoQueue(num_threads*2)
        
        self.file_locks = defaultdict(Lock)
        
        self.running = True
        
        self.threads = []
        print "Thread ",
        for i in xrange(num_threads):
            t = threading.Thread(target=self._file_updater)
            t.start()
            self.threads.append(t)
            
            print ".",
            
        print 
        
        self.pre_queue_lock = Lock()
        self.pre_queue_running = True
        self.pre_queue = LifoQueue(num_threads*2)
        
        self.pre_file_dict = {}
        
        self.pre_thread = threading.Thread(target=self._prepare_update)
        self.pre_thread.start()
        
        
    
    def _prepare_update(self):
        while self.pre_queue_running:
            try:
                filename, local_filename, operation  = self.pre_queue.get(True,1)
                self.pre_file_dict[filename] = (time(), local_filename, operation)
                self.pre_queue.task_done()
            except Empty:
                for filename, (timestamp, local_filename, operation) in self.pre_file_dict.items():
                    if time()-timestamp > 15:
                        self.queue.put((filename, local_filename, operation))
                    
                        del self.pre_file_dict[filename]
        
        for filename, (timestamp, local_filename, operation) in self.pre_file_dict.items():
            self.queue.put((filename, local_filename, operation))
            del self.pre_file_dict[filename]
            
    def _file_updater(self):
        while self.running:
            try:
                filename, local_filename, operation  = self.queue.get(True,1)
            except Empty:
                continue
            
            
            with self.file_locks[filename]:
                if operation == "deletion":
                    super(B2BucketThreadedLocal, self)._delete_file(filename)
                    self.queue.task_done()
                    
                elif operation == "upload":
                    super(B2BucketThreadedLocal, self)._put_file(filename, local_filename)
                    self.queue.task_done()
                    
                elif operation == "download":
                    super(B2BucketThreadedLocal, self)._get_file(filename, local_filename)
                    self.queue.task_done()
                    
                else:
                    self.logger.error("Invalid operation %s on %s" % (operation, filename))
                
            
    
    def __enter__(self):
        return self
        
    def __exit__(self, *args, **kwargs):
        self.logger.info("Waiting for all B2 requests to complete")
        
        self.logger.info("Pre-Queue contains %s elements", self.pre_queue.qsize())
        self.pre_queue.join()
        
        self.logger.info("Joining pre queue thread")
        self.pre_queue_running = False
        self.pre_thread.join()
        
        self.logger.info("Queue contains %s elements", self.queue.qsize())
        self.queue.join()
        
        self.logger.info("Joining threads")
        self.running = False
        for t in self.threads:
            t.join()
            
            
    def put_file(self, filename, local_filename):
        with self.pre_queue_lock:
#......... part of the code omitted here .........
Author: Linutux, Project: b2_fuse, Lines: 103, Source: b2bucket_threaded.py


Note: The Queue.LifoQueue.join examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. For redistribution and use, refer to the corresponding project's license; do not reproduce without permission.