

Python LifoQueue.qsize Method Code Examples

This article collects typical usage examples of the Queue.LifoQueue.qsize method in Python. If you have been wondering what LifoQueue.qsize does, how to call it, or where to see it used in practice, the curated examples below should help. You can also explore the broader usage of the Queue.LifoQueue class that the method belongs to.


The 13 code examples of LifoQueue.qsize below are sorted by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Python code examples.
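
Before the longer examples, here is a minimal sketch of the method itself: qsize() returns the approximate number of items currently in the queue (approximate because other threads may put or get items concurrently), and a LifoQueue hands back the most recently added item first.

from Queue import LifoQueue

q = LifoQueue(maxsize=5)
for i in range(3):
    q.put(i)

print q.qsize()  # 3 (approximate if other threads are active)
print q.get()    # 2 -- LIFO: the most recently put item comes out first
print q.qsize()  # 2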

Example 1: copyBucket

# Required import: from Queue import LifoQueue [as alias]
# Alternatively: from Queue.LifoQueue import qsize [as alias]
def copyBucket(maxKeys=1000):
    print 'start'

    s_conn = S3Connection(source_aws_key, source_aws_secret_key)
    srcBucket = s_conn.get_bucket(srcBucketName)

    resultMarker = ''
    q = LifoQueue(maxsize=5000)

    for i in range(10):
        print 'adding worker'
        t = Worker(q)
        t.daemon = True
        t.start()

    while True:
        print 'fetch next 1000, backlog currently at %i' % q.qsize()
        keys = srcBucket.get_all_keys(max_keys=maxKeys, marker=resultMarker)
        for k in keys:
            q.put(k.key)
        if len(keys) < maxKeys:
            print 'Done'
            break
        resultMarker = keys[maxKeys - 1].key

    q.join()
    print 'done'
Developer: yerachw, Project: S3Copy, Lines: 29, Source: s3copy.py
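
The Worker class used above is not included in the snippet. A minimal sketch of what such a consumer presumably looks like (the copy_key helper below is a placeholder, not part of the original project):

import threading

def copy_key(key_name):
    # Placeholder for the actual S3 copy logic (assumption, not from S3Copy)
    print 'copying %s' % key_name

class Worker(threading.Thread):
    """Hypothetical consumer: pops key names off the shared LifoQueue."""
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        while True:
            key_name = self.queue.get()  # blocks until a key is available
            try:
                copy_key(key_name)
            finally:
                self.queue.task_done()   # lets q.join() in copyBucket return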

Example 2: copy_job

# Required import: from Queue import LifoQueue [as alias]
# Alternatively: from Queue.LifoQueue import qsize [as alias]
 def copy_job(self, max_keys=1000):
     logging.info( 'start copy_bucket' )
     src = self.job['source']
     tgt = self.job['target']
     
     conn = self.get_conn( tgt['owner'] )
     srcBucket = conn.get_bucket( src['bucket'] )
     tgtBucket = conn.get_bucket( tgt['bucket'] )
     
     if self.job['options']['allow-acl-change']:
         ownerBucketView = self.get_conn( src['owner'] ).get_bucket( src['bucket'] )
         ownerID = self.users[ tgt['owner'] ]['canonical-id']
     else:
         ownerBucketView = None
         ownerID = None
     resultMarker = ''
     q = LifoQueue(maxsize=5000)
     for i in range(self.parallel):
         logging.info( 'adding worker %d' % i )
         t = BucketCopyWorker(q, srcBucket, tgtBucket, src['key-prefix'], tgt['key-prefix'],  ownerBucketView, ownerID)
         t.daemon = True
         t.start()
     while True:
         logging.info( 'fetch next 1000, backlog currently at %i' % q.qsize() )
         keys = srcBucket.get_all_keys( prefix=src['key-prefix'], max_keys=max_keys, marker = resultMarker)
         for k in keys:
             q.put(k.key)
         if len(keys) < max_keys:
             logging.info('Done')
             break
         resultMarker = keys[max_keys - 1].key
     q.join()
     logging.info( 'done copy_bucket' )
Developer: uhjish, Project: bucketbrigade, Lines: 35, Source: bucketbrigade.py

Example 3: Plan

# Required import: from Queue import LifoQueue [as alias]
# Alternatively: from Queue.LifoQueue import qsize [as alias]
    def Plan(self, start_config, goal_config):
        start_time = time.time()
        if self.visualize and hasattr(self.planning_env, "InitializePlot"):
            self.planning_env.InitializePlot(goal_config)

        plan = []
        # TODO: Here you will implement the depth first planner
        #  The return path should be a numpy array
        #  of dimension k x n where k is the number of waypoints
        #  and n is the dimension of the robots configuration space
        q = LifoQueue()
        start_id = self.planning_env.discrete_env.ConfigurationToNodeId(start_config)
        goal_id = self.planning_env.discrete_env.ConfigurationToNodeId(goal_config)
        found = False
        q.put(start_id)
        explored = [start_id]
        backtrack = {}
        backtrack[start_id] = None
        n = 0
        while (q.qsize() > 0) and not found:
            current = q.get()
            successors = self.planning_env.GetSuccessors(current)
            for successor in successors:
                if not successor in backtrack:
                    n = n+1
                    q.put(successor)
                    #explored.append(successor)
                    backtrack[successor] = current
                    if self.visualize: 
                        s = self.planning_env.discrete_env.NodeIdToConfiguration(successor)
                        c = self.planning_env.discrete_env.NodeIdToConfiguration(current)
                        self.planning_env.PlotEdge(c,s)
                    if successor == goal_id:
                        found = True
                        break
        # Shortest Path
        path = []
        path.append(self.planning_env.discrete_env.NodeIdToConfiguration(goal_id))
        element = backtrack[goal_id]
        while element is not None:
            path.append(self.planning_env.discrete_env.NodeIdToConfiguration(element))
            element = backtrack[element]
        plan = path[::-1]

        if self.visualize: 
            for i in range(len(path) - 1):
                self.planning_env.PlotRedEdge(path[i],path[i+1])
        print "number of nodes"
        print n
        print "time (in seconds):" 
        print time.time()- start_time
        path_length = 0
        for i in range(len(path) - 1):
            path_length = path_length + self.planning_env.ComputeDistance(self.planning_env.discrete_env.ConfigurationToNodeId(path[i]), self.planning_env.discrete_env.ConfigurationToNodeId(path[i+1]))
            
        print "path path_length"
        print path_length
        return plan
Developer: rushat, Project: RobotAutonomyHW3, Lines: 60, Source: DepthFirstPlanner.py
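
The choice of queue is what makes this planner depth-first: with a LifoQueue the most recently discovered node is expanded next, while swapping in a FIFO Queue would make the identical loop breadth-first. A toy illustration of the difference on a hypothetical adjacency dict:

from Queue import LifoQueue, Queue

graph = {1: [2, 3], 2: [4], 3: [4], 4: []}  # hypothetical toy graph

def traverse(frontier):
    seen, order = {1}, []
    frontier.put(1)
    while frontier.qsize() > 0:
        node = frontier.get()
        order.append(node)
        for nxt in graph[node]:
            if nxt not in seen:
                seen.add(nxt)
                frontier.put(nxt)
    return order

print traverse(LifoQueue())  # depth-first:   [1, 3, 4, 2]
print traverse(Queue())      # breadth-first: [1, 2, 3, 4]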

Example 4: copy_bucket

# Required import: from Queue import LifoQueue [as alias]
# Alternatively: from Queue.LifoQueue import qsize [as alias]
def copy_bucket(aws_key, aws_secret_key, src_bucket_name, dst_bucket_name):
    print
    print 'Start copy of %s to %s' % (src_bucket_name, dst_bucket_name)
    print
    max_keys = 1000

    conn = S3Connection(aws_key, aws_secret_key)
    srcBucket = conn.get_bucket(src_bucket_name)

    result_marker = ''
    q = LifoQueue(maxsize=5000)

    for i in range(20):
        print 'Adding worker thread %s for queue processing' % i
        t = Worker(q, i, aws_key, aws_secret_key, src_bucket_name, dst_bucket_name)
        t.daemon = True
        t.start()

    i = 0

    while True:
        print 'Fetch next %s, backlog currently at %s, have done %s' % (max_keys, q.qsize(), i)
        try:
            keys = srcBucket.get_all_keys(max_keys=max_keys, marker=result_marker)
            if len(keys) == 0:
                break
            for k in keys:
                i += 1
                q.put(k.key)
            print 'Added %s keys to queue' % len(keys)
            if len(keys) < max_keys:
                print 'All items now in queue'
                break
            result_marker = keys[max_keys - 1].key
            while q.qsize() > (q.maxsize - max_keys):
                time.sleep(1)  # sleep if our queue is getting too big for the next set of keys
        except BaseException:
            logging.exception('error during fetch, quitting')
            break

    print 'Waiting for queue to be completed'
    q.join()
    print
    print 'Done'
    print
Developer: WorkFu, Project: s3_bucket_to_bucket_copy_py, Lines: 47, Source: s3_bucket_to_bucket_copy.py
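
The throttling loop near the end is worth isolating: whenever fewer than max_keys free slots remain, the producer sleeps, so the next page of keys can always be enqueued without blocking on a full queue. The same backpressure pattern as a standalone sketch (names here are illustrative, not from the original script):

import time
from Queue import LifoQueue

def enqueue_batch(queue, items):
    # Wait until a whole batch fits before enqueueing any of it, so the
    # producer never blocks halfway through a page of results.
    while queue.qsize() > (queue.maxsize - len(items)):
        time.sleep(1)
    for item in items:
        queue.put(item)

q = LifoQueue(maxsize=5000)
enqueue_batch(q, ['key-%d' % n for n in range(1000)])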

Example 5: graham_scan

# Required import: from Queue import LifoQueue [as alias]
# Alternatively: from Queue.LifoQueue import qsize [as alias]
def graham_scan(points):
    """

    :param points: numpy array of 2-dimensional points
    :return: Convex hull as another numpy array of points
    """
    ch = LifoQueue()
    leftmost = points[np.argmin(points[:, 0])]  # finding the leftmost point... definitely in CH

    dtype = [('x', np.float64), ('y', np.float64), ('slope', np.float64)]  # preparing a nicer object for sorting
    cpts = np.zeros(len(points) - 1, dtype=dtype)
    cpts[:]['x'] = points[1:, 0]
    cpts[:]['y'] = points[1:, 1]
    cpts[:]['slope'] = (cpts[:]['y'] - leftmost[1]) / (cpts[:]['x'] - leftmost[0])  # angle <-> slope from leftmost

    sorted_pts = np.sort(cpts, order=['slope', 'x'])  # sort by angle (slope), then distance from leftmost
                                                      # shows which points are colinear

    mask = np.ones(len(sorted_pts), dtype=bool)  # getting rid of points with same angle from leftmost
    for i in range(1, len(sorted_pts)):
        mask[i - 1] = sorted_pts[i - 1]['slope'] != sorted_pts[i]['slope']  # only keep the farthest away
    sorted_pts = sorted_pts[mask]

    sorted_pts[:] = sorted_pts[::-1]  # sort greatest slope to lowest (move clockwise)

    pts = np.zeros((len(sorted_pts) + 1, 2))  # putting leftmost back into a new array object
    pts[1:, 0] = sorted_pts[:]['x']
    pts[1:, 1] = sorted_pts[:]['y']
    pts[0] = leftmost

    ch.put(pts[0])  # leftmost and the point with the highest slope are in the CH for sure
    ch.put(pts[1])
    for i, pt in enumerate(pts):
        if i < 2:
            continue
        else:
            last = ch.get()
            second_to_last = ch.get()
            side = which_side(second_to_last, pts[i], last)  # Less than 0 => on the left, o/w on the right
            while side > 0:  # if last point put in on right side, it must have been wrong to be in CH
                last = second_to_last
                second_to_last = ch.get()
                side = which_side(second_to_last, pts[i], last)
            ch.put(second_to_last)
            ch.put(last)
            ch.put(pt)

    return np.array([ch.get() for i in range(ch.qsize())])  # Put the queue into an array
Developer: idelbrid, Project: Algorithms, Lines: 51, Source: convex_hull.py
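
The which_side helper is not shown. A plausible implementation is the standard 2-D cross-product orientation test (a sketch under that assumption; the sign convention has to agree with the side > 0 check above, so verify it against the argument order used in the scan):

def which_side(p, q, r):
    """Sign of the cross product (q - p) x (r - p): one sign means r lies to
    the left of the line p->q, the other means right, zero means collinear.
    Assumed helper -- not part of the original convex_hull.py snippet."""
    return (q[0] - p[0]) * (r[1] - p[1]) - (q[1] - p[1]) * (r[0] - p[0])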

Example 6: iterative

# Required import: from Queue import LifoQueue [as alias]
# Alternatively: from Queue.LifoQueue import qsize [as alias]
     def iterative(path, path_data):
         stack = LifoQueue()
         while True:
             if not (isinstance(path_data, dict) and path_data):
                 changes.append(self.store_one(path, path_data))
             else:
                 for node in path_data:
                     node_path = path + '/' + node
                     node_data = path_data[node]

                     change = self.store_one(node_path, node_data)
                     changes.append(change)

                     if isinstance(node_data, dict):
                         stack.put([node_path, node_data])
             if stack.qsize():
                 path, path_data = stack.get()
                 continue
             break
Developer: abehaskins, Project: python-firebasin, Lines: 21, Source: structure.py
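
A self-contained version of the same idea may make it easier to follow: flattening a nested dict into path/value pairs with an explicit LifoQueue instead of recursion (store_one and the enclosing changes list are replaced here by a plain result list; this is a sketch, not the original class):

from Queue import LifoQueue

def flatten(path, data):
    stack, out = LifoQueue(), []
    stack.put((path, data))
    while stack.qsize():
        path, data = stack.get()
        if not (isinstance(data, dict) and data):
            out.append((path, data))  # leaf or empty dict: record and stop
        else:
            for node, node_data in data.items():
                node_path = path + '/' + node
                out.append((node_path, node_data))
                if isinstance(node_data, dict):
                    stack.put((node_path, node_data))
    return out

print flatten('root', {'a': 1, 'b': {'c': 2}})
# [('root/a', 1), ('root/b', {'c': 2}), ('root/b/c', 2)]  (order may vary)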

Example 7: process_transaction

# Required import: from Queue import LifoQueue [as alias]
# Alternatively: from Queue.LifoQueue import qsize [as alias]
 def process_transaction(self, transaction_id):
     stack = LifoQueue()
     tasks = self.storage.get_tasks(transaction_id)
     logger.debug(tasks)
     for i, task in enumerate(tasks):
         try:
             task = Task(task)
             task.run()
             self.storage.set_task_processed(transaction_id, i, True)
             stack.put(task)
         except:
             logger.critical(format_exc())
             self.storage.set_task_processed(transaction_id, i, False)
             while stack.qsize():
                 task = stack.get()
                 task.reverse()
             return {
                 'error': True,
                 'processed': i,
             }
     return {
         'success': True
     }
Developer: toudi, Project: katana, Lines: 25, Source: abstract.py
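
The LIFO queue is what makes the rollback correct: completed tasks are undone in the reverse of their execution order. A stripped-down illustration of the pattern, with a hypothetical Step class standing in for Task:

from Queue import LifoQueue

class Step(object):
    """Hypothetical stand-in for Task: run() may fail, reverse() undoes it."""
    def __init__(self, name, fail=False):
        self.name, self.fail = name, fail
    def run(self):
        if self.fail:
            raise RuntimeError(self.name)
        print 'ran %s' % self.name
    def reverse(self):
        print 'undid %s' % self.name

done = LifoQueue()
for step in [Step('a'), Step('b'), Step('c', fail=True)]:
    try:
        step.run()
        done.put(step)
    except RuntimeError:
        while done.qsize():   # unwind in reverse order: b first, then a
            done.get().reverse()
        break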

Example 8: Gazzle

# Required import: from Queue import LifoQueue [as alias]
# Alternatively: from Queue.LifoQueue import qsize [as alias]
class Gazzle(object):
	def __init__(self, *args, **kwargs):
		self.sockets = []

		mongo_client = MongoClient('localhost', 27017)
		self.mongo = mongo_client['gazzle']
		# self.mongo.drop_collection('pages')
		self.pages = self.mongo['pages']

		self._init_whoosh()

		self.pageset = {}
		self.crawl_thread_count = kwargs.get('crawl_threads', 3)
		self.pending_crawls = 0
		self.pending_lock = threading.RLock()
		self.frontier = Queue()
		self.crawlCount = 0
		self.crawling = False
		self.crawl_cond = threading.Condition()
		self.crawl_lock = threading.RLock()
		self._init_crawl()

		self.index_set = set()
		self.index_q = LifoQueue()
		self.index_altq = LifoQueue()
		self.index_alt_switchoff = False
		self.indexing = False
		self.index_cond = threading.Condition()
		self.index_lock = threading.RLock()
		self._init_index()
		self._index_size()

		self.crosssite_crawl = False

		self.pagerank_cond = threading.Condition()
		self._start_thread(target = self._crawl, count = self.crawl_thread_count)
		self._start_thread(target = self._index, count = 1) # index writer doesn't support multithreading
		self._start_thread(target = self._pagerank, count = 1)
		self._start_thread(target = self._assert_thread, count=1)


	def _init_crawl(self):
		self.pageset = {}
		self.frontier = Queue()
		for page in self.pages.find():
			self.pageset[page['url']] = page['page_id']
		for page in self.pages.find({'crawled': False}):
			self.frontier.put(page['page_id'])
		self.crawlCount = self.pages.find({'crawled': True}).count()
		print('Added %d pages to page set' % len(self.pageset))
		print('Added %d pages to frontier' % self.frontier.qsize())
		print('Crawl count set to %d' % self.crawlCount)

	def _init_index(self):
		self.index_set = set()
		self.index_q = LifoQueue()
		for page in self.pages.find({'indexed': True}):
			self.index_set.add(page['page_id'])
		for page in self.pages.find({'crawled':True, 'indexed': False}):
			self.index_q.put(page['page_id'])
		print('Added %d pages to index set' % len(self.index_set))
		print('Added %d pages to index queue' % self.index_q.qsize())

	def _init_whoosh(self, clear=False):
		schema = Schema(page_id=STORED, title=TEXT(stored=True), content=TEXT, url=ID(stored=True))
		if not os.path.exists("index"):
			os.mkdir("index")
			clear = True
		if clear:
			self.index = create_in('index', schema)
		else:
			self.index = open_dir("index")


	def _assert_thread(self):
		while True:
			a = self.pages.find_one({'crawled': True, 'title': {'$exists': False}})
			assert a is None, 'Found inconsistent page in db ID: %d URL: %s' % (a['page_id'], a['url'])
			time.sleep(1)

	def _pagerank(self):
		while True:
			with self.pagerank_cond:
				self.pagerank_cond.wait()
			pages = self.pages.find({'crawled': True, 'indexed': True}, {
				'_id':False,
				'content': False,
				'links.url': False
			})

			RANK_SCALE = 1
			ALPHA = 0.25

			page_count = pages.count()

			id_to_ind = {}
			ind_to_id = []
			for page in pages:
				ind = len(id_to_ind)
				ind_to_id.append(page['page_id'])
#......... the rest of this code is omitted .........
Developer: ha-D, Project: Gazzle, Lines: 103, Source: gazzle.py

Example 9: ThreadPool

# Required import: from Queue import LifoQueue [as alias]
# Alternatively: from Queue.LifoQueue import qsize [as alias]
class ThreadPool(object):
    def __init__(self, threadNum, max_tasks_per_period=10, seconds_per_period=30):
        self.pool = []  # the thread pool
        self.threadNum = threadNum  # number of threads
        self.runningLock = Lock()  # lock protecting the running counter
        self.taskLock = Lock()  # lock for the getTask function
        self.running = 0  # number of threads currently running

        # Use a LIFO queue: after the first page of a post is fetched, all of
        # its follow-up comment pages are added next, and LIFO ordering
        # guarantees the first post's comments are crawled and stored as soon
        # as possible.
        self.taskQueue = LifoQueue()  # task queue

        # maximum number of requests allowed within one period
        self.max_tasks_per_period = max_tasks_per_period
        # configurable number of seconds that make up one period
        self.seconds_per_period = seconds_per_period
        # number of pages already visited in the current period
        self.currentPeriodVisits = 0
        # treat each period as one visiting window and record when it started
        self.periodStart = time.time()  # initialized with the current time

    def startThreads(self):
        """Create a certain number of threads and started to run 
        All Workers share the same ThreadPool
        """
        # start the current crawling period
        self.periodStart = time.time()
        for i in range(self.threadNum):
            self.pool.append(Worker(self, i))

    def stopThreads(self):
        for thread in self.pool:
            thread.stop()
            thread.join()
        del self.pool[:]

    def putTask(self, func, *args, **kargs):
        self.taskQueue.put((func, args, kargs))

    def getTask(self, *args, **kargs):
        # Rate limiting: check whether the number of pages visited in the
        # current period has already reached the maximum
        if self.currentPeriodVisits >= self.max_tasks_per_period - 2:
            timeNow = time.time()
            seconds = timeNow - self.periodStart
            if seconds < self.seconds_per_period:  # period not over yet, so sleep
                remain = self.seconds_per_period - seconds
                print "ThreadPool Waiting for " + str(remain) + " seconds."
                time.sleep(int(remain + 1))

            self.periodStart = time.time()  # reset the period start time
            self.currentPeriodVisits = 0

        try:
            # task = self.taskQueue.get(*args, **kargs)
            task = self.taskQueue.get_nowait()
        except Empty:
            return (None, None, None)

        self.currentPeriodVisits += 1

        return task

    def taskJoin(self, *args, **kargs):
        """Queue.join: Blocks until all items in the queue have been gotten and processed.
        """
        self.taskQueue.join()

    def taskDone(self, *args, **kargs):
        self.taskQueue.task_done()

    def increaseRunsNum(self):
        self.runningLock.acquire()
        self.running += 1  # one more thread is now running
        self.runningLock.release()

    def decreaseRunsNum(self):
        self.runningLock.acquire()
        self.running -= 1
        self.runningLock.release()

    def getTaskLeft(self):
        # The pool's outstanding tasks comprise: tasks in taskQueue that have
        # not yet been processed, tasks finished but not yet taken out of
        # resultQueue, and tasks currently running -- so the total is their sum.
        return self.taskQueue.qsize() + self.running
Developer: hitalex, Project: tianya-forum-crawler, Lines: 87, Source: threadPool.py
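
The Worker class consumed by this pool is not shown. A hypothetical usage sketch, assuming each Worker repeatedly calls getTask() and executes the (func, args, kargs) tuples it receives:

# Hypothetical usage; Worker (not shown) is assumed to loop over pool.getTask().
pool = ThreadPool(threadNum=5, max_tasks_per_period=10, seconds_per_period=30)
pool.startThreads()

def fetch(url):
    print 'fetching %s' % url  # placeholder for the real page download

for page in range(20):
    pool.putTask(fetch, 'http://example.com/post?page=%d' % page)

pool.taskJoin()    # block until every queued task has been processed
pool.stopThreads()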

Example 10: copy_bucket

# Required import: from Queue import LifoQueue [as alias]
# Alternatively: from Queue.LifoQueue import qsize [as alias]
def copy_bucket(aws_key, aws_secret_key, args):
    max_keys = 1000

    src = args.src_bucket
    dst = args.dest_bucket

    conn = S3Connection(aws_key, aws_secret_key)
    try:
        (src_bucket_name, src_path) = src.split('/', 1)
    except ValueError:
        src_bucket_name = src
        src_path = None
    try:
        (dst_bucket_name, dst_path) = dst.split('/', 1)
    except ValueError:
        dst_bucket_name = dst
        dst_path = None
    src_bucket = conn.get_bucket(src_bucket_name)

    if args.verbose:
        print
        print 'Start copy of %s to %s' % (src, dst)
        print

    result_marker = ''
    q = LifoQueue(maxsize=5000)

    for i in xrange(args.threads_no):
        if args.verbose:
            print 'Adding worker thread %s for queue processing' % i
        t = Worker(q, i, aws_key, aws_secret_key,
                   src_bucket_name, dst_bucket_name,
                   src_path, dst_path, args)
        t.daemon = True
        t.start()

    i = 0

    while True:
        if args.verbose:
            print 'Fetch next %s, backlog currently at %s, have done %s' % \
                (max_keys, q.qsize(), i)
        try:
            keys = src_bucket.get_all_keys(max_keys=max_keys,
                                           marker=result_marker,
                                           prefix=src_path or '')
            if len(keys) == 0:
                break
            for k in keys:
                i += 1
                q.put(k.key)
            if args.verbose:
                print 'Added %s keys to queue' % len(keys)
            if len(keys) < max_keys:
                if args.verbose:
                    print 'All items now in queue'
                break
            result_marker = keys[max_keys - 1].key
            while q.qsize() > (q.maxsize - max_keys):
                time.sleep(1)  # sleep if our queue is getting too big for the next set of keys
        except BaseException:
            logging.exception('error during fetch, quitting')
            break

    if args.verbose:
        print 'Waiting for queue to be completed'
    q.join()

    if args.verbose:
        print
        print 'Done'
        print
Developer: ChronusAdmin, Project: s3_bucket_to_bucket_copy_py, Lines: 74, Source: s3_bucket_to_bucket_copy.py

Example 11: copy_bucket

# Required import: from Queue import LifoQueue [as alias]
# Alternatively: from Queue.LifoQueue import qsize [as alias]
def copy_bucket(aws_key, aws_secret_key, src, dst):
    max_keys = 1000

    conn = S3Connection(aws_key, aws_secret_key)
    try:
        (src_bucket_name, src_path) = src.split('/', 1)
    except ValueError:
        src_bucket_name = src
        src_path = None
    try:
        (dst_bucket_name, dst_path) = dst.split('/', 1)
    except ValueError:
        dst_bucket_name = dst
        dst_path = None
    if dst_path is not None:
        raise ValueError("not currently implemented to set dest path; must use default, which will mirror the source")
    src_bucket = conn.get_bucket(src_bucket_name)

    print
    print 'Start copy of %s to %s' % (src, dst)
    print

    result_marker = ''
    q = LifoQueue(maxsize=5000)

    for i in range(20):
        print 'Adding worker thread %s for queue processing' % i
        t = Worker(q, i, aws_key, aws_secret_key,
                   src_bucket_name, dst_bucket_name,
                   src_path, dst_path)
        t.daemon = True
        t.start()

    i = 0

    while True:
        print 'm (%s): Fetch next %s, backlog currently at %s, have done %s' % (src_path, max_keys, q.qsize(), i)
        try:
            keys = src_bucket.get_all_keys(max_keys=max_keys,
                                           marker=result_marker,
                                           prefix=src_path or '')
            if len(keys) == 0:
                break
            for k in keys:
                i += 1
                q.put(k.key)
            # print 'Added %s keys to queue' % len(keys)
            if len(keys) < max_keys:
                print 'All items now in queue'
                break
            result_marker = keys[max_keys - 1].key
            while q.qsize() > (q.maxsize - max_keys):
                time.sleep(1)  # sleep if our queue is getting too big for the next set of keys
        except BaseException:
            logging.exception('error during fetch, quitting')
            break

    print 'm (%s): Waiting for queue to be completed' % (src_path)
    q.join()
    print
    print 'm (%s): Done' % (src_path)
    print
Developer: neelakanta, Project: s3_bucket_to_bucket_copy_py, Lines: 64, Source: s3_bucket_to_bucket_copy.py

Example 12: primeQueues

# Required import: from Queue import LifoQueue [as alias]
# Alternatively: from Queue.LifoQueue import qsize [as alias]
            pathWorker.setDaemon(True)
            pathWorker.start()

        # now the worker threads are processing lets feed the fileQueue, this will block if the 
        # rework file is larger than the queue.
        primeQueues(fileQueue, dirQueue)


        if args.debug:
            queueMsg("\"max\", \"file\", \"dir\", \"results\"")
        # lets just hang back and wait for the queues to empty
        print "If you need to pause this job, press Ctrl-C once"
        time.sleep(1)
        while not terminateThreads:
            if args.debug:
                queueMsg("\"%s\", \"%s\", \"%s\", \"%s\"\n"%(args.queueParams['max'], fileQueue.qsize(), dirQueue.qsize(), resultsQueue.qsize()))
            time.sleep(.1)
            
            if fileQueue.empty() and dirQueue.empty():
                queueMsg("\"%s\", \"%s\", \"%s\", \"%s\"\n"%(args.queueParams['max'], fileQueue.qsize(), dirQueue.qsize(), resultsQueue.qsize()))
                print "waiting for directory queue to clear..."
                dirQueue.join()
                print "waiting for file queue to clear..."
                fileQueue.join()
                print "waiting for worker processes to complete..."
                terminateThreads = True
                print "waiting for results queue to clear..."
                resultsQueue.join()
                print "exporting statistics..."
                exportStats()
                print "closing files..."
Developer: Arkivum, Project: ArkCargo, Lines: 33, Source: mkcargo.py

Example 13: B2BucketThreadedLocal

# Required import: from Queue import LifoQueue [as alias]
# Alternatively: from Queue.LifoQueue import qsize [as alias]
class B2BucketThreadedLocal(B2Bucket): 
    def __init__(self, *args):
        super(B2BucketThreadedLocal, self).__init__(*args)
        
        num_threads=50
        self.queue = LifoQueue(num_threads*2)
        
        self.file_locks = defaultdict(Lock)
        
        self.running = True
        
        self.threads = []
        print "Thread ",
        for i in xrange(num_threads):
            t = threading.Thread(target=self._file_updater)
            t.start()
            self.threads.append(t)
            
            print ".",
            
        print 
        
        self.pre_queue_lock = Lock()
        self.pre_queue_running = True
        self.pre_queue = LifoQueue(num_threads*2)
        
        self.pre_file_dict = {}
        
        self.pre_thread = threading.Thread(target=self._prepare_update)
        self.pre_thread.start()
        
        
    
    def _prepare_update(self):
        while self.pre_queue_running:
            try:
                filename, local_filename, operation  = self.pre_queue.get(True,1)
                self.pre_file_dict[filename] = (time(), local_filename, operation)
                self.pre_queue.task_done()
            except Empty:
                for filename, (timestamp, local_filename, operation) in self.pre_file_dict.items():
                    if time()-timestamp > 15:
                        self.queue.put((filename, local_filename, operation))
                    
                        del self.pre_file_dict[filename]
        
        for filename, (timestamp, local_filename, operation) in self.pre_file_dict.items():
            self.queue.put((filename, local_filename, operation))
            del self.pre_file_dict[filename]
            
    def _file_updater(self):
        while self.running:
            try:
                filename, local_filename, operation  = self.queue.get(True,1)
            except Empty:
                continue
            
            
            with self.file_locks[filename]:
                if operation == "deletion":
                    super(B2BucketThreaded,self)._delete_file(filename)
                    self.queue.task_done()
                    
                elif operation == "upload":
                    super(B2BucketThreaded,self)._put_file(filename, local_filename)
                    self.queue.task_done()
                    
                elif operation == "download":
                    super(B2BucketThreaded,self)._get_file(filename, local_filename)
                    self.queue.task_done()
                    
                else:
                    self.logger.error("Invalid operation %s on %s" % (operation, filename))
                
            
    
    def __enter__(self):
        return self
        
    def __exit__(self, *args, **kwargs):
        self.logger.info("Waiting for all B2 requests to complete")
        
        self.logger.info("Pre-Queue contains %s elements", self.pre_queue.qsize())
        self.pre_queue.join()
        
        self.logger.info("Joining pre queue thread")
        self.pre_queue_running = False
        self.pre_thread.join()
        
        self.logger.info("Queue contains %s elements", self.queue.qsize())
        self.queue.join()
        
        self.logger.info("Joining threads")
        self.running = False
        for t in self.threads:
            t.join()
            
            
    def put_file(self, filename, local_filename):
        with self.pre_queue_lock:
#......... the rest of this code is omitted .........
Developer: Linutux, Project: b2_fuse, Lines: 103, Source: b2bucket_threaded.py
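
Since the class defines __enter__ and __exit__, the intended usage is presumably a with block, whose exit path drains both queues and joins every thread. A sketch (the constructor arguments come from the B2Bucket base class, which is not shown, so they are assumptions):

# Hypothetical usage; argument names are guesses at what B2Bucket expects.
with B2BucketThreadedLocal(account_id, application_key, bucket_id) as bucket:
    bucket.put_file('remote/name.txt', '/tmp/local_name.txt')
# leaving the block waits for queued uploads/downloads/deletions to finish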


Note: The Queue.LifoQueue.qsize examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Consult each project's license before distributing or using the code; do not republish without permission.