

Python Queue.join Method Code Examples

This article collects typical usage examples of the six.moves.queue.Queue.join method in Python. If you have been asking yourself how exactly Queue.join is used, or what working code that calls it looks like, the hand-picked examples below should help. You can also browse further usage examples of six.moves.queue.Queue, the class this method belongs to.


A total of 15 code examples of Queue.join are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
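All of the examples share one underlying idiom: producers put() work items on a Queue, worker threads call task_done() after finishing each item, and Queue.join() blocks until every enqueued item has been marked done. As a baseline for reading the examples, here is a minimal, self-contained sketch of that pattern (the process function and NUM_WORKERS are illustrative, not taken from any project below):

from threading import Thread
from six.moves.queue import Queue

NUM_WORKERS = 4

def process(item):
    # stand-in for real per-item work
    print('processed', item)

def worker(q):
    while True:
        item = q.get()
        try:
            process(item)
        finally:
            q.task_done()  # one task_done() per get(), or join() never returns

q = Queue()
for _ in range(NUM_WORKERS):
    t = Thread(target=worker, args=(q,))
    t.daemon = True  # daemon workers exit when the main thread does
    t.start()

for item in range(20):
    q.put(item)

q.join()  # blocks until all 20 items have been processed and marked done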

Example 1: Pool

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import join [as alias]
class Pool(object):
    class Error(Exception):
        pass
    
    def __init__(self, threads, host, port, ssl, user, password):
        self._threads = []
        self._queue = Queue(maxsize=1000)
        count = 0
        while len(self._threads) < threads and count < 3 * threads:
            try:
                count += 1
                w = Downloader(self._queue, host, port, ssl, user, password)
                w.start()
                self._threads.append(w)
            except SOFT_ERRORS as e:
                log.warn('Cannot create downloader thread: %s', e)
                
        if len(self._threads) != threads:
            log.error('Cannot create enough workers')
            raise Pool.Error('Cannot create enough workers')
        
    def wait_finish(self):
        self._queue.join()
        
    def stop(self):
        for t in self._threads:
            t.stop()
        
    def download(self, **kwargs):
        kwargs['retry']=0
        self._queue.put(kwargs)
                
        
        
Developer: izderadicka | Project: imap_detach | Lines: 33 | Source: pool.py
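A hypothetical usage of this Pool might look as follows. The host, port, and credentials are placeholders, the Downloader worker and SOFT_ERRORS come from elsewhere in imap_detach, and the download() keyword arguments are illustrative only:

pool = Pool(threads=4, host='imap.example.com', port=993, ssl=True,
            user='me', password='secret')
pool.download(folder='INBOX', message_id=42)  # kwargs are queued for a Downloader
pool.wait_finish()  # Queue.join(): blocks until the workers drain the queue
pool.stop()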

Example 2: lines

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import join [as alias]
    def lines(self, fuseki_process):
        """
        Provides an iterator generating the encoded string representation
        of each member of this metarelate mapping translation.

        Returns:
            An iterator of strings.

        """
        msg = '\tGenerating phenomenon translation {!r}.'
        print(msg.format(self.mapping_name))
        lines = ['\n%s = {\n' % self.mapping_name]
        # Retrieve encodings for the collection of mapping instances.
        # Retrieval is threaded as it is heavily bound by resource resolution
        # over http.
        # Queue for metarelate mapping instances
        mapenc_queue = Queue()
        for mapping in self.mappings:
            mapenc_queue.put(mapping)
        # deque to contain the results of the jobs processed from the queue
        mapencs = deque()
        # run worker threads
        for i in range(MAXTHREADS):
            MappingEncodeWorker(mapenc_queue, mapencs, fuseki_process).start()
        # block progress until the queue is empty
        mapenc_queue.join()
        # end of threaded retrieval process.

        # now sort the payload
        payload = [mapenc.encoding for mapenc in mapencs]
        payload.sort(key=self._key)
        lines.extend(payload)
        lines.append('    }\n')
        return iter(lines)
Developer: MahatmaCane | Project: iris | Lines: 36 | Source: __init__.py

Example 3: run

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import join [as alias]
    def run(self):
        args = list(islice(self.reqs, self.requests))
        if self.shuffle:
            random.shuffle(args)
        print("Total requests: %d" % len(args))
        print("Concurrency   : %d" % self.concurrency)

        starttime = time.time()
        q, p = Queue(), Queue()
        for _ in six.moves.range(self.concurrency):
            t = Thread(target=worker, args=(self.host, q, p, self.verbose))
            t.daemon = True
            t.start()
        for a in args:
            q.put(a)
        q.join()

        outputs = []
        for _ in six.moves.range(self.requests):
            outputs.append(p.get())

        elapsed = time.time() - starttime
        print()
        print("Total requests: %d" % len(args))
        print("Concurrency   : %d" % self.concurrency)
        print("Elapsed time  : %.3fs" % elapsed)
        print("Avg time p/req: %.3fs" % (elapsed / len(args)))
        print("Received (per status code or error):")
        for c, n in Counter(outputs).items():
            print("  %s: %d" % (c, n))
Developer: dvska | Project: splash | Lines: 32 | Source: stress.py
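The worker function used above is not part of this snippet. A plausible sketch, assuming an HTTP client such as requests and assuming each queued args object is a dict of query parameters, given how the code uses q (request arguments in) and p (exactly one result per request out):

import requests  # assumption: any HTTP client with a similar API would do

def worker(host, q, p, verbose):
    while True:
        args = q.get()
        try:
            resp = requests.get(host, params=args)
            p.put(resp.status_code)  # one output per request
        except Exception as e:
            p.put(type(e).__name__)  # errors are counted by name
        finally:
            q.task_done()  # lets q.join() return once all requests finish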

Example 4: daily_metadata

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import join [as alias]
def daily_metadata(year, month, day, dst_folder, writers=[file_writer], geometry_check=None,
                   num_worker_threads=1):
    """ Extra metadata for all products in a specific date """

    threaded = False

    counter = {
        'products': 0,
        'saved_tiles': 0,
        'skipped_tiles': 0,
        'skipped_tiles_paths': []
    }

    if num_worker_threads > 1:
        threaded = True
        queue = Queue()

    # create folders
    year_dir = os.path.join(dst_folder, str(year))
    month_dir = os.path.join(year_dir, str(month))
    day_dir = os.path.join(month_dir, str(day))

    product_list = get_products_metadata_path(year, month, day)

    logger.info('There are %s products in %s-%s-%s' % (len(list(iterkeys(product_list))),
                                                       year, month, day))

    for name, product in iteritems(product_list):
        product_dir = os.path.join(day_dir, name)

        if threaded:
            queue.put([product, product_dir, counter, writers, geometry_check])
        else:
            counter = product_metadata(product, product_dir, counter, writers, geometry_check)

    if threaded:
        def worker():
            while not queue.empty():
                args = queue.get()
                try:
                    product_metadata(*args)
                except Exception:
                    exc = sys.exc_info()
                    logger.error('%s tile skipped due to error: %s' % (threading.current_thread().name,
                                                                       exc[1].__str__()))
                    args[2]['skipped_tiles'] += 1
                queue.task_done()

        threads = []
        for i in range(num_worker_threads):
            t = threading.Thread(target=worker)
            t.start()
            threads.append(t)

        queue.join()

    return counter
Developer: developmentseed | Project: sentinel-s3 | Lines: 59 | Source: main.py

Example 5: _BatchWriter

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import join [as alias]
class _BatchWriter(object):
    #: Truncate overly big items to that many bytes for the error message.
    ERRMSG_DATA_TRUNCATION_LEN = 1024

    def __init__(self, url, start, auth, size, interval, qsize,
                 maxitemsize, content_encoding, uploader, callback=None):
        self.url = url
        self.offset = start
        self._nextid = count(start)
        self.auth = auth
        self.size = size
        self.interval = interval
        self.maxitemsize = maxitemsize
        self.content_encoding = content_encoding
        self.checkpoint = time.time()
        self.itemsq = Queue(size * 2 if qsize is None else qsize)
        self.closed = False
        self.flushme = False
        self.uploader = uploader
        self.callback = callback

    def write(self, item):
        assert not self.closed, 'attempting writes to a closed writer'
        data = jsonencode(item)
        if len(data) > self.maxitemsize:
            truncated_data = data[:self.ERRMSG_DATA_TRUNCATION_LEN] + "..."
            raise ValueTooLarge(
                'Value exceeds max encoded size of {} bytes: {!r}'
                .format(self.maxitemsize, truncated_data))

        self.itemsq.put(data)
        if self.itemsq.full():
            self.uploader.interrupt()
        return next(self._nextid)

    def flush(self):
        self.flushme = True
        self._waitforq()
        self.flushme = False

    def close(self, block=True):
        self.closed = True
        if block:
            self._waitforq()

    def _waitforq(self):
        self.uploader.interrupt()
        self.itemsq.join()

    def __str__(self):
        return self.url
Developer: scrapinghub | Project: python-hubstorage | Lines: 53 | Source: batchuploader.py

Example 6: WorkerPool

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import join [as alias]
class WorkerPool(object):

    def __init__(self, num_threads=None):
        if num_threads is None:
            num_threads = multiprocessing.cpu_count()
        self.tasks = Queue(num_threads)
        for _ in range(num_threads):
            Worker(self.tasks)

    def add_task(self, func, *args, **kargs):
        self.tasks.put((func, args, kargs))

    def wait_for_completion(self):
        self.tasks.join()
Developer: MortalCatalyst | Project: lektor | Lines: 16 | Source: utils.py

Example 7: ThreadPool

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import join [as alias]
class ThreadPool:
    """Pool of threads consuming tasks from a queue."""
    def __init__(self, num_threads):
        self.tasks = Queue(num_threads)
        for _ in range(num_threads):
            Worker(self.tasks)

    def add_task(self, func, *args, **kargs):
        """Add a task to the queue."""
        self.tasks.put((func, args, kargs))

    def wait_completion(self):
        """Wait for completion of all the tasks in the queue."""
        self.tasks.join()
Developer: PeterJCLaw | Project: tools | Lines: 16 | Source: threadpool.py
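Assuming the Worker class (not shown here) pulls (func, args, kargs) tuples from the queue, runs them, and calls task_done() on each, usage reduces to the following sketch; the fetch function and its arguments are illustrative:

def fetch(url, timeout=10):
    # stand-in task; any callable works
    print('fetching %s (timeout=%s)' % (url, timeout))

pool = ThreadPool(num_threads=8)
for url in ('http://a.example', 'http://b.example'):
    pool.add_task(fetch, url, timeout=5)
pool.wait_completion()  # tasks.join() returns once every task is done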

Example 8: test_resolve

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import join [as alias]
    def test_resolve(self):
        repo_fname = os.path.join(os.path.dirname(__file__), "results", "imagerepo.json")
        with open(repo_fname, "rb") as fi:
            repo = json.load(codecs.getreader("utf-8")(fi))
        uris = list(chain.from_iterable(six.itervalues(repo)))
        uri_list = deque()
        exceptions = deque()
        uri_queue = Queue()
        for uri in uris:
            if uri.startswith("https://scitools.github.io"):
                uri_queue.put(uri)
            else:
                msg = "{} is not a valid resource.".format(uri)
                exceptions.append(ValueError(msg))

        for i in range(MAXTHREADS):
            _ResolveWorkerThread(uri_queue, uri_list, exceptions).start()
        uri_queue.join()
        self.assertEqual(deque(), exceptions)
Developer: bjlittle | Project: iris | Lines: 21 | Source: test_image_json.py

Example 9: execute

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import join [as alias]
def execute( trans, tool, param_combinations, history, rerun_remap_job_id=None, collection_info=None, workflow_invocation_uuid=None ):
    """
    Execute a tool and return object containing summary (output data, number of
    failures, etc...).
    """
    all_jobs_timer = ExecutionTimer()
    execution_tracker = ToolExecutionTracker( tool, param_combinations, collection_info )
    app = trans.app
    execution_cache = ToolExecutionCache(trans)

    def execute_single_job(params):
        job_timer = ExecutionTimer()
        if workflow_invocation_uuid:
            params[ '__workflow_invocation_uuid__' ] = workflow_invocation_uuid
        elif '__workflow_invocation_uuid__' in params:
            # Only workflow invocation code gets to set this, ignore user supplied
            # values or rerun parameters.
            del params[ '__workflow_invocation_uuid__' ]
        job, result = tool.handle_single_execution( trans, rerun_remap_job_id, params, history, collection_info, execution_cache )
        if job:
            message = EXECUTION_SUCCESS_MESSAGE % (tool.id, job.id, job_timer)
            log.debug(message)
            execution_tracker.record_success( job, result )
        else:
            execution_tracker.record_error( result )

    config = app.config
    burst_at = getattr( config, 'tool_submission_burst_at', 10 )
    burst_threads = getattr( config, 'tool_submission_burst_threads', 1 )

    tool_action = tool.tool_action
    if hasattr( tool_action, "check_inputs_ready" ):
        for params in execution_tracker.param_combinations:
            # This will throw an exception if the tool is not ready.
            tool_action.check_inputs_ready(
                tool,
                trans,
                params,
                history
            )

    job_count = len(execution_tracker.param_combinations)
    if job_count < burst_at or burst_threads < 2:
        for params in execution_tracker.param_combinations:
            execute_single_job(params)
    else:
        q = Queue()

        def worker():
            while True:
                params = q.get()
                execute_single_job(params)
                q.task_done()

        for i in range(burst_threads):
            t = Thread(target=worker)
            t.daemon = True
            t.start()

        for params in execution_tracker.param_combinations:
            q.put(params)

        q.join()

    log.debug("Executed %d job(s) for tool %s request: %s" % (job_count, tool.id, all_jobs_timer))
    if collection_info:
        history = history or tool.get_default_history_by_trans( trans )
        if len(param_combinations) == 0:
            template = "Attempting to map over an empty collection, this is not yet implemented. colleciton_info is [%s]"
            message = template % collection_info
            log.warn(message)
            raise Exception(message)
        params = param_combinations[0]
        execution_tracker.create_output_collections( trans, history, params )

    return execution_tracker
Developer: ashvark | Project: galaxy | Lines: 78 | Source: execute.py

Example 10: execute

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import join [as alias]
def execute(trans, tool, mapping_params, history, rerun_remap_job_id=None, collection_info=None, workflow_invocation_uuid=None, invocation_step=None, max_num_jobs=None, job_callback=None, completed_jobs=None, workflow_resource_parameters=None):
    """
    Execute a tool and return object containing summary (output data, number of
    failures, etc...).
    """
    if max_num_jobs:
        assert invocation_step is not None
    if rerun_remap_job_id:
        assert invocation_step is None

    all_jobs_timer = ExecutionTimer()
    if invocation_step is None:
        execution_tracker = ToolExecutionTracker(trans, tool, mapping_params, collection_info)
    else:
        execution_tracker = WorkflowStepExecutionTracker(trans, tool, mapping_params, collection_info, invocation_step, job_callback=job_callback)
    app = trans.app
    execution_cache = ToolExecutionCache(trans)

    def execute_single_job(execution_slice, completed_job):
        job_timer = ExecutionTimer()
        params = execution_slice.param_combination
        if workflow_invocation_uuid:
            params['__workflow_invocation_uuid__'] = workflow_invocation_uuid
        elif '__workflow_invocation_uuid__' in params:
            # Only workflow invocation code gets to set this, ignore user supplied
            # values or rerun parameters.
            del params['__workflow_invocation_uuid__']
        if workflow_resource_parameters:
            params['__workflow_resource_params__'] = workflow_resource_parameters
        elif '__workflow_resource_params__' in params:
            # Only workflow invocation code gets to set this, ignore user supplied
            # values or rerun parameters.
            del params['__workflow_resource_params__']
        job, result = tool.handle_single_execution(trans, rerun_remap_job_id, execution_slice, history, execution_cache, completed_job)
        if job:
            message = EXECUTION_SUCCESS_MESSAGE % (tool.id, job.id, job_timer)
            log.debug(message)
            execution_tracker.record_success(execution_slice, job, result)
        else:
            execution_tracker.record_error(result)

    tool_action = tool.tool_action
    if hasattr(tool_action, "check_inputs_ready"):
        for params in execution_tracker.param_combinations:
            # This will throw an exception if the tool is not ready.
            tool_action.check_inputs_ready(
                tool,
                trans,
                params,
                history
            )

    execution_tracker.ensure_implicit_collections_populated(history, mapping_params.param_template)
    config = app.config
    burst_at = getattr(config, 'tool_submission_burst_at', 10)
    burst_threads = getattr(config, 'tool_submission_burst_threads', 1)

    job_count = len(execution_tracker.param_combinations)

    jobs_executed = 0
    has_remaining_jobs = False

    if (job_count < burst_at or burst_threads < 2):
        for i, execution_slice in enumerate(execution_tracker.new_execution_slices()):
            if max_num_jobs and jobs_executed >= max_num_jobs:
                has_remaining_jobs = True
                break
            else:
                execute_single_job(execution_slice, completed_jobs[i])
                jobs_executed += 1
    else:
        # TODO: re-record success...
        q = Queue()

        def worker():
            while True:
                execution_slice, completed_job = q.get()
                execute_single_job(execution_slice, completed_job)
                q.task_done()

        for i in range(burst_threads):
            t = Thread(target=worker)
            t.daemon = True
            t.start()

        for i, execution_slice in enumerate(execution_tracker.new_execution_slices()):
            if max_num_jobs and jobs_executed >= max_num_jobs:
                has_remaining_jobs = True
                break
            else:
                q.put((execution_slice, completed_jobs[i]))
                jobs_executed += 1

        q.join()

    if has_remaining_jobs:
        raise PartialJobExecution(execution_tracker)
    else:
        execution_tracker.finalize_dataset_collections(trans)

    log.debug("Executed %d job(s) for tool %s request: %s" % (job_count, tool.id, all_jobs_timer))
#......... some code omitted here .........
Developer: ImmPortDB | Project: immport-galaxy | Lines: 103 | Source: execute.py

Example 11: get_images

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import join [as alias]

#......... some code omitted here .........
                            config_changed = True
                else:
                    config_changed = True
        else:
            config_changed = True

    need_download = classes_changed or config_changed

    if need_download:

        if had_previous:
            trainer.logger.info("Reset dataset and re-download images to " + dir)
            if classes_changed:
                trainer.logger.info(" .. because classes changed in", meta['classes_md5'], classes_md5, meta_information_file)
            if config_changed:
                trainer.logger.info(" .. because settings changed in", meta_information_file)
        else:
            trainer.logger.info("Download images to " + dir)

        resize = bool(get_option(config, 'resize', True))
        if resize:
            resizeSize = (int(get_option(config, 'resizeWidth', 64)),
                          int(get_option(config, 'resizeHeight', 64)))
            trainer.logger.info(" .. with resizing to %dx%d " % resizeSize)

        # we need to download all images
        shutil.rmtree(dataset_path)

        controller = {'running': True}
        try:
            for category in classes:
                max += len(category['images'])

            progress = trainer.job_backend.create_progress('dataset-download-images', max)
            progress.label('Download dataset images')

            for i in range(concurrent):
                t = ImageDownloaderWorker(q, progress, dataset, max, images, controller)
                t.daemon = True
                t.start()

            for category_idx, category in enumerate(classes):
                for image in category['images']:
                    local_name = image['id']
                    local_path = '%s/%s' % (trainer.job_model.get_dataset_downloads_dir(dataset), local_name)
                    q.put([image, category_idx, local_path])

            q.join()
            controller['running'] = False

            def move_image(image, category='training'):
                if image['id'] in images and os.path.isfile(images[image['id']]):
                    target_path = dataset_path + \
                        '/%s/category_%s/%s' % (category, category_idx,
                                                os.path.basename(images[image['id']]))
                    ensure_dir(os.path.dirname(target_path))
                    os.rename(images[image['id']], target_path)

            for category_idx, category in enumerate(classes):
                random.shuffle(category['images'])
                position = int(math.ceil(len(category['images']) * validationFactor))

                ensure_dir(dataset_path + '/training')
                ensure_dir(dataset_path + '/validation')

                for image in category['images'][position:]:  # test data
                    if image['id'] in images and os.path.isfile(images[image['id']]):
                        move_image(image, 'training')

                for image in category['images'][:position]:  # validation data
                    if image['id'] in images and os.path.isfile(images[image['id']]):
                        move_image(image, 'validation')

            with open(meta_information_file, 'w') as f:
                meta = {
                    'loaded_at': classes_md5,
                    'classes_md5': classes_md5,
                    'config': config
                }
                simplejson.dump(meta, f, default=invalid_json_values)

        except KeyboardInterrupt:
            controller['running'] = False
            sys.exit(1)
    else:
        trainer.logger.info("Downloaded images up2date in " + dir)
        trainer.logger.info(" - Remove this directory if you want to re-download all images of your dataset and re-shuffle training/validation images.")

    trainer.output_size = len(classes)

    # change to type local_images
    dataset_transformed = dataset.copy()
    dataset_transformed['config']['path'] = dir

    all_memory = get_option(dataset['config'], 'allMemory', False, 'bool')

    if all_memory:
        return read_images_in_memory(job_model, dataset_transformed, node, trainer)
    else:
        return read_images_keras_generator(job_model, dataset_transformed, node, trainer)
Developer: aetros | Project: aetros-cli | Lines: 104 | Source: auto_dataset.py

Example 12: read_images_in_memory

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import join [as alias]
def read_images_in_memory(job_model, dataset, node, trainer):
    """
    Reads all images into memory and applies augmentation if enabled
    """
    concurrent = psutil.cpu_count()

    dataset_config = dataset['config']
    controller = {'running': True}
    q = Queue(concurrent)

    result = {
        'X_train': [],
        'Y_train': [],
        'X_test': [],
        'Y_test': []
    }

    images = []
    max = 0

    path = job_model.get_dataset_downloads_dir(dataset)
    if 'path' in dataset['config']:
        path = dataset['config']['path']

    classes_count = 0
    category_map = {}
    classes = []

    trainer.set_status('LOAD IMAGES INTO MEMORY')

    try:
        for i in range(concurrent):
            t = ImageReadWorker(q, job_model, node, path, images, controller)
            t.daemon = True
            t.start()

        for validation_or_training in ['validation', 'training']:
            if os.path.isdir(os.path.normpath(path + '/' + validation_or_training)):
                for category_name in os.listdir(os.path.normpath(path + '/' + validation_or_training)):
                    if os.path.isdir(os.path.normpath(path + '/' + validation_or_training + '/' + category_name)):

                        if category_name not in category_map:
                            category_map[category_name] = classes_count
                            if 'classes' in dataset_config and 'category_' in category_name:
                                category_idx = int(category_name.replace('category_', ''))
                                category_map[category_name] = category_idx
                                target_category = dataset_config['classes'][category_idx]
                                classes.append(target_category['title'] or 'Class %s' % (category_idx, ))
                            else:
                                classes.append(category_name)

                            classes_count += 1

                        for id in os.listdir(os.path.normpath(path + '/' + validation_or_training + '/' + category_name)):
                            file_path = os.path.join(path, validation_or_training, category_name, id)
                            q.put([file_path, validation_or_training == 'validation', category_name])
                            max += 1

        q.join()
        controller['running'] = False

        train_images = []
        test_images = []

        for v in images:
            image, validation, category_dir = v
            if validation is True:
                test_images.append([image, category_map[category_dir]])
            else:
                train_images.append([image, category_map[category_dir]])

        train_datagen = None
        augmentation = bool(get_option(dataset_config, 'augmentation', False))
        if augmentation:
            train_datagen = get_image_data_augmentor_from_dataset(dataset)

        train = InMemoryDataGenerator(train_datagen, train_images, classes_count, job_model.job['config']['batchSize'])

        test = InMemoryDataGenerator(None, test_images, classes_count, job_model.job['config']['batchSize'])

        nb_sample = len(train_images)
        trainer.set_info('Dataset size', {'training': nb_sample, 'validation': len(test_images)})
        trainer.set_generator_training_nb(nb_sample)
        trainer.set_generator_validation_nb(len(test_images))

        trainer.logger.info(("Found %d classes, %d images (%d in training [%saugmented], %d in validation). Read all images into memory from %s" %
               (classes_count, max, len(train_images), 'not ' if augmentation is False else '', len(test_images), path)))

        if classes_count == 0:
            trainer.logger.warning("Could not find any classes. Does the directory contains images?")
            sys.exit(1)

        trainer.output_size = classes_count
        trainer.set_info('classes', classes)
        trainer.classes = classes

        result['X_train'] = train
        result['Y_train'] = train
        result['X_test'] = test
        result['Y_test'] = test
#......... some code omitted here .........
Developer: aetros | Project: aetros-cli | Lines: 103 | Source: auto_dataset.py

Example 13: GraphiteReporter

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import join [as alias]

#......... some code omitted here .........
    """Run the thread."""
    while True:
      try:
        try:
          name, value, valueType, stamp = self.queue.get()
        except TypeError:
          break
        self.log(name, value, valueType, stamp)
      finally:
        self.queue.task_done()


  def connect(self):
    """Connects to the Graphite server if not already connected."""
    if self.sock is not None:
      return
    backoff = 0.01
    while True:
      try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(5)
        sock.connect((self.host, self.port))
        self.sock = sock
        return
      except socket.error:
        time.sleep(random.uniform(0, 2.0*backoff))
        backoff = min(backoff*2.0, 5.0)


  def disconnect(self):
    """Disconnect from the Graphite server if connected."""
    if self.sock is not None:
      try:
        self.sock.close()
      except socket.error:
        pass
      finally:
        self.sock = None


  def _sendMsg(self, msg):
    """Send a line to graphite. Retry with exponential backoff."""
    if not self.sock:
      self.connect()
    if not isinstance(msg, binary_type):
      msg = msg.encode("UTF-8")

    backoff = 0.001
    while True:
      try:
        self.sock.sendall(msg)
        break
      except socket.error:
        log.warning('Graphite connection error', exc_info = True)
        self.disconnect()
        time.sleep(random.uniform(0, 2.0*backoff))
        backoff = min(backoff*2.0, 5.0)
        self.connect()


  def _sanitizeName(self, name):
    """Sanitize a metric name."""
    return name.replace(' ', '-')


  def log(self, name, value, valueType=None, stamp=None):
    """Log a named numeric value. The value type may be 'value',
    'count', or None."""
    if type(value) == float:
      form = "%s%s %2.2f %d\n"
    else:
      form = "%s%s %s %d\n"

    if valueType is not None and len(valueType) > 0 and valueType[0] != '.':
      valueType = '.' + valueType

    if not stamp:
      stamp = time.time()

    self._sendMsg(form % (self._sanitizeName(name), valueType or '', value, stamp))


  def enqueue(self, name, value, valueType=None, stamp=None):
    """Enqueue a call to log."""
    # If queue is too large, refuse to log.
    if self.maxQueueSize and self.queue.qsize() > self.maxQueueSize:
      return
    # Stick arguments into the queue
    self.queue.put((name, value, valueType, stamp))


  def flush(self):
    """Block until all stats have been sent to Graphite."""
    self.queue.join()


  def shutdown(self):
    """Shut down the background thread."""
    self.queue.put(None)
    self.flush()
Developer: Cue | Project: scales | Lines: 104 | Source: util.py
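The shutdown sequence above is a sentinel pattern worth noting: shutdown() enqueues None, unpacking the sentinel in the worker raises TypeError and breaks the loop, and the finally block still calls task_done() so the flush()/join() pair can return. A minimal sketch of the same mechanism in isolation (names are illustrative):

from threading import Thread
from six.moves.queue import Queue

q = Queue()

def run():
    while True:
        try:
            try:
                name, value = q.get()  # unpacking the None sentinel raises TypeError
            except TypeError:
                break  # finally still marks the sentinel as done
            print('log %s=%s' % (name, value))
        finally:
            q.task_done()

t = Thread(target=run)
t.start()
q.put(('requests', 42))
q.put(None)  # sentinel: ask the worker to exit
q.join()     # returns after both items, including the sentinel, are done
t.join()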

Example 14: Rigger

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import join [as alias]

#......... some code omitted here .........
            self.log_message(msg)

    def start_server(self):
        """
        Starts the ZMQ server if the ``server_enabled`` is True in the config.
        """
        self._server_hostname = self.config.get('server_address', '127.0.0.1')
        self._server_port = self.config.get('server_port', 21212)
        self._server_enable = self.config.get('server_enabled', False)
        if self._server_enable:
            zmq_socket_address = 'tcp://{}:{}'.format(self._server_hostname, self._server_port)
            # set up receiver thread for zmq event handling
            zeh = threading.Thread(
                target=self.zmq_event_handler, args=(zmq_socket_address,), name="zmq_event_handler")
            zeh.start()
            exect = threading.Thread(target=self.await_shutdown, name="executioner")
            exect.start()

    def await_shutdown(self):
        while not self._server_shutdown:
            time.sleep(0.3)
        self.stop_server()

    def stop_server(self):
        """
        Responsible for the following:
            - stopping the zmq event handler (unless already stopped through 'terminate')
            - stopping the global queue
            - stopping the background queue
        """
        self.log_message("Shutdown initiated : {}".format(self._server_hostname))
        # The order here is important
        self._zmq_event_handler_shutdown = True
        self._global_queue.join()
        self._global_queue_shutdown = True
        self._background_queue.join()
        self._background_queue_shutdown = True
        raise SystemExit

    def fire_hook(self, hook_name, **kwargs):
        """
        Parses the hook information into a dict for passing to process_hook. This is used
        to enable both the TCP and in-object fire_hook methods to use the same process_hook
        method call.

        Args:
            hook_name: The name of the hook to fire.
            kwargs: The kwargs to pass to the hooks.

        """
        json_dict = {'hook_name': hook_name, 'data': kwargs}
        self._fire_internal_hook(json_dict)

    def _fire_internal_hook(self, json_dict):
        task = Task(json_dict)
        tid = task.tid.hexdigest()
        self._task_list[tid] = task
        if self._global_queue:
            with self._queue_lock:
                self._global_queue.put(tid)
            return tid
        else:
            return None

    def process_hook(self, hook_name, **kwargs):
        """
Developer: psav | Project: riggerlib | Lines: 70 | Source: server.py

Example 15: ThreadPool

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import join [as alias]
class ThreadPool(object):

    def __init__(self, thread_manager, thread_count=10):
        """Initialization method

        :param thread_manager: the thread manager to use
        :param thread_count: the number of workers to instantiate
        """
        self.logger = logging.getLogger(
            'storj.downstream_farmer.utils.ThreadPool')
        self.tasks = Queue()
        self.thread_manager = thread_manager
        self.workers = list()
        self.workers_lock = threading.Lock()
        for i in range(0, thread_count):
            self._add_thread()
        # managed monitor thread
        self.monitor_thread = self.thread_manager.create_thread(
            name='MonitorThread',
            target=self._monitor)
        self.load_minimum = 0.01
        self.load_maximum = 0.25

    def thread_count(self):
        with self.workers_lock:
            return len(self.workers)

    def _add_thread(self):
        # unmanaged worker threads
        self.logger.debug(
            '{0} : adding worker'.format(threading.current_thread()))
        worker = WorkerThread(self)
        with self.workers_lock:
            self.workers.append(worker)
        return worker

    def _remove_thread(self):
        with self.workers_lock:
            if (len(self.workers) > 1):
                self.logger.debug(
                    '{0} : removing worker'.format(threading.current_thread()))
                # make sure to retain one worker
                thread = self.workers.pop(0)  # self.workers is a list, which has no popleft()
                thread.stop()

    def calculate_loading(self):
        total_time = 0
        work_time = 0
        with self.workers_lock:
            for w in self.workers:
                total_time += w.load_tracker.total_time()
                work_time += w.load_tracker.work_time()
        if (total_time > 0):
            load = float(work_time) / float(total_time)
        else:
            load = 0
        return load

    def max_load(self):
        max = 0
        with self.workers_lock:
            for w in self.workers:
                load = w.load_tracker.load()
                if (load > max):
                    max = load
        return max

    def check_loading(self):
        self.monitor_thread.wake()

    def _monitor(self):
        """This runs until the thread manager wakes it up during
        shutdown, at which time it will wait for any unfinished work in the
        queue, and then finish, allowing the program to exit
        """
        # wait until shutdown is called
        while (self.thread_manager.running):
            # check loading every second to see if we should add another
            # thread.
            load = self.calculate_loading()
            if (load > self.load_maximum):
                worker = self._add_thread()
                worker.start()
            elif (load < self.load_minimum):
                self._remove_thread()
            self.thread_manager.sleep(10)
        # wait for any existing work to finish
        self.logger.debug('MonitorThread waiting for tasks to finish')
        self.tasks.join()
        self.logger.debug('MonitorThread finishing')
        # now, managed thread can exit so program can close cleanly

    def put_work(self, target, args=[], kwargs={}):
        """Puts work in the work queue.
        :param work: callable work object
        """
        self.tasks.put(WorkItem(target, args, kwargs))

    def start(self):
        """Starts the thread pool and all its workers and the monitor thread
#......... some code omitted here .........
Developer: wiggzz | Project: downstream-farmer | Lines: 103 | Source: utils.py


Note: The six.moves.queue.Queue.join examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors. Refer to each project's License before distributing or using the code, and do not reproduce this article without permission.