Python request.RequestPool Class Code Examples

This article collects typical usage examples of the Python class lazyflow.request.RequestPool. If you are wondering what the RequestPool class does, how to use it, or what working examples look like, the hand-picked code examples below should help.


The sections below show 15 code examples of the RequestPool class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
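
All of the examples below follow the same basic pattern: create a RequestPool, add one Request per unit of work (usually wrapped with functools.partial), and call pool.wait() to block until every request has finished. As a quick orientation before the real-world excerpts, here is a minimal, self-contained sketch of that pattern; process_block, sum_blocks_in_parallel, and the list of input blocks are hypothetical placeholders for illustration, not part of the lazyflow API.

from functools import partial

import numpy
from lazyflow.request import Request, RequestPool

def process_block(results, index, block):
    # Hypothetical worker: store the sum of one block in a shared results list.
    results[index] = numpy.sum(block)

def sum_blocks_in_parallel(blocks):
    """Sum each block in parallel on the lazyflow thread pool (illustrative sketch)."""
    results = [None] * len(blocks)
    pool = RequestPool()
    for i, block in enumerate(blocks):
        # partial() binds the arguments now; the pool executes the call later.
        pool.add(Request(partial(process_block, results, i, block)))
    # Block until every request in the pool has completed.
    pool.wait()
    return results

Every excerpt that follows is a variation on this loop: the work function differs (training a random forest, filling a cache block, writing an HDF5 file per frame), but the pool/add/wait structure stays the same.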

Example 1: execute

    def execute(self, slot, subindex, roi, result):
        featMatrix=[]
        labelsMatrix=[]
        for i,labels in enumerate(self.inputs["Labels"]):
            if labels.meta.shape is not None:
                labels=labels[:].wait()

                indexes=numpy.nonzero(labels[...,0].view(numpy.ndarray))
                #Maybe later request only part of the region?

                image=self.inputs["Images"][i][:].wait()

                features=image[indexes]
                labels=labels[indexes]

                featMatrix.append(features)
                labelsMatrix.append(labels)


        featMatrix=numpy.concatenate(featMatrix,axis=0)
        labelsMatrix=numpy.concatenate(labelsMatrix,axis=0)

        # train and store self._forest_count forests in parallel
        pool = RequestPool()
        for i in range(self._forest_count):
            def train_and_store(number):
                result[number] = vigra.learning.RandomForest(self._tree_count)
                result[number].learnRF(featMatrix.astype(numpy.float32),labelsMatrix.astype(numpy.uint32))
            req = pool.request(partial(train_and_store, i))

        pool.wait()

        return result
Author: bheuer, Project: lazyflow, Lines: 33, Source: classifierOperators.py

Example 2: create_and_train

    def create_and_train(self, X, y):
        logger.debug( "Training parallel vigra RF" )
        # Save for future reference
        known_labels = numpy.unique(y)

        X = numpy.asarray(X, numpy.float32)
        y = numpy.asarray(y, numpy.uint32)
        if y.ndim == 1:
            y = y[:, numpy.newaxis]

        assert X.ndim == 2
        assert len(X) == len(y)

        # Create N forests
        forests = []
        for _ in range(self._num_forests):
            forest = vigra.learning.RandomForest(self._trees_per_forest, **self._kwargs)
            forests.append( forest )

        # Train them all in parallel
        pool = RequestPool()
        for forest in forests:
            pool.add( Request( partial(forest.learnRF, X, y) ) )
        pool.wait()

        return ParallelVigraRfLazyflowClassifier( forests, known_labels )
Author: burcin, Project: lazyflow, Lines: 26, Source: parallelVigraRfLazyflowClassifier.py

Example 3: execute

    def execute(self, slot, subindex, roi, result):
        clipped_block_rois = getIntersectingRois(
            self.Input.meta.shape, self.BlockShape.value, (roi.start, roi.stop), True
        )
        if self._always_request_full_blocks:
            full_block_rois = getIntersectingRois(
                self.Input.meta.shape, self.BlockShape.value, (roi.start, roi.stop), False
            )
        else:
            full_block_rois = clipped_block_rois

        pool = RequestPool()
        for full_block_roi, clipped_block_roi in zip(full_block_rois, clipped_block_rois):
            full_block_roi = numpy.asarray(full_block_roi)
            clipped_block_roi = numpy.asarray(clipped_block_roi)

            req = self.Input(*full_block_roi)
            output_roi = numpy.asarray(clipped_block_roi) - roi.start
            if (full_block_roi == clipped_block_roi).all():
                req.writeInto(result[roiToSlice(*output_roi)])
            else:
                roi_within_block = clipped_block_roi - full_block_roi[0]

                def copy_request_result(output_roi, roi_within_block, request_result):
                    self.Output.stype.copy_data(
                        result[roiToSlice(*output_roi)], request_result[roiToSlice(*roi_within_block)]
                    )

                req.notify_finished(partial(copy_request_result, output_roi, roi_within_block))
            pool.add(req)
            del req
        pool.wait()
Author: ilastik, Project: lazyflow, Lines: 32, Source: opSplitRequestsBlockwise.py

Example 4: _executeOutput

    def _executeOutput(self, roi, destination):
        assert len(roi.stop) == len(self.Input.meta.shape), "roi: {} has the wrong number of dimensions for Input shape: {}".format( roi, self.Input.meta.shape )
        assert numpy.less_equal(roi.stop, self.Input.meta.shape).all(), "roi: {} is out-of-bounds for Input shape: {}".format( roi, self.Input.meta.shape )
        
        block_starts = getIntersectingBlocks( self._blockshape, (roi.start, roi.stop) )
        block_starts = map( tuple, block_starts )

        # Ensure all block cache files are up-to-date
        reqPool = RequestPool() # (Do the work in parallel.)
        for block_start in block_starts:
            entire_block_roi = getBlockBounds( self.Input.meta.shape, self._blockshape, block_start )
            f = partial( self._ensureCached, entire_block_roi)
            reqPool.add( Request(f) )
        logger.debug( "Waiting for {} blocks...".format( len(block_starts) ) )
        reqPool.wait()

        # Copy data from each block
        # (Parallelism not needed here: h5py will serialize these requests anyway)
        logger.debug( "Copying data from {} blocks...".format( len(block_starts) ) )
        for block_start in block_starts:
            entire_block_roi = getBlockBounds( self.Input.meta.shape, self._blockshape, block_start )

            # This block's portion of the roi
            intersecting_roi = getIntersection( (roi.start, roi.stop), entire_block_roi )
            
            # Compute slicing within destination array and slicing within this block
            destination_relative_intersection = numpy.subtract(intersecting_roi, roi.start)
            block_relative_intersection = numpy.subtract(intersecting_roi, block_start)
            
            # Copy from block to destination
            dataset = self._getBlockDataset( entire_block_roi )
            destination[ roiToSlice(*destination_relative_intersection) ] = dataset[ roiToSlice( *block_relative_intersection ) ]
        return destination
Author: bheuer, Project: lazyflow, Lines: 33, Source: opCompressedCache.py

Example 5: execute_tasks

def execute_tasks( tasks ):
    """
    Executes the given list of tasks (functions) in the lazyflow threadpool.
    """
    pool = RequestPool()
    for task in tasks:
        pool.add( Request(task) )
    pool.wait()
Author: DerThorsten, Project: ilastik, Lines: 8, Source: simple_predict.py

Example 6: testBasic

    def testBasic(self):
        graph = Graph()
        opDataProvider = OpArrayPiperWithAccessCount(graph=graph)
        opCache = OpUnblockedArrayCache(graph=graph)

        data = np.random.random((100, 100, 100)).astype(np.float32)
        opDataProvider.Input.setValue(vigra.taggedView(data, "zyx"))
        opCache.Input.connect(opDataProvider.Output)

        assert opCache.CleanBlocks.value == []

        roi = ((30, 30, 30), (50, 50, 50))
        cache_data = opCache.Output(*roi).wait()
        assert (cache_data == data[roiToSlice(*roi)]).all()
        assert opDataProvider.accessCount == 1
        assert opCache.CleanBlocks.value == [roiToSlice(*roi)]

        # Request the same data a second time.
        # Access count should not change.
        cache_data = opCache.Output(*roi).wait()
        assert (cache_data == data[roiToSlice(*roi)]).all()
        assert opDataProvider.accessCount == 1
        assert opCache.CleanBlocks.value == [roiToSlice(*roi)]

        # Now invalidate a part of the data
        # The cache will discard it, so the access count should increase.
        opDataProvider.Input.setDirty((30, 30, 30), (31, 31, 31))
        assert opCache.CleanBlocks.value == []
        cache_data = opCache.Output(*roi).wait()
        assert (cache_data == data[roiToSlice(*roi)]).all()
        assert opDataProvider.accessCount == 2

        # Repeat this next part just for safety
        for _ in range(10):
            # Make sure the cache is empty
            opDataProvider.Input.setDirty((30, 30, 30), (31, 31, 31))
            opDataProvider.accessCount = 0

            # Create many requests for the same data.
            # Upstream data should only be accessed ONCE.
            pool = RequestPool()
            for _ in range(10):
                pool.add(opCache.Output(*roi))
            pool.wait()
            assert opDataProvider.accessCount == 1

        # Also, make sure requests for INNER rois of stored blocks are also serviced from memory
        opDataProvider.accessCount = 0
        inner_roi = ((35, 35, 35), (45, 45, 45))
        cache_data = opCache.Output(*inner_roi).wait()
        assert (cache_data == data[roiToSlice(*inner_roi)]).all()
        assert opDataProvider.accessCount == 0
        assert opCache.CleanBlocks.value == [roiToSlice(*roi)]
Author: ilastik, Project: lazyflow, Lines: 53, Source: testOpUnblockedArrayCache.py

Example 7: _waitForBlocks

    def _waitForBlocks(self, block_starts):
        """
        Make sure that all blocks in the given list of blocks are present in the cache before returning.
        (Blocks that are not yet present will be requested from our Input slot.)
        """
        reqPool = RequestPool() # (Do the work in parallel.)
        for block_start in block_starts:
            entire_block_roi = getBlockBounds( self.Output.meta.shape, self._blockshape, block_start )
            f = partial( self._ensureCached, entire_block_roi)
            reqPool.add( Request(f) )
        logger.debug( "Waiting for {} blocks...".format( len(block_starts) ) )
        reqPool.wait()
Author: CVML, Project: lazyflow, Lines: 12, Source: opCompressedCache.py

Example 8: export

        def export(self, filename, hypothesesGraph, pluginExportContext):
            """Export the tracking solution stored in the hypotheses graph as a sequence of H5 files,
            one per frame, containing the label image of that frame and which objects were part
            of a move or a division.
    
            :param filename: string of the FOLDER where to save the result
            :param hypothesesGraph: hytra.core.hypothesesgraph.HypothesesGraph filled with a solution
            :param pluginExportContext: instance of ilastik.plugins.PluginExportContext containing:
                labelImageSlot (required here) as well as objectFeaturesSlot, rawImageSlot, additionalPluginArgumentsSlot

            :returns: True on success, False otherwise
            """
            labelImageSlot = pluginExportContext.labelImageSlot
            traxelIdPerTimestepToUniqueIdMap, uuidToTraxelMap = hypothesesGraph.getMappingsBetweenUUIDsAndTraxels()
            timesteps = [t for t in traxelIdPerTimestepToUniqueIdMap.keys()]
    
            result = hypothesesGraph.getSolutionDictionary()
            mergers, detections, links, divisions = getMergersDetectionsLinksDivisions(result, uuidToTraxelMap)
    
            # group by timestep for event creation
            mergersPerTimestep = getMergersPerTimestep(mergers, timesteps)
            linksPerTimestep = getLinksPerTimestep(links, timesteps)
            detectionsPerTimestep = getDetectionsPerTimestep(detections, timesteps)
            divisionsPerTimestep = getDivisionsPerTimestep(divisions, linksPerTimestep, timesteps)
    
            # save to disk in parallel
            pool = RequestPool()
    
            timeIndex = labelImageSlot.meta.axistags.index('t')

            if not os.path.exists(filename):
                os.makedirs(filename)
    
            for timestep in traxelIdPerTimestepToUniqueIdMap.keys():
                # extract current frame label image
                roi = [slice(None) for i in range(len(labelImageSlot.meta.shape))]
                roi[timeIndex] = slice(int(timestep), int(timestep)+1)
                roi = tuple(roi)
                labelImage = labelImageSlot[roi].wait()
    
                fn = os.path.join(filename, "{0:05d}.h5".format(int(timestep)))
                pool.add(Request(partial(writeEvents,
                                         int(timestep),
                                         linksPerTimestep[timestep],
                                         divisionsPerTimestep[timestep],
                                         mergersPerTimestep[timestep],
                                         detectionsPerTimestep[timestep],
                                         fn,
                                         labelImage)))
            pool.wait()
    
            return True
Author: ilastik, Project: ilastik, Lines: 52, Source: tracking_h5_event_export.py

Example 9: _execute_Output

    def _execute_Output(self, slot, subindex, roi, result):
        """
        Overridden from OpUnblockedArrayCache
        """

        def copy_block(full_block_roi, clipped_block_roi):
            full_block_roi = numpy.asarray(full_block_roi)
            clipped_block_roi = numpy.asarray(clipped_block_roi)
            output_roi = numpy.asarray(clipped_block_roi) - roi.start

            block_roi = self._get_containing_block_roi(clipped_block_roi)

            # Skip cache and copy full block directly
            if self.BypassModeEnabled.value:
                full_block_data = self.Output.stype.allocateDestination(SubRegion(self.Output, *full_block_roi))

                self.Input(*full_block_roi).writeInto(full_block_data).block()

                roi_within_block = clipped_block_roi - full_block_roi[0]
                self.Output.stype.copy_data(
                    result[roiToSlice(*output_roi)], full_block_data[roiToSlice(*roi_within_block)]
                )
            # If the data exists already or we can just fetch it without needing extra scratch space,
            # just call the base class
            elif block_roi is not None or (full_block_roi == clipped_block_roi).all():
                self._execute_Output_impl(clipped_block_roi, result[roiToSlice(*output_roi)])
            elif self.Input.meta.dontcache:
                # Data isn't in the cache, but we don't need it in the cache anyway.
                self.Input(*clipped_block_roi).writeInto(result[roiToSlice(*output_roi)]).block()
            else:
                # Data doesn't exist yet in the cache.
                # Request the full block, but then discard the parts we don't need.

                # (We use allocateDestination() here to support MaskedArray types.)
                # TODO: We should probably just get rid of MaskedArray support altogether...
                full_block_data = self.Output.stype.allocateDestination(SubRegion(self.Output, *full_block_roi))
                self._execute_Output_impl(full_block_roi, full_block_data)

                roi_within_block = clipped_block_roi - full_block_roi[0]
                self.Output.stype.copy_data(
                    result[roiToSlice(*output_roi)], full_block_data[roiToSlice(*roi_within_block)]
                )

        clipped_block_rois = getIntersectingRois(self.Input.meta.shape, self._blockshape, (roi.start, roi.stop), True)
        full_block_rois = getIntersectingRois(self.Input.meta.shape, self._blockshape, (roi.start, roi.stop), False)

        pool = RequestPool()
        for full_block_roi, clipped_block_roi in zip(full_block_rois, clipped_block_rois):
            req = Request(partial(copy_block, full_block_roi, clipped_block_roi))
            pool.add(req)
        pool.wait()
Author: ilastik, Project: lazyflow, Lines: 51, Source: opSimpleBlockedArrayCache.py

Example 10: execute

    def execute(self, slot, subindex, roi, result):
        assert len(roi.start) == len(roi.stop) == len(self.Output.meta.shape)
        assert slot == self.Output

        t_ind = self.RawVolume.meta.axistags.index('t')
        assert t_ind < len(self.RawVolume.meta.shape)

        def compute_features_for_time_slice(res_t_ind, t):
            axes4d = [k for k in self.RawVolume.meta.getTaggedShape().keys() if k in 'xyzc']

            # Process entire spatial volume
            s = [slice(None)] * len(self.RawVolume.meta.shape)
            s[t_ind] = slice(t, t+1)
            s = tuple(s)

            # Request in parallel
            raw_req = self.RawVolume[s]
            raw_req.submit()

            label_req = self.LabelVolume[s]
            label_req.submit()

            if self.Atlas.ready():
                atlasVolume = self.Atlas[s].wait()
                atlasVolume = vigra.taggedView(atlasVolume, axistags=self.Atlas.meta.axistags)
                atlasVolume = atlasVolume.withAxes(*axes4d)
            else:
                atlasVolume = None

            # Get results
            rawVolume = raw_req.wait()
            labelVolume = label_req.wait()

            rawVolume = vigra.taggedView(rawVolume, axistags=self.RawVolume.meta.axistags)
            labelVolume = vigra.taggedView(labelVolume, axistags=self.LabelVolume.meta.axistags)

            # Convert to 4D (preserve axis order)
            rawVolume = rawVolume.withAxes(*axes4d)
            labelVolume = labelVolume.withAxes(*axes4d)
            acc = self._extract(rawVolume, labelVolume, atlasVolume)

            # Copy into the result
            result[res_t_ind] = acc

        # loop over requested time slices
        pool = RequestPool()
        for res_t_ind, t in enumerate(range(roi.start[t_ind], roi.stop[t_ind])):
            pool.add( Request( partial(compute_features_for_time_slice, res_t_ind, t) ) )
        
        pool.wait()
        return result
Author: ilastik, Project: ilastik, Lines: 51, Source: opObjectExtraction.py

Example 11: _label

    def _label(self, roi, result):
        result = vigra.taggedView(result, axistags=self.Output.meta.axistags)
        # get the background values
        bg = self.Background[...].wait()
        bg = vigra.taggedView(bg, axistags=self.Background.meta.axistags)
        bg = bg.withAxes(*'ct')
        assert np.all(self.Background.meta.shape[3:] ==
                      self.Input.meta.shape[3:]),\
            "Shape of background values incompatible to shape of Input"

        # do labeling in parallel over channels and time slices
        pool = RequestPool()

        start = np.asarray(roi.start, dtype=np.int)
        stop = np.asarray(roi.stop, dtype=np.int)
        for ti, t in enumerate(range(roi.start[4], roi.stop[4])):
            start[4], stop[4] = t, t+1
            for ci, c in enumerate(range(roi.start[3], roi.stop[3])):
                start[3], stop[3] = c, c+1
                newRoi = SubRegion(self.Output,
                                   start=tuple(start), stop=tuple(stop))
                resView = result[..., ci, ti].withAxes(*'xyz')
                req = Request(partial(self._label3d, newRoi,
                                      bg[c, t], resView))
                pool.add(req)

        logger.debug(
            "{}: Computing connected components for ROI {} ...".format(
                self.name, roi))
        pool.wait()
        pool.clean()
        logger.debug("{}: Connected components computed.".format(
            self.name))
Author: burcin, Project: lazyflow, Lines: 33, Source: opLabelVolume.py

Example 12: execute

    def execute(self, slot, subindex, roi, result):
        assert slot == self._ReorderedOutput
        pool = RequestPool()

        t_ind = 0
        for t in range(roi.start[0], roi.stop[0]):
            c_ind = 0
            for c in range(roi.start[-1], roi.stop[-1]):
                newroi = roi.copy()
                newroi.start[0] = t
                newroi.stop[0] = t+1
                newroi.start[-1] = c
                newroi.stop[-1] = c+1

                req = self._op.Output.get(newroi)
                resView = result[t_ind:t_ind+1, ..., c_ind:c_ind+1]
                req.writeInto(resView)

                pool.add(req)

                c_ind += 1

            t_ind += 1

        pool.wait()
        pool.clean()
Author: burcin, Project: ilastik, Lines: 26, Source: opThresholdTwoLevels.py

Example 13: _train_forests

    def _train_forests(forests, X, y):
        """
        Train all RFs (in parallel), and return the oobs.
        """
        oobs = [None] * len(forests)
        def store_oob_results(i, oob):
            oobs[i] = oob

        with Timer() as train_timer:
            pool = RequestPool()
            for i, forest in enumerate(forests):
                req = Request( partial(forest.learnRF, X, y) )
                # save the oob results
                req.notify_finished( partial( store_oob_results, i ) )
                pool.add( req )
            pool.wait()
        logger.info("Training took {} seconds".format( train_timer.seconds() ))
        return oobs
Author: stuarteberg, Project: lazyflow, Lines: 18, Source: parallelVigraRfLazyflowClassifier.py

Example 14: execute

    def execute(self, slot, subindex, roi, result):
        assert slot == self.ConcatenatedOutput
        self.progressSignal(0.0)

        num_dirty_slots = len(self._dirty_slots)
        subtask_progress = {}
        progress_lock = RequestLock()

        def forward_progress_updates(feature_slot, progress):
            with progress_lock:
                subtask_progress[feature_slot] = progress
                total_progress = 0.95 * sum(subtask_progress.values()) / num_dirty_slots
            self.progressSignal(total_progress)

        logger.debug(
            "Updating features for {} dirty images out of {}"
            "".format(len(self._dirty_slots), len(self.FeatureMatrices))
        )

        pool = RequestPool()
        subresults = []
        for feature_slot, progress_slot in zip(self.FeatureMatrices, self.ProgressSignals):
            subresults.append([None])
            req = feature_slot[:]
            req.writeInto(subresults[-1])

            # Only use progress for slots that were dirty.
            # The others are going to be really fast.
            if feature_slot in self._dirty_slots:
                sub_progress_signal = progress_slot.value
                sub_progress_signal.subscribe(partial(forward_progress_updates, feature_slot))
            pool.add(req)
        pool.wait()

        # Reset dirty slots
        self._dirty_slots = set()

        # Since the subresults are returned in 'value' slots,
        #  we have to unpack them from their single-element lists.
        subresult_list = list(itertools.chain(*subresults))

        total_matrix = numpy.concatenate(subresult_list, axis=0)
        self.progressSignal(100.0)
        result[0] = total_matrix
Author: ilastik, Project: lazyflow, Lines: 44, Source: opConcatenateFeatureMatrices.py

Example 15: execute

    def execute(self, slot, subindex, roi, result):
        def compute_for_channel(output_channel, input_channel):
            input_roi = numpy.array((roi.start, roi.stop))
            input_roi[:, -1] = (input_channel, input_channel + 1)
            input_req = self.Input(*input_roi)

            # If possible, use the result array itself as a scratch area
            if self.Input.meta.dtype == result.dtype:
                input_req.writeInto(result[..., output_channel : output_channel + 1])

            input_data = input_req.wait()
            input_data = input_data.astype(numpy.float32, order="C", copy=False)
            input_data = input_data[..., 0]  # drop channel axis
            result[..., output_channel] = computeIntegralImage(input_data)

        pool = RequestPool()
        for output_channel, input_channel in enumerate(range(roi.start[-1], roi.stop[-1])):
            pool.add(Request(partial(compute_for_channel, output_channel, input_channel)))
        pool.wait()
Author: CVML, Project: ilastik, Lines: 19, Source: opIIBoostFeatureSelection.py


Note: The lazyflow.request.RequestPool class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Before distributing or using them, please refer to the corresponding project's license; do not reproduce without permission.