This article collects typical usage examples of the Python method lazyflow.request.RequestPool.add. If you are wondering what RequestPool.add does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of its containing class, lazyflow.request.RequestPool.
The following presents 15 code examples of the RequestPool.add method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
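Before the individual examples, here is a minimal sketch of the pattern they all share: wrap each unit of work in a Request (usually binding its arguments with functools.partial), add the requests to a RequestPool, and wait on the pool. The process_block function and the block_ids argument are illustrative placeholders, not part of the lazyflow API.
from functools import partial
from lazyflow.request import Request, RequestPool

def process_block(block_id):
    pass  # placeholder: do the work for a single block here

def process_all_blocks(block_ids):
    pool = RequestPool()
    for block_id in block_ids:
        # Each Request wraps a zero-argument callable; partial binds the arguments up front.
        pool.add(Request(partial(process_block, block_id)))
    pool.wait()   # block until every request in the pool has finished
    pool.clean()  # optionally drop references to the finished requests
Note that requests created by lazyflow slots (for example slot(*roi) or slot[key].writeInto(dest)) can also be passed to pool.add directly, as several of the examples below do.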
Example 1: create_and_train
# Required import: from lazyflow.request import RequestPool [as alias]
# Or alternatively: from lazyflow.request.RequestPool import add [as alias]
def create_and_train(self, X, y):
    logger.debug( "Training parallel vigra RF" )

    # Save for future reference
    known_labels = numpy.unique(y)

    X = numpy.asarray(X, numpy.float32)
    y = numpy.asarray(y, numpy.uint32)
    if y.ndim == 1:
        y = y[:, numpy.newaxis]

    assert X.ndim == 2
    assert len(X) == len(y)

    # Create N forests
    forests = []
    for _ in range(self._num_forests):
        forest = vigra.learning.RandomForest(self._trees_per_forest, **self._kwargs)
        forests.append( forest )

    # Train them all in parallel
    pool = RequestPool()
    for forest in forests:
        pool.add( Request( partial(forest.learnRF, X, y) ) )
    pool.wait()

    return ParallelVigraRfLazyflowClassifier( forests, known_labels )
Example 2: _label
# Required import: from lazyflow.request import RequestPool [as alias]
# Or alternatively: from lazyflow.request.RequestPool import add [as alias]
def _label(self, roi, result):
    result = vigra.taggedView(result, axistags=self.Output.meta.axistags)

    # get the background values
    bg = self.Background[...].wait()
    bg = vigra.taggedView(bg, axistags=self.Background.meta.axistags)
    bg = bg.withAxes(*'ct')

    assert np.all(self.Background.meta.shape[3:] ==
                  self.Input.meta.shape[3:]),\
        "Shape of background values incompatible to shape of Input"

    # do labeling in parallel over channels and time slices
    pool = RequestPool()

    start = np.asarray(roi.start, dtype=np.int)
    stop = np.asarray(roi.stop, dtype=np.int)
    for ti, t in enumerate(range(roi.start[4], roi.stop[4])):
        start[4], stop[4] = t, t+1
        for ci, c in enumerate(range(roi.start[3], roi.stop[3])):
            start[3], stop[3] = c, c+1
            newRoi = SubRegion(self.Output,
                               start=tuple(start), stop=tuple(stop))
            resView = result[..., ci, ti].withAxes(*'xyz')
            req = Request(partial(self._label3d, newRoi,
                                  bg[c, t], resView))
            pool.add(req)

    logger.debug(
        "{}: Computing connected components for ROI {} ...".format(
            self.name, roi))
    pool.wait()
    pool.clean()

    logger.debug("{}: Connected components computed.".format(
        self.name))
Example 3: execute
# Required import: from lazyflow.request import RequestPool [as alias]
# Or alternatively: from lazyflow.request.RequestPool import add [as alias]
def execute(self, slot, subindex, roi, result):
    clipped_block_rois = getIntersectingRois(
        self.Input.meta.shape, self.BlockShape.value, (roi.start, roi.stop), True
    )
    if self._always_request_full_blocks:
        full_block_rois = getIntersectingRois(
            self.Input.meta.shape, self.BlockShape.value, (roi.start, roi.stop), False
        )
    else:
        full_block_rois = clipped_block_rois

    pool = RequestPool()
    for full_block_roi, clipped_block_roi in zip(full_block_rois, clipped_block_rois):
        full_block_roi = numpy.asarray(full_block_roi)
        clipped_block_roi = numpy.asarray(clipped_block_roi)

        req = self.Input(*full_block_roi)
        output_roi = numpy.asarray(clipped_block_roi) - roi.start
        if (full_block_roi == clipped_block_roi).all():
            req.writeInto(result[roiToSlice(*output_roi)])
        else:
            roi_within_block = clipped_block_roi - full_block_roi[0]

            def copy_request_result(output_roi, roi_within_block, request_result):
                self.Output.stype.copy_data(
                    result[roiToSlice(*output_roi)], request_result[roiToSlice(*roi_within_block)]
                )

            req.notify_finished(partial(copy_request_result, output_roi, roi_within_block))

        pool.add(req)
        del req
    pool.wait()
Example 4: _executeOutput
# Required import: from lazyflow.request import RequestPool [as alias]
# Or alternatively: from lazyflow.request.RequestPool import add [as alias]
def _executeOutput(self, roi, destination):
    assert len(roi.stop) == len(self.Input.meta.shape), "roi: {} has the wrong number of dimensions for Input shape: {}".format( roi, self.Input.meta.shape )
    assert numpy.less_equal(roi.stop, self.Input.meta.shape).all(), "roi: {} is out-of-bounds for Input shape: {}".format( roi, self.Input.meta.shape )

    block_starts = getIntersectingBlocks( self._blockshape, (roi.start, roi.stop) )
    block_starts = list(map( tuple, block_starts ))  # materialize: the list is iterated twice below

    # Ensure all block cache files are up-to-date
    reqPool = RequestPool() # (Do the work in parallel.)
    for block_start in block_starts:
        entire_block_roi = getBlockBounds( self.Input.meta.shape, self._blockshape, block_start )
        f = partial( self._ensureCached, entire_block_roi)
        reqPool.add( Request(f) )
    logger.debug( "Waiting for {} blocks...".format( len(block_starts) ) )
    reqPool.wait()

    # Copy data from each block
    # (Parallelism not needed here: h5py will serialize these requests anyway)
    logger.debug( "Copying data from {} blocks...".format( len(block_starts) ) )
    for block_start in block_starts:
        entire_block_roi = getBlockBounds( self.Input.meta.shape, self._blockshape, block_start )

        # This block's portion of the roi
        intersecting_roi = getIntersection( (roi.start, roi.stop), entire_block_roi )

        # Compute slicing within destination array and slicing within this block
        destination_relative_intersection = numpy.subtract(intersecting_roi, roi.start)
        block_relative_intersection = numpy.subtract(intersecting_roi, block_start)

        # Copy from block to destination
        dataset = self._getBlockDataset( entire_block_roi )
        destination[ roiToSlice(*destination_relative_intersection) ] = dataset[ roiToSlice( *block_relative_intersection ) ]

    return destination
Example 5: execute
# Required import: from lazyflow.request import RequestPool [as alias]
# Or alternatively: from lazyflow.request.RequestPool import add [as alias]
def execute(self, slot, subindex, roi, result):
    assert slot == self._ReorderedOutput
    pool = RequestPool()

    t_ind = 0
    for t in range(roi.start[0], roi.stop[0]):
        c_ind = 0
        for c in range(roi.start[-1], roi.stop[-1]):
            newroi = roi.copy()
            newroi.start[0] = t
            newroi.stop[0] = t+1
            newroi.start[-1] = c
            newroi.stop[-1] = c+1

            req = self._op.Output.get(newroi)
            resView = result[t_ind:t_ind+1, ..., c_ind:c_ind+1]
            req.writeInto(resView)

            pool.add(req)

            c_ind += 1

        t_ind += 1

    pool.wait()
    pool.clean()
Example 6: execute_tasks
# Required import: from lazyflow.request import RequestPool [as alias]
# Or alternatively: from lazyflow.request.RequestPool import add [as alias]
def execute_tasks( tasks ):
    """
    Executes the given list of tasks (functions) in the lazyflow threadpool.
    """
    pool = RequestPool()
    for task in tasks:
        pool.add( Request(task) )
    pool.wait()
Example 7: execute
# Required import: from lazyflow.request import RequestPool [as alias]
# Or alternatively: from lazyflow.request.RequestPool import add [as alias]
def execute(self, slot, subindex, rroi, result):
    key = roiToSlice(rroi.start, rroi.stop)

    cnt = 0
    written = 0
    start, stop = roi.sliceToRoi(key, self.outputs["Output"].meta.shape)
    assert (stop <= self.outputs["Output"].meta.shape).all()
    #axisindex = self.inputs["AxisIndex"].value
    flag = self.inputs["AxisFlag"].value
    axisindex = self.outputs["Output"].meta.axistags.index(flag)

    #ugly-ugly-ugly
    oldkey = list(key)
    oldkey.pop(axisindex)

    #print "STACKER: ", flag, axisindex
    #print "requesting an outslot from stacker:", key, result.shape
    #print "input slots total: ", len(self.inputs['Images'])

    requests = []

    pool = RequestPool()

    for i, inSlot in enumerate(self.inputs['Images']):
        req = None
        inTagKeys = [ax.key for ax in inSlot.meta.axistags]
        if flag in inTagKeys:
            slices = inSlot.meta.shape[axisindex]
            if cnt + slices >= start[axisindex] and start[axisindex]-cnt<slices and start[axisindex]+written<stop[axisindex]:
                begin = 0
                if cnt < start[axisindex]:
                    begin = start[axisindex] - cnt
                end = slices
                if cnt + end > stop[axisindex]:
                    end -= cnt + end - stop[axisindex]
                key_ = copy.copy(oldkey)
                key_.insert(axisindex, slice(begin, end, None))
                reskey = [slice(None, None, None) for x in range(len(result.shape))]
                reskey[axisindex] = slice(written, written+end-begin, None)

                req = inSlot[tuple(key_)].writeInto(result[tuple(reskey)])
                written += end - begin
            cnt += slices
        else:
            if cnt >= start[axisindex] and start[axisindex] + written < stop[axisindex]:
                #print "key: ", key, "reskey: ", reskey, "oldkey: ", oldkey
                #print "result: ", result.shape, "inslot:", inSlot.meta.shape
                reskey = [slice(None, None, None) for s in oldkey]
                reskey.insert(axisindex, written)
                destArea = result[tuple(reskey)]
                req = inSlot[tuple(oldkey)].writeInto(destArea)
                written += 1
            cnt += 1

        if req is not None:
            pool.add(req)

    pool.wait()
    pool.clean()
Example 8: _waitForBlocks
# Required import: from lazyflow.request import RequestPool [as alias]
# Or alternatively: from lazyflow.request.RequestPool import add [as alias]
def _waitForBlocks(self, block_starts):
    """
    Make sure that all blocks in the given list of blocks are present in the cache before returning.
    (Blocks that are not yet present will be requested from our Input slot.)
    """
    reqPool = RequestPool() # (Do the work in parallel.)
    for block_start in block_starts:
        entire_block_roi = getBlockBounds( self.Output.meta.shape, self._blockshape, block_start )
        f = partial( self._ensureCached, entire_block_roi)
        reqPool.add( Request(f) )
    logger.debug( "Waiting for {} blocks...".format( len(block_starts) ) )
    reqPool.wait()
Example 9: testBasic
# Required import: from lazyflow.request import RequestPool [as alias]
# Or alternatively: from lazyflow.request.RequestPool import add [as alias]
def testBasic(self):
    graph = Graph()
    opDataProvider = OpArrayPiperWithAccessCount(graph=graph)
    opCache = OpUnblockedArrayCache(graph=graph)

    data = np.random.random((100, 100, 100)).astype(np.float32)
    opDataProvider.Input.setValue(vigra.taggedView(data, "zyx"))
    opCache.Input.connect(opDataProvider.Output)

    assert opCache.CleanBlocks.value == []

    roi = ((30, 30, 30), (50, 50, 50))
    cache_data = opCache.Output(*roi).wait()
    assert (cache_data == data[roiToSlice(*roi)]).all()
    assert opDataProvider.accessCount == 1
    assert opCache.CleanBlocks.value == [roiToSlice(*roi)]

    # Request the same data a second time.
    # Access count should not change.
    cache_data = opCache.Output(*roi).wait()
    assert (cache_data == data[roiToSlice(*roi)]).all()
    assert opDataProvider.accessCount == 1
    assert opCache.CleanBlocks.value == [roiToSlice(*roi)]

    # Now invalidate a part of the data
    # The cache will discard it, so the access count should increase.
    opDataProvider.Input.setDirty((30, 30, 30), (31, 31, 31))
    assert opCache.CleanBlocks.value == []
    cache_data = opCache.Output(*roi).wait()
    assert (cache_data == data[roiToSlice(*roi)]).all()
    assert opDataProvider.accessCount == 2

    # Repeat this next part just for safety
    for _ in range(10):
        # Make sure the cache is empty
        opDataProvider.Input.setDirty((30, 30, 30), (31, 31, 31))
        opDataProvider.accessCount = 0

        # Create many requests for the same data.
        # Upstream data should only be accessed ONCE.
        pool = RequestPool()
        for _ in range(10):
            pool.add(opCache.Output(*roi))
        pool.wait()
        assert opDataProvider.accessCount == 1

    # Also, make sure requests for INNER rois of stored blocks are also serviced from memory
    opDataProvider.accessCount = 0
    inner_roi = ((35, 35, 35), (45, 45, 45))
    cache_data = opCache.Output(*inner_roi).wait()
    assert (cache_data == data[roiToSlice(*inner_roi)]).all()
    assert opDataProvider.accessCount == 0
    assert opCache.CleanBlocks.value == [roiToSlice(*roi)]
Example 10: export
# Required import: from lazyflow.request import RequestPool [as alias]
# Or alternatively: from lazyflow.request.RequestPool import add [as alias]
def export(self, filename, hypothesesGraph, pluginExportContext):
    """Export the tracking solution stored in the hypotheses graph as a sequence of H5 files,
    one per frame, containing the label image of that frame and which objects were part
    of a move or a division.

    :param filename: string of the FOLDER where to save the result
    :param hypothesesGraph: hytra.core.hypothesesgraph.HypothesesGraph filled with a solution
    :param pluginExportContext: instance of ilastik.plugins.PluginExportContext containing:
        labelImageSlot (required here) as well as objectFeaturesSlot, rawImageSlot, additionalPluginArgumentsSlot

    :returns: True on success, False otherwise
    """
    labelImageSlot = pluginExportContext.labelImageSlot

    traxelIdPerTimestepToUniqueIdMap, uuidToTraxelMap = hypothesesGraph.getMappingsBetweenUUIDsAndTraxels()
    timesteps = [t for t in traxelIdPerTimestepToUniqueIdMap.keys()]
    result = hypothesesGraph.getSolutionDictionary()
    mergers, detections, links, divisions = getMergersDetectionsLinksDivisions(result, uuidToTraxelMap)

    # group by timestep for event creation
    mergersPerTimestep = getMergersPerTimestep(mergers, timesteps)
    linksPerTimestep = getLinksPerTimestep(links, timesteps)
    detectionsPerTimestep = getDetectionsPerTimestep(detections, timesteps)
    divisionsPerTimestep = getDivisionsPerTimestep(divisions, linksPerTimestep, timesteps)

    # save to disk in parallel
    pool = RequestPool()

    timeIndex = labelImageSlot.meta.axistags.index('t')

    if not os.path.exists(filename):
        os.makedirs(filename)

    for timestep in traxelIdPerTimestepToUniqueIdMap.keys():
        # extract current frame label image
        roi = [slice(None) for i in range(len(labelImageSlot.meta.shape))]
        roi[timeIndex] = slice(int(timestep), int(timestep)+1)
        roi = tuple(roi)
        labelImage = labelImageSlot[roi].wait()

        fn = os.path.join(filename, "{0:05d}.h5".format(int(timestep)))
        pool.add(Request(partial(writeEvents,
                                 int(timestep),
                                 linksPerTimestep[timestep],
                                 divisionsPerTimestep[timestep],
                                 mergersPerTimestep[timestep],
                                 detectionsPerTimestep[timestep],
                                 fn,
                                 labelImage)))

    pool.wait()

    return True
Example 11: _execute_Output
# Required import: from lazyflow.request import RequestPool [as alias]
# Or alternatively: from lazyflow.request.RequestPool import add [as alias]
def _execute_Output(self, slot, subindex, roi, result):
    """
    Overridden from OpUnblockedArrayCache
    """
    def copy_block(full_block_roi, clipped_block_roi):
        full_block_roi = numpy.asarray(full_block_roi)
        clipped_block_roi = numpy.asarray(clipped_block_roi)

        output_roi = numpy.asarray(clipped_block_roi) - roi.start
        block_roi = self._get_containing_block_roi(clipped_block_roi)

        # Skip cache and copy full block directly
        if self.BypassModeEnabled.value:
            full_block_data = self.Output.stype.allocateDestination(SubRegion(self.Output, *full_block_roi))

            self.Input(*full_block_roi).writeInto(full_block_data).block()

            roi_within_block = clipped_block_roi - full_block_roi[0]
            self.Output.stype.copy_data(
                result[roiToSlice(*output_roi)], full_block_data[roiToSlice(*roi_within_block)]
            )
        # If data exists already or we can just fetch it without needing extra scratch space,
        # just call the base class
        elif block_roi is not None or (full_block_roi == clipped_block_roi).all():
            self._execute_Output_impl(clipped_block_roi, result[roiToSlice(*output_roi)])
        elif self.Input.meta.dontcache:
            # Data isn't in the cache, but we don't need it in the cache anyway.
            self.Input(*clipped_block_roi).writeInto(result[roiToSlice(*output_roi)]).block()
        else:
            # Data doesn't exist yet in the cache.
            # Request the full block, but then discard the parts we don't need.
            # (We use allocateDestination() here to support MaskedArray types.)
            # TODO: We should probably just get rid of MaskedArray support altogether...
            full_block_data = self.Output.stype.allocateDestination(SubRegion(self.Output, *full_block_roi))
            self._execute_Output_impl(full_block_roi, full_block_data)

            roi_within_block = clipped_block_roi - full_block_roi[0]
            self.Output.stype.copy_data(
                result[roiToSlice(*output_roi)], full_block_data[roiToSlice(*roi_within_block)]
            )

    clipped_block_rois = getIntersectingRois(self.Input.meta.shape, self._blockshape, (roi.start, roi.stop), True)
    full_block_rois = getIntersectingRois(self.Input.meta.shape, self._blockshape, (roi.start, roi.stop), False)

    pool = RequestPool()
    for full_block_roi, clipped_block_roi in zip(full_block_rois, clipped_block_rois):
        req = Request(partial(copy_block, full_block_roi, clipped_block_roi))
        pool.add(req)
    pool.wait()
Example 12: execute
# Required import: from lazyflow.request import RequestPool [as alias]
# Or alternatively: from lazyflow.request.RequestPool import add [as alias]
def execute(self, slot, subindex, roi, result):
    assert len(roi.start) == len(roi.stop) == len(self.Output.meta.shape)
    assert slot == self.Output

    t_ind = self.RawVolume.meta.axistags.index('t')
    assert t_ind < len(self.RawVolume.meta.shape)

    def compute_features_for_time_slice(res_t_ind, t):
        axes4d = [k for k in self.RawVolume.meta.getTaggedShape().keys() if k in 'xyzc']

        # Process entire spatial volume
        s = [slice(None)] * len(self.RawVolume.meta.shape)
        s[t_ind] = slice(t, t+1)
        s = tuple(s)

        # Request in parallel
        raw_req = self.RawVolume[s]
        raw_req.submit()

        label_req = self.LabelVolume[s]
        label_req.submit()

        if self.Atlas.ready():
            atlasVolume = self.Atlas[s].wait()
            atlasVolume = vigra.taggedView(atlasVolume, axistags=self.Atlas.meta.axistags)
            atlasVolume = atlasVolume.withAxes(*axes4d)
        else:
            atlasVolume = None

        # Get results
        rawVolume = raw_req.wait()
        labelVolume = label_req.wait()

        rawVolume = vigra.taggedView(rawVolume, axistags=self.RawVolume.meta.axistags)
        labelVolume = vigra.taggedView(labelVolume, axistags=self.LabelVolume.meta.axistags)

        # Convert to 4D (preserve axis order)
        rawVolume = rawVolume.withAxes(*axes4d)
        labelVolume = labelVolume.withAxes(*axes4d)
        acc = self._extract(rawVolume, labelVolume, atlasVolume)

        # Copy into the result
        result[res_t_ind] = acc

    # loop over requested time slices
    pool = RequestPool()
    for res_t_ind, t in enumerate(range(roi.start[t_ind], roi.stop[t_ind])):
        pool.add( Request( partial(compute_features_for_time_slice, res_t_ind, t) ) )

    pool.wait()

    return result
Example 13: execute
# Required import: from lazyflow.request import RequestPool [as alias]
# Or alternatively: from lazyflow.request.RequestPool import add [as alias]
def execute(self, slot, subindex, roi, result):
    featList = []
    labelsList = []

    for i in range(len(self.Labels)):
        feats = self.Features[i]([]).wait()

        # TODO: we should be able to use self.Labels[i].value,
        # but the current implementation of Slot.value() does not
        # do the right thing.
        labels = self.Labels[i]([]).wait()

        featstmp, labelstmp = make_feature_array(feats, labels)
        featList.append(featstmp)
        labelsList.append(labelstmp)

    featMatrix = _concatenate(featList, axis=0)
    labelsMatrix = _concatenate(labelsList, axis=0)
    print("training on matrix:", featMatrix.shape, featMatrix.dtype)

    if len(featMatrix) == 0 or len(labelsMatrix) == 0:
        result[:] = None
        return

    oob = [0] * self.ForestCount.value
    try:
        # Ensure there are no NaNs in the feature matrix
        # TODO: There should probably be a better way to fix this...
        featMatrix = numpy.asarray(featMatrix, dtype=numpy.float32)
        nanFeatMatrix = numpy.isnan(featMatrix)
        if nanFeatMatrix.any():
            warnings.warn("Feature matrix has NaN values! Replacing with 0.0...")
            featMatrix[numpy.where(nanFeatMatrix)] = 0.0

        # train and store forests in parallel
        pool = RequestPool()
        for i in range(self.ForestCount.value):
            def train_and_store(number):
                result[number] = vigra.learning.RandomForest(self._tree_count)
                oob[number] = result[number].learnRF(featMatrix, numpy.asarray(labelsMatrix, dtype=numpy.uint32))
                print("intermediate oob:", oob[number])
            req = Request( partial(train_and_store, i) )
            pool.add( req )

        pool.wait()
        pool.clean()
    except:
        print("couldn't learn classifier")
        raise

    oob_total = numpy.mean(oob)
    print("training finished, out of bag error:", oob_total)

    return result
Example 14: predict
# Required import: from lazyflow.request import RequestPool [as alias]
# Or alternatively: from lazyflow.request.RequestPool import add [as alias]
def predict(cls, X, method='classic'):
    """
    predict if the histograms in X correspond to missing regions
    do this for subsets of X in parallel
    """
    if cls._manager is None:
        cls._manager = SVMManager()

    # svm input has to be (nSamples, nFeatures) -> for us: (nSamples = len(X), nFeatures = # of histogram bins)
    X_reshaped = np.zeros((len(X), len(X[0])))
    for i in range(len(X)):
        X_reshaped[i, :] = X[i]

    n_bins = len(X[0])

    if method == 'classic' or not have_sklearn:
        logger.warning("no real svm used! -> PseudoSVC")
        svm = PseudoSVC()
    else:
        # load samples for histograms of labeled regions
        try:
            svm = cls._manager.get(n_bins)
        except SVMManager.NotTrainedError:
            # fail gracefully if not trained => responsibility of user!
            svm = PseudoSVC()

    y = np.zeros((len(X),)) * np.nan

    pool = RequestPool()

    # chunk up all samples from X into chunks that will be predicted in parallel
    chunk_size = 1000  # FIXME magic number??
    n_chunks = len(X) // chunk_size + (1 if len(X) % chunk_size > 0 else 0)

    s = [slice(k * chunk_size, min((k + 1) * chunk_size, len(X)))
         for k in range(n_chunks)]

    def partFun(i):
        y[s[i]] = svm.predict(X_reshaped[s[i], :])

    for i in range(n_chunks):
        req = Request(partial(partFun, i))
        pool.add(req)

    pool.wait()
    pool.clean()

    return np.asarray(y)
Example 15: _train_forests
# Required import: from lazyflow.request import RequestPool [as alias]
# Or alternatively: from lazyflow.request.RequestPool import add [as alias]
def _train_forests(forests, X, y):
    """
    Train all RFs (in parallel), and return the oobs.
    """
    oobs = [None] * len(forests)
    def store_oob_results(i, oob):
        oobs[i] = oob

    with Timer() as train_timer:
        pool = RequestPool()
        for i, forest in enumerate(forests):
            req = Request( partial(forest.learnRF, X, y) )
            # save the oob results
            req.notify_finished( partial( store_oob_results, i ) )
            pool.add( req )
        pool.wait()

    logger.info("Training took {} seconds".format( train_timer.seconds() ) )
    return oobs