This article collects typical usage examples of the RequestPool.request method from Python's lazyflow.request module. If you are wondering what exactly RequestPool.request does, how to call it, or where to find working examples, the curated code samples below should help. You can also browse further usage examples of its containing class, lazyflow.request.RequestPool.
Seven code examples of the RequestPool.request method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
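All seven examples follow the same pattern: create a RequestPool, submit argument-free callables with pool.request() (usually built with functools.partial), then call pool.wait() to block until every request has finished and pool.clean() to drop references to the finished requests. The sketch below distills that shared pattern. It assumes the lazyflow version used in these examples, where RequestPool.request() accepts a plain callable; the process() helper and the results list are illustrative placeholders, not part of the lazyflow API.

from functools import partial

from lazyflow.request import RequestPool


def process(results, i):
    # Placeholder work item; in the examples below this slot is filled by
    # random-forest training or per-block prediction.
    results[i] = i * i


results = [None] * 4
pool = RequestPool()
for i in range(len(results)):
    # Each call schedules one unit of work on lazyflow's worker threads.
    pool.request(partial(process, results, i))
pool.wait()   # block until every request in the pool has finished
pool.clean()  # release the pool's references to the finished requests
print(results)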
Example 1: execute
# Required import: from lazyflow.request import RequestPool [as alias]
# Or: from lazyflow.request.RequestPool import request [as alias]
def execute(self, slot, subindex, roi, result):
    featMatrix = []
    labelsMatrix = []
    for i, labels in enumerate(self.inputs["Labels"]):
        if labels.meta.shape is not None:
            labels = labels[:].wait()
            indexes = numpy.nonzero(labels[..., 0].view(numpy.ndarray))
            # Maybe later request only part of the region?
            image = self.inputs["Images"][i][:].wait()
            features = image[indexes]
            labels = labels[indexes]

            featMatrix.append(features)
            labelsMatrix.append(labels)

    featMatrix = numpy.concatenate(featMatrix, axis=0)
    labelsMatrix = numpy.concatenate(labelsMatrix, axis=0)

    # train and store self._forest_count forests in parallel
    pool = RequestPool()
    for i in range(self._forest_count):
        def train_and_store(number):
            result[number] = vigra.learning.RandomForest(self._tree_count)
            result[number].learnRF(featMatrix.astype(numpy.float32), labelsMatrix.astype(numpy.uint32))
        req = pool.request(partial(train_and_store, i))
    pool.wait()

    return result
Example 2: execute
# Required import: from lazyflow.request import RequestPool [as alias]
# Or: from lazyflow.request.RequestPool import request [as alias]
def execute(self, slot, subindex, roi, result):
    with self._lock:
        if self.cache is None:
            fullBlockShape = numpy.array([self.blockShape.value for i in self.Input.meta.shape])
            fun = self.inputs["Function"].value
            # data = self.inputs["Input"][:].wait()
            # split up requests into blocks
            shape = self.Input.meta.shape
            numBlocks = numpy.ceil(shape / (1.0 * fullBlockShape)).astype("int")
            blockCache = numpy.ndarray(shape=numpy.prod(numBlocks), dtype=self.Output.meta.dtype)
            pool = RequestPool()

            # blocks holds the different roi keys for each of the blocks
            blocks = itertools.product(*[range(i) for i in numBlocks])
            blockKeys = []
            for b in blocks:
                start = b * fullBlockShape
                stop = b * fullBlockShape + fullBlockShape
                stop = numpy.min(numpy.vstack((stop, shape)), axis=0)
                blockKey = roiToSlice(start, stop)
                blockKeys.append(blockKey)

            def predict_block(i):
                data = self.Input[blockKeys[i]].wait()
                blockCache[i] = fun(data)

            for i, f in enumerate(blockCache):
                req = pool.request(partial(predict_block, i))

            pool.wait()
            pool.clean()

            self.cache = [fun(blockCache)]
        return self.cache
Example 3: execute
# Required import: from lazyflow.request import RequestPool [as alias]
# Or: from lazyflow.request.RequestPool import request [as alias]
def execute(self, slot, subindex, roi, result):
    t1 = time.time()
    key = roi.toSlice()
    nlabels = self.inputs["LabelsCount"].value

    traceLogger.debug("OpPredictRandomForest: Requesting classifier. roi={}".format(roi))
    forests = self.inputs["Classifier"][:].wait()

    if forests is None or any(x is None for x in forests):
        # Training operator may return 'None' if there was no data to train with
        return numpy.zeros(numpy.subtract(roi.stop, roi.start), dtype=numpy.float32)[...]

    traceLogger.debug("OpPredictRandomForest: Got classifier")
    # assert RF.labelCount() == nlabels, "ERROR: OpPredictRandomForest, labelCount differs from true labelCount! %r vs. %r" % (RF.labelCount(), nlabels)
    newKey = key[:-1]
    newKey += (slice(0, self.inputs["Image"].meta.shape[-1], None),)

    res = self.inputs["Image"][newKey].wait()

    shape = res.shape
    prod = numpy.prod(shape[:-1])
    res.shape = (prod, shape[-1])
    features = res

    predictions = [0] * len(forests)

    def predict_forest(number):
        predictions[number] = forests[number].predictProbabilities(numpy.asarray(features, dtype=numpy.float32))

    t2 = time.time()

    # predict the data with all the forests in parallel
    pool = RequestPool()
    for i, f in enumerate(forests):
        req = pool.request(partial(predict_forest, i))
    pool.wait()
    pool.clean()

    prediction = numpy.dstack(predictions)
    prediction = numpy.average(prediction, axis=2)
    prediction.shape = shape[:-1] + (forests[0].labelCount(),)
    # prediction = prediction.reshape(*(shape[:-1] + (forests[0].labelCount(),)))

    # If our LabelsCount is higher than the number of labels in the training set,
    # then our results aren't really valid.  FIXME !!!
    # Duplicate the last label's predictions
    for c in range(result.shape[-1]):
        result[..., c] = prediction[..., min(c + key[-1].start, prediction.shape[-1] - 1)]

    t3 = time.time()
    self.logger.debug("predict roi=%r took %f seconds, actual RF time was %fs, feature time was %fs" % (key, t3 - t1, t3 - t2, t2 - t1))

    return result
Example 4: execute
# Required import: from lazyflow.request import RequestPool [as alias]
# Or: from lazyflow.request.RequestPool import request [as alias]
def execute(self, slot, subindex, roi, result):
    with self._lock:
        if self.cache is None:
            shape = self.Input.meta.shape
            # self.blockShape has None in the last dimension to indicate that it should not be
            # handled block-wise. None is replaced with the image shape in the respective axis.
            fullBlockShape = []
            for u, v in zip(self.blockShape.value, shape):
                if u is not None:
                    fullBlockShape.append(u)
                else:
                    fullBlockShape.append(v)
            fullBlockShape = numpy.array(fullBlockShape, dtype=numpy.float64)

            # data = self.inputs["Input"][:].wait()
            # split up requests into blocks
            numBlocks = numpy.ceil(shape / fullBlockShape).astype("int")
            blockCache = numpy.ndarray(shape=numpy.prod(numBlocks), dtype=self.Output.meta.dtype)
            pool = RequestPool()

            # blocks holds the different roi keys for each of the blocks
            blocks = itertools.product(*[list(range(i)) for i in numBlocks])
            blockKeys = []
            for b in blocks:
                start = b * fullBlockShape
                stop = b * fullBlockShape + fullBlockShape
                stop = numpy.min(numpy.vstack((stop, shape)), axis=0)
                blockKey = roiToSlice(start, stop)
                blockKeys.append(blockKey)

            fun = self.inputs["Function"].value

            def predict_block(i):
                data = self.Input[blockKeys[i]].wait()
                blockCache[i] = fun(data)

            for i, f in enumerate(blockCache):
                req = pool.request(partial(predict_block, i))

            pool.wait()
            pool.clean()

            self.cache = [fun(blockCache)]
        return self.cache
Example 5: execute
# Required import: from lazyflow.request import RequestPool [as alias]
# Or: from lazyflow.request.RequestPool import request [as alias]
def execute(self, slot, subindex, slot_roi, target):
    assert slot == self.Features or slot == self.Output
    if slot == self.Features:
        feature_slice = roiToSlice(slot_roi.start, slot_roi.stop)
        index = subindex[0]
        feature_slice = list(feature_slice)

        # Translate channel slice of this feature to the channel slice of the output slot.
        output_channel_offset = self.featureOutputChannels[index][0]
        feature_slice[1] = slice(
            output_channel_offset + feature_slice[1].start, output_channel_offset + feature_slice[1].stop
        )
        slot_roi = SubRegion(self.Output, pslice=feature_slice)

        # Get output slot region for this channel
        return self.execute(self.Output, (), slot_roi, target)
    elif slot == self.Output:
        # Correlation of variable 'families' representing reference frames:
        #  ______________________________
        # | input/output frame           |  input/output shape given by slots
        # |  _________________________   |
        # | | smooth frame            |  |  pre-smoothing op needs halo around filter roi
        # | |  ____________________   |  |
        # | | | filter frame       |  |  |  filter needs halo around target roi
        # | | |  _______________   |  |  |
        # | | | | target frame  |  |  |  |  target is given by output_roi
        #
        # Note: The 'full_' variable prefix refers to the full 5D shape (tczyx); variables without
        #       'full_' mostly refer to the 3D space subregion (zyx).
        full_output_slice = slot_roi.toSlice()

        logger.debug(f"OpPixelFeaturesPresmoothed: request {slot_roi.pprint()}")

        assert (slot_roi.stop <= self.Output.meta.shape).all()

        full_output_shape = self.Output.meta.shape
        full_output_start, full_output_stop = sliceToRoi(full_output_slice, full_output_shape)
        assert len(full_output_shape) == 5
        if all(self.ComputeIn2d.value):  # todo: check for this particular slice
            axes2enlarge = (0, 1, 1)
        else:
            axes2enlarge = (1, 1, 1)

        output_shape = full_output_shape[2:]
        output_start = full_output_start[2:]
        output_stop = full_output_stop[2:]

        axistags = self.Output.meta.axistags
        target = target.view(vigra.VigraArray)
        target.axistags = copy.copy(axistags)

        # filter roi in input frame
        # sigma = 0.7, because the features receive a pre-smoothed array and don't need much of a neighborhood
        input_filter_start, input_filter_stop = roi.enlargeRoiForHalo(
            output_start, output_stop, output_shape, 0.7, self.WINDOW_SIZE, enlarge_axes=axes2enlarge
        )

        # smooth roi in input frame
        input_smooth_start, input_smooth_stop = roi.enlargeRoiForHalo(
            input_filter_start,
            input_filter_stop,
            output_shape,
            self.max_sigma,
            self.WINDOW_SIZE,
            enlarge_axes=axes2enlarge,
        )

        # target roi in filter frame
        filter_target_start = roi.TinyVector(output_start - input_filter_start)
        filter_target_stop = roi.TinyVector(output_stop - input_filter_start)

        # filter roi in smooth frame
        smooth_filter_start = roi.TinyVector(input_filter_start - input_smooth_start)
        smooth_filter_stop = roi.TinyVector(input_filter_stop - input_smooth_start)

        filter_target_slice = roi.roiToSlice(filter_target_start, filter_target_stop)
        input_smooth_slice = roi.roiToSlice(input_smooth_start, input_smooth_stop)

        # pre-smooth for all requested time slices and all channels
        full_input_smooth_slice = (full_output_slice[0], slice(None), *input_smooth_slice)
        req = self.Input[full_input_smooth_slice]
        source = req.wait()
        req.clean()
        req.destination = None
        if source.dtype != numpy.float32:
            sourceF = source.astype(numpy.float32)
            try:
                source.resize((1,), refcheck=False)
            except Exception:
                pass
            del source
            source = sourceF

        sourceV = source.view(vigra.VigraArray)
        sourceV.axistags = copy.copy(self.Input.meta.axistags)

        dimCol = len(self.scales)
        dimRow = self.matrix.shape[0]
# ......... the rest of this example is omitted here .........
Example 6: execute
# Required import: from lazyflow.request import RequestPool [as alias]
# Or: from lazyflow.request.RequestPool import request [as alias]
def execute(self, slot, subindex, roi, result):
    progress = 0
    numImages = len(self.Images)
    self.progressSignal(progress)
    featMatrix = []
    labelsMatrix = []
    tagList = []

    # result[0] = self._svr

    for i, labels in enumerate(self.inputs["ForegroundLabels"]):
        if labels.meta.shape is not None:
            opGaussian = OpGaussianSmoothing(parent=self, graph=self.graph)
            opGaussian.Sigma.setValue(self.Sigma.value)
            opGaussian.Input.connect(self.ForegroundLabels[i])

            blocks = self.inputs["nonzeroLabelBlocks"][i][0].wait()

            reqlistlabels = []
            reqlistbg = []
            reqlistfeat = []
            progress += 10 / numImages
            self.progressSignal(progress)

            for b in blocks[0]:
                request = opGaussian.Output[b]
                # request = labels[b]
                featurekey = list(b)
                featurekey[-1] = slice(None, None, None)
                request2 = self.Images[i][featurekey]
                request3 = self.inputs["BackgroundLabels"][i][b]
                reqlistlabels.append(request)
                reqlistfeat.append(request2)
                reqlistbg.append(request3)

            traceLogger.debug("Requests prepared")

            numLabelBlocks = len(reqlistlabels)
            progress_outer = [progress]
            if numLabelBlocks > 0:
                progressInc = (80 - 10) / (numLabelBlocks * numImages)

            def progressNotify(req):
                progress_outer[0] += progressInc / 2
                self.progressSignal(progress_outer[0])

            for ir, req in enumerate(reqlistfeat):
                req.notify_finished(progressNotify)
                req.submit()

            for ir, req in enumerate(reqlistlabels):
                req.notify_finished(progressNotify)
                req.submit()

            for ir, req in enumerate(reqlistbg):
                req.notify_finished(progressNotify)
                req.submit()

            traceLogger.debug("Requests fired")

            # Fixme: Maybe later request only part of the region?
            # image = self.inputs["Images"][i][:].wait()

            for ir, req in enumerate(reqlistlabels):
                labblock = req.wait()
                image = reqlistfeat[ir].wait()
                labbgblock = reqlistbg[ir].wait()
                labblock = labblock.reshape((image.shape[:-1]))
                image = image.reshape((-1, image.shape[-1]))

                labbgindices = np.where(labbgblock == 2)
                labbgindices = np.ravel_multi_index(labbgindices, labbgblock.shape)

                newDot, mapping, tags = self._svr.prepareDataRefactored(labblock, labbgindices)
                # self._svr.prepareData(labblock, smooth=True)

                labels = newDot[mapping]
                features = image[mapping]

                featMatrix.append(features)
                labelsMatrix.append(labels)
                tagList.append(tags)

            progress = progress_outer[0]
            traceLogger.debug("Requests processed")

    self.progressSignal(80 / numImages)

    if len(featMatrix) == 0 or len(labelsMatrix) == 0:
        result[:] = None
    else:
        posTags = [tag[0] for tag in tagList]
        negTags = [tag[1] for tag in tagList]
        numPosTags = np.sum(posTags)
# ......... the rest of this example is omitted here .........
Example 7: execute
# Required import: from lazyflow.request import RequestPool [as alias]
# Or: from lazyflow.request.RequestPool import request [as alias]
def execute(self, slot, subindex, roi, result):
    progress = 0
    self.progressSignal(progress)
    numImages = len(self.Images)

    featMatrix = []
    labelsMatrix = []
    tagsMatrix = []
    result[0] = SVR(self.UnderMult.value, self.OverMult.value, limitDensity=True, **self.SelectedOption.value)

    for i, labels in enumerate(self.inputs["Labels"]):
        if labels.meta.shape is not None:
            # labels = labels[:].wait()
            blocks = self.inputs["nonzeroLabelBlocks"][i][0].wait()

            progress += 10 / numImages
            self.progressSignal(progress)

            reqlistlabels = []
            reqlistfeat = []
            traceLogger.debug("Sending requests for {} non-zero blocks (labels and data)".format(len(blocks[0])))
            for b in blocks[0]:
                request = labels[b]
                featurekey = list(b)
                featurekey[-1] = slice(None, None, None)
                request2 = self.inputs["Images"][i][featurekey]

                reqlistlabels.append(request)
                reqlistfeat.append(request2)

            traceLogger.debug("Requests prepared")

            numLabelBlocks = len(reqlistlabels)
            progress_outer = [progress]  # Store in a list for closure access
            if numLabelBlocks > 0:
                progressInc = (80 - 10) / numLabelBlocks / numImages

            def progressNotify(req):
                # Note: If we wanted perfect progress reporting, we could use a lock here
                #       to protect the progress from being incremented simultaneously.
                #       But that would slow things down and imperfect reporting is okay for our purposes.
                progress_outer[0] += progressInc / 2
                self.progressSignal(progress_outer[0])

            for ir, req in enumerate(reqlistfeat):
                image = req.notify_finished(progressNotify)

            for ir, req in enumerate(reqlistlabels):
                labblock = req.notify_finished(progressNotify)

            traceLogger.debug("Requests fired")

            for ir, req in enumerate(reqlistlabels):
                traceLogger.debug("Waiting for a label block...")
                labblock = req.wait()

                traceLogger.debug("Waiting for an image block...")
                image = reqlistfeat[ir].wait()

                newImg, newDot, mapping, tags = result[0].prepareData(
                    image, labblock, sigma=self.Sigma.value, smooth=True, normalize=False
                )

                features = newImg[mapping]
                labbla = newDot[mapping]

                # indexes = np.nonzero(labblock[..., 0].view(np.ndarray))
                # features = image[indexes]
                # labbla = labblock[indexes]

                featMatrix.append(features)
                labelsMatrix.append(labbla)
                tagsMatrix.append(tags)

            progress = progress_outer[0]
            traceLogger.debug("Requests processed")

    self.progressSignal(80 / numImages)

    if len(featMatrix) == 0 or len(labelsMatrix) == 0:
        # If there was no actual data for the random forest to train with, we return None
        result[:] = None
    else:
        featMatrix = np.concatenate(featMatrix, axis=0)
        labelsMatrix = np.concatenate(labelsMatrix, axis=0)
        tagsMatrix = np.concatenate(tagsMatrix, axis=0)

        try:
            logger.debug("Learning with Vigra...")
            pool = RequestPool()
            # result[0].fitPrepared(featMatrix, labelsMatrix, tagsMatrix, self.Epsilon.value)
            req = pool.request(partial(result[0].fitPrepared, featMatrix, labelsMatrix, tagsMatrix, self.Epsilon.value))
            pool.wait()
            pool.clean()

            logger.debug("Vigra finished")
        except:
# ......... the rest of this example is omitted here .........