This article collects typical usage examples of the Python class lazyflow.graph.OrderedSignal. If you are wondering what OrderedSignal is for, how to use it, or what real code that uses it looks like, the curated class examples here may help.
Below are 11 code examples of the OrderedSignal class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
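Before the examples, a note on the interface: judging purely from the usage visible below, an OrderedSignal is a callback list that supports subscribe(), invocation as a callable, and clean(). Here is a minimal standalone sketch of that interface; it is an illustration only, not the lazyflow implementation.
# Minimal sketch of the OrderedSignal interface as used in the examples below.
# Illustration only -- not the lazyflow implementation.
class OrderedSignalSketch:
    def __init__(self):
        self._callbacks = []  # fired in subscription order

    def subscribe(self, fn):
        self._callbacks.append(fn)

    def __call__(self, *args):
        for fn in self._callbacks:  # invoke subscribers in order
            fn(*args)

    def clean(self):
        self._callbacks = []  # drop all subscribers (see cleanUp() below)

progress = OrderedSignalSketch()
progress.subscribe(lambda p: print("progress: {:.0f}%".format(p)))
progress(80.0)
progress(100.0)
progress.clean()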
Example 1: OpTrainVectorwiseClassifierBlocked
class OpTrainVectorwiseClassifierBlocked(Operator):
Images = InputSlot(level=1)
Labels = InputSlot(level=1)
ClassifierFactory = InputSlot()
MaxLabel = InputSlot()
Classifier = OutputSlot()
# Images[N] ---                                                              MaxLabel ------
#               \                                                                          \
# Labels[N] --> opFeatureMatrixCaches ---(FeatureImage[N])---> opConcatenateFeatureImages ---(label+feature matrix)---> OpTrainFromFeatures ---(Classifier)--->
def __init__(self, *args, **kwargs):
super(OpTrainVectorwiseClassifierBlocked, self).__init__(*args, **kwargs)
self.progressSignal = OrderedSignal()
self._opFeatureMatrixCaches = OperatorWrapper(OpFeatureMatrixCache, parent=self)
self._opFeatureMatrixCaches.LabelImage.connect(self.Labels)
self._opFeatureMatrixCaches.FeatureImage.connect(self.Images)
self._opConcatenateFeatureMatrices = OpConcatenateFeatureMatrices(parent=self)
self._opConcatenateFeatureMatrices.FeatureMatrices.connect(self._opFeatureMatrixCaches.LabelAndFeatureMatrix)
self._opConcatenateFeatureMatrices.ProgressSignals.connect(self._opFeatureMatrixCaches.ProgressSignal)
self._opTrainFromFeatures = OpTrainClassifierFromFeatureVectors(parent=self)
self._opTrainFromFeatures.ClassifierFactory.connect(self.ClassifierFactory)
self._opTrainFromFeatures.LabelAndFeatureMatrix.connect(self._opConcatenateFeatureMatrices.ConcatenatedOutput)
self._opTrainFromFeatures.MaxLabel.connect(self.MaxLabel)
self.Classifier.connect(self._opTrainFromFeatures.Classifier)
# Progress reporting
def _handleFeatureProgress(progress):
# Note that these progress messages will probably appear out-of-order.
# See comments in OpFeatureMatrixCache
logger.debug("Training: {:02}% (Computing features)".format(int(progress)))
self.progressSignal(0.8 * progress)
self._opConcatenateFeatureMatrices.progressSignal.subscribe(_handleFeatureProgress)
def _handleTrainingComplete():
logger.debug("Training: 100% (Complete)")
self.progressSignal(100.0)
self._opTrainFromFeatures.trainingCompleteSignal.subscribe(_handleTrainingComplete)
def cleanUp(self):
self.progressSignal.clean()
self.Classifier.disconnect()
super(OpTrainVectorwiseClassifierBlocked, self).cleanUp()
def setupOutputs(self):
pass # Nothing to do; our output is connected to an internal operator.
def execute(self, slot, subindex, roi, result):
assert False, "Shouldn't get here..."
def propagateDirty(self, slot, subindex, roi):
pass
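A detail worth noting in Example 1: _handleFeatureProgress maps the feature-computation sub-task onto the first 80% of the overall progress range, and _handleTrainingComplete then jumps to 100%. The same pattern, sketched with plain callables standing in for the lazyflow signals:
# Scale a sub-task's 0-100 progress into a fraction of the parent's range.
def scaled(parent_callback, fraction):
    return lambda p: parent_callback(fraction * p)

overall = lambda p: print("Training: {:02}%".format(int(p)))
feature_progress = scaled(overall, 0.8)  # features account for 80% of the work

feature_progress(50)    # -> Training: 40%
feature_progress(100)   # -> Training: 80%
overall(100.0)          # -> Training: 100% (training complete)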
Example 2: OpTrainVectorwiseClassifierBlocked
class OpTrainVectorwiseClassifierBlocked(Operator):
Images = InputSlot(level=1)
Labels = InputSlot(level=1)
ClassifierFactory = InputSlot()
nonzeroLabelBlocks = InputSlot(level=1) # TODO: Eliminate this slot. It isn't used any more...
MaxLabel = InputSlot()
Classifier = OutputSlot()
# Images[N] ---                                                              MaxLabel ------
#               \                                                                          \
# Labels[N] --> opFeatureMatrixCaches ---(FeatureImage[N])---> opConcatenateFeatureImages ---(label+feature matrix)---> OpTrainFromFeatures ---(Classifier)--->
def __init__(self, *args, **kwargs):
super(OpTrainVectorwiseClassifierBlocked, self).__init__(*args, **kwargs)
self.progressSignal = OrderedSignal()
self._opFeatureMatrixCaches = OperatorWrapper( OpFeatureMatrixCache, parent=self )
self._opFeatureMatrixCaches.LabelImage.connect( self.Labels )
self._opFeatureMatrixCaches.FeatureImage.connect( self.Images )
self._opFeatureMatrixCaches.NonZeroLabelBlocks.connect( self.nonzeroLabelBlocks )
self._opConcatenateFeatureMatrices = OpConcatenateFeatureMatrices( parent=self )
self._opConcatenateFeatureMatrices.FeatureMatrices.connect( self._opFeatureMatrixCaches.LabelAndFeatureMatrix )
self._opConcatenateFeatureMatrices.ProgressSignals.connect( self._opFeatureMatrixCaches.ProgressSignal )
self._opTrainFromFeatures = OpTrainClassifierFromFeatureVectors( parent=self )
self._opTrainFromFeatures.ClassifierFactory.connect( self.ClassifierFactory )
self._opTrainFromFeatures.LabelAndFeatureMatrix.connect( self._opConcatenateFeatureMatrices.ConcatenatedOutput )
self._opTrainFromFeatures.MaxLabel.connect( self.MaxLabel )
self.Classifier.connect( self._opTrainFromFeatures.Classifier )
# Progress reporting
def _handleFeatureProgress( progress ):
self.progressSignal( 0.8*progress )
self._opConcatenateFeatureMatrices.progressSignal.subscribe( _handleFeatureProgress )
def _handleTrainingComplete():
self.progressSignal( 100.0 )
self._opTrainFromFeatures.trainingCompleteSignal.subscribe( _handleTrainingComplete )
def cleanUp(self):
self.progressSignal.clean()
self.Classifier.disconnect()
super( OpTrainVectorwiseClassifierBlocked, self ).cleanUp()
def setupOutputs(self):
pass # Nothing to do; our output is connected to an internal operator.
def execute(self, slot, subindex, roi, result):
assert False, "Shouldn't get here..."
def propagateDirty(self, slot, subindex, roi):
pass
Example 3: __init__
def __init__(self, *args, **kwargs):
super(OpTrainVectorwiseClassifierBlocked, self).__init__(*args, **kwargs)
self.progressSignal = OrderedSignal()
self._opFeatureMatrixCaches = OperatorWrapper( OpFeatureMatrixCache, parent=self )
self._opFeatureMatrixCaches.LabelImage.connect( self.Labels )
self._opFeatureMatrixCaches.FeatureImage.connect( self.Images )
self._opFeatureMatrixCaches.NonZeroLabelBlocks.connect( self.nonzeroLabelBlocks )
self._opConcatenateFeatureMatrices = OpConcatenateFeatureMatrices( parent=self )
self._opConcatenateFeatureMatrices.FeatureMatrices.connect( self._opFeatureMatrixCaches.LabelAndFeatureMatrix )
self._opConcatenateFeatureMatrices.ProgressSignals.connect( self._opFeatureMatrixCaches.ProgressSignal )
self._opTrainFromFeatures = OpTrainClassifierFromFeatureVectors( parent=self )
self._opTrainFromFeatures.ClassifierFactory.connect( self.ClassifierFactory )
self._opTrainFromFeatures.LabelAndFeatureMatrix.connect( self._opConcatenateFeatureMatrices.ConcatenatedOutput )
self._opTrainFromFeatures.MaxLabel.connect( self.MaxLabel )
self.Classifier.connect( self._opTrainFromFeatures.Classifier )
# Progress reporting
def _handleFeatureProgress( progress ):
self.progressSignal( 0.8*progress )
self._opConcatenateFeatureMatrices.progressSignal.subscribe( _handleFeatureProgress )
def _handleTrainingComplete():
self.progressSignal( 100.0 )
self._opTrainFromFeatures.trainingCompleteSignal.subscribe( _handleTrainingComplete )
Example 4: __init__
def __init__(self, *args, **kwargs):
super(OpTrainPixelwiseClassifierBlocked, self).__init__(*args, **kwargs)
self.progressSignal = OrderedSignal()
# Normally, lane removal does not trigger a dirty notification.
# But in this case, if the lane contained any label data whatsoever,
# the classifier needs to be marked dirty.
# We know which slots contain (or contained) label data because they have
# been 'touched' at some point (they became dirty at some point).
self._touched_slots = set()
def handle_new_lane(multislot, index, newlength):
def handle_dirty_lane(slot, roi):
self._touched_slots.add(slot)
multislot[index].notifyDirty(handle_dirty_lane)
self.Labels.notifyInserted(handle_new_lane)
def handle_remove_lane(multislot, index, newlength):
# If the lane we're removing contained
# label data, then mark the downstream dirty
if multislot[index] in self._touched_slots:
self.Classifier.setDirty()
self._touched_slots.remove(multislot[index])
self.Labels.notifyRemove(handle_remove_lane)
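Example 4's lane bookkeeping deserves a gloss: removing a lane does not normally trigger a dirty notification, so the operator remembers which label slots were ever "touched" and invalidates the classifier only when one of those is removed. A standalone sketch of the same bookkeeping, with strings standing in for slots:
# Only invalidate when a removed item was ever marked dirty ("touched").
touched_slots = set()

def handle_dirty(slot):
    touched_slots.add(slot)

def handle_remove(slot, set_dirty):
    if slot in touched_slots:
        set_dirty()
        touched_slots.remove(slot)

handle_dirty("lane-0")
handle_remove("lane-1", lambda: print("classifier dirty"))  # never touched: no-op
handle_remove("lane-0", lambda: print("classifier dirty"))  # -> classifier dirty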
Example 5: __init__
def __init__(self, *args, **kwargs):
super(OpH5WriterBigDataset, self).__init__(*args, **kwargs)
self.progressSignal = OrderedSignal()
self.d = None
self.f = None
Example 6: OpH5WriterBigDataset
class OpH5WriterBigDataset(Operator):
name = "H5 File Writer BigDataset"
category = "Output"
inputSlots = [InputSlot("hdf5File"), # Must be an already-open hdf5File (or group) for writing to
InputSlot("hdf5Path", stype = "string"),
InputSlot("Image"),
InputSlot("CompressionEnabled", value=True)]
outputSlots = [OutputSlot("WriteImage")]
loggingName = __name__ + ".OpH5WriterBigDataset"
logger = logging.getLogger(loggingName)
traceLogger = logging.getLogger("TRACE." + loggingName)
def __init__(self, *args, **kwargs):
super(OpH5WriterBigDataset, self).__init__(*args, **kwargs)
self.progressSignal = OrderedSignal()
self.d = None
self.f = None
def cleanUp(self):
super( OpH5WriterBigDataset, self ).cleanUp()
# Discard the reference to the dataset, to ensure that hdf5 can close the file.
self.d = None
self.f = None
self.progressSignal.clean()
def setupOutputs(self):
self.outputs["WriteImage"].meta.shape = (1,)
self.outputs["WriteImage"].meta.dtype = object
self.f = self.inputs["hdf5File"].value
hdf5Path = self.inputs["hdf5Path"].value
# On windows, there may be backslashes.
hdf5Path = hdf5Path.replace('\\', '/')
hdf5GroupName, datasetName = os.path.split(hdf5Path)
if hdf5GroupName == "":
g = self.f
else:
if hdf5GroupName in self.f:
g = self.f[hdf5GroupName]
else:
g = self.f.create_group(hdf5GroupName)
dataShape=self.Image.meta.shape
self.logger.info( "Data shape: {}".format(dataShape))
dtype = self.Image.meta.dtype
if type(dtype) is numpy.dtype:
# Make sure we're dealing with a type (e.g. numpy.float64),
# not a numpy.dtype
dtype = dtype.type
# Set up our chunk shape: Aim for a cube that's roughly 512k in size
dtypeBytes = dtype().nbytes
tagged_maxshape = self.Image.meta.getTaggedShape()
if 't' in tagged_maxshape:
# Assume that chunks should not span multiple t-slices
tagged_maxshape['t'] = 1
self.chunkShape = determineBlockShape( tagged_maxshape.values(), 512000.0 / dtypeBytes )
if datasetName in g.keys():
del g[datasetName]
kwargs = { 'shape' : dataShape, 'dtype' : dtype,
'chunks' : self.chunkShape }
if self.CompressionEnabled.value:
kwargs['compression'] = 'gzip' # <-- Would be nice to use lzf compression here, but that is h5py-specific.
kwargs['compression_opts'] = 1 # <-- Optimize for speed, not disk space.
self.d=g.create_dataset(datasetName, **kwargs)
if self.Image.meta.drange is not None:
self.d.attrs['drange'] = self.Image.meta.drange
def execute(self, slot, subindex, rroi, result):
self.progressSignal(0)
# Save the axistags as a dataset attribute
self.d.attrs['axistags'] = self.Image.meta.axistags.toJSON()
def handle_block_result(roi, data):
slicing = roiToSlice(*roi)
if data.flags.c_contiguous:
self.d.write_direct(data.view(numpy.ndarray), dest_sel=slicing)
else:
self.d[slicing] = data
requester = BigRequestStreamer( self.Image, roiFromShape( self.Image.meta.shape ) )
requester.resultSignal.subscribe( handle_block_result )
requester.progressSignal.subscribe( self.progressSignal )
requester.execute()
# Be paranoid: Flush right now.
self.f.file.flush()
# We're finished.
result[0] = True
#.........rest of the code omitted here.........
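The chunking logic in Example 6 aims for chunks of roughly 512 KB and pins the t axis to 1 before calling lazyflow's determineBlockShape. A rough standalone approximation of that heuristic (the real helper balances the axes more carefully; this stand-in is for illustration only):
import numpy

# Approximate the ~512 KB chunk target: pick a cube side that fits the byte
# budget, then clip each axis to the data shape.
def cubic_chunk(shape, dtype, target_bytes=512000.0):
    itemsize = numpy.dtype(dtype).itemsize
    side = int((target_bytes / itemsize) ** (1 / 3.0))
    return tuple(min(side, s) for s in shape)

print(cubic_chunk((1, 1024, 1024, 512), numpy.float32))  # -> (1, 50, 50, 50)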
Example 7: OpTrainPixelwiseClassifierBlocked
class OpTrainPixelwiseClassifierBlocked(Operator):
Images = InputSlot(level=1)
Labels = InputSlot(level=1)
ClassifierFactory = InputSlot()
nonzeroLabelBlocks = InputSlot(level=1)
MaxLabel = InputSlot()
Classifier = OutputSlot()
def __init__(self, *args, **kwargs):
super(OpTrainPixelwiseClassifierBlocked, self).__init__(*args, **kwargs)
self.progressSignal = OrderedSignal()
def setupOutputs(self):
for slot in list(self.Images) + list(self.Labels):
assert slot.meta.getAxisKeys()[-1] == 'c', \
"This opearator assumes channel is the last axis."
self.Classifier.meta.dtype = object
self.Classifier.meta.shape = (1,)
# Special metadata for downstream operators using the classifier
self.Classifier.meta.classifier_factory = self.ClassifierFactory.value
def cleanUp(self):
self.progressSignal.clean()
super( OpTrainPixelwiseClassifierBlocked, self ).cleanUp()
def execute(self, slot, subindex, roi, result):
classifier_factory = self.ClassifierFactory.value
assert issubclass(type(classifier_factory), LazyflowPixelwiseClassifierFactoryABC), \
"Factory is of type {}, which does not satisfy the LazyflowPixelwiseClassifierFactoryABC interface."\
"".format( type(classifier_factory) )
# Accumulate all non-zero blocks of each image into lists
label_data_blocks = []
image_data_blocks = []
for image_slot, label_slot, nonzero_block_slot in zip(self.Images, self.Labels, self.nonzeroLabelBlocks):
block_slicings = nonzero_block_slot.value
for block_slicing in block_slicings:
block_label_roi = sliceToRoi( block_slicing, image_slot.meta.shape )
# Ask for the halo needed by the classifier
axiskeys = image_slot.meta.getAxisKeys()
halo_shape = classifier_factory.get_halo_shape(axiskeys)
assert len(halo_shape) == len( block_label_roi[0] )
assert halo_shape[-1] == 0, "Didn't expect a non-zero halo for channel dimension."
# Expand block by halo, then clip to image bounds
block_label_roi = numpy.array( block_label_roi )
block_label_roi[0] -= halo_shape
block_label_roi[1] += halo_shape
block_label_roi = getIntersection( block_label_roi, roiFromShape(image_slot.meta.shape) )
block_image_roi = numpy.array( block_label_roi )
assert (block_image_roi[:, -1] == [0,1]).all()
num_channels = image_slot.meta.shape[-1]
block_image_roi[:, -1] = [0, num_channels]
# Ensure the results are plain ndarray, not VigraArray,
# which some classifiers might have trouble with.
block_label_data = numpy.asarray( label_slot(*block_label_roi).wait() )
block_image_data = numpy.asarray( image_slot(*block_image_roi).wait() )
label_data_blocks.append( block_label_data )
image_data_blocks.append( block_image_data )
classifier = classifier_factory.create_and_train_pixelwise( image_data_blocks, label_data_blocks )
assert issubclass(type(classifier), LazyflowPixelwiseClassifierABC), \
"Classifier is of type {}, which does not satisfy the LazyflowPixelwiseClassifierABC interface."\
"".format( type(classifier) )
result[0] = classifier
return result
def propagateDirty(self, slot, subindex, roi):
self.Classifier.setDirty()
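Example 7's halo handling is the interesting part: the label ROI is grown by the classifier's requested halo and then clipped back to the image bounds, which is what the getIntersection(..., roiFromShape(...)) call accomplishes. The same step as a self-contained numpy sketch (the ROI values are hypothetical):
import numpy

# Grow a (start, stop) ROI by a per-axis halo, then clip to the image bounds.
def expand_and_clip(roi, halo, shape):
    roi = numpy.array(roi)
    roi[0] = numpy.maximum(roi[0] - halo, 0)      # don't go below the origin
    roi[1] = numpy.minimum(roi[1] + halo, shape)  # don't go past the image
    return roi

roi = [(10, 10, 0), (20, 20, 1)]  # hypothetical block ROI over (y, x, c)
print(expand_and_clip(roi, halo=(5, 5, 0), shape=(12, 100, 3)))
# [[ 5  5  0]
#  [12 25  1]]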
Example 8: OpTrainPixelwiseClassifierBlocked
class OpTrainPixelwiseClassifierBlocked(Operator):
Images = InputSlot(level=1)
Labels = InputSlot(level=1)
ClassifierFactory = InputSlot()
nonzeroLabelBlocks = InputSlot(level=1)
MaxLabel = InputSlot()
Classifier = OutputSlot()
def __init__(self, *args, **kwargs):
super(OpTrainPixelwiseClassifierBlocked, self).__init__(*args, **kwargs)
self.progressSignal = OrderedSignal()
def setupOutputs(self):
self.Classifier.meta.dtype = object
self.Classifier.meta.shape = (1,)
# Special metadata for downstream operators using the classifier
self.Classifier.meta.classifier_factory = self.ClassifierFactory.value
def cleanUp(self):
self.progressSignal.clean()
super( OpTrainPixelwiseClassifierBlocked, self ).cleanUp()
def execute(self, slot, subindex, roi, result):
classifier_factory = self.ClassifierFactory.value
assert isinstance(classifier_factory, LazyflowPixelwiseClassifierFactoryABC), \
"Factory is of type {}, which does not satisfy the LazyflowPixelwiseClassifierFactoryABC interface."\
"".format( type(classifier_factory) )
# Accumulate all non-zero blocks of each image into lists
label_data_blocks = []
image_data_blocks = []
for image_slot, label_slot, nonzero_block_slot in zip(self.Images, self.Labels, self.nonzeroLabelBlocks):
block_slicings = nonzero_block_slot.value
for block_slicing in block_slicings:
block_label_roi = sliceToRoi( block_slicing, image_slot.meta.shape )
block_image_roi = numpy.array( block_label_roi )
assert (block_image_roi[:, -1] == [0,1]).all()
num_channels = image_slot.meta.shape[-1]
block_image_roi[:, -1] = [0, num_channels]
# TODO: Compensate for the halo as specified by the classifier...
#axiskeys = image_slot.meta.getAxisKeys()
#halo_shape = classifier_factory.get_halo_shape(axiskeys)
# Ensure the results are plain ndarray, not VigraArray,
# which some classifiers might have trouble with.
block_label_data = numpy.asarray( label_slot(*block_label_roi).wait() )
block_image_data = numpy.asarray( image_slot(*block_image_roi).wait() )
label_data_blocks.append( block_label_data )
image_data_blocks.append( block_image_data )
classifier = classifier_factory.create_and_train_pixelwise( image_data_blocks, label_data_blocks )
assert isinstance(classifier, LazyflowPixelwiseClassifierABC), \
"Classifier is of type {}, which does not satisfy the LazyflowPixelwiseClassifierABC interface."\
"".format( type(classifier) )
result[0] = classifier
return result
def propagateDirty(self, slot, subindex, roi):
print('classifier is dirty...')
self.Classifier.setDirty()
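In Example 8 (and Example 7), note how the single-channel label ROI is turned into a full-channel image ROI by rewriting only the last ROI column. Isolated as a numpy snippet, with assumed shapes:
import numpy

# A label ROI covers exactly one channel; the matching image ROI must cover
# all channels, so only the last (channel) column of the ROI is rewritten.
block_label_roi = numpy.array([(0, 0, 0), (32, 32, 1)])  # (y, x, c) start/stop
num_channels = 4

block_image_roi = numpy.array(block_label_roi)
assert (block_image_roi[:, -1] == [0, 1]).all()  # labels live in channel 0
block_image_roi[:, -1] = [0, num_channels]
print(block_image_roi)  # [[ 0  0  0] [32 32  4]]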
Example 9: __init__
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.progressSignal = OrderedSignal()
self.d = None
self.f = None
Example 10: OpH5N5WriterBigDataset
class OpH5N5WriterBigDataset(Operator):
name = "H5 and N5 File Writer BigDataset"
category = "Output"
h5N5File = InputSlot() # Must be an already-open hdf5File/n5File (or group) for writing to
h5N5Path = InputSlot()
Image = InputSlot()
# h5py uses single-threaded gzip compression, which really slows down export.
CompressionEnabled = InputSlot(value=False)
BatchSize = InputSlot(optional=True)
WriteImage = OutputSlot()
loggingName = __name__ + ".OpH5N5WriterBigDataset"
logger = logging.getLogger(loggingName)
traceLogger = logging.getLogger("TRACE." + loggingName)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.progressSignal = OrderedSignal()
self.d = None
self.f = None
def cleanUp(self):
super().cleanUp()
# Discard the reference to the dataset, to ensure that the file can be closed.
self.d = None
self.f = None
self.progressSignal.clean()
def setupOutputs(self):
self.outputs["WriteImage"].meta.shape = (1,)
self.outputs["WriteImage"].meta.dtype = object
self.f = self.inputs["h5N5File"].value
h5N5Path = self.inputs["h5N5Path"].value
# On windows, there may be backslashes.
h5N5Path = h5N5Path.replace("\\", "/")
h5N5GroupName, datasetName = os.path.split(h5N5Path)
if h5N5GroupName == "":
g = self.f
else:
if h5N5GroupName in self.f:
g = self.f[h5N5GroupName]
else:
g = self.f.create_group(h5N5GroupName)
dataShape = self.Image.meta.shape
self.logger.info(f"Data shape: {dataShape}")
dtype = self.Image.meta.dtype
if isinstance(dtype, numpy.dtype):
# Make sure we're dealing with a type (e.g. numpy.float64),
# not a numpy.dtype
dtype = dtype.type
# Set up our chunk shape: Aim for a cube that's roughly 512k in size
dtypeBytes = dtype().nbytes
tagged_maxshape = self.Image.meta.getTaggedShape()
if "t" in tagged_maxshape:
# Assume that chunks should not span multiple t-slices,
# and channels are often handled separately, too.
tagged_maxshape["t"] = 1
if "c" in tagged_maxshape:
tagged_maxshape["c"] = 1
self.chunkShape = determineBlockShape(list(tagged_maxshape.values()), 512_000.0 / dtypeBytes)
if datasetName in list(g.keys()):
del g[datasetName]
kwargs = {"shape": dataShape, "dtype": dtype, "chunks": self.chunkShape}
if self.CompressionEnabled.value:
kwargs["compression"] = "gzip" # <-- Would be nice to use lzf compression here, but that is h5py-specific.
if isinstance(self.f, h5py.File):
kwargs["compression_opts"] = 1 # <-- Optimize for speed, not disk space.
else: # z5py uses different names here
kwargs["level"] = 1 # <-- Optimize for speed, not disk space.
else:
if isinstance(self.f, z5py.N5File): # n5 uses gzip level 5 as default compression.
kwargs["compression"] = "raw"
self.d = g.create_dataset(datasetName, **kwargs)
if self.Image.meta.drange is not None:
self.d.attrs["drange"] = self.Image.meta.drange
if self.Image.meta.display_mode is not None:
self.d.attrs["display_mode"] = self.Image.meta.display_mode
def execute(self, slot, subindex, rroi, result):
self.progressSignal(0)
# Save the axistags as a dataset attribute
self.d.attrs["axistags"] = self.Image.meta.axistags.toJSON()
def handle_block_result(roi, data):
slicing = roiToSlice(*roi)
if data.flags.c_contiguous:
#.........rest of the code omitted here.........
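Example 10 is cut off just as handle_block_result checks data.flags.c_contiguous; judging by Example 6, the contiguous case presumably uses write_direct and the strided case falls back to plain slice assignment. The check itself is plain numpy:
import numpy

# write_direct wants a C-contiguous buffer; strided views are not contiguous.
full = numpy.ones((4, 4))
view = full[::2]                # every other row: a strided view
print(full.flags.c_contiguous)  # -> True
print(view.flags.c_contiguous)  # -> False
print(numpy.ascontiguousarray(view).flags.c_contiguous)  # -> True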
Example 11: OpH5WriterBigDataset
class OpH5WriterBigDataset(Operator):
name = "H5 File Writer BigDataset"
category = "Output"
inputSlots = [InputSlot("hdf5File"), # Must be an already-open hdf5File (or group) for writing to
InputSlot("hdf5Path", stype = "string"),
InputSlot("Image"),
InputSlot("CompressionEnabled", value=True)]
outputSlots = [OutputSlot("WriteImage")]
loggingName = __name__ + ".OpH5WriterBigDataset"
logger = logging.getLogger(loggingName)
traceLogger = logging.getLogger("TRACE." + loggingName)
def __init__(self, *args, **kwargs):
super(OpH5WriterBigDataset, self).__init__(*args, **kwargs)
self.progressSignal = OrderedSignal()
self.d = None
self.f = None
def cleanUp(self):
super( OpH5WriterBigDataset, self ).cleanUp()
# Discard the reference to the dataset, to ensure that hdf5 can close the file.
self.d = None
self.f = None
self.progressSignal.clean()
def setupOutputs(self):
self.outputs["WriteImage"].meta.shape = (1,)
self.outputs["WriteImage"].meta.dtype = object
self.f = self.inputs["hdf5File"].value
hdf5Path = self.inputs["hdf5Path"].value
# On windows, there may be backslashes.
hdf5Path = hdf5Path.replace('\\', '/')
hdf5GroupName, datasetName = os.path.split(hdf5Path)
if hdf5GroupName == "":
g = self.f
else:
if hdf5GroupName in self.f:
g = self.f[hdf5GroupName]
else:
g = self.f.create_group(hdf5GroupName)
dataShape=self.Image.meta.shape
taggedShape = self.Image.meta.getTaggedShape()
dtype = self.Image.meta.dtype
if type(dtype) is numpy.dtype:
# Make sure we're dealing with a type (e.g. numpy.float64),
# not a numpy.dtype
dtype = dtype.type
numChannels = 1
if 'c' in taggedShape:
numChannels = taggedShape['c']
# Set up our chunk shape: Aim for a cube that's roughly 300k in size
dtypeBytes = dtype().nbytes
cubeDim = math.pow( 300000 / (numChannels * dtypeBytes), (1/3.0) )
cubeDim = int(cubeDim)
chunkDims = {}
chunkDims['t'] = 1
chunkDims['x'] = cubeDim
chunkDims['y'] = cubeDim
chunkDims['z'] = cubeDim
chunkDims['c'] = numChannels
# h5py guide to chunking says chunks of 300k or less "work best"
assert chunkDims['x'] * chunkDims['y'] * chunkDims['z'] * numChannels * dtypeBytes <= 300000
chunkShape = ()
for i in range( len(dataShape) ):
axisKey = self.Image.meta.axistags[i].key
# Chunk shape can't be larger than the data shape
chunkShape += ( min( chunkDims[axisKey], dataShape[i] ), )
self.chunkShape = chunkShape
if datasetName in g.keys():
del g[datasetName]
kwargs = { 'shape' : dataShape, 'dtype' : dtype, 'chunks' : self.chunkShape }
if self.CompressionEnabled.value:
kwargs['compression'] = 'gzip' # <-- Would be nice to use lzf compression here, but that is h5py-specific.
kwargs['compression_opts'] = 1 # <-- Optimize for speed, not disk space.
self.d=g.create_dataset(datasetName, **kwargs)
if self.Image.meta.drange is not None:
self.d.attrs['drange'] = self.Image.meta.drange
def execute(self, slot, subindex, rroi, result):
self.progressSignal(0)
slicings=self.computeRequestSlicings()
numSlicings = len(slicings)
self.logger.debug( "Dividing work into {} pieces".format( len(slicings) ) )
#.........rest of the code omitted here.........
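Example 11's chunking differs from Example 6: it targets chunks of roughly 300 KB that span all channels and computes the cube side by hand. A worked instance of that arithmetic, assuming float32 data (4 bytes) with 3 channels:
import math

# cubeDim = floor((300000 / (numChannels * dtypeBytes)) ** (1/3))
numChannels, dtypeBytes = 3, 4  # assumed: RGB float32
cubeDim = int(math.pow(300000 / (numChannels * dtypeBytes), 1 / 3.0))
print(cubeDim)                                # -> 29
print(cubeDim**3 * numChannels * dtypeBytes)  # -> 292668 (<= 300000, as asserted above)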