This article collects typical usage examples of the Python class lazyflow.operators.opReorderAxes.OpReorderAxes. If you are unsure what OpReorderAxes is for, or how to use it, the curated class code examples below may help.
The following presents 10 code examples of the OpReorderAxes class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
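Before the individual examples, here is a minimal, self-contained sketch of the usage pattern they all share: construct the operator on a Graph, set the desired AxisOrder, connect Input to an upstream slot that carries axistags, and then request Output. The array shape, the 'tzyxc'-to-'txyzc' reorder, and the import paths for Graph and OpArrayPiper are illustrative assumptions distilled from the examples below, not taken from any single one of them.

# Minimal usage sketch (assumptions noted above; not copied from any one example below)
import numpy
import vigra
from lazyflow.graph import Graph
from lazyflow.operators import OpArrayPiper
from lazyflow.operators.opReorderAxes import OpReorderAxes

# A small tagged volume, so OpReorderAxes can read axistags from its input's metadata
data = vigra.taggedView(numpy.zeros((2, 5, 10, 20, 3), dtype=numpy.uint8),
                        vigra.defaultAxistags('tzyxc'))

graph = Graph()
opSource = OpArrayPiper(graph=graph)          # feeds the tagged array into the graph
opSource.Input.setValue(data)

opReorderAxes = OpReorderAxes(graph=graph)
opReorderAxes.AxisOrder.setValue('txyzc')     # desired output axis order
opReorderAxes.Input.connect(opSource.Output)

reordered = opReorderAxes.Output[:].wait()    # shape becomes (2, 20, 10, 5, 3)
opReorderAxes.cleanUp()                       # release the operator when done, as the examples do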
Example 1: test_Writer
def test_Writer(self):
    opData = OpArrayCache( graph=self.graph )
    opData.blockShape.setValue( self.testData.shape )
    opData.Input.setValue( self.testData )

    opExport = OpExportMultipageTiffSequence(graph=self.graph)
    opExport.FilepathPattern.setValue( self._stack_filepattern )
    opExport.Input.connect( opData.Output )
    opExport.SliceIndexOffset.setValue(22)

    # Run the export
    opExport.run_export()

    globstring = self._stack_filepattern.format( slice_index=999 )
    globstring = globstring.replace('999', '*')

    opReader = OpStackLoader( graph=self.graph )
    opReader.globstring.setValue( globstring )

    # (The OpStackLoader produces txyzc order.)
    opReorderAxes = OpReorderAxes( graph=self.graph )
    opReorderAxes.AxisOrder.setValue( self._axisorder )
    opReorderAxes.Input.connect( opReader.stack )

    readData = opReorderAxes.Output[:].wait()
    logger.debug("Expected shape={}".format( self.testData.shape ) )
    logger.debug("Read shape={}".format( readData.shape ) )

    assert opReorderAxes.Output.meta.shape == self.testData.shape, "Exported files were of the wrong shape or number."
    assert (opReorderAxes.Output[:].wait() == self.testData.view( numpy.ndarray )).all(), "Exported data was not correct"

    opReorderAxes.cleanUp()
    opReader.cleanUp()
Example 2: testLotsOfOptions
def testLotsOfOptions(self):
    #OLD_LAZYFLOW_STATUS_MONITOR_SECONDS = os.getenv("LAZYFLOW_STATUS_MONITOR_SECONDS", None)
    #os.environ["LAZYFLOW_STATUS_MONITOR_SECONDS"] = "1"

    # NOTE: In this test, cmd-line args to nosetests will also end up getting "parsed" by ilastik.
    #       That shouldn't be an issue, since the pixel classification workflow ignores unrecognized options.
    #       See if __name__ == __main__ section, below.
    args = []
    args.append( "--project=" + self.PROJECT_FILE )
    args.append( "--headless" )
    #args.append( "--sys_tmp_dir=/tmp" )

    # Batch export options
    args.append( '--export_source=Simple Segmentation' )
    args.append( '--output_format=png sequence' ) # If we were actually launching from the command line, 'png sequence' would be in quotes...
    args.append( "--output_filename_format={dataset_dir}/{nickname}_segmentation_z{slice_index}.png" )
    args.append( "--export_dtype=uint8" )
    args.append( "--output_axis_order=zxyc" )
    args.append( "--pipeline_result_drange=(0,2)" )
    args.append( "--export_drange=(0,255)" )
    args.append( "--cutout_subregion=[(0,50,50,0,0), (1, 150, 150, 50, 1)]" )
    args.append( self.SAMPLE_DATA )

    old_sys_argv = list(sys.argv)
    sys.argv = ['ilastik.py'] # Clear the existing commandline args so it looks like we're starting fresh.
    sys.argv += args

    # Start up the ilastik.py entry script as if we had launched it from the command line
    # This will execute the batch mode script
    try:
        self.ilastik_startup.main()
    finally:
        sys.argv = old_sys_argv

    # if OLD_LAZYFLOW_STATUS_MONITOR_SECONDS:
    #     os.environ["LAZYFLOW_STATUS_MONITOR_SECONDS"] = OLD_LAZYFLOW_STATUS_MONITOR_SECONDS

    output_path = self.SAMPLE_DATA[:-4] + "_segmentation_z{slice_index}.png"
    globstring = output_path.format( slice_index=999 )
    globstring = globstring.replace('999', '*')

    opReader = OpStackLoader( graph=Graph() )
    opReader.globstring.setValue( globstring )

    # (The OpStackLoader produces txyzc order.)
    opReorderAxes = OpReorderAxes( graph=Graph() )
    opReorderAxes.AxisOrder.setValue( 'tzyxc' )
    opReorderAxes.Input.connect( opReader.stack )

    try:
        readData = opReorderAxes.Output[:].wait()

        # Check basic attributes
        assert readData.shape[:-1] == self.data[0:1, 50:150, 50:150, 0:50, 0:1].shape[:-1] # Assume channel is last axis
        assert readData.shape[-1] == 1, "Wrong number of channels. Expected 1, got {}".format( readData.shape[-1] )
    finally:
        # Clean-up.
        opReorderAxes.cleanUp()
        opReader.cleanUp()
Example 3: test_basic
def test_basic(self):
    opData = OpArrayCache( graph=self.graph )
    opData.blockShape.setValue( self.testData.shape )
    opData.Input.setValue( self.testData )

    filepath = os.path.join( self._tmpdir, 'multipage.tiff' )
    logger.debug( "writing to: {}".format(filepath) )

    opExport = OpExportMultipageTiff(graph=self.graph)
    opExport.Filepath.setValue( filepath )
    opExport.Input.connect( opData.Output )

    # Run the export
    opExport.run_export()

    opReader = OpInputDataReader( graph=self.graph )
    opReader.FilePath.setValue( filepath )

    # The reader assumes xyzc order.
    # We have to transpose the data before we compare.
    opReorderAxes = OpReorderAxes( graph=self.graph )
    opReorderAxes.AxisOrder.setValue( self._axisorder )
    opReorderAxes.Input.connect( opReader.Output )

    readData = opReorderAxes.Output[:].wait()
    logger.debug("Expected shape={}".format( self.testData.shape ) )
    logger.debug("Read shape={}".format( readData.shape ) )

    assert opReorderAxes.Output.meta.shape == self.testData.shape, "Exported files were of the wrong shape or number."
    assert (opReorderAxes.Output[:].wait() == self.testData.view( numpy.ndarray )).all(), "Exported data was not correct"

    # Cleanup
    opReorderAxes.cleanUp()
    opReader.cleanUp()
Example 4: handleImportLabelsAction
def handleImportLabelsAction():
    # Find the directory of the most recently opened image file
    mostRecentImageFile = PreferencesManager().get( 'DataSelection', 'recent image' )
    if mostRecentImageFile is not None:
        defaultDirectory = os.path.split(mostRecentImageFile)[0]
    else:
        defaultDirectory = os.path.expanduser('~')
    fileNames = DataSelectionGui.getImageFileNamesToOpen(self, defaultDirectory)
    fileNames = list(map(str, fileNames))

    # For now, we require a single hdf5 file
    if len(fileNames) > 1:
        QMessageBox.critical(self, "Too many files",
                             "Labels must be contained in a single hdf5 volume.")
        return
    if len(fileNames) == 0:
        # user cancelled
        return

    file_path = fileNames[0]
    internal_paths = DataSelectionGui.getPossibleInternalPaths(file_path)
    if len(internal_paths) == 0:
        QMessageBox.critical(self, "No volumes in file",
                             "Couldn't find a suitable dataset in your hdf5 file.")
        return
    if len(internal_paths) == 1:
        internal_path = internal_paths[0]
    else:
        dlg = H5VolumeSelectionDlg(internal_paths, self)
        if dlg.exec_() == QDialog.Rejected:
            return
        selected_index = dlg.combo.currentIndex()
        internal_path = str(internal_paths[selected_index])

    path_components = PathComponents(file_path)
    path_components.internalPath = str(internal_path)

    try:
        top_op = self.topLevelOperatorView
        opReader = OpInputDataReader(parent=top_op.parent)
        opReader.FilePath.setValue( path_components.totalPath() )

        # Reorder the axes
        op5 = OpReorderAxes(parent=top_op.parent)
        op5.AxisOrder.setValue( top_op.LabelInputs.meta.getAxisKeys() )
        op5.Input.connect( opReader.Output )

        # Finally, import the labels
        top_op.importLabels( top_op.current_view_index(), op5.Output )
    finally:
        op5.cleanUp()
        opReader.cleanUp()
Example 5: getVoluminaShapeForSlot
def getVoluminaShapeForSlot(self, slot):
    shape = None
    if slot.ready() and slot.meta.axistags is not None:
        # Use an OpReorderAxes adapter to transpose the shape for us.
        op5 = OpReorderAxes( parent=slot.getRealOperator().parent )
        op5.Input.connect( slot )
        shape = op5.Output.meta.shape

        # We just needed the operator to determine the transposed shape.
        # Disconnect it so it can be garbage collected.
        op5.Input.disconnect()
        op5.cleanUp()
    return shape
Example 6: testLotsOfOptions
def testLotsOfOptions(self):
    # NOTE: In this test, cmd-line args to nosetests will also end up getting "parsed" by ilastik.
    #       That shouldn't be an issue, since the pixel classification workflow ignores unrecognized options.
    #       See if __name__ == __main__ section, below.
    args = []
    args.append( "--project=" + self.PROJECT_FILE )
    args.append( "--headless" )
    args.append( "--sys_tmp_dir=/tmp" )

    # Batch export options
    args.append( '--output_format=png sequence' ) # If we were actually launching from the command line, 'png sequence' would be in quotes...
    args.append( "--output_filename_format={dataset_dir}/{nickname}_prediction_z{slice_index}.png" )
    args.append( "--export_dtype=uint8" )
    args.append( "--output_axis_order=zxyc" )
    args.append( "--pipeline_result_drange=(0.0,1.0)" )
    args.append( "--export_drange=(0,255)" )
    args.append( "--cutout_subregion=[(0,50,50,0,0), (1, 150, 150, 50, 2)]" )
    args.append( self.SAMPLE_DATA )

    sys.argv = ['ilastik.py'] # Clear the existing commandline args so it looks like we're starting fresh.
    sys.argv += args

    # Start up the ilastik.py entry script as if we had launched it from the command line
    # This will execute the batch mode script
    ilastik_entry_file_path = os.path.join( os.path.split( ilastik.__file__ )[0], "../ilastik.py" )
    imp.load_source( 'main', ilastik_entry_file_path )

    output_path = self.SAMPLE_DATA[:-4] + "_prediction_z{slice_index}.png"
    globstring = output_path.format( slice_index=999 )
    globstring = globstring.replace('999', '*')

    opReader = OpStackLoader( graph=Graph() )
    opReader.globstring.setValue( globstring )

    # (The OpStackLoader produces txyzc order.)
    opReorderAxes = OpReorderAxes( graph=Graph() )
    opReorderAxes.AxisOrder.setValue( 'txyzc' )
    opReorderAxes.Input.connect( opReader.stack )

    readData = opReorderAxes.Output[:].wait()

    # Check basic attributes
    assert readData.shape[:-1] == self.data[0:1, 50:150, 50:150, 0:50, 0:2].shape[:-1] # Assume channel is last axis
    assert readData.shape[-1] == 2, "Wrong number of channels. Expected 2, got {}".format( readData.shape[-1] )

    # Clean-up.
    opReorderAxes.cleanUp()
    opReader.cleanUp()
Example 7: testBasic_MultipageTiffSequence
def testBasic_MultipageTiffSequence(self):
    data = 255 * numpy.random.random((5, 10, 50, 100, 3))
    data = data.astype(numpy.uint8)
    data = vigra.taggedView(data, vigra.defaultAxistags("tzyxc"))

    # Must run this through an operator.
    # Can't use opExport.setValue() because OpStackWriter can't work with ValueRequests.
    graph = Graph()
    opData = OpBlockedArrayCache(graph=graph)
    opData.BlockShape.setValue(data.shape)
    opData.Input.setValue(data)

    filepattern = self._tmpdir + "/test_export_x{x_start}-{x_stop}_y{y_start}-{y_stop}_t{slice_index}"
    opExport = OpExportSlot(graph=graph)
    opExport.Input.connect(opData.Output)
    opExport.OutputFormat.setValue("multipage tiff sequence")
    opExport.OutputFilenameFormat.setValue(filepattern)
    opExport.CoordinateOffset.setValue((7, 10, 20, 30, 0))
    opExport.run_export()

    export_pattern = opExport.ExportPath.value
    globstring = export_pattern.format(slice_index=999)
    globstring = globstring.replace("999", "*")

    opReader = OpTiffSequenceReader(graph=graph)
    opReorderAxes = OpReorderAxes(graph=graph)
    try:
        opReader.GlobString.setValue(globstring)

        # (The OpStackLoader produces txyzc order.)
        opReorderAxes.AxisOrder.setValue("tzyxc")
        opReorderAxes.Input.connect(opReader.Output)

        assert opReorderAxes.Output.meta.shape == data.shape, "Exported files were of the wrong shape or number."
        assert (opReorderAxes.Output[:].wait() == data.view(numpy.ndarray)).all(), "Exported data was not correct"
    finally:
        opReorderAxes.cleanUp()
        opReader.cleanUp()
Example 8: test_basic
def test_basic(self):
    opSource = OpArrayPiper(graph=self.graph)
    opSource.Input.setValue( self.testData )

    opData = OpArrayCache( graph=self.graph )
    opData.blockShape.setValue( self.testData.shape )
    opData.Input.connect( opSource.Output )

    filepath = os.path.join( self._tmpdir, 'multipage.tiff' )
    logger.debug( "writing to: {}".format(filepath) )

    opExport = OpExportMultipageTiff(graph=self.graph)
    opExport.Filepath.setValue( filepath )
    opExport.Input.connect( opData.Output )

    # Run the export
    opExport.run_export()

    opReader = OpTiffReader( graph=self.graph )
    try:
        opReader.Filepath.setValue( filepath )

        # Re-order before comparing
        opReorderAxes = OpReorderAxes( graph=self.graph )
        try:
            opReorderAxes.AxisOrder.setValue( self._axisorder )
            opReorderAxes.Input.connect( opReader.Output )
            readData = opReorderAxes.Output[:].wait()

            logger.debug("Expected shape={}".format( self.testData.shape ) )
            logger.debug("Read shape={}".format( readData.shape ) )

            assert opReorderAxes.Output.meta.shape == self.testData.shape, \
                "Exported files were of the wrong shape or number."
            assert (opReorderAxes.Output[:].wait() == self.testData.view( numpy.ndarray )).all(), \
                "Exported data was not correct"
        finally:
            opReorderAxes.cleanUp()
    finally:
        opReader.cleanUp()
Example 9: import_labeling_layer
def import_labeling_layer(labelLayer, labelingSlots, parent_widget=None):
    """
    Prompt the user for layer import settings, and perform the layer import.
    :param labelLayer: The top label layer source
    :param labelingSlots: An instance of LabelingGui.LabelingSlots
    :param parent_widget: The Qt GUI parent object
    """
    writeSeeds = labelingSlots.labelInput
    assert isinstance(writeSeeds, lazyflow.graph.Slot), "slot is of type %r" % (type(writeSeeds))

    opLabels = writeSeeds.getRealOperator()
    assert isinstance(opLabels, lazyflow.graph.Operator), "slot's operator is of type %r" % (type(opLabels))

    recentlyImported = PreferencesManager().get('labeling', 'recently imported')
    mostRecentProjectPath = PreferencesManager().get('shell', 'recently opened')
    mostRecentImageFile = PreferencesManager().get( 'DataSelection', 'recent image' )
    if recentlyImported:
        defaultDirectory = os.path.split(recentlyImported)[0]
    elif mostRecentProjectPath:
        defaultDirectory = os.path.split(mostRecentProjectPath)[0]
    elif mostRecentImageFile:
        defaultDirectory = os.path.split(mostRecentImageFile)[0]
    else:
        defaultDirectory = os.path.expanduser('~')

    fileNames = DataSelectionGui.getImageFileNamesToOpen(parent_widget, defaultDirectory)
    fileNames = map(str, fileNames)
    if not fileNames:
        return

    PreferencesManager().set('labeling', 'recently imported', fileNames[0])

    try:
        # Initialize operators
        opImport = OpInputDataReader( parent=opLabels.parent )
        opCache = OpArrayCache( parent=opLabels.parent )
        opMetadataInjector = OpMetadataInjector( parent=opLabels.parent )
        opReorderAxes = OpReorderAxes( parent=opLabels.parent )

        # Set up the pipeline as follows:
        #
        #   opImport --> opCache --> opMetadataInjector --------> opReorderAxes --(inject via setInSlot)--> labelInput
        #                            /                            /
        #    User-specified axisorder    labelInput.meta.axistags
        opImport.WorkingDirectory.setValue(defaultDirectory)
        opImport.FilePath.setValue(fileNames[0] if len(fileNames) == 1 else
                                   os.path.pathsep.join(fileNames))
        assert opImport.Output.ready()

        opCache.blockShape.setValue( opImport.Output.meta.shape )
        opCache.Input.connect( opImport.Output )
        assert opCache.Output.ready()

        opMetadataInjector.Input.connect( opCache.Output )
        metadata = opCache.Output.meta.copy()
        opMetadataInjector.Metadata.setValue( metadata )

        opReorderAxes.Input.connect( opMetadataInjector.Output )

        # Transpose the axes for assignment to the labeling operator.
        opReorderAxes.AxisOrder.setValue( writeSeeds.meta.getAxisKeys() )

        # We'll show a little window with a busy indicator while the data is loading
        busy_dlg = QProgressDialog(parent=parent_widget)
        busy_dlg.setLabelText("Importing Label Data...")
        busy_dlg.setCancelButton(None)
        busy_dlg.setMinimum(100)
        busy_dlg.setMaximum(100)

        def close_busy_dlg(*args):
            QApplication.postEvent(busy_dlg, QCloseEvent())

        # Load the data from file into our cache
        # When it's done loading, close the progress dialog.
        req = opCache.Output[:]
        req.notify_finished( close_busy_dlg )
        req.notify_failed( close_busy_dlg )
        req.submit()
        busy_dlg.exec_()

        readData = req.result

        maxLabels = len(labelingSlots.labelNames.value)

        # Can't use return_counts feature because that requires numpy >= 1.9
        #unique_read_labels, readLabelCounts = numpy.unique(readData, return_counts=True)

        # This does the same as the above, albeit slower, and probably with more ram.
        unique_read_labels = numpy.unique(readData)
        readLabelCounts = vigra_bincount(readData)[unique_read_labels]

        labelInfo = (maxLabels, (unique_read_labels, readLabelCounts))
        del readData

        # Ask the user how to interpret the data.
        settingsDlg = LabelImportOptionsDlg( parent_widget,
                                             fileNames, opMetadataInjector.Output,
                                             labelingSlots.labelInput, labelInfo )

        def handle_updated_axes():
            # .........the rest of this example's code is omitted here.........
Example 10: impl
def impl():
    shell = self.shell
    workflow = shell.projectManager.workflow
    carvingApplet = workflow.carvingApplet
    gui = carvingApplet.getMultiLaneGui()
    op_carving = carvingApplet.topLevelOperator.getLane(0)

    # activate the carving applet
    shell.setSelectedAppletDrawer(2)
    # let the gui catch up
    QApplication.processEvents()
    self.waitForViews(gui.currentGui().editor.imageViews)

    # inject the labels
    op5 = OpReorderAxes(parent=op_carving.parent)
    opReader = OpInputDataReader(parent=op_carving.parent)
    try:
        opReader.FilePath.setValue(f"{self.reference_files['carving_label_file']}/exported_data")
        op5.AxisOrder.setValue(op_carving.WriteSeeds.meta.getAxisKeys())
        op5.Input.connect(opReader.Output)
        label_data = op5.Output[:].wait()
    finally:
        op5.cleanUp()
        opReader.cleanUp()

    slicing = roi.fullSlicing(label_data.shape)
    op_carving.WriteSeeds[slicing] = label_data

    gui.currentGui().labelingDrawerUi.segment.click()
    QApplication.processEvents()

    op_carving.saveObjectAs("Object 1")
    op_carving.deleteObject("<not saved yet>")

    # export the mesh:
    req = gui.currentGui()._exportMeshes(["Object 1"], [self.output_obj_file])
    req.wait()

    # compare meshes
    with open(self.output_obj_file, "r") as f:
        left = f.read()
    with open(self.reference_files["output_obj_file"], "r") as f:
        right = f.read()

    # TODO: might result in errors due to rounding on different systems
    assert left == right

    # export the completed segments layer
    layermatch = [
        x.name.startswith("Completed segments (unicolor)") for x in gui.currentGui().editor.layerStack
    ]
    assert sum(layermatch) == 1, "Completed segments (unicolor) Layer expected."
    completed_segments_layer = gui.currentGui().editor.layerStack[layermatch.index(True)]
    opExport = get_export_operator(completed_segments_layer)
    try:
        opExport.OutputFilenameFormat.setValue(self.output_file)
        opExport.run_export()
    finally:
        opExport.cleanUp()

    assert os.path.exists(self.output_file)

    # compare completed segments
    with h5py.File(self.reference_files["output_file"], "r") as f_left:
        data_left = f_left["exported_data"][:]
    with h5py.File(self.output_file, "r") as f_right:
        data_right = f_right["exported_data"][:]

    numpy.testing.assert_array_almost_equal(data_left, data_right)

    # Save the project
    saveThread = self.shell.onSaveProjectActionTriggered()
    saveThread.join()