本文整理汇总了Python中lazyflow.operators.ioOperators.OpInputDataReader类的典型用法代码示例。如果您正苦于以下问题:Python OpInputDataReader类的具体用法?Python OpInputDataReader怎么用?Python OpInputDataReader使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了OpInputDataReader类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_via_OpExportSlot
def test_via_OpExportSlot(self):
    """Export a random uint8 zyxc volume to DVID via OpExportSlot and read it back."""
    volume = (255 * numpy.random.random((64, 128, 128, 1))).astype(numpy.uint8)
    volume = vigra.taggedView(volume, vigra.defaultAxistags('zyxc'))

    graph = Graph()
    op_source = OpArrayPiper(graph=graph)
    op_source.Input.setValue(volume)

    op_export = OpExportSlot(graph=graph)
    op_export.Input.connect(op_source.Output)
    op_export.OutputFormat.setValue('dvid')
    url = 'http://{hostname}/api/node/{data_uuid}/{data_name}'.format(**self.__dict__)
    op_export.OutputFilenameFormat.setValue(url)
    assert op_export.ExportPath.ready()
    assert op_export.ExportPath.value == url
    op_export.run_export()

    op_reader = OpInputDataReader(graph=graph)
    try:
        op_reader.FilePath.setValue(op_export.ExportPath.value)
        # Read the full ROI back and compare raw pixel values.
        roundtripped = op_reader.Output(*roiFromShape(volume.shape)).wait()
        assert (roundtripped == volume.view(numpy.ndarray)).all(), "Read data didn't match exported data!"
    finally:
        op_reader.cleanUp()
示例2: testBasic_Npy
def testBasic_Npy(self):
    """Export a float32 xy array in 'numpy' format and read it back via OpInputDataReader."""
    source = numpy.random.random((100, 100)).astype(numpy.float32)
    source = vigra.taggedView(source, vigra.defaultAxistags("xy"))

    graph = Graph()
    op_piper = OpArrayPiper(graph=graph)
    op_piper.Input.setValue(source)

    op_export = OpExportSlot(graph=graph)
    op_export.Input.connect(op_piper.Output)
    op_export.OutputFormat.setValue("numpy")
    op_export.OutputFilenameFormat.setValue(self._tmpdir + "/test_export_x{x_start}-{x_stop}_y{y_start}-{y_stop}")
    op_export.CoordinateOffset.setValue((10, 20))

    # The coordinate offset should be baked into the generated filename.
    assert op_export.ExportPath.ready()
    assert os.path.split(op_export.ExportPath.value)[1] == "test_export_x10-110_y20-120.npy"
    op_export.run_export()

    reader = OpInputDataReader(graph=graph)
    try:
        reader.FilePath.setValue(op_export.ExportPath.value)
        roundtripped = reader.Output[:].wait()
        assert (roundtripped == source.view(numpy.ndarray)).all(), "Read data didn't match exported data!"
    finally:
        reader.cleanUp()
示例3: testBasic_2d
def testBasic_2d(self):
    """Export a random uint8 yx image as png and read it back for comparison."""
    image = (255 * numpy.random.random((50, 100))).astype(numpy.uint8)
    image = vigra.taggedView(image, vigra.defaultAxistags("yx"))

    graph = Graph()
    op_piper = OpArrayPiper(graph=graph)
    op_piper.Input.setValue(image)

    op_export = OpExportSlot(graph=graph)
    op_export.Input.connect(op_piper.Output)
    op_export.OutputFormat.setValue("png")
    op_export.OutputFilenameFormat.setValue(self._tmpdir + "/test_export_x{x_start}-{x_stop}_y{y_start}-{y_stop}")
    op_export.CoordinateOffset.setValue((10, 20))

    assert op_export.ExportPath.ready()
    assert os.path.split(op_export.ExportPath.value)[1] == "test_export_x20-120_y10-60.png"
    op_export.run_export()

    reader = OpInputDataReader(graph=graph)
    try:
        reader.FilePath.setValue(op_export.ExportPath.value)
        roundtripped = reader.Output[:].wait()
        # Note: vigra inserts a channel axis, so the read data is xyc.
        assert (roundtripped[..., 0] == image.view(numpy.ndarray)).all(), "Read data didn't match exported data!"
    finally:
        reader.cleanUp()
示例4: test_h5_stack_single_file
def test_h5_stack_single_file(self, sequence_axis):
    """Test stack/sequence reading in hdf5-files for given 'sequence_axis'.

    Writes one dataset per time slice into a single hdf5 file, then reads
    them back as a stack (via a glob path) and compares against the data
    concatenated along `sequence_axis`.
    """
    shape = (4, 8, 16, 32, 3)  # assuming axis guess order is 'tzyxc'
    data = numpy.random.randint(0, 255, size=shape).astype(numpy.uint8)
    # FIX: pass an explicit mode. h5py >= 3 opens files read-only by default,
    # so the old bare h5py.File(name) call would fail on create_group().
    # "a" matches the old h5py default (read/write, create if missing).
    with h5py.File(self.testH5FileName, "a") as f:
        data_group = f.create_group("volumes")
        # One dataset per time point; the reader stacks them back together.
        for index, t_slice in enumerate(data):
            data_group.create_dataset("timepoint-{index:02d}".format(index=index), data=t_slice)

    if sequence_axis == "z":
        data = numpy.concatenate(data, axis=0)
    elif sequence_axis == "c":
        data = numpy.concatenate(data, axis=-1)

    h5SequenceReader = OpInputDataReader(graph=self.graph)
    h5SequenceReader.SequenceAxis.setValue(sequence_axis)
    filenamePlusGlob = "{}/volumes/timepoint-*".format(self.testH5FileName)
    try:
        h5SequenceReader.FilePath.setValue(filenamePlusGlob)
        h5data = h5SequenceReader.Output[:].wait()
        assert h5data.shape == data.shape, f"{h5data.shape}, {data.shape}"
        numpy.testing.assert_array_equal(h5data, data)
    finally:
        # Call cleanUp() to close the file that this operator opened
        h5SequenceReader.cleanUp()
示例5: test_h5_stack_multi_file
def test_h5_stack_multi_file(self, sequence_axis):
    """Test stack/sequence reading in hdf5-files.

    Writes each time slice to its own hdf5 file, then reads the whole
    sequence back via a glob path and compares against the data
    concatenated along `sequence_axis`.
    """
    shape = (4, 8, 16, 32, 3)
    data = numpy.random.randint(0, 255, size=shape).astype(numpy.uint8)
    for index, t_slice in enumerate(data):
        fname = self.testmultiH5FileName.format(index=index)
        # FIX: pass an explicit mode. h5py >= 3 opens files read-only by
        # default, so the old bare h5py.File(fname) call would fail on
        # create_group(). "a" matches the old h5py default.
        with h5py.File(fname, "a") as f:
            data_group = f.create_group("volume")
            data_group.create_dataset("data", data=t_slice)

    if sequence_axis == "z":
        data = numpy.concatenate(data, axis=0)
    elif sequence_axis == "c":
        data = numpy.concatenate(data, axis=-1)

    h5SequenceReader = OpInputDataReader(graph=self.graph)
    h5SequenceReader.SequenceAxis.setValue(sequence_axis)
    # Turn the per-index filename template into a glob pattern.
    globString = self.testmultiH5FileName.replace("02d}", "s}").format(index="*")
    filenamePlusGlob = "{}/volume/data".format(globString)
    try:
        h5SequenceReader.FilePath.setValue(filenamePlusGlob)
        h5data = h5SequenceReader.Output[:].wait()
        assert h5data.shape == data.shape
        numpy.testing.assert_array_equal(h5data, data)
    finally:
        # Call cleanUp() to close the file that this operator opened
        h5SequenceReader.cleanUp()
示例6: test_h5
def test_h5(self):
    """Write a small hdf5 volume, then read a subregion through OpInputDataReader."""
    # Create HDF5 test data
    # FIX: pass an explicit mode. h5py >= 3 opens files read-only by default,
    # so the old bare h5py.File(name) call would fail on create_group().
    # "a" matches the old h5py default (read/write, create if missing).
    with h5py.File(self.testH5FileName, "a") as f:
        f.create_group("volume")
        shape = (1, 2, 3, 4, 5)
        f["volume"].create_dataset(
            "data", data=numpy.indices(shape).sum(0).astype(numpy.float32), chunks=True, compression="gzip"
        )
    # Read the entire HDF5 file and verify the contents
    h5Reader = OpInputDataReader(graph=self.graph)
    try:
        h5Reader.FilePath.setValue(self.testH5FileName + "/volume/data")  # Append internal path
        cwd = os.path.split(__file__)[0]
        h5Reader.WorkingDirectory.setValue(cwd)
        # Grab a section of the h5 data
        h5Data = h5Reader.Output[0, 0, :, :, :].wait()
        assert h5Data.shape == (1, 1, 3, 4, 5)
        # (Just check part of the data)
        # numpy.indices(...).sum(0) means each voxel equals the sum of its indices.
        for k in range(0, shape[2]):
            for l in range(0, shape[3]):
                for m in range(0, shape[4]):
                    assert h5Data[0, 0, k, l, m] == k + l + m
    finally:
        # Call cleanUp() to close the file that this operator opened
        h5Reader.cleanUp()
        assert not h5Reader._file  # Whitebox assertion...
示例7: testBasic_Hdf5
def testBasic_Hdf5(self):
    """Export a float32 xy array to hdf5 (coordinate offset baked into the
    filename) and read it back for comparison."""
    data = numpy.random.random((100, 100)).astype(numpy.float32)
    data = vigra.taggedView(data, vigra.defaultAxistags('xy'))

    graph = Graph()
    opExport = OpExportSlot(graph=graph)
    opExport.Input.setValue(data)
    opExport.OutputFormat.setValue('hdf5')
    opExport.OutputFilenameFormat.setValue(self._tmpdir + '/test_export_x{x_start}-{x_stop}_y{y_start}-{y_stop}')
    opExport.OutputInternalPath.setValue('volume/data')
    opExport.CoordinateOffset.setValue((10, 20))

    assert opExport.ExportPath.ready()
    export_file = PathComponents(opExport.ExportPath.value).externalPath
    assert os.path.split(export_file)[1] == 'test_export_x10-110_y20-120.h5'
    opExport.run_export()

    opRead = OpInputDataReader(graph=graph)
    # FIX: wrap in try/finally so the reader's file handle is released even
    # if an assertion fails (consistent with the other tests in this file).
    try:
        opRead.FilePath.setValue(opExport.ExportPath.value)
        expected_data = data.view(numpy.ndarray)
        read_data = opRead.Output[:].wait()
        assert (read_data == expected_data).all(), "Read data didn't match exported data!"
    finally:
        opRead.cleanUp()
示例8: test_basic
def test_basic(self):
    """Export cached test data as a multipage tiff and verify the round trip."""
    opData = OpArrayCache(graph=self.graph)
    opData.blockShape.setValue(self.testData.shape)
    opData.Input.setValue(self.testData)

    filepath = os.path.join(self._tmpdir, 'multipage.tiff')
    logger.debug("writing to: {}".format(filepath))

    opExport = OpExportMultipageTiff(graph=self.graph)
    opExport.Filepath.setValue(filepath)
    opExport.Input.connect(opData.Output)

    # Run the export
    opExport.run_export()

    opReader = OpInputDataReader(graph=self.graph)
    opReorderAxes = None
    # FIX: use try/finally so the operators are cleaned up even if an
    # assertion fails (the original leaked both on failure).
    try:
        opReader.FilePath.setValue(filepath)

        # The reader assumes xyzc order.
        # We have to transpose the data before we compare.
        opReorderAxes = OpReorderAxes(graph=self.graph)
        opReorderAxes.AxisOrder.setValue(self._axisorder)
        opReorderAxes.Input.connect(opReader.Output)

        # FIX: read the output once and reuse it (the original requested
        # the full ROI a second time inside the final assertion).
        readData = opReorderAxes.Output[:].wait()
        logger.debug("Expected shape={}".format(self.testData.shape))
        logger.debug("Read shape={}".format(readData.shape))
        assert opReorderAxes.Output.meta.shape == self.testData.shape, "Exported files were of the wrong shape or number."
        assert (readData == self.testData.view(numpy.ndarray)).all(), "Exported data was not correct"
    finally:
        # Cleanup
        if opReorderAxes is not None:
            opReorderAxes.cleanUp()
        opReader.cleanUp()
示例9: testBasic
def testBasic(self):
    """Export a renormalized uint8 subregion via OpFormattedDataExport and
    verify the export path, metadata, and round-tripped pixel values."""
    graph = Graph()
    opExport = OpFormattedDataExport(graph=graph)

    data = numpy.random.random((100, 100)).astype(numpy.float32) * 100
    data = vigra.taggedView(data, vigra.defaultAxistags('xy'))
    opExport.Input.setValue(data)

    sub_roi = [(10, 0), (None, 80)]
    opExport.RegionStart.setValue(sub_roi[0])
    opExport.RegionStop.setValue(sub_roi[1])

    # Renormalize floats in [0.0, 100.0] into uint8 values in [100, 200].
    opExport.ExportDtype.setValue(numpy.uint8)
    opExport.InputMin.setValue(0.0)
    opExport.InputMax.setValue(100.0)
    opExport.ExportMin.setValue(100)
    opExport.ExportMax.setValue(200)

    opExport.OutputFormat.setValue('hdf5')
    opExport.OutputFilenameFormat.setValue(self._tmpdir + '/export_x{x_start}-{x_stop}_y{y_start}-{y_stop}')
    opExport.OutputInternalPath.setValue('volume/data')
    opExport.TransactionSlot.setValue(True)

    assert opExport.ImageToExport.ready()
    assert opExport.ExportPath.ready()
    assert opExport.ImageToExport.meta.drange == (100, 200)
    assert opExport.ExportPath.value == self._tmpdir + '/' + 'export_x10-100_y0-80.h5/volume/data'
    opExport.run_export()

    opRead = OpInputDataReader(graph=graph)
    try:
        opRead.FilePath.setValue(opExport.ExportPath.value)
        # Compare with the correct subregion and convert dtype.
        sub_roi[1] = (100, 80)  # Replace 'None' with full extent
        expected_data = data.view(numpy.ndarray)[roiToSlice(*sub_roi)]
        expected_data = expected_data.astype(numpy.uint8)
        expected_data += 100  # see renormalization settings
        assert opRead.Output.meta.shape == expected_data.shape
        assert opRead.Output.meta.dtype == expected_data.dtype
        read_data = opRead.Output[:].wait()
        # Due to rounding errors, the actual result and the expected result may differ by 1
        # e.g. if the original pixel value was 32.99999999
        # Also, must promote to signed values to avoid unsigned rollover
        # See issue ( https://github.com/ilastik/lazyflow/issues/165 ).
        expected_data_signed = expected_data.astype(numpy.int16)
        # FIX: promote the data that was actually read back. The original
        # converted expected_data here too, so it compared expected data
        # with itself and the check always passed.
        read_data_signed = read_data.astype(numpy.int16)
        difference_from_expected = expected_data_signed - read_data_signed
        assert (numpy.abs(difference_from_expected) <= 1).all(), "Read data didn't match exported data!"
    finally:
        opRead.cleanUp()
示例10: testBasic
def testBasic(self):
    """Round-trip a uint8 subregion through OpDataExport, checking both the
    generated export path and the exported pixel data."""
    graph = Graph()
    opExport = OpDataExport(graph=graph)
    try:
        opExport.TransactionSlot.setValue(True)
        opExport.WorkingDirectory.setValue(self._tmpdir)

        # Simulate the important fields of a DatasetInfo object
        class MockDatasetInfo(object):
            pass

        rawInfo = MockDatasetInfo()
        rawInfo.nickname = 'test_nickname'
        rawInfo.filePath = './somefile.h5'
        opExport.RawDatasetInfo.setValue(rawInfo)
        opExport.SelectionNames.setValue(['Mock Export Data'])

        data = vigra.taggedView(
            numpy.random.random((100, 100)).astype(numpy.float32) * 100,
            vigra.defaultAxistags('xy'))
        opExport.Inputs.resize(1)
        opExport.Inputs[0].setValue(data)

        sub_roi = [(10, 20), (90, 80)]
        opExport.RegionStart.setValue(sub_roi[0])
        opExport.RegionStop.setValue(sub_roi[1])
        opExport.ExportDtype.setValue(numpy.uint8)
        opExport.OutputFormat.setValue('hdf5')
        opExport.OutputFilenameFormat.setValue('{dataset_dir}/{nickname}_export_x{x_start}-{x_stop}_y{y_start}-{y_stop}')
        opExport.OutputInternalPath.setValue('volume/data')

        assert opExport.ImageToExport.ready()
        assert opExport.ExportPath.ready()

        expected_path = self._tmpdir + '/' + rawInfo.nickname + '_export_x10-90_y20-80.h5/volume/data'
        computed_path = opExport.ExportPath.value
        assert os.path.normpath(computed_path) == os.path.normpath(expected_path), \
            "Expected {}\nGot: {}".format(expected_path, computed_path)
        opExport.run_export()
    finally:
        opExport.cleanUp()

    opRead = OpInputDataReader(graph=graph)
    try:
        opRead.FilePath.setValue(computed_path)
        # Compare with the correct subregion and convert dtype.
        expected_data = data.view(numpy.ndarray)[roiToSlice(*sub_roi)].astype(numpy.uint8)
        read_data = opRead.Output[:].wait()
        assert (read_data == expected_data).all(), "Read data didn't match exported data!"
    finally:
        opRead.cleanUp()
示例11: handleImportLabelsAction
def handleImportLabelsAction():
    """Prompt the user for a single hdf5 label volume, let them pick an
    internal dataset if there are several, reorder its axes to match the
    label layer, and import it.

    Note: closure — relies on `self` (the GUI) from the enclosing scope.
    """
    # Find the directory of the most recently opened image file
    mostRecentImageFile = PreferencesManager().get('DataSelection', 'recent image')
    if mostRecentImageFile is not None:
        defaultDirectory = os.path.split(mostRecentImageFile)[0]
    else:
        defaultDirectory = os.path.expanduser('~')

    fileNames = DataSelectionGui.getImageFileNamesToOpen(self, defaultDirectory)
    fileNames = list(map(str, fileNames))

    # For now, we require a single hdf5 file
    if len(fileNames) > 1:
        QMessageBox.critical(self, "Too many files",
                             "Labels must be contained in a single hdf5 volume.")
        return
    if len(fileNames) == 0:
        # user cancelled
        return

    file_path = fileNames[0]
    internal_paths = DataSelectionGui.getPossibleInternalPaths(file_path)
    if len(internal_paths) == 0:
        QMessageBox.critical(self, "No volumes in file",
                             "Couldn't find a suitable dataset in your hdf5 file.")
        return
    if len(internal_paths) == 1:
        internal_path = internal_paths[0]
    else:
        dlg = H5VolumeSelectionDlg(internal_paths, self)
        if dlg.exec_() == QDialog.Rejected:
            return
        selected_index = dlg.combo.currentIndex()
        internal_path = str(internal_paths[selected_index])

    path_components = PathComponents(file_path)
    path_components.internalPath = str(internal_path)

    # FIX: pre-bind to None so the finally block cannot raise NameError if
    # operator construction (or any earlier line in the try) fails.
    opReader = None
    op5 = None
    try:
        top_op = self.topLevelOperatorView
        opReader = OpInputDataReader(parent=top_op.parent)
        opReader.FilePath.setValue(path_components.totalPath())

        # Reorder the axes
        op5 = OpReorderAxes(parent=top_op.parent)
        op5.AxisOrder.setValue(top_op.LabelInputs.meta.getAxisKeys())
        op5.Input.connect(opReader.Output)

        # Finally, import the labels
        top_op.importLabels(top_op.current_view_index(), op5.Output)
    finally:
        if op5 is not None:
            op5.cleanUp()
        if opReader is not None:
            opReader.cleanUp()
示例12: test_npy_with_roi
def test_npy_with_roi(self):
    """Verify OpInputDataReader honors SubVolumeRoi when reading an .npy file."""
    volume = numpy.indices((100, 100, 200)).astype(numpy.uint8).sum(0)
    assert volume.shape == (100, 100, 200)
    numpy.save(self.testNpyDataFileName, volume)

    reader = OpInputDataReader(graph=lazyflow.graph.Graph())
    try:
        reader.FilePath.setValue(self.testNpyDataFileName)
        reader.SubVolumeRoi.setValue(((10, 20, 30), (50, 70, 90)))
        roi_data = reader.Output[:].wait()
        # Output covers only the requested sub-volume.
        assert roi_data.shape == (40, 50, 60)
        assert (roi_data == volume[10:50, 20:70, 30:90]).all()
    finally:
        reader.cleanUp()
示例13: testBasic_Dvid
def testBasic_Dvid(self):
    """Export a volume to a mock DVID server and read it back for comparison."""
    if _skip_dvid:
        raise nose.SkipTest

    # Spin up a mock dvid server to test with.
    dvid_dataset, data_uuid, data_name = "datasetA", "abcde", "indices_data"
    mockserver_data_file = self._tmpdir + '/mockserver_data.h5'
    with H5MockServerDataFile(mockserver_data_file) as test_h5file:
        test_h5file.add_node(dvid_dataset, data_uuid)
    server_proc, shutdown_event = H5MockServer.create_and_start(mockserver_data_file, "localhost", 8000,
                                                                same_process=False, disable_server_logging=True)
    try:
        data = 255 * numpy.random.random((100, 100, 4))
        data = data.astype(numpy.uint8)
        data = vigra.taggedView(data, vigra.defaultAxistags('xyc'))

        graph = Graph()
        opPiper = OpArrayPiper(graph=graph)
        opPiper.Input.setValue(data)

        opExport = OpExportSlot(graph=graph)
        opExport.Input.connect(opPiper.Output)
        opExport.OutputFormat.setValue('dvid')
        url = 'http://localhost:8000/api/node/{data_uuid}/{data_name}'.format(**locals())
        opExport.OutputFilenameFormat.setValue(url)

        assert opExport.ExportPath.ready()
        assert opExport.ExportPath.value == url
        opExport.run_export()

        # FIX: construct the reader *before* entering the try block, so the
        # finally clause cannot raise NameError if construction fails.
        opRead = OpInputDataReader(graph=graph)
        try:
            opRead.FilePath.setValue(opExport.ExportPath.value)
            expected_data = data.view(numpy.ndarray)
            read_data = opRead.Output[:].wait()
            assert (read_data == expected_data).all(), "Read data didn't match exported data!"
        finally:
            opRead.cleanUp()
    finally:
        # Always shut the mock server down, even if the test failed.
        shutdown_event.set()
        server_proc.join()
示例14: testBasic
def testBasic(self):
    """Write a float32 xy array with OpNpyWriter and read it back for comparison."""
    data = numpy.random.random((100, 100)).astype(numpy.float32)
    data = vigra.taggedView(data, vigra.defaultAxistags('xy'))

    graph = Graph()
    opWriter = OpNpyWriter(graph=graph)
    opWriter.Input.setValue(data)
    opWriter.Filepath.setValue(self._tmpdir + '/npy_writer_test_output.npy')

    # Write it...
    opWriter.write()

    opRead = OpInputDataReader(graph=graph)
    # FIX: wrap in try/finally so the reader is cleaned up even if the
    # assertion fails (consistent with the other tests in this file).
    try:
        opRead.FilePath.setValue(opWriter.Filepath.value)
        expected_data = data.view(numpy.ndarray)
        read_data = opRead.Output[:].wait()
        assert (read_data == expected_data).all(), "Read data didn't match exported data!"
    finally:
        opRead.cleanUp()
示例15: setupOutputs
def setupOutputs(self):
    """(Re)configure the on-disk reader pipeline for the current DatasetPath.

    Tears down any previous reader/metadata-injector pair, then builds a
    fresh OpInputDataReader -> OpMetadataInjector chain and connects it to
    Output. If the file can't be read (or its shape/dtype doesn't match
    Input), everything is cleaned up and Output is marked NOTREADY.
    """
    # Tear down the pipeline from any previous configuration.
    if self._opReader is not None:
        self.Output.disconnect()
        if self._opMetadataInjector:
            self._opMetadataInjector.cleanUp()
            self._opMetadataInjector = None
        self._opReader.cleanUp()
        self._opReader = None
    try:
        # Configure the reader
        dataReady = True
        self._opReader = OpInputDataReader(parent=self)
        self._opReader.WorkingDirectory.setValue(self.WorkingDirectory.value)
        self._opReader.FilePath.setValue(self.DatasetPath.value)

        # Since most file formats don't save meta-info,
        # The reader output's axis order may be incorrect.
        # (For example, if we export in npy format with zxy order,
        # the Npy reader op will simply assume xyz order when it reads the data.)
        # Force the metadata back to the correct state by copying select items from Input.meta
        metadata = {}
        metadata['axistags'] = self.Input.meta.axistags
        metadata['drange'] = self.Input.meta.drange
        metadata['display_mode'] = self.Input.meta.display_mode
        self._opMetadataInjector = OpMetadataInjector(parent=self)
        self._opMetadataInjector.Input.connect(self._opReader.Output)
        self._opMetadataInjector.Metadata.setValue(metadata)

        # The on-disk data must agree with Input in shape and dtype.
        dataReady &= self._opMetadataInjector.Output.meta.shape == self.Input.meta.shape
        dataReady &= self._opMetadataInjector.Output.meta.dtype == self.Input.meta.dtype
        if dataReady:
            self.Output.connect(self._opMetadataInjector.Output)
        else:
            self._opMetadataInjector.cleanUp()
            self._opMetadataInjector = None
            self._opReader.cleanUp()
            self._opReader = None
            self.Output.meta.NOTREADY = True
    # except OpInputDataReader.DatasetReadError:
    except Exception as ex:
        # logger.debug( "On-disk image can't be read: {}".format(ex) )
        # Note: If the data is exported as a 'sequence', then this will always be NOTREADY
        # because the 'path' (e.g. 'myfile_{slice_index}.png' will be nonexistent.
        # That's okay because a stack is probably too slow to be of use for a preview anyway.
        if self._opMetadataInjector:
            self._opMetadataInjector.cleanUp()
            self._opMetadataInjector = None
        # FIX: guard the reader cleanup. If the exception fired before the
        # reader was constructed, _opReader is still None and the original
        # unconditional cleanUp() would raise AttributeError, masking the
        # real error instead of marking Output NOTREADY.
        if self._opReader:
            self._opReader.cleanUp()
            self._opReader = None
        # The dataset doesn't exist yet.
        self.Output.meta.NOTREADY = True