This page collects typical usage examples of the Python method niftynet.io.image_reader.ImageReader.add_preprocessing_layers: what the method does and how it is called in real code. For wider context, see the containing class, niftynet.io.image_reader.ImageReader.
The ten code examples below are drawn from open-source projects that use ImageReader.add_preprocessing_layers.
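Before the examples, a minimal sketch of the call pattern they all share: build a reader, initialise it with data and task parameters plus a file list, then attach preprocessing layers. The fixtures (SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list) are borrowed from Example 2 below; the particular layers chosen here are illustrative, not prescribed by the API.

from niftynet.io.image_reader import ImageReader
from niftynet.layer.mean_variance_normalisation import \
    MeanVarNormalisationLayer
from niftynet.layer.pad import PadLayer

# a reader that loads the 'image' field of each subject
reader = ImageReader(['image'])
reader.initialise(SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)

# every sample the reader yields will pass through these layers in turn
reader.add_preprocessing_layers([
    MeanVarNormalisationLayer(image_name='image'),
    PadLayer(image_name=['image'], border=(8, 8, 8)),
])

idx, data, interp_order = reader()  # one preprocessed sample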
Example 1: get_label_reader
# Required import: from niftynet.io.image_reader import ImageReader
# (add_preprocessing_layers is then called on an ImageReader instance)
def get_label_reader():
    reader = ImageReader(['label'])
    reader.initialise(MOD_LABEL_DATA, MOD_LABEl_TASK, mod_label_list)
    label_normaliser = DiscreteLabelNormalisationLayer(
        image_name='label',
        modalities=vars(SINGLE_25D_TASK).get('label'),
        model_filename=os.path.join('testing_data', 'agg_test.txt'))
    # a single layer can be passed directly ...
    reader.add_preprocessing_layers(label_normaliser)
    pad_layer = PadLayer(image_name=('label',), border=(5, 6, 7))
    # ... or wrapped in a list
    reader.add_preprocessing_layers([pad_layer])
    return reader
Example 2: test_preprocessing_zero_padding
def test_preprocessing_zero_padding(self):
    reader = ImageReader(['image'])
    reader.initialise(SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)
    idx, data, interp_order = reader()
    self.assertEqual(SINGLE_MOD_DATA['lesion'].interp_order,
                     interp_order['image'][0])
    self.assertAllClose(data['image'].shape, (256, 168, 256, 1, 1))
    # a zero border is a no-op: the output shape is unchanged
    reader.add_preprocessing_layers(
        [PadLayer(image_name=['image'], border=(0, 0, 0))])
    idx, data, interp_order = reader(idx=2)
    self.assertEqual(idx, 2)
    self.assertAllClose(data['image'].shape, (256, 168, 256, 1, 1))
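With a non-zero border the shape should instead grow, since the padding is applied to both sides of each spatial dimension. A hedged variant of the same test (the expected shape is an assumption based on symmetric padding; compare the padded label shape in Example 4):

# assumption: PadLayer pads both sides, so border=(5, 6, 7) adds
# 10, 12 and 14 voxels to the three spatial dimensions
reader.add_preprocessing_layers(
    [PadLayer(image_name=['image'], border=(5, 6, 7))])
idx, data, interp_order = reader(idx=2)
self.assertAllClose(data['image'].shape, (266, 180, 270, 1, 1))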
Example 3: initialise_dataset_loader
def initialise_dataset_loader(
        self, data_param=None, task_param=None, data_partitioner=None):
    self.data_param = data_param
    self.segmentation_param = task_param
    # read each line of csv files into an instance of Subject
    if self.is_training:
        file_lists = []
        if self.action_param.validation_every_n > 0:
            file_lists.append(data_partitioner.train_files)
            file_lists.append(data_partitioner.validation_files)
        else:
            file_lists.append(data_partitioner.all_files)
        self.readers = []
        for file_list in file_lists:
            reader = ImageReader(SUPPORTED_INPUT)
            reader.initialise(data_param, task_param, file_list)
            self.readers.append(reader)
    else:  # in the inference process use image input only
        inference_reader = ImageReader(['image'])
        file_list = data_partitioner.inference_files
        inference_reader.initialise(data_param, task_param, file_list)
        self.readers = [inference_reader]

    foreground_masking_layer = None
    if self.net_param.normalise_foreground_only:
        foreground_masking_layer = BinaryMaskingLayer(
            type_str=self.net_param.foreground_type,
            multimod_fusion=self.net_param.multimod_foreground_type,
            threshold=0.0)
    mean_var_normaliser = MeanVarNormalisationLayer(
        image_name='image', binary_masking_func=foreground_masking_layer)
    label_normaliser = DiscreteLabelNormalisationLayer(
        image_name='label',
        modalities=vars(task_param).get('label'),
        model_filename=self.net_param.histogram_ref_file)

    normalisation_layers = []
    normalisation_layers.append(mean_var_normaliser)
    if task_param.label_normalisation:
        normalisation_layers.append(label_normaliser)

    volume_padding_layer = []
    if self.net_param.volume_padding_size:
        volume_padding_layer.append(PadLayer(
            image_name=SUPPORTED_INPUT,
            border=self.net_param.volume_padding_size))
    for reader in self.readers:
        reader.add_preprocessing_layers(
            normalisation_layers + volume_padding_layer)
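One detail worth noting across these examples: the layers appear to be applied in the order of the list passed to add_preprocessing_layers, and different applications compose them differently. A minimal sketch of the two orderings seen on this page, assuming a configured reader and the layers built above:

# normalise first, then pad (this example): intensity statistics
# are computed before the border voxels are added
reader.add_preprocessing_layers(normalisation_layers + volume_padding_layer)

# pad first, then normalise (as in Example 6): the padded border is
# already present when the normalisation layers run
reader.add_preprocessing_layers(volume_padding_layer + normalisation_layers)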
Example 4: test_trainable_preprocessing
def test_trainable_preprocessing(self):
    label_file = os.path.join('testing_data', 'label_reader.txt')
    if os.path.exists(label_file):
        os.remove(label_file)
    label_normaliser = DiscreteLabelNormalisationLayer(
        image_name='label',
        modalities=vars(LABEL_TASK).get('label'),
        model_filename=os.path.join('testing_data', 'label_reader.txt'))
    reader = ImageReader(['label'])
    # preprocessing layers cannot be attached before initialise()
    with self.assertRaisesRegexp(AssertionError, ''):
        reader.add_preprocessing_layers(label_normaliser)
    reader.initialise(LABEL_DATA, LABEL_TASK, label_list)
    reader.add_preprocessing_layers(label_normaliser)
    reader.add_preprocessing_layers(
        [PadLayer(image_name=['label'], border=(10, 5, 5))])
    idx, data, interp_order = reader(idx=0)
    unique_data = np.unique(data['label'])
    # the normaliser maps the discrete labels onto 0..157
    expected_v1 = np.arange(158, dtype=np.float32)
    # alternative expected mapping: the value 19 is missing
    expected_v2 = np.delete(expected_v1, 19)
    compatible_assert = \
        np.all(unique_data == expected_v1) or \
        np.all(unique_data == expected_v2)
    self.assertTrue(compatible_assert)
    self.assertAllClose(data['label'].shape, (103, 74, 93, 1, 1))
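The "trainable" part of this test: DiscreteLabelNormalisationLayer is a data-dependent layer, so it can only be attached after initialise(), presumably because the reader has not loaded any file list before that point (hence the AssertionError above). A small hedged follow-up check, assuming the discovered label mapping is persisted to model_filename once the layer has been trained on the reader's data:

# assumption: attaching the trained layer writes the label mapping
# to the model_filename given above
assert os.path.exists(os.path.join('testing_data', 'label_reader.txt'))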
Example 5: initialise_dataset_loader
def initialise_dataset_loader(
        self, data_param=None, task_param=None, data_partitioner=None):
    self.data_param = data_param
    self.gan_param = task_param
    # read each line of csv files into an instance of Subject
    if self.is_training:
        file_lists = []
        if self.action_param.validation_every_n > 0:
            file_lists.append(data_partitioner.train_files)
            file_lists.append(data_partitioner.validation_files)
        else:
            file_lists.append(data_partitioner.train_files)
        self.readers = []
        for file_list in file_lists:
            reader = ImageReader(['image', 'conditioning'])
            reader.initialise(data_param, task_param, file_list)
            self.readers.append(reader)
    else:
        inference_reader = ImageReader(['conditioning'])
        file_list = data_partitioner.inference_files
        inference_reader.initialise(data_param, task_param, file_list)
        self.readers = [inference_reader]

    foreground_masking_layer = None
    if self.net_param.normalise_foreground_only:
        foreground_masking_layer = BinaryMaskingLayer(
            type_str=self.net_param.foreground_type,
            multimod_fusion=self.net_param.multimod_foreground_type,
            threshold=0.0)
    mean_var_normaliser = MeanVarNormalisationLayer(
        image_name='image',
        binary_masking_func=foreground_masking_layer)
    histogram_normaliser = None
    if self.net_param.histogram_ref_file:
        histogram_normaliser = HistogramNormalisationLayer(
            image_name='image',
            modalities=vars(task_param).get('image'),
            model_filename=self.net_param.histogram_ref_file,
            binary_masking_func=foreground_masking_layer,
            norm_type=self.net_param.norm_type,
            cutoff=self.net_param.cutoff,
            name='hist_norm_layer')

    normalisation_layers = []
    if self.net_param.normalisation:
        normalisation_layers.append(histogram_normaliser)
    if self.net_param.whitening:
        normalisation_layers.append(mean_var_normaliser)

    augmentation_layers = []
    if self.is_training:
        if self.action_param.random_flipping_axes != -1:
            augmentation_layers.append(RandomFlipLayer(
                flip_axes=self.action_param.random_flipping_axes))
        if self.action_param.scaling_percentage:
            augmentation_layers.append(RandomSpatialScalingLayer(
                min_percentage=self.action_param.scaling_percentage[0],
                max_percentage=self.action_param.scaling_percentage[1]))
        if self.action_param.rotation_angle:
            augmentation_layers.append(RandomRotationLayer())
            augmentation_layers[-1].init_uniform_angle(
                self.action_param.rotation_angle)

    for reader in self.readers:
        reader.add_preprocessing_layers(
            normalisation_layers + augmentation_layers)
Example 6: initialise_dataset_loader
def initialise_dataset_loader(
        self, data_param=None, task_param=None, data_partitioner=None):
    self.data_param = data_param
    self.regression_param = task_param
    # read each line of csv files into an instance of Subject
    if self.is_training:
        file_lists = []
        if self.action_param.validation_every_n > 0:
            file_lists.append(data_partitioner.train_files)
            file_lists.append(data_partitioner.validation_files)
        else:
            file_lists.append(data_partitioner.train_files)
        self.readers = []
        for file_list in file_lists:
            reader = ImageReader(SUPPORTED_INPUT)
            reader.initialise(data_param, task_param, file_list)
            self.readers.append(reader)
    else:
        inference_reader = ImageReader(['image'])
        file_list = data_partitioner.inference_files
        inference_reader.initialise(data_param, task_param, file_list)
        self.readers = [inference_reader]

    mean_var_normaliser = MeanVarNormalisationLayer(
        image_name='image')
    histogram_normaliser = None
    if self.net_param.histogram_ref_file:
        histogram_normaliser = HistogramNormalisationLayer(
            image_name='image',
            modalities=vars(task_param).get('image'),
            model_filename=self.net_param.histogram_ref_file,
            norm_type=self.net_param.norm_type,
            cutoff=self.net_param.cutoff,
            name='hist_norm_layer')

    normalisation_layers = []
    if self.net_param.normalisation:
        normalisation_layers.append(histogram_normaliser)
    if self.net_param.whitening:
        normalisation_layers.append(mean_var_normaliser)

    augmentation_layers = []
    if self.is_training:
        if self.action_param.random_flipping_axes != -1:
            augmentation_layers.append(RandomFlipLayer(
                flip_axes=self.action_param.random_flipping_axes))
        if self.action_param.scaling_percentage:
            augmentation_layers.append(RandomSpatialScalingLayer(
                min_percentage=self.action_param.scaling_percentage[0],
                max_percentage=self.action_param.scaling_percentage[1]))
        if self.action_param.rotation_angle:
            augmentation_layers.append(RandomRotationLayer())
            augmentation_layers[-1].init_uniform_angle(
                self.action_param.rotation_angle)

    volume_padding_layer = []
    if self.net_param.volume_padding_size:
        volume_padding_layer.append(PadLayer(
            image_name=SUPPORTED_INPUT,
            border=self.net_param.volume_padding_size))
    for reader in self.readers:
        reader.add_preprocessing_layers(volume_padding_layer +
                                        normalisation_layers +
                                        augmentation_layers)
Example 7: initialise_dataset_loader
def initialise_dataset_loader(
        self, data_param=None, task_param=None, data_partitioner=None):
    self.data_param = data_param
    self.classification_param = task_param
    file_lists = self.get_file_lists(data_partitioner)
    # read each line of csv files into an instance of Subject
    if self.is_training:
        self.readers = []
        for file_list in file_lists:
            reader = ImageReader(['image', 'label', 'sampler'])
            reader.initialise(data_param, task_param, file_list)
            self.readers.append(reader)
    elif self.is_inference:
        # in the inference process use image input only
        inference_reader = ImageReader(['image'])
        inference_reader.initialise(data_param, task_param, file_lists[0])
        self.readers = [inference_reader]
    elif self.is_evaluation:
        reader = ImageReader({'image', 'label', 'inferred'})
        reader.initialise(data_param, task_param, file_lists[0])
        self.readers = [reader]
    else:
        raise ValueError('Action `{}` not supported. Expected one of {}'
                         .format(self.action, self.SUPPORTED_ACTIONS))

    foreground_masking_layer = None
    if self.net_param.normalise_foreground_only:
        foreground_masking_layer = BinaryMaskingLayer(
            type_str=self.net_param.foreground_type,
            multimod_fusion=self.net_param.multimod_foreground_type,
            threshold=0.0)
    mean_var_normaliser = MeanVarNormalisationLayer(
        image_name='image', binary_masking_func=foreground_masking_layer)
    histogram_normaliser = None
    if self.net_param.histogram_ref_file:
        histogram_normaliser = HistogramNormalisationLayer(
            image_name='image',
            modalities=vars(task_param).get('image'),
            model_filename=self.net_param.histogram_ref_file,
            binary_masking_func=foreground_masking_layer,
            norm_type=self.net_param.norm_type,
            cutoff=self.net_param.cutoff,
            name='hist_norm_layer')
    label_normaliser = None
    if self.net_param.histogram_ref_file:
        label_normaliser = DiscreteLabelNormalisationLayer(
            image_name='label',
            modalities=vars(task_param).get('label'),
            model_filename=self.net_param.histogram_ref_file)

    normalisation_layers = []
    if self.net_param.normalisation:
        normalisation_layers.append(histogram_normaliser)
    if self.net_param.whitening:
        normalisation_layers.append(mean_var_normaliser)
    if task_param.label_normalisation:
        normalisation_layers.append(label_normaliser)

    augmentation_layers = []
    if self.is_training:
        if self.action_param.random_flipping_axes != -1:
            augmentation_layers.append(RandomFlipLayer(
                flip_axes=self.action_param.random_flipping_axes))
        if self.action_param.scaling_percentage:
            augmentation_layers.append(RandomSpatialScalingLayer(
                min_percentage=self.action_param.scaling_percentage[0],
                max_percentage=self.action_param.scaling_percentage[1]))
        if self.action_param.rotation_angle or \
                self.action_param.rotation_angle_x or \
                self.action_param.rotation_angle_y or \
                self.action_param.rotation_angle_z:
            rotation_layer = RandomRotationLayer()
            if self.action_param.rotation_angle:
                rotation_layer.init_uniform_angle(
                    self.action_param.rotation_angle)
            else:
                rotation_layer.init_non_uniform_angle(
                    self.action_param.rotation_angle_x,
                    self.action_param.rotation_angle_y,
                    self.action_param.rotation_angle_z)
            augmentation_layers.append(rotation_layer)

    for reader in self.readers:
        reader.add_preprocessing_layers(
            normalisation_layers +
            augmentation_layers)
Example 8: initialise_dataset_loader
# ......... part of the code omitted here .........
        reader = ImageReader({'image', 'label', 'inferred'})
        reader.initialise(data_param, task_param, file_list)
        self.readers = [reader]
    else:
        raise ValueError('Action `{}` not supported. Expected one of {}'
                         .format(self.action, self.SUPPORTED_ACTIONS))

    foreground_masking_layer = None
    if self.net_param.normalise_foreground_only:
        foreground_masking_layer = BinaryMaskingLayer(
            type_str=self.net_param.foreground_type,
            multimod_fusion=self.net_param.multimod_foreground_type,
            threshold=0.0)
    mean_var_normaliser = MeanVarNormalisationLayer(
        image_name='image', binary_masking_func=foreground_masking_layer)
    histogram_normaliser = None
    if self.net_param.histogram_ref_file:
        histogram_normaliser = HistogramNormalisationLayer(
            image_name='image',
            modalities=vars(task_param).get('image'),
            model_filename=self.net_param.histogram_ref_file,
            binary_masking_func=foreground_masking_layer,
            norm_type=self.net_param.norm_type,
            cutoff=self.net_param.cutoff,
            name='hist_norm_layer')
    label_normalisers = None
    if self.net_param.histogram_ref_file and \
            task_param.label_normalisation:
        label_normalisers = [DiscreteLabelNormalisationLayer(
            image_name='label',
            modalities=vars(task_param).get('label'),
            model_filename=self.net_param.histogram_ref_file)]
        if self.is_evaluation:
            label_normalisers.append(
                DiscreteLabelNormalisationLayer(
                    image_name='inferred',
                    modalities=vars(task_param).get('inferred'),
                    model_filename=self.net_param.histogram_ref_file))
            label_normalisers[-1].key = label_normalisers[0].key

    normalisation_layers = []
    if self.net_param.normalisation:
        normalisation_layers.append(histogram_normaliser)
    if self.net_param.whitening:
        normalisation_layers.append(mean_var_normaliser)
    if task_param.label_normalisation and \
            (self.is_training or not task_param.output_prob):
        normalisation_layers.extend(label_normalisers)

    augmentation_layers = []
    if self.is_training:
        if self.action_param.random_flipping_axes != -1:
            augmentation_layers.append(RandomFlipLayer(
                flip_axes=self.action_param.random_flipping_axes))
        if self.action_param.scaling_percentage:
            augmentation_layers.append(RandomSpatialScalingLayer(
                min_percentage=self.action_param.scaling_percentage[0],
                max_percentage=self.action_param.scaling_percentage[1]))
        if self.action_param.rotation_angle or \
                self.action_param.rotation_angle_x or \
                self.action_param.rotation_angle_y or \
                self.action_param.rotation_angle_z:
            rotation_layer = RandomRotationLayer()
            if self.action_param.rotation_angle:
                rotation_layer.init_uniform_angle(
                    self.action_param.rotation_angle)
            else:
                rotation_layer.init_non_uniform_angle(
                    self.action_param.rotation_angle_x,
                    self.action_param.rotation_angle_y,
                    self.action_param.rotation_angle_z)
            augmentation_layers.append(rotation_layer)
        # add deformation layer
        if self.action_param.do_elastic_deformation:
            spatial_rank = list(self.readers[0].spatial_ranks.values())[0]
            augmentation_layers.append(RandomElasticDeformationLayer(
                spatial_rank=spatial_rank,
                num_controlpoints=self.action_param.num_ctrl_points,
                std_deformation_sigma=self.action_param.deformation_sigma,
                proportion_to_augment=self.action_param.proportion_to_deform))

    volume_padding_layer = []
    if self.net_param.volume_padding_size:
        volume_padding_layer.append(PadLayer(
            image_name=SUPPORTED_INPUT,
            border=self.net_param.volume_padding_size))

    # only add augmentation to first reader (not validation reader)
    self.readers[0].add_preprocessing_layers(
        volume_padding_layer +
        normalisation_layers +
        augmentation_layers)
    for reader in self.readers[1:]:
        reader.add_preprocessing_layers(
            volume_padding_layer +
            normalisation_layers)
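This is the only example that differentiates readers: random augmentation goes to the training reader only, so validation volumes are padded and normalised but never randomly transformed. A condensed sketch of the pattern, assuming readers[0] is the training reader (the local name common is introduced here for illustration):

# deterministic preprocessing for every reader ...
common = volume_padding_layer + normalisation_layers
# ... but random augmentation only for the training reader
self.readers[0].add_preprocessing_layers(common + augmentation_layers)
for reader in self.readers[1:]:
    reader.add_preprocessing_layers(common)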
Example 9: initialise_dataset_loader
def initialise_dataset_loader(
        self, data_param=None, task_param=None, data_partitioner=None):
    self.data_param = data_param
    self.regression_param = task_param
    file_lists = self.get_file_lists(data_partitioner)
    # read each line of csv files into an instance of Subject
    if self.is_training:
        self.readers = []
        for file_list in file_lists:
            reader = ImageReader({'image', 'output', 'weight', 'sampler'})
            reader.initialise(data_param, task_param, file_list)
            self.readers.append(reader)
    elif self.is_inference:
        inference_reader = ImageReader(['image'])
        file_list = data_partitioner.inference_files
        inference_reader.initialise(data_param, task_param, file_lists[0])
        self.readers = [inference_reader]
    elif self.is_evaluation:
        file_list = data_partitioner.inference_files
        reader = ImageReader({'image', 'output', 'inferred'})
        reader.initialise(data_param, task_param, file_lists[0])
        self.readers = [reader]
    else:
        raise ValueError('Action `{}` not supported. Expected one of {}'
                         .format(self.action, self.SUPPORTED_ACTIONS))

    mean_var_normaliser = MeanVarNormalisationLayer(
        image_name='image')
    histogram_normaliser = None
    if self.net_param.histogram_ref_file:
        histogram_normaliser = HistogramNormalisationLayer(
            image_name='image',
            modalities=vars(task_param).get('image'),
            model_filename=self.net_param.histogram_ref_file,
            norm_type=self.net_param.norm_type,
            cutoff=self.net_param.cutoff,
            name='hist_norm_layer')

    normalisation_layers = []
    if self.net_param.normalisation:
        normalisation_layers.append(histogram_normaliser)
    if self.net_param.whitening:
        normalisation_layers.append(mean_var_normaliser)

    augmentation_layers = []
    if self.is_training:
        if self.action_param.random_flipping_axes != -1:
            augmentation_layers.append(RandomFlipLayer(
                flip_axes=self.action_param.random_flipping_axes))
        if self.action_param.scaling_percentage:
            augmentation_layers.append(RandomSpatialScalingLayer(
                min_percentage=self.action_param.scaling_percentage[0],
                max_percentage=self.action_param.scaling_percentage[1]))
        if self.action_param.rotation_angle:
            augmentation_layers.append(RandomRotationLayer())
            augmentation_layers[-1].init_uniform_angle(
                self.action_param.rotation_angle)

    volume_padding_layer = []
    if self.net_param.volume_padding_size:
        volume_padding_layer.append(PadLayer(
            image_name=SUPPORTED_INPUT,
            border=self.net_param.volume_padding_size))
    for reader in self.readers:
        reader.add_preprocessing_layers(volume_padding_layer +
                                        normalisation_layers +
                                        augmentation_layers)
Example 10: initialise_dataset_loader
def initialise_dataset_loader(
        self, data_param=None, task_param=None, data_partitioner=None):
    self.data_param = data_param
    self.segmentation_param = task_param
    # read each line of csv files into an instance of Subject
    if self.is_training:
        file_lists = []
        if self.action_param.validation_every_n > 0:
            file_lists.append(data_partitioner.train_files)
            file_lists.append(data_partitioner.validation_files)
        else:
            file_lists.append(data_partitioner.train_files)
        self.readers = []
        for file_list in file_lists:
            reader = ImageReader(SUPPORTED_INPUT)
            reader.initialise(data_param, task_param, file_list)
            self.readers.append(reader)
    else:  # in the inference process use image input only
        inference_reader = ImageReader(['image'])
        file_list = data_partitioner.inference_files
        inference_reader.initialise(data_param, task_param, file_list)
        self.readers = [inference_reader]

    foreground_masking_layer = None
    if self.net_param.normalise_foreground_only:
        foreground_masking_layer = BinaryMaskingLayer(
            type_str=self.net_param.foreground_type,
            multimod_fusion=self.net_param.multimod_foreground_type,
            threshold=0.0)
    mean_var_normaliser = MeanVarNormalisationLayer(
        image_name='image', binary_masking_func=foreground_masking_layer)
    histogram_normaliser = None
    if self.net_param.histogram_ref_file:
        histogram_normaliser = HistogramNormalisationLayer(
            image_name='image',
            modalities=vars(task_param).get('image'),
            model_filename=self.net_param.histogram_ref_file,
            binary_masking_func=foreground_masking_layer,
            norm_type=self.net_param.norm_type,
            cutoff=self.net_param.cutoff,
            name='hist_norm_layer')
    label_normaliser = None
    if self.net_param.histogram_ref_file:
        label_normaliser = DiscreteLabelNormalisationLayer(
            image_name='label',
            modalities=vars(task_param).get('label'),
            model_filename=self.net_param.histogram_ref_file)

    normalisation_layers = []
    if self.net_param.normalisation:
        normalisation_layers.append(histogram_normaliser)
    if self.net_param.whitening:
        normalisation_layers.append(mean_var_normaliser)
    if task_param.label_normalisation:
        normalisation_layers.append(label_normaliser)

    augmentation_layers = []
    if self.is_training:
        if self.action_param.random_flipping_axes != -1:
            augmentation_layers.append(RandomFlipLayer(
                flip_axes=self.action_param.random_flipping_axes))
        if self.action_param.scaling_percentage:
            augmentation_layers.append(RandomSpatialScalingLayer(
                min_percentage=self.action_param.scaling_percentage[0],
                max_percentage=self.action_param.scaling_percentage[1]))
        if self.action_param.rotation_angle or \
                self.action_param.rotation_angle_x or \
                self.action_param.rotation_angle_y or \
                self.action_param.rotation_angle_z:
            rotation_layer = RandomRotationLayer()
            if self.action_param.rotation_angle:
                rotation_layer.init_uniform_angle(
                    self.action_param.rotation_angle)
            else:
                rotation_layer.init_non_uniform_angle(
                    self.action_param.rotation_angle_x,
                    self.action_param.rotation_angle_y,
                    self.action_param.rotation_angle_z)
            augmentation_layers.append(rotation_layer)

    volume_padding_layer = []
    if self.net_param.volume_padding_size:
        volume_padding_layer.append(PadLayer(
            image_name=SUPPORTED_INPUT,
            border=self.net_param.volume_padding_size))
    for reader in self.readers:
        reader.add_preprocessing_layers(
            volume_padding_layer +
            normalisation_layers +
            augmentation_layers)