This article collects typical usage examples of the Python method pydpiper.pipeline.Pipeline.addStage. If you are wondering how Pipeline.addStage is used, what it is for, or where to find real examples of it, the curated code samples below may help. You can also explore further usage examples of the containing class, pydpiper.pipeline.Pipeline.
The following shows 15 code examples of the Pipeline.addStage method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
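Before the examples, here is a minimal sketch of the pattern they all share: wrap a command line in a CmdStage, attach a log file, and register the stage with Pipeline.addStage. The file names are hypothetical, and the sketch assumes that CmdStage, InputFile, OutputFile and LogFile can be imported from pydpiper.pipeline alongside Pipeline, as in the pydpiper releases these snippets come from; the mincmath call simply mirrors Examples 2 and 8 below.

from pydpiper.pipeline import Pipeline, CmdStage, InputFile, OutputFile, LogFile

# Hypothetical file names; only the call pattern matters here.
maskVol = "subject01_mask.mnc"
inputVol = "subject01.mnc"
outputVol = "subject01_masked.mnc"
logFile = "subject01_masked.log"

p = Pipeline()

# Build a command-line stage, mirroring the mincmath call in Examples 2 and 8.
cmd = ["mincmath", "-clobber", "-mult",
       InputFile(inputVol), InputFile(maskVol),
       OutputFile(outputVol)]
stage = CmdStage(cmd)
stage.setLogFile(LogFile(logFile))

# Register the stage: the pipeline records its inputs and outputs and
# schedules it once its dependencies have been produced.
p.addStage(stage)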
Example 1: resampleToCommon
# Required imports: from pydpiper.pipeline import Pipeline [as alias]
# Or: from pydpiper.pipeline.Pipeline import addStage [as alias]
def resampleToCommon(xfm, FH, statsGroup, statsKernels, nlinFH):
blurs = []
if isinstance(statsKernels, list):
blurs = statsKernels
elif isinstance(statsKernels, str):
for i in statsKernels.split(","):
blurs.append(float(i))
else:
print("Improper type of blurring kernels specified for stats calculation: " + str(statsKernels))
sys.exit()
pipeline = Pipeline()
outputDirectory = FH.statsDir
filesToResample = []
for b in blurs:
filesToResample.append(statsGroup.relativeJacobians[b])
if statsGroup.absoluteJacobians:
filesToResample.append(statsGroup.absoluteJacobians[b])
for f in filesToResample:
outputBase = removeBaseAndExtension(f).split(".mnc")[0]
outputFile = createBaseName(outputDirectory, outputBase + "_common" + ".mnc")
logFile = fh.logFromFile(FH.logDir, outputFile)
targetAndLike=nlinFH.getLastBasevol()
res = ma.mincresample(f,
targetAndLike,
likeFile=targetAndLike,
transform=xfm,
output=outputFile,
logFile=logFile,
argArray=["-sinc"])
pipeline.addStage(res)
return pipeline
Example 2: maskFiles
# Required imports: from pydpiper.pipeline import Pipeline [as alias]
# Or: from pydpiper.pipeline.Pipeline import addStage [as alias]
def maskFiles(FH, isAtlas, numAtlases=1):
""" Assume that if there is more than one atlas, multiple
masks were generated and we need to perform a voxel_vote.
Otherwise, assume we are using inputLabels from crossing with
only one atlas.
"""
#MF TODO: Make this more general to handle pairwise option.
p = Pipeline()
if not isAtlas:
if numAtlases > 1:
voxel = voxelVote(FH, False, True)
p.addStage(voxel)
mincMathInput = voxel.outputFiles[0]
else:
mincMathInput = FH.returnLabels(True)[0]
FH.setMask(mincMathInput)
else:
mincMathInput = FH.getMask()
mincMathOutput = fh.createBaseName(FH.resampledDir, FH.basename)
mincMathOutput += "_masked.mnc"
logFile = fh.logFromFile(FH.logDir, mincMathOutput)
cmd = ["mincmath"] + ["-clobber"] + ["-mult"]
cmd += [InputFile(mincMathInput)] + [InputFile(FH.getLastBasevol())]
cmd += [OutputFile(mincMathOutput)]
mincMath = CmdStage(cmd)
mincMath.setLogFile(LogFile(logFile))
p.addStage(mincMath)
FH.setLastBasevol(mincMathOutput)
return(p)
Example 3: __init__
# Required imports: from pydpiper.pipeline import Pipeline [as alias]
# Or: from pydpiper.pipeline.Pipeline import addStage [as alias]
class SetResolution:
def __init__(self, filesToResample, resolution):
"""During initialization make sure all files are resampled
at the resolution we'd like to use for each pipeline stage
"""
self.p = Pipeline()
for FH in filesToResample:
dirForOutput = self.getOutputDirectory(FH)
currentRes = volumeFromFile(FH.getLastBasevol()).separations
if not abs(abs(currentRes[0]) - abs(resolution)) < 0.01:
crop = ma.autocrop(resolution, FH, defaultDir=dirForOutput)
self.p.addStage(crop)
mask = FH.getMask()
if mask:
#Need to resample the mask as well.
cropMask = ma.mincresampleMask(FH,
FH,
outputLocation=FH,
likeFile=FH)
self.p.addStage(cropMask)
def getOutputDirectory(self, FH):
"""Sets output directory based on whether or not we have a full
RegistrationPipeFH class or we are just using RegistrationFHBase"""
if isinstance(FH, rfh.RegistrationPipeFH):
outputDir = "resampled"
else:
outputDir = FH.basedir
return outputDir
Example 4: __init__
# Required imports: from pydpiper.pipeline import Pipeline [as alias]
# Or: from pydpiper.pipeline.Pipeline import addStage [as alias]
class LabelAndFileResampling:
def __init__(self,
inputPipeFH,
templatePipeFH,
name="initial",
createMask=False):
self.p = Pipeline()
self.name = name
if createMask:
resampleDefault = "tmp"
labelsDefault = "tmp"
else:
resampleDefault = "resampled"
labelsDefault = "labels"
# Resample all inputLabels
inputLabelArray = templatePipeFH.returnLabels(True)
if len(inputLabelArray) > 0:
""" for the initial registration, resulting labels should be added
to inputLabels array for subsequent pairwise registration
otherwise labels should be added to labels array for voting """
if self.name == "initial":
addOutputToInputLabels = True
else:
addOutputToInputLabels = False
for i in range(len(inputLabelArray)):
"""Note: templatePipeFH and inputPipeFH have the reverse order
from how they are passed into this function. This is intentional
because the mincresample classes use the first argument as the
one from which to get the file to be resampled. Here, either the
mask or labels to be resampled come from the template."""
if createMask:
resampleStage = ma.mincresampleMask(templatePipeFH,
inputPipeFH,
defaultDir=labelsDefault,
likeFile=inputPipeFH,
argArray=["-invert"],
outputLocation=inputPipeFH,
labelIndex=i,
setInputLabels=addOutputToInputLabels)
else:
resampleStage = ma.mincresampleLabels(templatePipeFH,
inputPipeFH,
defaultDir=labelsDefault,
likeFile=inputPipeFH,
argArray=["-invert"],
outputLocation=inputPipeFH,
labelIndex=i,
setInputLabels=addOutputToInputLabels)
self.p.addStage(resampleStage)
# resample files
resampleStage = ma.mincresample(templatePipeFH,
inputPipeFH,
defaultDir=resampleDefault,
likeFile=inputPipeFH,
argArray=["-invert"],
outputLocation=inputPipeFH)
self.p.addStage(resampleStage)
Example 5: __init__
# Required imports: from pydpiper.pipeline import Pipeline [as alias]
# Or: from pydpiper.pipeline.Pipeline import addStage [as alias]
class FullIterativeLSQ12Nlin:
"""Does a full iterative LSQ12 and NLIN. Basically iterative model building starting from LSQ6
and without stats at the end. Designed to be called as part of a larger application.
Specifying an initModel is optional; all other arguments are mandatory."""
def __init__(self, inputs, dirs, options, avgPrefix=None, initModel=None):
self.inputs = inputs
self.dirs = dirs
self.options = options
self.avgPrefix = avgPrefix
self.initModel = initModel
self.nlinFH = None
self.p = Pipeline()
self.buildPipeline()
def buildPipeline(self):
lsq12LikeFH = None
if self.initModel:
lsq12LikeFH = self.initModel[0]
elif self.options.lsq12_likeFile:
lsq12LikeFH = self.options.lsq12_likeFile
lsq12module = lsq12.FullLSQ12(self.inputs,
self.dirs.lsq12Dir,
likeFile=lsq12LikeFH,
maxPairs=self.options.lsq12_max_pairs,
lsq12_protocol=self.options.lsq12_protocol,
subject_matter=self.options.lsq12_subject_matter)
lsq12module.iterate()
self.p.addPipeline(lsq12module.p)
self.lsq12Params = lsq12module.lsq12Params
if lsq12module.lsq12AvgFH.getMask() == None:
if self.initModel:
lsq12module.lsq12AvgFH.setMask(self.initModel[0].getMask())
if not self.avgPrefix:
self.avgPrefix = self.options.pipeline_name
nlinModule = nlin.initializeAndRunNLIN(self.dirs.lsq12Dir,
self.inputs,
self.dirs.nlinDir,
avgPrefix=self.avgPrefix,
createAvg=False,
targetAvg=lsq12module.lsq12AvgFH,
nlin_protocol=self.options.nlin_protocol,
reg_method=self.options.reg_method)
self.p.addPipeline(nlinModule.p)
self.nlinFH = nlinModule.nlinAverages[-1]
self.nlinParams = nlinModule.nlinParams
self.initialTarget = nlinModule.initialTarget
# Now we need the full transform to go back to LSQ6 space
for i in self.inputs:
linXfm = lsq12module.lsq12AvgXfms[i]
nlinXfm = i.getLastXfm(self.nlinFH)
outXfm = st.createOutputFileName(i, nlinXfm, "transforms", "_with_additional.xfm")
xc = ma.xfmConcat([linXfm, nlinXfm], outXfm, fh.logFromFile(i.logDir, outXfm))
self.p.addStage(xc)
i.addAndSetXfmToUse(self.nlinFH, outXfm)
Example 6: getXfms
# Required imports: from pydpiper.pipeline import Pipeline [as alias]
# Or: from pydpiper.pipeline.Pipeline import addStage [as alias]
def getXfms(nlinFH, subjects, space, mbmDir, time=None):
"""For each file in the build-model registration (associated with the specified
time point), do the following:
1. Find the to-native.xfm for that file.
2. Find the matching subject at the specified time point
3. Set this xfm to be the last xfm from nlin average to subject from step #2.
4. Find the -from-native.xfm file.
5. Set this xfm to be the last xfm from subject to nlin.
Note: assume that the names in processedDir match the beginning of the file
names for each subject.
We also assume subjects is either a dictionary or a list.
"""
"""First handle subjects if dictionary or list"""
if isinstance(subjects, list):
inputs = subjects
elif isinstance(subjects, dict):
inputs = []
for s in subjects:
inputs.append(subjects[s][time])
else:
logger.error("getXfms only takes a dictionary or list of subjects. Incorrect type has been passed. Exiting...")
sys.exit()
pipeline = Pipeline()
baseNames = next(walk(mbmDir))[1]
for b in baseNames:
if space == "lsq6":
xfmToNative = abspath(mbmDir + "/" + b + "/transforms/" + b + "-final-to_lsq6.xfm")
elif space == "lsq12":
xfmToNative = abspath(mbmDir + "/" + b + "/transforms/" + b + "-final-nlin.xfm")
xfmFromNative = abspath(mbmDir + "/" + b + "/transforms/" + b + "_inv_nonlinear.xfm")
elif space == "native":
xfmToNative = abspath(mbmDir + "/" + b + "/transforms/" + b + "-to-native.xfm")
xfmFromNative = abspath(mbmDir + "/" + b + "/transforms/" + b + "-from-native.xfm")
else:
logger.error("getXfms can only retrieve transforms to and from native, lsq6 or lsq12 space. Invalid parameter has been passed.")
sys.exit()
for inputFH in inputs:
if fnmatch.fnmatch(inputFH.getLastBasevol(), "*" + b + "*"):
if space=="lsq6":
ix = ma.xfmInvert(xfmToNative, inputFH)
pipeline.addStage(ix)
xfmFromNative = ix.outputFiles[0]
nlinFH.setLastXfm(inputFH, xfmToNative)
inputFH.setLastXfm(nlinFH, xfmFromNative)
return pipeline
Example 7: __init__
# Required imports: from pydpiper.pipeline import Pipeline [as alias]
# Or: from pydpiper.pipeline.Pipeline import addStage [as alias]
class HierarchicalMinctracc:
"""Default HierarchicalMinctracc currently does:
1. 2 lsq12 stages with a blur of 0.25
2. 5 nlin stages with a blur of 0.25
3. 1 nlin stage with no blur"""
def __init__(self,
inputPipeFH,
templatePipeFH,
steps=[1,0.5,0.5,0.2,0.2,0.1],
blurs=[0.25,0.25,0.25,0.25,0.25, -1],
gradients=[False, False, True, False, True, False],
iterations=[60,60,60,10,10,4],
simplexes=[3,3,3,1.5,1.5,1],
w_translations=0.2,
linearparams = {'type' : "lsq12", 'simplex' : 1, 'step' : 1},
defaultDir="tmp"):
self.p = Pipeline()
for b in blurs:
#MF TODO: -1 case is also handled in blur. Need here for addStage.
#Fix this redundancy and/or better design?
if b != -1:
tblur = ma.blur(templatePipeFH, b, gradient=True)
iblur = ma.blur(inputPipeFH, b, gradient=True)
self.p.addStage(tblur)
self.p.addStage(iblur)
# Do standard LSQ12 alignment prior to non-linear stages
lsq12reg = lsq12.LSQ12(inputPipeFH,
templatePipeFH,
defaultDir=defaultDir)
self.p.addPipeline(lsq12reg.p)
# create the nonlinear registrations
for i in range(len(steps)):
"""For the final stage, make sure the output directory is transforms."""
if i == (len(steps) - 1):
defaultDir = "transforms"
nlinStage = ma.minctracc(inputPipeFH,
templatePipeFH,
defaultDir=defaultDir,
blur=blurs[i],
gradient=gradients[i],
iterations=iterations[i],
step=steps[i],
similarity=0.8,
w_translations=w_translations,
simplex=simplexes[i])
self.p.addStage(nlinStage)
Example 8: maskFiles
# Required imports: from pydpiper.pipeline import Pipeline [as alias]
# Or: from pydpiper.pipeline.Pipeline import addStage [as alias]
def maskFiles(FH, isAtlas, numAtlases=1):
""" Assume that if there is more than one atlas, multiple
masks were generated and we need to perform a voxel_vote.
Otherwise, assume we are using inputLabels from crossing with
only one atlas.
"""
#MF TODO: Make this more general to handle pairwise option.
p = Pipeline()
if not isAtlas:
if numAtlases > 1:
voxel = voxelVote(FH, False, True)
p.addStage(voxel)
mincMathInput = voxel.outputFiles[0]
else:
mincMathInput = FH.returnLabels(True)[0]
FH.setMask(mincMathInput)
else:
mincMathInput = FH.getMask()
mincMathOutput = fh.createBaseName(FH.resampledDir, FH.basename)
mincMathOutput += "_masked.mnc"
logFile = fh.logFromFile(FH.logDir, mincMathOutput)
cmd = ["mincmath"] + ["-clobber"] + ["-mult"]
# In response to issue #135
# the order of the input files to mincmath matters. By default the
# first input file is used as a "like file" for the output file.
# We should make sure that the mask is not used for that, because
# it has an image range from 0 to 1; not something we want to be
# set for the masked output file
# average mask
cmd += [InputFile(FH.getLastBasevol())] + [InputFile(mincMathInput)]
cmd += [OutputFile(mincMathOutput)]
mincMath = CmdStage(cmd)
mincMath.setLogFile(LogFile(logFile))
p.addStage(mincMath)
FH.setLastBasevol(mincMathOutput)
return(p)
Example 9: stats
# Required imports: from pydpiper.pipeline import Pipeline [as alias]
# Or: from pydpiper.pipeline.Pipeline import addStage [as alias]
class LongitudinalStatsConcatAndResample:
""" For each subject:
1. Calculate stats (displacement, absolute jacobians, relative jacobians) between i and i+1 time points
2. Calculate transform from subject to common space (nlinFH) and invert it.
For most subjects this will require some amount of transform concatenation.
3. Calculate the stats (displacement, absolute jacobians, relative jacobians) from common space
to each timepoint.
"""
def __init__(self, subjects, timePoint, nlinFH, statsKernels, commonName):
self.subjects = subjects
self.timePoint = timePoint
self.nlinFH = nlinFH
self.blurs = []
self.setupBlurs(statsKernels)
self.commonName = commonName
self.p = Pipeline()
self.buildPipeline()
def setupBlurs(self, statsKernels):
if isinstance(statsKernels, list):
self.blurs = statsKernels
elif isinstance(statsKernels, str):
for i in statsKernels.split(","):
self.blurs.append(float(i))
else:
print("Improper type of blurring kernels specified for stats calculation: " + str(statsKernels))
sys.exit()
def statsCalculation(self, inputFH, targetFH, xfm=None, useChainStats=True):
"""If useChainStats=True, calculate stats between input and target.
This happens for all i to i+1 calcs.
If useChainStats=False, calculate stats in the standard way, from target to
input. We do this when we go from the common space to all others. """
if useChainStats:
stats = st.CalcChainStats(inputFH, targetFH, self.blurs)
else:
stats = st.CalcStats(inputFH, targetFH, self.blurs)
self.p.addPipeline(stats.p)
"""If an xfm is specified, resample all to this common space"""
if xfm:
if not self.nlinFH:
likeFH = targetFH
else:
likeFH = self.nlinFH
res = resampleToCommon(xfm, inputFH, stats.statsGroup, self.blurs, likeFH)
self.p.addPipeline(res)
def statsAndConcat(self, s, i, count, beforeAvg=True):
"""Construct array to common space for this timepoint.
This builds upon arrays from previous calls."""
if beforeAvg:
xfm = s[i].getLastXfm(s[i+1])
else:
xfm = s[i].getLastXfm(s[i-1])
"""Set this transform as last xfm from input to nlin and calculate nlin to s[i] stats"""
if self.nlinFH:
self.xfmToCommon.insert(0, xfm)
""" Concat transforms to get xfmToCommon and calculate statistics
Note that inverted transform, which is what we want, is calculated in
the statistics module. """
xtc = createBaseName(s[i].transformsDir, s[i].basename + "_to_" + self.commonName + ".xfm")
xc = ma.xfmConcat(self.xfmToCommon, xtc, fh.logFromFile(s[i].logDir, xtc))
self.p.addStage(xc)
# here in order to visually inspect the alignment with the common
# time point, we should resample this subject:
inputResampledToCommon = createBaseName(s[i].resampledDir, s[i].basename + "_to_" + self.commonName + ".mnc")
logToCommon = fh.logFromFile(s[i].logDir, inputResampledToCommon)
resampleCmd = ma.mincresample(s[i],
self.nlinFH,
likeFile=self.nlinFH,
transform=xtc,
output=inputResampledToCommon,
logFile=logToCommon,
argArray=["-sinc"])
self.p.addStage(resampleCmd)
s[i].addAndSetXfmToUse(self.nlinFH, xtc)
self.statsCalculation(s[i], self.nlinFH, xfm=None, useChainStats=False)
else:
xtc=None
"""Calculate i to i+1 stats for all but final timePoint"""
if count - i > 1:
self.statsCalculation(s[i], s[i+1], xfm=xtc, useChainStats=True)
def buildPipeline(self):
for subj in self.subjects:
s = self.subjects[subj]
count = len(s)
"""Wherever iterative model building was run, the indiv --> nlin xfm is stored
in the group with the name "final". We need to use this group for to get the
transform and do the stats calculation, and then reset to the current group.
Calculate stats first from average to timepoint included in average"""
if self.timePoint == -1:
# This means that we used the last file for each of the subjects
# to create the common average. This will be a variable time
# point, so we have to determine it for each of the input files
#......... part of the code omitted here .........
Example 10: FullLSQ12
# Required imports: from pydpiper.pipeline import Pipeline [as alias]
# Or: from pydpiper.pipeline.Pipeline import addStage [as alias]
#......... part of the code omitted here .........
self.simplex = []
"""Parse through rows and assign appropriate values to each parameter array.
Everything is read in as strings, but in some cases, must be converted to
floats, booleans or gradients.
"""
for p in params:
if p[0]=="blur":
"""Blurs must be converted to floats."""
for i in range(1,len(p)):
self.blurs.append(float(p[i]))
elif p[0]=="step":
"""Steps are strings but must be converted to a float."""
for i in range(1,len(p)):
self.stepSize.append(float(p[i]))
elif p[0]=="gradient":
"""Gradients must be converted to bools."""
for i in range(1,len(p)):
if p[i]=="True" or p[i]=="TRUE":
self.useGradient.append(True)
elif p[i]=="False" or p[i]=="FALSE":
self.useGradient.append(False)
elif p[0]=="simplex":
"""Simplex must be converted to an int."""
for i in range(1,len(p)):
self.simplex.append(int(p[i]))
else:
print "Improper parameter specified for minctracc protocol: " + str(p[0])
print "Exiting..."
sys.exit()
def getGenerations(self):
arrayLength = len(self.blurs)
errorMsg = "Array lengths in lsq12 minctracc protocol do not match."
if (len(self.stepSize) != arrayLength
or len(self.useGradient) != arrayLength
or len(self.simplex) != arrayLength):
print(errorMsg)
raise
else:
return arrayLength
def iterate(self):
if not self.maxPairs:
xfmsToAvg = {}
lsq12ResampledFiles = {}
for inputFH in self.inputs:
"""Create an array of xfms, to compute an average lsq12 xfm for each input"""
xfmsToAvg[inputFH] = []
for targetFH in self.inputs:
if inputFH != targetFH:
lsq12 = LSQ12(inputFH,
targetFH,
self.blurs,
self.stepSize,
self.useGradient,
self.simplex)
self.p.addPipeline(lsq12.p)
xfmsToAvg[inputFH].append(inputFH.getLastXfm(targetFH))
"""Create average xfm for inputFH using xfmsToAvg array"""
cmd = ["xfmavg"]
for i in range(len(xfmsToAvg[inputFH])):
cmd.append(InputFile(xfmsToAvg[inputFH][i]))
avgXfmOutput = createBaseName(inputFH.transformsDir, inputFH.basename + "-avg-lsq12.xfm")
cmd.append(OutputFile(avgXfmOutput))
xfmavg = CmdStage(cmd)
xfmavg.setLogFile(LogFile(logFromFile(inputFH.logDir, avgXfmOutput)))
self.p.addStage(xfmavg)
self.lsq12AvgXfms[inputFH] = avgXfmOutput
""" resample brain and add to array for mincAveraging"""
if not self.likeFile:
likeFile=inputFH
else:
likeFile=self.likeFile
rslOutput = createBaseName(inputFH.resampledDir, inputFH.basename + "-resampled-lsq12.mnc")
res = ma.mincresample(inputFH,
inputFH,
transform=avgXfmOutput,
likeFile=likeFile,
output=rslOutput,
argArray=["-sinc"])
self.p.addStage(res)
lsq12ResampledFiles[inputFH] = rslOutput
""" After all registrations complete, setLastBasevol for each subject to be
resampled file in lsq12 space. We can then call mincAverage on fileHandlers,
as it will use the lastBasevol for each by default."""
for inputFH in self.inputs:
inputFH.setLastBasevol(lsq12ResampledFiles[inputFH])
""" mincAverage all resampled brains and put in lsq12Directory"""
self.lsq12Avg = abspath(self.lsq12Dir) + "/" + basename(self.lsq12Dir) + "-pairs.mnc"
self.lsq12AvgFH = RegistrationPipeFH(self.lsq12Avg, basedir=self.lsq12Dir)
avg = ma.mincAverage(self.inputs,
self.lsq12AvgFH,
output=self.lsq12Avg,
defaultDir=self.lsq12Dir)
self.p.addStage(avg)
else:
print "Registration using a specified number of max pairs not yet working. Check back soon!"
sys.exit()
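The parameter parsing shown above reads a semicolon-delimited protocol file row by row: the first field names a parameter (blur, step, gradient, simplex) and the remaining fields supply one value per generation, as also described in the NLINBase docstring in Example 12. Below is a minimal standalone sketch of that idea; the protocol file name and the values in the comment are invented for illustration and are not taken from the pydpiper test data.

import csv

# Hypothetical protocol file "lsq12_protocol.csv", semicolon-delimited,
# one row per parameter and one column per generation, e.g.:
#   blur;0.3;0.2;0.15
#   step;1;0.5;0.333
#   gradient;False;True;False
#   simplex;3;1.5;1
blurs, stepSize, useGradient, simplex = [], [], [], []

with open("lsq12_protocol.csv") as f:
    for row in csv.reader(f, delimiter=";"):
        if not row:
            continue
        name, values = row[0], row[1:]
        if name == "blur":
            blurs = [float(v) for v in values]
        elif name == "step":
            stepSize = [float(v) for v in values]
        elif name == "gradient":
            useGradient = [v.upper() == "TRUE" for v in values]
        elif name == "simplex":
            simplex = [float(v) for v in values]  # Example 10 converts these to int
        else:
            raise ValueError("Improper parameter specified for minctracc protocol: " + name)

# As in getGenerations() above, the number of minctracc generations is the
# common length of these arrays; mismatched lengths indicate a bad protocol.
assert len(blurs) == len(stepSize) == len(useGradient) == len(simplex)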
Example 11: LSQ12
# Required imports: from pydpiper.pipeline import Pipeline [as alias]
# Or: from pydpiper.pipeline.Pipeline import addStage [as alias]
class LSQ12(object):
"""Basic LSQ12 class.
This class takes an input FileHandler and a targetFileHandler as required inputs. A series of
minctracc calls will then produce the 12-parameter alignment. The number of minctracc calls
and their parameters are controlled by four further arguments to the constructor:
blurs: an array of floats containing the FWHM of the blurring kernel to be used for each call
gradient: an array of booleans stating whether we should use the blur (False) or gradient (True) of each blur
step: an array of floats containing the step used by minctracc in each call
simplex: an array of floats containing the simplex used by minctracc in each call.
The number of entries in those three (blurs, step, simplex) input arguments determines the number
of minctracc calls executed in this module. For example, the following call:
LSQ12(inputFH, targetFH, blurs=[10,5,2], gradient=[False,True,True], step=[4,4,4], simplex=[20,20,20])
will result in three successive minctracc calls, each initialized with the output transform of the
previous call.
"""
def __init__(self,
inputFH,
targetFH,
blurs=[0.3, 0.2, 0.15],
step=[1,0.5,0.333333333333333],
gradient=[False,True,False],
simplex=[3,1.5,1],
defaultDir="tmp"):
# TO DO: Might want to take this out and pass in # of generations, since
# checking happens there.
if len(blurs) == len(step) == len(simplex):
# do nothing - all lengths are the same and we're therefore happy
pass
else:
logger.error("The same number of entries are required for blurs, step, and simplex in LSQ12")
sys.exit()
self.p = Pipeline()
self.inputFH = inputFH
self.targetFH = targetFH
self.blurs = blurs
self.step = step
self.blurs = blurs
self.gradient = gradient
self.simplex = simplex
self.defaultDir = defaultDir
self.blurFiles()
self.buildPipeline()
def blurFiles(self):
for b in self.blurs:
if b != -1:
tblur = ma.blur(self.targetFH, b, gradient=True)
iblur = ma.blur(self.inputFH, b, gradient=True)
self.p.addStage(tblur)
self.p.addStage(iblur)
def buildPipeline(self):
for i in range(len(self.blurs)):
linearStage = ma.minctracc(self.inputFH,
self.targetFH,
blur=self.blurs[i],
defaultDir=self.defaultDir,
gradient=self.gradient[i],
linearparam="lsq12",
step=self.step[i],
simplex=self.simplex[i])
self.p.addStage(linearStage)
Example 12: NLINBase
# Required imports: from pydpiper.pipeline import Pipeline [as alias]
# Or: from pydpiper.pipeline.Pipeline import addStage [as alias]
#......... part of the code omitted here .........
"""Create the blurring resolution from the file resolution"""
try: # the attempt to access the minc volume will fail if it doesn't yet exist at pipeline creation
self.fileRes = rf.getFinestResolution(self.target)
except:
# if it indeed failed, get resolution from the original file specified for
# one of the input files, which should exist.
# Can be overwritten by the user through specifying a nonlinear protocol.
self.fileRes = rf.getFinestResolution(self.inputs[0].inputFileName)
"""
Set default parameters before checking to see if a non-linear protocol has been
specified. This is done first, since a non-linear protocol may specify only
a subset of the parameters, but all parameters must be set for the registration
to run properly.
After default parameters are set, check for a specified non-linear protocol and
override these parameters accordingly. Currently, this protocol must
be a csv file that uses a SEMI-COLON to separate the fields. Examples are:
pydpiper_apps_testing/test_data/minctracc_example_protocol.csv
pydpiper_apps_testing/test_data/mincANTS_example_protocol.csv
Each row in the csv is a different input to either a minctracc or mincANTS call.
Although the number of entries in each row (e.g. generations) is variable, the
specific parameters are fixed. For example, one could specify a subset of the
allowed parameters (e.g. blurs only) but could not rename any parameters
or use additional ones that haven't already been defined without subclassing. See
documentation for additional details.
Note that if no protocol is specified, then defaults will be used.
Based on the length of these parameter arrays, the number of generations is set.
"""
self.defaultParams()
if nlin_protocol:
self.setParams(nlin_protocol)
self.generations = self.getGenerations()
# Create new nlin group for each input prior to registration
for i in range(len(self.inputs)):
self.inputs[i].newGroup(groupName="nlin")
def defaultParams(self):
"""Set default parameters for each registration type in subclasses."""
pass
def setParams(self):
"""Override parameters based on specified non-linear protocol."""
pass
def getGenerations(self):
"""Get number of generations based on length of parameter arrays. """
pass
def addBlurStage(self):
"""
Add blurs to pipeline. Because blurs are handled differently by
parameter arrays in minctracc and mincANTS subclasses, they are added
to the pipeline via function call.
"""
pass
def regAndResample(self):
"""Registration and resampling calls"""
pass
def iterate(self):
for i in range(self.generations):
nlinOutput = abspath(self.nlinDir) + "/" + "nlin-%g.mnc" % (i+1)
nlinFH = RegistrationPipeFH(nlinOutput, mask=self.target.getMask(), basedir=self.nlinDir)
self.addBlurStage(self.target, i)
filesToAvg = []
for inputFH in self.inputs:
self.addBlurStage(inputFH, i)
self.regAndResample(inputFH, i, filesToAvg, nlinFH)
"""Because we don't reset lastBasevol on each inputFH, call mincAverage with files only.
We create fileHandler first though, so we have log directory.
This solution seems a bit hackish--may want to modify?
Additionally, we are currently using the full RegistrationPipeFH class, but ultimately
we'll want to create a third class that is somewhere between a full and base class.
"""
logBase = removeBaseAndExtension(nlinOutput)
avgLog = createLogFile(nlinFH.logDir, logBase)
avg = mincAverage(filesToAvg, nlinOutput, logFile=avgLog)
self.p.addStage(avg)
"""Reset target for next iteration and add to array"""
self.target = nlinFH
self.nlinAverages.append(nlinFH)
"""Create a final nlin group to add to the inputFH.
lastBasevol = by default, will grab the lastBasevol used in these calculations (e.g. lsq12)
setLastXfm between final nlin average and inputFH will be set for stats calculations.
"""
if i == (self.generations -1):
for inputFH in self.inputs:
"""NOTE: The last xfm being set below is NOT the result of a registration between
inputFH and nlinFH, but rather is the output transform from the previous generation's
average."""
finalXfm = inputFH.getLastXfm(self.nlinAverages[self.generations-2])
inputFH.newGroup(groupName="final")
inputFH.setLastXfm(nlinFH, finalXfm)
Example 13: mincresampleFileAndMask
# Required imports: from pydpiper.pipeline import Pipeline [as alias]
# Or: from pydpiper.pipeline.Pipeline import addStage [as alias]
class mincresampleFileAndMask(object):
"""
If the input file to mincresample(CmdStage) is a file handler, and there is
a mask associated with the file, the most intuitive thing to do is
to resample both the file and the mask. However, a true atom/command stage
can only create a single stage, and as such mincresample(CmdStage) cannot
resample both. When using a file handler, the mask file associated with it
is used behind the scenes without the user explicitly specifying this behaviour.
That's why it is important that the mask always remains current/up-to-date. The
best way to do that is to automatically resample the associated mask when the
main file is being resampled. And that is where this class comes in. It serves
as a wrapper around mincresample(CmdStage) and mincresampleMask(CmdStage). It
will check whether the input file is a file handler, and if so, will resample
the mask that is associated with it (if it exists).
This class is not truly an atom/command stage, so technically should not live in
the minc_atoms module. It is still kept here because in essence it serves as a
single indivisible stage. (and because the user is more likely to call/find it
when looking for the mincresample stage)
"""
def __init__(self,
inFile,
targetFile,
nameForStage=None,
**kwargs):
self.p = Pipeline()
self.outputFiles = [] # this will contain the outputFiles from the mincresample of the main MINC file
self.outputFilesMask = [] # this will contain the outputFiles from the mincresample of the mask belonging to the main MINC file
# the first step is to simply run the mincresample command:
fileRS = mincresample(inFile,
targetFile,
**kwargs)
if(nameForStage):
fileRS.name = nameForStage
self.p.addStage(fileRS)
self.outputFiles = fileRS.outputFiles
# initialize the array of outputs for the mask in case there is none to be resampled
self.outputFilesMask = [None] * len(self.outputFiles)
# next up, is this a file handler, and if so is there a mask that needs to be resampled?
if(isFileHandler(inFile)):
if(inFile.getMask()):
# there is a mask associated with this file, should be updated
# we have to watch out in terms of interpolation arguments, if
# the original resample command asked for "-sinc" or "-tricubic"
# for instance, we should remove that argument for the mask resampling
# these options would reside in the argArray...
maskArgs = copy.deepcopy(kwargs)
if(maskArgs["argArray"]):
argList = maskArgs["argArray"]
for i in range(len(argList)):
if(re.match("-sinc", argList[i]) or
re.match("-trilinear", argList[i]) or
re.match("-tricubic", argList[i]) ):
del argList[i]
maskArgs["argArray"] = argList
# if the output file for the mincresample command was already
# specified, add "_mask.mnc" to it
if(maskArgs["output"]):
maskArgs["output"] = re.sub(".mnc", "_mask.mnc", maskArgs["output"])
maskRS = mincresampleMask(inFile,
targetFile,
**maskArgs)
if(nameForStage):
maskRS.name = nameForStage + "--mask--"
self.p.addStage(maskRS)
self.outputFilesMask = maskRS.outputFiles
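A hedged usage sketch of the wrapper above: inputFH and targetFH are assumed to be existing file handlers, p an existing Pipeline, and the output name is hypothetical. The output and argArray keywords are passed explicitly because the mask branch above looks them up in the keyword dictionary.

# Resample a file handler together with its mask, if one is attached.
rs = mincresampleFileAndMask(inputFH,
                             targetFH,
                             likeFile=targetFH,
                             output="subject01_resampled.mnc",  # hypothetical name
                             argArray=["-sinc"])
p.addPipeline(rs.p)                    # one stage for the file, one for the mask
resampledFile = rs.outputFiles[0]
resampledMask = rs.outputFilesMask[0]  # None when inputFH carries no mask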
Example 14: createQualityControlImages
# Required imports: from pydpiper.pipeline import Pipeline [as alias]
# Or: from pydpiper.pipeline.Pipeline import addStage [as alias]
class createQualityControlImages(object):
"""
This class takes a list of input files and creates
a set of quality control (verification) images. Optionally
these images can be combined into a single montage image for
easy viewing.
If the inputFiles are file handlers, the last base volume
will be used to create the images.
The scaling factor corresponds to the mincpik -scale
parameter.
"""
def __init__(self,
inputFiles,
createMontage=True,
montageOutPut=None,
scalingFactor=20,
message="lsq6"):
self.p = Pipeline()
self.individualImages = []
self.individualImagesLabeled = []
self.message = message
if createMontage and montageOutPut == None:
print("\nError: createMontage is specified in createQualityControlImages, but no output name for the montage is provided. Exiting...\n")
sys.exit()
# for each of the input files, run a mincpik call and create
# a triplane image.
for inFile in inputFiles:
if isFileHandler(inFile):
# create command using last base vol
inputToMincpik = inFile.getLastBasevol()
outputMincpik = createBaseName(inFile.tmpDir,
removeBaseAndExtension(inputToMincpik) + "_QC_image.png")
cmd = ["mincpik", "-clobber",
"-scale", scalingFactor,
"-triplanar",
InputFile(inputToMincpik),
OutputFile(outputMincpik)]
mincpik = CmdStage(cmd)
mincpik.setLogFile(LogFile(logFromFile(inFile.logDir, outputMincpik)))
self.p.addStage(mincpik)
self.individualImages.append(outputMincpik)
# we should add a label to each of the individual images
# so it will be easier for the user to identify
# which images potentially fail
outputConvert = createBaseName(inFile.tmpDir,
removeBaseAndExtension(inputToMincpik) + "_QC_image_labeled.png")
cmdConvert = ["convert", "-label", inFile.basename,
InputFile(outputMincpik),
OutputFile(outputConvert)]
convertAddLabel = CmdStage(cmdConvert)
convertAddLabel.setLogFile(LogFile(logFromFile(inFile.logDir, outputConvert)))
self.p.addStage(convertAddLabel)
self.individualImagesLabeled.append(outputConvert)
# if montageOutput is specified, create the overview image
if createMontage:
cmdmontage = ["montage", "-geometry", "+2+2"] \
+ map(InputFile, self.individualImagesLabeled) + [OutputFile(montageOutPut)]
montage = CmdStage(cmdmontage)
montage.setLogFile(splitext(montageOutPut)[0] + ".log")
message_to_print = "\n* * * * * * *\nPlease consider the following verification "
message_to_print += "image, which shows a slice through all input "
message_to_print += "files %s. " % self.message
message_to_print += "\n%s\n" % (montageOutPut)
message_to_print += "* * * * * * *\n"
# the hook needs a return. Given that "print" does not return
# anything, we need to encapsulate the print statement in a
# function (which in this case will return None, but that's fine)
def printMessageForMontage():
print(message_to_print)
montage.finished_hooks.append(
lambda : printMessageForMontage())
self.p.addStage(montage)
Example 15: initializeAndRunNLIN
# Required imports: from pydpiper.pipeline import Pipeline [as alias]
# Or: from pydpiper.pipeline.Pipeline import addStage [as alias]
class initializeAndRunNLIN(object):
"""Class to setup target average (if needed),
instantiate correct version of NLIN class,
and run NLIN registration."""
def __init__(self,
targetOutputDir, #Output directory for files related to initial target (often _lsq12)
inputFiles,
nlinDir,
avgPrefix, #Prefix for nlin-1.mnc, ... nlin-k.mnc
createAvg=True, #True=call mincAvg, False=targetAvg already exists
targetAvg=None, #Optional path to initial target - passing name does not guarantee existence
targetMask=None, #Optional path to mask for initial target
nlin_protocol=None,
reg_method=None):
self.p = Pipeline()
self.targetOutputDir = targetOutputDir
self.inputFiles = inputFiles
self.nlinDir = nlinDir
self.avgPrefix = avgPrefix
self.createAvg = createAvg
self.targetAvg = targetAvg
self.targetMask = targetMask
self.nlin_protocol = nlin_protocol
self.reg_method = reg_method
# setup initialTarget (if needed) and initialize non-linear module
self.setupTarget()
self.initNlinModule()
#iterate through non-linear registration and setup averages
self.nlinModule.iterate()
self.p.addPipeline(self.nlinModule.p)
self.nlinAverages = self.nlinModule.nlinAverages
self.nlinParams = self.nlinModule.nlinParams
def setupTarget(self):
if self.targetAvg:
if isinstance(self.targetAvg, str):
self.initialTarget = RegistrationPipeFH(self.targetAvg,
mask=self.targetMask,
basedir=self.targetOutputDir)
self.outputAvg = self.targetAvg
elif isinstance(self.targetAvg, RegistrationPipeFH):
self.initialTarget = self.targetAvg
self.outputAvg = self.targetAvg.getLastBasevol()
if not self.initialTarget.getMask():
if self.targetMask:
self.initialTarget.setMask(self.targetMask)
else:
print "You have passed a target average that is neither a string nor a file handler: " + str(self.targetAvg)
print "Exiting..."
else:
self.targetAvg = abspath(self.targetOutputDir) + "/" + "initial-target.mnc"
self.initialTarget = RegistrationPipeFH(self.targetAvg,
mask=self.targetMask,
basedir=self.targetOutputDir)
self.outputAvg = self.targetAvg
if self.createAvg:
avg = mincAverage(self.inputFiles,
self.initialTarget,
output=self.outputAvg,
defaultDir=self.targetOutputDir)
self.p.addStage(avg)
def initNlinModule(self):
if self.reg_method=="mincANTS":
self.nlinModule = NLINANTS(self.inputFiles, self.initialTarget, self.nlinDir, self.avgPrefix, self.nlin_protocol)
elif self.reg_method=="minctracc":
self.nlinModule = NLINminctracc(self.inputFiles, self.initialTarget, self.nlinDir, self.avgPrefix, self.nlin_protocol)
else:
logger.error("Incorrect registration method specified: " + self.reg_method)
sys.exit()
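For completeness, here is a hedged sketch of how this class is wired into a larger pipeline, mirroring the call in Example 5 (FullIterativeLSQ12Nlin.buildPipeline). The directory names are placeholders, and inputFiles, lsq12AvgFH and p are assumed to already exist.

# Hypothetical wiring: run non-linear registration starting from an
# existing LSQ12 average (so createAvg=False and targetAvg is passed in).
nlinModule = initializeAndRunNLIN("study/lsq12",   # targetOutputDir
                                  inputFiles,      # list of input file handlers
                                  "study/nlin",    # nlinDir
                                  avgPrefix="study",
                                  createAvg=False,
                                  targetAvg=lsq12AvgFH,
                                  nlin_protocol=None,
                                  reg_method="mincANTS")
p.addPipeline(nlinModule.p)
finalNlinAverage = nlinModule.nlinAverages[-1]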