This article collects typical usage examples of the Python method configargparse.ArgParser.add_argument. If you are wondering what ArgParser.add_argument does or how to use it, the curated code examples below may help. You can also explore further usage examples of the class it belongs to, configargparse.ArgParser.
Twelve code examples of ArgParser.add_argument are shown below, sorted by popularity by default.
Example 1: _mk_lsq12_parser
# Required imports: from configargparse import ArgParser [as alias]
# Or: from configargparse.ArgParser import add_argument [as alias]
def _mk_lsq12_parser():
    p = ArgParser(add_help=False)
    # group = parser.add_argument_group("LSQ12 registration options",
    #                                   "Options for performing a pairwise, affine registration")
    p.set_defaults(run_lsq12=True)
    p.add_argument("--run-lsq12", dest="run_lsq12",
                   action="store_true",
                   help="Actually run the 12 parameter alignment [default = %(default)s]")
    p.add_argument("--no-run-lsq12", dest="run_lsq12",
                   action="store_false",
                   help="Opposite of --run-lsq12")
    p.add_argument("--lsq12-max-pairs", dest="max_pairs",
                   type=parse_nullable_int, default=25,
                   help="Maximum number of pairs to register together ('None' implies all pairs). "
                        "[Default = %(default)s]")
    p.add_argument("--lsq12-likefile", dest="like_file",
                   type=str, default=None,
                   help="Can optionally specify a 'like'-file for resampling at the end of pairwise "
                        "alignment. Default is None, which means that the input file will be used. "
                        "[Default = %(default)s]")
    p.add_argument("--lsq12-protocol", dest="protocol",
                   type=str,
                   help="Can optionally specify a registration protocol that is different from defaults. "
                        "Parameters must be specified as in the following example: \n"
                        "applications_testing/test_data/minctracc_example_linear_protocol.csv \n"
                        "[Default = %(default)s].")
    return p
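Since this parser is constructed with add_help=False, it is meant to be merged into a larger parser rather than used on its own. A minimal, self-contained sketch of that composition pattern follows; the trimmed-down _mk_demo_lsq12_parser below is a hypothetical stand-in, not code from the original project:

from configargparse import ArgParser

def _mk_demo_lsq12_parser():
    # Hypothetical, simplified stand-in for _mk_lsq12_parser above.
    p = ArgParser(add_help=False)
    p.set_defaults(run_lsq12=True)
    p.add_argument("--no-run-lsq12", dest="run_lsq12", action="store_false",
                   help="Skip the 12 parameter alignment")
    p.add_argument("--lsq12-max-pairs", dest="max_pairs", type=int, default=25,
                   help="Maximum number of pairs to register together")
    return p

main_parser = ArgParser(parents=[_mk_demo_lsq12_parser()])
args = main_parser.parse_args(["--lsq12-max-pairs", "10"])
print(args.run_lsq12, args.max_pairs)  # True 10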
Example 2: main
# Required imports: from configargparse import ArgParser [as alias]
# Or: from configargparse.ArgParser import add_argument [as alias]
def main():
    argparser = ArgParser(description="Load TUPA model and visualize, saving to .png file.")
    argparser.add_argument("models", nargs="+", help="model file basename(s) to load")
    args = argparser.parse_args()
    for filename in args.models:
        model = load_model(filename)
        visualize(model, filename)
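The nargs="+" positional above collects one or more basenames into a list. Here is a tiny, self-contained illustration of that behaviour, independent of the TUPA-specific load_model and visualize helpers (the model names are made up):

from configargparse import ArgParser

argparser = ArgParser(description="Demo of a one-or-more positional argument.")
argparser.add_argument("models", nargs="+", help="model file basename(s) to load")
args = argparser.parse_args(["model_a", "model_b"])
print(args.models)  # ['model_a', 'model_b']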
Example 3: main
# Required imports: from configargparse import ArgParser [as alias]
# Or: from configargparse.ArgParser import add_argument [as alias]
def main():
    argparser = ArgParser(description="Load TUPA model and save again to a different file.")
    argparser.add_argument("models", nargs="+", help="model file basename(s) to load")
    argparser.add_argument("-s", "--suffix", default=".1", help="filename suffix to append")
    args = argparser.parse_args()
    for filename in args.models:
        model = load_model(filename)
        model.filename += args.suffix
        model.classifier.filename += args.suffix
        model.save()
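The optional -s/--suffix flag falls back to ".1" when it is not given. A quick sketch of that default handling (the file names here are invented):

from configargparse import ArgParser

argparser = ArgParser()
argparser.add_argument("models", nargs="+")
argparser.add_argument("-s", "--suffix", default=".1")
print(argparser.parse_args(["model_a"]).suffix)                # .1 (default)
print(argparser.parse_args(["model_a", "-s", ".bak"]).suffix)  # .bak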
Example 4: main
# Required imports: from configargparse import ArgParser [as alias]
# Or: from configargparse.ArgParser import add_argument [as alias]
def main():
    argparser = ArgParser(description="Visualize scores of a model over the dev set, saving to .png file.")
    argparser.add_argument("models", nargs="+", help="model file basename(s) to load")
    args = argparser.parse_args()
    for pattern in args.models:
        for filename in sorted(glob(pattern)) or [pattern]:
            basename, _ = os.path.splitext(filename)
            for div in "dev", "test":
                try:
                    scores = load_scores(basename, div=div)
                except OSError:
                    continue
                visualize(scores, basename, div=div)
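The expression sorted(glob(pattern)) or [pattern] falls back to the literal argument when the glob matches nothing. A small stand-alone sketch of that idiom:

from glob import glob

def expand(pattern):
    # If the glob matches no files, fall back to the literal pattern itself.
    return sorted(glob(pattern)) or [pattern]

print(expand("no_such_file_*.txt"))  # ['no_such_file_*.txt']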
Example 5: _mk_chain_parser
# Required imports: from configargparse import ArgParser [as alias]
# Or: from configargparse.ArgParser import add_argument [as alias]
def _mk_chain_parser():
    p = ArgParser(add_help=False)
    p.add_argument("--csv-file", dest="csv_file",
                   type=str, required=True,
                   help="The spreadsheet with information about your input data. "
                        "For the registration chain you are required to have the "
                        "following columns in your csv file: \" subject_id\", "
                        "\"timepoint\", and \"filename\". Optionally you can have "
                        "a column called \"is_common\" that indicates that a scan "
                        "is to be used for the common time point registration "
                        "using a 1, and 0 otherwise.")
    p.add_argument("--common-time-point", dest="common_time_point",
                   type=int, default=None,
                   help="The time point at which the inter-subject registration will be "
                        "performed. I.e., the time point that will link the subjects together. "
                        "If you want to use the last time point from each of your input files, "
                        "(they might differ per input file) specify -1. If the common time "
                        "is not specified, the assumption is that the spreadsheet contains "
                        "the mapping using the \"is_common\" column. [Default = %(default)s]")
    p.add_argument("--common-time-point-name", dest="common_time_point_name",
                   type=str, default="common",
                   help="Option to specify a name for the common time point. This is useful for the "
                        "creation of more readable output file names. Default is \"common\". Note "
                        "that the common time point is the one created by an iterative group-wise "
                        "registration (inter-subject).")
    return p
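The --csv-file help text describes a spreadsheet with subject_id, timepoint, filename and an optional is_common column. A hypothetical file in that layout, read with the standard csv module (the .mnc file names are invented purely for illustration):

import csv
import io

csv_text = """subject_id,timepoint,filename,is_common
s1,1,s1_tp1.mnc,0
s1,2,s1_tp2.mnc,1
s2,2,s2_tp2.mnc,1
"""
for row in csv.DictReader(io.StringIO(csv_text)):
    print(row["subject_id"], row["timepoint"], row["filename"], row["is_common"])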
Example 6: _mk_lsq12_parser
# Required imports: from configargparse import ArgParser [as alias]
# Or: from configargparse.ArgParser import add_argument [as alias]
def _mk_lsq12_parser():
    p = ArgParser(add_help=False)
    # group = parser.add_argument_group("LSQ12 registration options",
    #                                   "Options for performing a pairwise, affine registration")
    p.set_defaults(run_lsq12=True)
    p.set_defaults(generate_tournament_style_lsq12_avg=False)
    p.add_argument("--run-lsq12", dest="run_lsq12",
                   action="store_true",
                   help="Actually run the 12 parameter alignment [default = %(default)s]")
    p.add_argument("--no-run-lsq12", dest="run_lsq12",
                   action="store_false",
                   help="Opposite of --run-lsq12")
    p.add_argument("--lsq12-max-pairs", dest="max_pairs",
                   type=parse_nullable_int, default=25,
                   help="Maximum number of pairs to register together ('None' implies all pairs). "
                        "[Default = %(default)s]")
    p.add_argument("--lsq12-likefile", dest="like_file",
                   type=str, default=None,
                   help="Can optionally specify a 'like'-file for resampling at the end of pairwise "
                        "alignment. Default is None, which means that the input file will be used. "
                        "[Default = %(default)s]")
    p.add_argument("--lsq12-protocol", dest="protocol",
                   type=str,
                   help="Can optionally specify a registration protocol that is different from defaults. "
                        "Parameters must be specified as in the following example: \n"
                        "applications_testing/test_data/minctracc_example_linear_protocol.csv \n"
                        "[Default = %(default)s].")
    #p.add_argument("--generate-tournament-style-lsq12-avg", dest="generate_tournament_style_lsq12_avg",
    #               action="store_true",
    #               help="Instead of creating the average of the lsq12 resampled files "
    #                    "by simply averaging them directly, create an iterative average "
    #                    "as follows. Perform a non linear registration between pairs "
    #                    "of files. Resample each file halfway along that transformation "
    #                    "in order for them to end up in the middle. Average those two files. "
    #                    "Then continue on to the next level as in a tournament. [default = %(default)s]")
    #p.add_argument("--no-generate-tournament-style-lsq12-avg", dest="generate_tournament_style_lsq12_avg",
    #               action="store_false",
    #               help="Opposite of --generate-tournament-style-lsq12-avg")
    return p
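--run-lsq12 and --no-run-lsq12 write to the same dest, so the last flag on the command line wins and set_defaults supplies the value when neither is given. A minimal sketch of that paired-flag pattern:

from configargparse import ArgParser

p = ArgParser(add_help=False)
p.set_defaults(run_lsq12=True)
p.add_argument("--run-lsq12", dest="run_lsq12", action="store_true")
p.add_argument("--no-run-lsq12", dest="run_lsq12", action="store_false")

print(p.parse_args([]).run_lsq12)                  # True (from set_defaults)
print(p.parse_args(["--no-run-lsq12"]).run_lsq12)  # False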
Example 7: _mk_stats_parser
# Required imports: from configargparse import ArgParser [as alias]
# Or: from configargparse.ArgParser import add_argument [as alias]
def _mk_stats_parser():
    p = ArgParser(add_help=False)
    # p.add_argument_group("Statistics options",
    #                      "Options for calculating statistics.")
    default_fwhms = "0.2"
    p.set_defaults(stats_kernels=default_fwhms)
    p.set_defaults(calc_stats=True)
    p.add_argument("--calc-stats", dest="calc_stats",
                   action="store_true",
                   help="Calculate statistics at the end of the registration. [Default = %(default)s]")
    p.add_argument("--no-calc-stats", dest="calc_stats",
                   action="store_false",
                   help="If specified, statistics are not calculated. Opposite of --calc-stats.")
    p.add_argument("--stats-kernels", dest="stats_kernels",
                   type=str,
                   help="comma separated list of blurring kernels for analysis. [Default = %(default)s].")
    return p
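--stats-kernels is kept as a plain comma-separated string. One plausible way downstream code might split it into numeric kernel sizes is shown below; this consumption step is an assumption, it is not part of the original example:

from configargparse import ArgParser

p = ArgParser(add_help=False)
p.set_defaults(stats_kernels="0.2")
p.add_argument("--stats-kernels", dest="stats_kernels", type=str)

args = p.parse_args(["--stats-kernels", "0.1,0.2,0.5"])
kernels = [float(k) for k in args.stats_kernels.split(",")]
print(kernels)  # [0.1, 0.2, 0.5]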
Example 8: _mk_lsq6_parser
# Required imports: from configargparse import ArgParser [as alias]
# Or: from configargparse.ArgParser import add_argument [as alias]
def _mk_lsq6_parser(with_nuc : bool = True,
                    with_inormalize : bool = True):
    p = ArgParser(add_help=False)
    p.set_defaults(lsq6_method="lsq6_large_rotations")
    p.set_defaults(nuc = True if with_nuc else False)
    p.set_defaults(inormalize = True if with_inormalize else False)
    p.set_defaults(copy_header_info=False)
    # TODO: should this actually be part of the LSQ6 component? What would it return in this case?
    p.set_defaults(run_lsq6=True)
    p.add_argument("--run-lsq6", dest="run_lsq6",
                   action="store_true",
                   help="Actually run the 6 parameter alignment [default = %(default)s]")
    p.add_argument("--no-run-lsq6", dest="run_lsq6",
                   action="store_false",
                   help="Opposite of --run-lsq6")
    # TODO should be part of some mutually exclusive group ...
    p.add_argument("--init-model", dest="init_model",
                   type=str, default=None,
                   help="File in standard space in the initial model. The initial model "
                        "can also have a file in native space and potentially a transformation "
                        "file. See our wiki (https://wiki.mouseimaging.ca/) for detailed "
                        "information on initial models. [Default = %(default)s]")
    p.add_argument("--lsq6-target", dest="lsq6_target",
                   type=str, default=None,
                   help="File to be used as the target for the initial (often 6-parameter) alignment. "
                        "[Default = %(default)s]")
    p.add_argument("--bootstrap", dest="bootstrap",
                   action="store_true", default=False,
                   help="Use the first input file to the pipeline as the target for the "
                        "initial (often 6-parameter) alignment. [Default = %(default)s]")
    # TODO: add information about the pride of models to the code in such a way that it
    # is reflected on GitHub
    p.add_argument("--pride-of-models", dest="pride_of_models",
                   type=str, default=None,
                   help="(selected longitudinal pipelines only!) Specify a csv file that contains the mapping of "
                        "all your initial models at different time points. The idea is that you might "
                        "want to use different initial models for the time points in your data. "
                        "The csv file should have one column called \"model_file\", and one column "
                        "called \"time_point\". The time points can be given in either integer values "
                        "or float values. Each model file should point to the file in standard space "
                        "for that particular model. [Default = %(default)s]")
    # TODO: do we need to implement this option? This was for Kieran Short, but the procedure
    # he will be using in the future most likely will not involve this option.
    # group.add_argument("--lsq6-alternate-data-prefix", dest="lsq6_alternate_prefix",
    #                    type=str, default=None,
    #                    help="Specify a prefix for an augmented data set to use for the 6 parameter "
    #                         "alignment. Assumptions: there is a matching alternate file for each regular input "
    #                         "file, e.g. input files are: input_1.mnc input_2.mnc ... input_n.mnc. If the "
    #                         "string provided for this flag is \"aug_\", then the following files should exist: "
    #                         "aug_input_1.mnc aug_input_2.mnc ... aug_input_n.mnc. These files are assumed to be "
    #                         "in the same orientation/location as the regular input files. They will be used for "
    #                         "for the 6 parameter alignment. The transformations will then be used to transform "
    #                         "the regular input files, with which the pipeline will continue.")
    p.add_argument("--lsq6-simple", dest="lsq6_method",
                   action="store_const", const="lsq6_simple",
                   help="Run a 6 parameter alignment assuming that the input files are roughly "
                        "aligned: same space, similar orientation. Keep in mind that if you use an "
                        "initial model with both a standard and a native space, the assumption is "
                        "that the input files are already roughly aligned to the native space. "
                        "Three iterations are run: 1st is 17 times stepsize blur, 2nd is 9 times "
                        "stepsize gradient, 3rd is 4 times stepsize blur. [Default = %(default)s]")
    p.add_argument("--lsq6-centre-estimation", dest="lsq6_method",
                   action="store_const", const="lsq6_centre_estimation",
                   help="Run a 6 parameter alignment assuming that the input files have a "
                        "similar orientation, but are scanned in different coils/spaces. [Default = %(default)s]")
    p.add_argument("--lsq6-large-rotations", dest="lsq6_method",
                   action="store_const", const="lsq6_large_rotations",
                   help="Run a 6 parameter alignment assuming that the input files have a random "
                        "orientation and are scanned in different coils/spaces. A brute force search over "
                        "the x,y,z rotation space is performed to find the best 6 parameter alignment. "
                        "[Default = %(default)s]")
    p.add_argument("--lsq6-large-rotations-tmp-dir", dest="rotation_tmp_dir",
                   type=str, default="/dev/shm/",
                   help="Specify the directory that rotational_minctracc.py uses for temporary files. "
                        "By default we use /dev/shm/, because this program involves a lot of I/O, and "
                        "this is probably one of the fastest way to provide this. [Default = %(default)s]")
    p.add_argument("--lsq6-large-rotations-parameters", dest="rotation_params",
                   type=str, default="5,4,10,8",
                   help="Settings for the large rotation alignment. factor=factor based on smallest file "
                        "resolution: 1) blur factor, 2) resample step size factor, 3) registration step size "
                        "factor, 4) w_translations factor ***** if you are working with mouse brain data "
                        " the defaults do not have to be based on the file resolution; a default set of "
                        " settings works for all mouse brain. In order to use those setting, specify: "
                        "\"mousebrain\" as the argument for this option. ***** [default = %(default)s]")
    p.add_argument("--lsq6-rotational-range", dest="rotation_range",
                   type=int, default=50,
                   help="Settings for the rotational range in degrees when running the large rotation "
                        "alignment. [Default = %(default)s]")
    p.add_argument("--lsq6-rotational-interval", dest="rotation_interval",
                   type=int, default=10,
                   help="Settings for the rotational interval in degrees when running the large rotation "
                        "alignment. [Default = %(default)s]")
    p.add_argument("--nuc", dest="nuc",
                   action="store_true",
                   help="Perform non-uniformity correction. [Default = %(default)s]")
    p.add_argument("--no-nuc", dest="nuc",
                   action="store_false",
                   help="If specified, do not perform non-uniformity correction. Opposite of --nuc.")
    p.add_argument("--inormalize", dest="inormalize",
                   action="store_true",
#.........the rest of this code example is omitted here.........
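The three --lsq6-* mode flags all use action="store_const" on the same dest, so picking one simply overwrites the lsq6_method default. A reduced sketch of that selection mechanism (help strings omitted):

from configargparse import ArgParser

p = ArgParser(add_help=False)
p.set_defaults(lsq6_method="lsq6_large_rotations")
p.add_argument("--lsq6-simple", dest="lsq6_method",
               action="store_const", const="lsq6_simple")
p.add_argument("--lsq6-centre-estimation", dest="lsq6_method",
               action="store_const", const="lsq6_centre_estimation")
p.add_argument("--lsq6-large-rotations", dest="lsq6_method",
               action="store_const", const="lsq6_large_rotations")

print(p.parse_args([]).lsq6_method)                 # lsq6_large_rotations (default)
print(p.parse_args(["--lsq6-simple"]).lsq6_method)  # lsq6_simple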
Example 9: print
# Required imports: from configargparse import ArgParser [as alias]
# Or: from configargparse.ArgParser import add_argument [as alias]
if (all([os.path.exists(manifest) for manifest in self.manifests.values()])
        and not self.overwrite):
    print("Found manifest files, skipping ingest, use --overwrite to overwrite them.")
    return
for setn, manifest in self.manifests.items():
    pairs = self.train_or_val_pairs(setn)
    records = [(os.path.relpath(fname, self.out_dir), int(tgt))
               for fname, tgt in pairs]
    records.insert(0, ('@FILE', 'STRING'))
    np.savetxt(manifest, records, fmt='%s\t%s')

if __name__ == "__main__":
    parser = ArgParser()
    parser.add_argument('--input_dir', required=True,
                        help='Directory to find input tars', default=None)
    parser.add_argument('--out_dir', required=True,
                        help='Directory to write ingested files', default=None)
    parser.add_argument('--target_size', type=int, default=256,
                        help='Size in pixels to scale shortest side DOWN to (0 means no scaling)')
    parser.add_argument('--overwrite', action='store_true', default=False, help='Overwrite files')
    args = parser.parse_args()
    logger = logging.getLogger(__name__)
    bw = IngestI1K(input_dir=args.input_dir, out_dir=args.out_dir, target_size=args.target_size,
                   overwrite=args.overwrite)
    bw.run()
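Note that --input_dir and --out_dir are declared with required=True, so the default=None is never used: omitting either flag makes parse_args() report an error and exit. A minimal sketch (the directory path is invented):

from configargparse import ArgParser

parser = ArgParser()
parser.add_argument('--input_dir', required=True, default=None,
                    help='Directory to find input tars')
print(parser.parse_args(['--input_dir', '/tmp/tars']).input_dir)  # /tmp/tars
# parser.parse_args([]) would print a "required argument" error and exit.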
Example 10: delete_if_exists
# Required imports: from configargparse import ArgParser [as alias]
# Or: from configargparse.ArgParser import add_argument [as alias]
if "amr" not in keep: # Remove AMR-specific features: node label and category
delete_if_exists((model.feature_params, model.classifier.params), (NODE_LABEL_KEY, "c"))
delete_if_exists((model.classifier.labels, model.classifier.axes), {NODE_LABEL_KEY}.union(FORMATS).difference(keep))
def delete_if_exists(dicts, keys):
for d in dicts:
for key in keys:
try:
del d[key]
except KeyError:
pass
def main(args):
os.makedirs(args.out_dir, exist_ok=True)
for filename in args.models:
model = load_model(filename)
strip_multitask(model, args.keep)
model.filename = os.path.join(args.out_dir, os.path.basename(filename))
model.save()
if __name__ == "__main__":
argparser = ArgParser(description="Load TUPA model and save with just one task's features/weights.")
argparser.add_argument("models", nargs="+", help="model file basename(s) to load")
argparser.add_argument("-k", "--keep", nargs="+", choices=tuple(filter(None, FORMATS)), default=["ucca"],
help="tasks to keep features/weights for")
argparser.add_argument("-o", "--out-dir", default=".", help="directory to write modified model files to")
main(argparser.parse_args())
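delete_if_exists swallows KeyError so that absent keys are silently ignored; dict.pop with a default is an equivalent, slightly more compact formulation. This is a sketch, not code from the original model utilities:

def drop_keys(dicts, keys):
    for d in dicts:
        for key in keys:
            d.pop(key, None)  # never raises, even if key is absent

params = {"c": 1, "W": 2}
drop_keys((params,), ("c", "not_there"))
print(params)  # {'W': 2}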
Example 11: open
# Required imports: from configargparse import ArgParser [as alias]
# Or: from configargparse.ArgParser import add_argument [as alias]
log_file = os.path.join(self.orig_out_dir, 'train.log')
manifest_list_cfg = ', '.join([k+':'+v for k, v in self.manifests.items()])
with open(cfg_file, 'w') as f:
    f.write('manifest = [{}]\n'.format(manifest_list_cfg))
    f.write('manifest_root = {}\n'.format(self.out_dir))
    f.write('log = {}\n'.format(log_file))
    f.write('epochs = 90\nrng_seed = 0\nverbose = True\neval_freq = 1\n')
for setn, manifest in self.manifests.items():
    if not os.path.exists(manifest):
        pairs = self.train_or_val_pairs(setn)
        records = [(os.path.relpath(fname, self.out_dir),
                    os.path.relpath(self._target_filename(int(tgt)), self.out_dir))
                   for fname, tgt in pairs]
        np.savetxt(manifest, records, fmt='%s,%s')

if __name__ == "__main__":
    parser = ArgParser()
    parser.add_argument('--input_dir', help='Directory to find input tars', default=None)
    parser.add_argument('--out_dir', help='Directory to write ingested files', default=None)
    parser.add_argument('--target_size', type=int, default=256,
                        help='Size in pixels to scale shortest side DOWN to (0 means no scaling)')
    args = parser.parse_args()
    logger = logging.getLogger(__name__)
    bw = IngestI1K(input_dir=args.input_dir, out_dir=args.out_dir, target_size=args.target_size)
    bw.run()
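The manifest written above is just a two-column, comma-separated text file produced by np.savetxt with fmt='%s,%s'. A tiny stand-alone illustration of that format, with invented file names:

import numpy as np

records = [("img/cat_001.jpg", "labels/0.txt"),
           ("img/dog_042.jpg", "labels/1.txt")]
np.savetxt("manifest_demo.csv", records, fmt="%s,%s")
print(open("manifest_demo.csv").read())
# img/cat_001.jpg,labels/0.txt
# img/dog_042.jpg,labels/1.txt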
Example 12: __init__
# Required imports: from configargparse import ArgParser [as alias]
# Or: from configargparse.ArgParser import add_argument [as alias]
def __init__(self,chosen_dir=None):
    #CMD arguments and configfile
    if sys.platform == 'win32':
        self.shell=True
        locs = [os.path.join(sys.path[0],'phpar2.exe'),
                'phpar2.exe',
                os.path.join(sys.path[0],'par2.exe'),
                'par2.exe',
                ]
        par_cmd = 'par2'
        for p in locs:
            if os.path.isfile(p):
                par_cmd = p
                break
    else:
        self.shell=False
        par_cmd = 'par2'

    if chosen_dir == None:
        parser = ArgParser(default_config_files=['par2deep.ini', '~/.par2deep'])
    else:
        parser = ArgParser(default_config_files=[os.path.join(chosen_dir,'par2deep.ini'), '~/.par2deep'])

    parser.add_argument("-q", "--quiet", action='store_true', help="Don't ask questions, go with all defaults, including repairing and deleting files (default off).")
    parser.add_argument("-over", "--overwrite", action='store_true', help="Overwrite existing par2 files (default off).")
    parser.add_argument("-novfy", "--noverify", action='store_true', help="Do not verify existing files (default off).")
    parser.add_argument("-keep", "--keep_old", action='store_true', help="Keep unused par2 files and old par2 repair files (.1,.2 and so on).")
    parser.add_argument("-ex", "--excludes", action="append", type=str, default=[], help="Optionally excludes directories ('root' is files in the root of -dir).")
    parser.add_argument("-exex", "--extexcludes", action="append", type=str, default=[], help="Optionally excludes file extensions.")
    parser.add_argument("-dir", "--directory", type=str, default=os.getcwd(), help="Path to operate on (default is current directory).")
    parser.add_argument("-pc", "--percentage", type=int, default=5, help="Set the parity percentage (default 5%%).")
    parser.add_argument("-pcmd", "--par_cmd", type=str, default=par_cmd, help="Set path to alternative par2 command (default \"par2\").")

    #lets get a nice dict of all o' that.
    args = {k:v for k,v in vars(parser.parse_args()).items() if v is not None}
    self.args = args
    #add number of files
    args["nr_parfiles"] = str(1) #number of parity files
    #set that shit
    for k,v in self.args.items():
        setattr(self, k, v)

    return
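ArgParser(default_config_files=...) is what lets par2deep read defaults from par2deep.ini: a config key matching a long option name overrides the coded default, while explicit command-line values still win. A minimal sketch of that precedence, assuming a throwaway ini file written on the spot:

from configargparse import ArgParser

with open("par2deep_demo.ini", "w") as f:
    f.write("percentage = 10\n")

parser = ArgParser(default_config_files=["par2deep_demo.ini"])
parser.add_argument("-pc", "--percentage", type=int, default=5)
print(parser.parse_args([]).percentage)            # 10 (config file beats coded default)
print(parser.parse_args(["-pc", "2"]).percentage)  # 2  (command line beats config file)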