This article collects typical usage examples of the Python configargparse.ArgParser class. If you are wondering what ArgParser is for and how to use it, the curated class examples below may help.
Shown below are 15 code examples of the ArgParser class, ordered by popularity.
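Before the examples, here is a minimal, self-contained sketch of the class itself; the config-file path, environment variable, and option names are illustrative and not taken from the examples below. configargparse.ArgParser behaves like argparse.ArgumentParser, but values can also come from config files and environment variables.

import configargparse

# Minimal sketch of configargparse.ArgParser; paths and names are illustrative.
parser = configargparse.ArgParser(default_config_files=["~/.myapp.conf"])
parser.add("-c", "--config", is_config_file=True, help="explicit config file path")
parser.add("--port", type=int, default=8080, env_var="MYAPP_PORT",
           help="taken from the command line, MYAPP_PORT, a config file, or the default")
options = parser.parse_args()
print(options.port)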
Example 1: main
def main():
    argparser = ArgParser(description="Load TUPA model and visualize, saving to .png file.")
    argparser.add_argument("models", nargs="+", help="model file basename(s) to load")
    args = argparser.parse_args()
    for filename in args.models:
        model = load_model(filename)
        visualize(model, filename)
Example 2: _mk_segmentation_parser
def _mk_segmentation_parser(parser: ArgParser, default: bool):
    group = parser.add_argument_group("Segmentation", "Segmentation options.")
    group.add_argument("--run-maget", action='store_true', dest="run_maget",
                       help="Run MAGeT segmentation. [default = %(default)s]")
    group.add_argument("--no-run-maget", dest="run_maget",
                       action='store_false', help="Don't run MAGeT segmentation")
    parser.set_defaults(run_maget=True)
    return parser
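The --run-maget/--no-run-maget pair above writes to a single dest, with parser.set_defaults supplying the starting value. A minimal, self-contained sketch of the same pattern (the wrapper function below is illustrative, not part of pydpiper):

from configargparse import ArgParser

def build_parser():
    # Illustrative helper: both flags share one dest; set_defaults picks the default.
    parser = ArgParser()
    group = parser.add_argument_group("Segmentation", "Segmentation options.")
    group.add_argument("--run-maget", dest="run_maget", action="store_true",
                       help="Run MAGeT segmentation.")
    group.add_argument("--no-run-maget", dest="run_maget", action="store_false",
                       help="Don't run MAGeT segmentation.")
    parser.set_defaults(run_maget=True)
    return parser

assert build_parser().parse_args([]).run_maget is True
assert build_parser().parse_args(["--no-run-maget"]).run_maget is False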
Example 3: go_2
def go_2(p, current_prefix, current_ns):
    if isinstance(p, BaseParser):
        new_p = ArgParser(default_config_files=config_files)
        for a in p.argparser._actions:
            new_a = copy.copy(a)
            ss = copy.deepcopy(new_a.option_strings)
            for ix, s in enumerate(new_a.option_strings):
                if s.startswith("--"):
                    ss[ix] = "-" + current_prefix + "-" + s[2:]
                else:
                    raise NotImplementedError
            new_a.option_strings = ss
            new_p._add_action(new_a)
        _used_args, _rest = new_p.parse_known_args(args, namespace=current_ns)
        # add a "_flags" field to each object so we know what flags caused a certain option to be set:
        # (however, note that post-parsing we may munge around ...)
        flags_dict = defaultdict(set)
        for action in new_p._actions:
            for opt in action.option_strings:
                flags_dict[action.dest].add(opt)
        current_ns.flags_ = Namespace(**flags_dict)
        # TODO: could continue parsing from `_rest` instead of original `args`
    elif isinstance(p, CompoundParser):
        current_ns.flags_ = set()  # could also check for the CompoundParser case and not set flags there,
                                   # since there will never be any
        for q in p.parsers:
            ns = Namespace()
            if q.namespace in current_ns.__dict__:
                raise ValueError("Namespace field '%s' already in use" % q.namespace)
                # TODO could also allow, say, a None
            else:
                # gross but how to write n-ary identity fn that behaves sensibly on single arg??
                current_ns.__dict__[q.namespace] = ns
            # FIXME this casting doesn't work for configurations with positional arguments,
            # which aren't unpacked correctly -- better to use a namedtuple
            # (making all arguments keyword-only also works, but then you have to supply
            # often meaningless defaults in the __init__)
            go_2(q.parser,
                 current_prefix=current_prefix + (('-' + q.prefix) if q.prefix is not None else ''),
                 current_ns=ns)
            # If a cast function is provided, apply it to the namespace, possibly doing dynamic type checking
            # and also allowing the checker to provide hinting for the types of the fields
            flags = ns.flags_
            del ns.flags_
            fixed = (q.cast(current_ns.__dict__[q.namespace])  # (q.cast(**vars(current_ns.__dict__[q.namespace]))
                     if q.cast else current_ns.__dict__[q.namespace])
            if isinstance(fixed, tuple):
                fixed = fixed.replace(flags_=flags)
            elif isinstance(fixed, Namespace):
                setattr(fixed, "flags_", flags)
            else:
                raise ValueError("currently only Namespace and NamedTuple objects are supported return types from "
                                 "parsing; got %s (a %s)" % (fixed, type(fixed)))
            current_ns.__dict__[q.namespace] = fixed
            # TODO current_ns or current_namespace or ns or namespace?
    else:
        raise TypeError("parser %s wasn't a %s (%s or %s) but a %s" %
                        (p, Parser, BaseParser, CompoundParser, p.__class__))
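Stripped of pydpiper's Parser/BaseParser/CompoundParser machinery, the core move in go_2 is cloning each action of a sub-parser while renaming its long options with a prefix. A simplified sketch of just that step (the helper name and the --resolution option are made up; like the original, it leans on argparse internals via _actions and _add_action):

import copy
from configargparse import ArgParser

def with_prefix(parser: ArgParser, prefix: str) -> ArgParser:
    # Copy every action into a fresh parser, renaming "--foo" to "-<prefix>-foo".
    # Short options are left unchanged here, whereas the original raises
    # NotImplementedError for anything that doesn't start with "--".
    new_parser = ArgParser(add_help=False)
    for action in parser._actions:
        new_action = copy.copy(action)
        new_action.option_strings = [
            "-%s-%s" % (prefix, s[2:]) if s.startswith("--") else s
            for s in action.option_strings
        ]
        new_parser._add_action(new_action)
    return new_parser

base = ArgParser(add_help=False)
base.add_argument("--resolution", type=float)
options = with_prefix(base, "lsq6").parse_args(["-lsq6-resolution", "0.5"])
assert options.resolution == 0.5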
Example 4: main
def main():
    # command line option handling
    # use an environment variable to look for a default config file
    # Alternately, we could use a default location for the file
    # (say `files = ['/etc/pydpiper.cfg', '~/pydpiper.cfg', './pydpiper.cfg']`)
    # TODO this logic is duplicated in application.py
    #if "PYDPIPER_CONFIG_FILE" in os.environ:
    default_config_file = os.getenv("PYDPIPER_CONFIG_FILE")
    if default_config_file is not None:
        try:
            with open(default_config_file):
                pass
        except:
            warnings.warn(f"PYDPIPER_CONFIG_FILE is set to '{default_config_file}', which can't be opened.")
    if default_config_file is not None:
        files = [default_config_file]
    else:
        files = []

    from pydpiper.core.arguments import _mk_execution_parser
    parser = ArgParser(default_config_files=files)
    _mk_execution_parser(parser)

    # using parse_known_args instead of parse_args is a hack since we
    # currently send ALL arguments from the main program to the executor.
    # Alternately, we could keep a copy of the executor parser around
    # when constructing the executor shell command
    options, _ = parser.parse_known_args()
    ensure_exec_specified(options.num_exec)

    def local_launch(options):
        pe = pipelineExecutor(options=options, uri_file=options.urifile,
                              pipeline_name="anon-executor")  # didn't parse application options so don't have a --pipeline-name
        # FIXME - I doubt missing the other options even works, otherwise we could change the executor interface!!
        # executors don't use any shared-memory constructs, so OK to copy
        ps = [Process(target=launchExecutor, args=(pe,))
              for _ in range(options.num_exec)]
        for p in ps:
            p.start()
        for p in ps:
            p.join()

    if options.local:
        local_launch(options)
    elif options.submit_server:
        roq = q.runOnQueueingSystem(options, sysArgs=sys.argv)
        for i in range(options.num_exec):
            roq.createAndSubmitExecutorJobFile(i, after=None,
                                               time=q.timestr_to_secs(options.time))
    elif options.queue_type is not None:
        for i in range(options.num_exec):
            pe = pipelineExecutor(options=options, uri_file=options.urifile, pipeline_name="anon-executor")
            pe.submitToQueue(1)  # TODO is there a reason why we have logic for submitting `i` executors again here?
    else:
        local_launch(options)
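Two details in this main() are worth isolating: the default_config_files list is derived from an environment variable, and parse_known_args is used so that flags meant for the main program are tolerated instead of rejected. A minimal illustration of the latter (the option names are made up):

from configargparse import ArgParser

# Illustrative: parse_known_args returns the recognised options plus the
# leftover argument strings instead of erroring out on unknown flags.
parser = ArgParser()
parser.add_argument("--num-executors", dest="num_exec", type=int, default=0)
options, rest = parser.parse_known_args(
    ["--num-executors", "4", "--application-only-flag", "x"])
assert options.num_exec == 4
assert rest == ["--application-only-flag", "x"]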
Example 5: main
def main():
    argparser = ArgParser(description="Visualize scores of a model over the dev set, saving to .png file.")
    argparser.add_argument("models", nargs="+", help="model file basename(s) to load")
    args = argparser.parse_args()
    for pattern in args.models:
        for filename in sorted(glob(pattern)) or [pattern]:
            basename, _ = os.path.splitext(filename)
            for div in "dev", "test":
                try:
                    scores = load_scores(basename, div=div)
                except OSError:
                    continue
                visualize(scores, basename, div=div)
Example 6: create_args
def create_args():
    parser = ArgParser()
    parser.add('--db_section')
    parser.add('--reqnums')
    parser.add('--csv')
    args = parser.parse_args()
    return args
Example 7: _mk_thickness_parser
def _mk_thickness_parser(parser: ArgParser):
    group = parser.add_argument_group("Thickness", "Thickness calculation options.")
    group.add_argument("--run-thickness", action='store_true', dest="run_thickness",
                       help="Run thickness computation.")
    group.add_argument("--no-run-thickness", action='store_false', dest="run_thickness",
                       help="Don't run thickness computation.")
    parser.set_defaults(run_thickness=True)
    group.add_argument("--label-mapping", type=str, dest="label_mapping",
                       help="path to CSV file mapping; see minclaplace/wiki/LaplaceGrid")
    group.add_argument("--atlas-fwhm", dest="atlas_fwhm", type=float,  # default ?!
                       help="Blurring kernel (mm) for atlas")
    group.add_argument("--thickness-fwhm", dest="thickness_fwhm", type=float,  # default??
                       help="Blurring kernel (mm) for cortical surfaces")
    return parser
Example 8: _mk_staging_parser
def _mk_staging_parser(parser: ArgParser):
    group = parser.add_argument_group("Embryo staging options", "Options for staging embryos in a 4D atlas.")
    group.add_argument("--csv-4D", dest="csv_4D", type=str,
                       help="CSV containing information about the 4D atlas. Should contain "
                            "the following fields: `volume`, `timepoint`, `file`, "
                            "`mask_file`.")
    return parser
Example 9: _mk_registration_parser
def _mk_registration_parser(p: ArgParser) -> ArgParser:
    g = p.add_argument_group("General registration options",
                             "....")
    # p = ArgParser(add_help=False)
    g.add_argument("--input-space", dest="input_space",
                   type=lambda x: InputSpace[x],  # type: ignore # mypy/issues/741
                   default=InputSpace.native,
                   # choices=[x for x, _ in InputSpace.__members__.items()],
                   help="Option to specify the space of the input files. Can be native (default), lsq6, lsq12. "
                        "Native means that there is no prior formal alignment between the input files "
                        "yet. lsq6 means that the input files have been aligned using translations "
                        "and rotations; the code will continue with a 12 parameter alignment. lsq12 "
                        "means that the input files are fully linearly aligned; only non-linear "
                        "registrations are performed. [Default=%(default)s]")
    g.add_argument("--resolution", dest="resolution",
                   type=float, default=None,
                   help="Specify the resolution at which you want the registration to be run. "
                        "If not specified, the resolution of the target of your pipeline will "
                        "be used. [Default=%(default)s]")
    g.add_argument("--subject-matter", dest="subject_matter",
                   type=str, default=None,
                   help="Specify the subject matter for the pipeline. This will set the parameters "
                        "for multiple programs based on the overall size of the subject matter instead "
                        "of using the resolution of the files. The only currently supported option is "
                        "\"mousebrain\". [Default=%(default)s]")
    return p  # g?
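The type=lambda x: InputSpace[x] trick above converts the raw command-line string into an enum member at parse time. A minimal, self-contained sketch of the same pattern (the InputSpace enum here is a stand-in, not pydpiper's own definition):

from enum import Enum, auto
from configargparse import ArgParser

# Stand-in enum for illustration; pydpiper defines its own InputSpace elsewhere.
class InputSpace(Enum):
    native = auto()
    lsq6 = auto()
    lsq12 = auto()

parser = ArgParser()
parser.add_argument("--input-space", dest="input_space",
                    type=lambda x: InputSpace[x],  # name lookup; raises KeyError on bad input
                    default=InputSpace.native)
options = parser.parse_args(["--input-space", "lsq12"])
assert options.input_space is InputSpace.lsq12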
Example 10: _mk_application_parser
def _mk_application_parser(p: ArgParser) -> ArgParser:
    """
    The arguments that all applications share:
      --pipeline-name
      --restart
      --no-restart
      --output-dir
      --create-graph
      --execute
      --no-execute
      --version
      --verbose
      --no-verbose
      files - leftover arguments (0 or more are allowed)
    """
    # p = ArgParser(add_help=False)
    g = p.add_argument_group("General application options",
                             "General options for all pydpiper applications.")
    g.add_argument("--restart", dest="restart",
                   action="store_true", default=True,
                   help="Restart pipeline using backup files. [default = %(default)s]")
    g.add_argument("--pipeline-name", dest="pipeline_name", type=str,
                   default=time.strftime("pipeline-%d-%m-%Y-at-%H-%M-%S"),
                   help="Name of pipeline and prefix for models.")
    g.add_argument("--no-restart", dest="restart",
                   action="store_false", help="Opposite of --restart")
    # TODO instead of prefixing all subdirectories (logs, backups, processed, ...)
    # with the pipeline name/date, we could create one identifying directory
    # and put these other directories inside
    g.add_argument("--output-dir", dest="output_directory",
                   type=str,
                   default='',
                   help="Directory where output data and backups will be saved.")
    g.add_argument("--create-graph", dest="create_graph",
                   action="store_true", default=False,
                   help="Create a .dot file with graphical representation of pipeline relationships [default = %(default)s]")
    g.set_defaults(execute=True)
    g.set_defaults(verbose=True)
    g.add_argument("--execute", dest="execute",
                   action="store_true",
                   help="Actually execute the planned commands [default = %(default)s]")
    g.add_argument("--no-execute", dest="execute",
                   action="store_false",
                   help="Opposite of --execute")
    g.add_argument("--version", action="version",
                   version="%(prog)s (" + get_distribution("pydpiper").version + ")")  # pylint: disable=E1101
    g.add_argument("--verbose", dest="verbose",
                   action="store_true",
                   help="Be verbose in what is printed to the screen [default = %(default)s]")
    g.add_argument("--no-verbose", dest="verbose",
                   action="store_false",
                   help="Opposite of --verbose")
    g.add_argument("--files", type=str, nargs='*', metavar='file',
                   help='Files to process')
    g.add_argument("--csv-file", dest="csv_file",
                   type=str, default=None,
                   help="CSV file containing application-specific columns. [Default=%(default)s]")
    return p
Example 11: _mk_chain_parser
def _mk_chain_parser():
    p = ArgParser(add_help=False)
    p.add_argument("--csv-file", dest="csv_file",
                   type=str, required=True,
                   help="The spreadsheet with information about your input data. "
                        "For the registration chain you are required to have the "
                        "following columns in your csv file: \"subject_id\", "
                        "\"timepoint\", and \"filename\". Optionally you can have "
                        "a column called \"is_common\" that indicates that a scan "
                        "is to be used for the common time point registration "
                        "using a 1, and 0 otherwise.")
    p.add_argument("--common-time-point", dest="common_time_point",
                   type=int, default=None,
                   help="The time point at which the inter-subject registration will be "
                        "performed. I.e., the time point that will link the subjects together. "
                        "If you want to use the last time point from each of your input files "
                        "(they might differ per input file), specify -1. If the common time "
                        "point is not specified, the assumption is that the spreadsheet contains "
                        "the mapping in the \"is_common\" column. [Default = %(default)s]")
    p.add_argument("--common-time-point-name", dest="common_time_point_name",
                   type=str, default="common",
                   help="Option to specify a name for the common time point. This is useful for the "
                        "creation of more readable output file names. Default is \"common\". Note "
                        "that the common time point is the one created by an iterative group-wise "
                        "registration (inter-subject).")
    return p
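_mk_chain_parser builds a standalone parser with add_help=False, which is the usual shape for a parser meant to be embedded in a larger one. pydpiper combines such parsers through its own CompoundParser machinery (see go_2 above), but a plain argparse-style composition via the standard parents mechanism would look like this (option and name choices below are illustrative):

from configargparse import ArgParser

# Illustrative only: fold a help-less sub-parser into an application parser
# using the `parents` argument inherited from argparse.ArgumentParser.
chain_parser = ArgParser(add_help=False)
chain_parser.add_argument("--common-time-point", dest="common_time_point",
                          type=int, default=None)

app_parser = ArgParser(parents=[chain_parser],
                       description="registration chain application (made-up name)")
app_parser.add_argument("--pipeline-name", type=str, default="chain")
options = app_parser.parse_args(["--common-time-point", "21"])
assert options.common_time_point == 21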
Example 12: go_2
def go_2(p, current_prefix, current_ns):
    if isinstance(p, BaseParser):
        new_p = ArgParser(default_config_files=config_files)
        for a in p.argparser._actions:
            new_a = copy.copy(a)
            ss = copy.deepcopy(new_a.option_strings)
            for ix, s in enumerate(new_a.option_strings):
                if s.startswith("--"):
                    ss[ix] = "-" + current_prefix + "-" + s[2:]
                else:
                    raise NotImplementedError
            new_a.option_strings = ss
            new_p._add_action(new_a)
        _used_args, _rest = new_p.parse_known_args(args, namespace=current_ns)
        # TODO: could continue parsing from `_rest` instead of original `args`
    elif isinstance(p, CompoundParser):
        for q in p.parsers:
            ns = Namespace()
            if q.namespace in current_ns.__dict__:
                raise ValueError("Namespace field '%s' already in use" % q.namespace)
                # TODO could also allow, say, a None
            else:
                # gross but how to write n-ary identity fn that behaves sensibly on single arg??
                current_ns.__dict__[q.namespace] = ns
            # FIXME this casting doesn't work for configurations with positional arguments,
            # which aren't unpacked correctly -- better to use a namedtuple
            # (making all arguments keyword-only also works, but then you have to supply
            # often meaningless defaults in the __init__)
            go_2(q.parser,
                 current_prefix=current_prefix + (('-' + q.prefix) if q.prefix is not None else ''),
                 current_ns=ns)
            # If a cast function is provided, apply it to the namespace, possibly doing dynamic type checking
            # and also allowing the checker to provide hinting for the types of the fields
            current_ns.__dict__[q.namespace] = (q.cast(current_ns.__dict__[q.namespace])  # (q.cast(**vars(current_ns.__dict__[q.namespace]))
                                                if q.cast else current_ns.__dict__[q.namespace])
            # TODO current_ns or current_namespace or ns or namespace?
    else:
        raise TypeError("parser %s wasn't a %s (%s or %s) but a %s" %
                        (p, Parser, BaseParser, CompoundParser, p.__class__))
Example 13: _mk_nlin_parser
def _mk_nlin_parser(p: ArgParser):
    group = p.add_argument_group("Nonlinear registration options",
                                 "Options for performing a non-linear registration")
    group.add_argument("--registration-method", dest="reg_method",
                       default="ANTS", choices=["ANTS", "minctracc"],
                       help="Specify whether to use minctracc or ANTS for non-linear registrations. "
                            "[Default = %(default)s]")
    group.add_argument("--nlin-protocol", dest="nlin_protocol",
                       type=str, default=None,
                       help="Can optionally specify a registration protocol that is different from defaults. "
                            "Parameters must be specified as in either of the following examples: \n"
                            "applications_testing/test_data/minctracc_example_nlin_protocol.csv \n"
                            "applications_testing/test_data/mincANTS_example_nlin_protocol.csv \n"
                            "[Default = %(default)s]")
    return p
Example 14: main
def main():
    argparser = ArgParser(description="Load TUPA model and save again to a different file.")
    argparser.add_argument("models", nargs="+", help="model file basename(s) to load")
    argparser.add_argument("-s", "--suffix", default=".1", help="filename suffix to append")
    args = argparser.parse_args()
    for filename in args.models:
        model = load_model(filename)
        model.filename += args.suffix
        model.classifier.filename += args.suffix
        model.save()
Example 15: _mk_nlin_parser
def _mk_nlin_parser(p: ArgParser):
    group = p.add_argument_group("Nonlinear registration options",
                                 "Options for performing a non-linear registration")
    group.add_argument("--registration-method", dest="reg_method",
                       default="ANTS", choices=["ANTS", "antsRegistration", "demons",
                                                "DRAMMS", "elastix", "minctracc"],
                       help="Specify algorithm used for non-linear registrations. "
                            "[Default = %(default)s]")
    # TODO wire up the choices here in reg_method and reg_strategy to the actual ones ...
    group.add_argument("--registration-strategy", dest="reg_strategy",
                       default="build_model", choices=['build_model', 'pairwise', 'tournament',
                                                       'tournament_and_build_model', 'pairwise_and_build_model'],
                       help="Process used for model construction [Default = %(default)s]")
    group.add_argument("--nlin-protocol", dest="nlin_protocol",
                       type=str, default=None,
                       help="Can optionally specify a registration protocol that is different from defaults. "
                            "Parameters must be specified as in either of the following examples: \n"
                            "applications_testing/test_data/minctracc_example_nlin_protocol.csv \n"
                            "applications_testing/test_data/mincANTS_example_nlin_protocol.csv \n"
                            "[Default = %(default)s]")
    return p
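Because these parsers are built on configargparse.ArgParser, any of the options above can also be supplied from a config file rather than the command line. A minimal, self-contained sketch of that behaviour (the throwaway config file and the reduced choices list are illustrative only):

import os
import tempfile
from configargparse import ArgParser

# Illustrative only: the config-file key is the long option name without dashes,
# and the value is validated against choices just like a command-line argument.
with tempfile.NamedTemporaryFile("w", suffix=".cfg", delete=False) as cfg:
    cfg.write("registration-method = minctracc\n")

parser = ArgParser(default_config_files=[cfg.name])
parser.add_argument("--registration-method", dest="reg_method",
                    default="ANTS", choices=["ANTS", "minctracc"])
options = parser.parse_args([])
assert options.reg_method == "minctracc"
os.remove(cfg.name)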