This article collects typical usage examples of the aslist.aslist function in Python. If you are wondering what the aslist function does, or how and where to use it, the curated code examples below may help.
Fifteen code examples of the aslist function are shown, ordered by popularity.
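
In every example below, aslist normalizes a value that may be either a single item or a list into a list, so callers can iterate over it uniformly. For reference, here is a minimal sketch of such a helper (an assumption for illustration; the upstream implementation may differ in details):

def aslist(l):
    # Wrap a scalar value in a list; pass lists through unchanged.
    if isinstance(l, list):
        return l
    return [l]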
Example 1: __init__
def __init__(self, toolpath_object, validateAs, docpath):
    self.names = get_schema()
    self.docpath = docpath
    self.tool = toolpath_object

    # Validate tool document
    validate.validate_ex(self.names.get_name(validateAs, ""), self.tool)

    self.validate_requirements(self.tool, "requirements")
    self.validate_requirements(self.tool, "hints")

    for t in self.tool.get("requirements", []):
        t["_docpath"] = docpath

    for t in self.tool.get("hints", []):
        t["_docpath"] = docpath

    # Import schema defs
    self.schemaDefs = {
        "Any": [
            "null",
            "boolean",
            "int",
            "long",
            "float",
            "double",
            "bytes",
            "string",
            "File",
            {"type": "array", "items": "Any"},
            {"type": "map", "values": "Any"}
        ]}

    sd, _ = get_feature("SchemaDefRequirement", requirements=self.tool.get("requirements"), hints=self.tool.get("hints"))
    if sd:
        for i in sd["types"]:
            avro.schema.make_avsc_object(i, self.names)
            self.schemaDefs[i["name"]] = i

    # Build record schema from inputs
    self.inputs_record_schema = {"name": "input_record_schema", "type": "record", "fields": []}
    for i in self.tool["inputs"]:
        c = copy.copy(i)
        c["name"] = c["id"][1:]
        del c["id"]
        if "default" in c:
            c["type"] = ["null"] + aslist(c["type"])
        self.inputs_record_schema["fields"].append(c)
    avro.schema.make_avsc_object(self.inputs_record_schema, self.names)

    self.outputs_record_schema = {"name": "outputs_record_schema", "type": "record", "fields": []}
    for i in self.tool["outputs"]:
        c = copy.copy(i)
        c["name"] = c["id"][1:]
        del c["id"]
        if "default" in c:
            c["type"] = ["null"] + aslist(c["type"])
        self.outputs_record_schema["fields"].append(c)
    avro.schema.make_avsc_object(self.outputs_record_schema, self.names)
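
A note on the `["null"] + aslist(c["type"])` pattern in Example 1: when a parameter has a default value, its type is rewritten into a union with "null" so the field becomes optional, and aslist makes this work whether the declared type is a single type name or already a list of types. Illustrative values (the parameter is invented):

# Hypothetical parameter, for illustration only.
c = {"name": "threads", "type": "int", "default": 1}
["null"] + aslist(c["type"])          # -> ["null", "int"]
["null"] + aslist(["int", "string"])  # -> ["null", "int", "string"]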
Example 2: __init__
def __init__(self, toc, j, renderlist, redirects):
    self.typedoc = StringIO.StringIO()
    self.toc = toc
    self.subs = {}
    self.docParent = {}
    self.docAfter = {}
    self.rendered = set()
    self.redirects = redirects
    self.title = None

    for t in j:
        if "extends" in t:
            for e in aslist(t["extends"]):
                add_dictlist(self.subs, e, t["name"])
                #if "docParent" not in t and "docAfter" not in t:
                #    add_dictlist(self.docParent, e, t["name"])
        if t.get("docParent"):
            add_dictlist(self.docParent, t["docParent"], t["name"])
        if t.get("docChild"):
            for c in aslist(t["docChild"]):
                add_dictlist(self.docParent, t["name"], c)
        if t.get("docAfter"):
            add_dictlist(self.docAfter, t["docAfter"], t["name"])

    _, _, metaschema_loader = schema.get_metaschema()
    alltypes = schema.extend_and_specialize(j, metaschema_loader)

    self.typemap = {}
    self.uses = {}
    self.record_refs = {}
    for t in alltypes:
        self.typemap[t["name"]] = t
        try:
            if t["type"] == "record":
                self.record_refs[t["name"]] = []
                for f in t.get("fields", []):
                    p = has_types(f)
                    for tp in p:
                        if tp not in self.uses:
                            self.uses[tp] = []
                        if (t["name"], f["name"]) not in self.uses[tp]:
                            _, frg1 = urlparse.urldefrag(t["name"])
                            _, frg2 = urlparse.urldefrag(f["name"])
                            self.uses[tp].append((frg1, frg2))
                        if tp not in basicTypes and tp not in self.record_refs[t["name"]]:
                            self.record_refs[t["name"]].append(tp)
        except KeyError as e:
            _logger.error("Did not find 'type' in %s", t)
            raise

    for f in alltypes:
        if (f["name"] in renderlist or
            ((not renderlist) and
             ("extends" not in f) and
             ("docParent" not in f) and
             ("docAfter" not in f))):
            self.render_type(f, 1)
Example 3: collect_output
def collect_output(self, schema, builder, outdir):
    r = None
    if "outputBinding" in schema:
        binding = schema["outputBinding"]
        if "glob" in binding:
            r = []
            bg = builder.do_eval(binding["glob"])
            for gb in aslist(bg):
                r.extend([{"path": g, "class": "File"} for g in builder.fs_access.glob(os.path.join(outdir, gb))])
            for files in r:
                checksum = hashlib.sha1()
                with builder.fs_access.open(files["path"], "rb") as f:
                    contents = f.read(CONTENT_LIMIT)
                    if binding.get("loadContents"):
                        files["contents"] = contents
                    filesize = 0
                    while contents != "":
                        checksum.update(contents)
                        filesize += len(contents)
                        contents = f.read(1024 * 1024)
                files["checksum"] = "sha1$%s" % checksum.hexdigest()
                files["size"] = filesize

        if "outputEval" in binding:
            r = builder.do_eval(binding["outputEval"], context=r)
            if schema["type"] == "File" and (not isinstance(r, dict) or "path" not in r):
                raise WorkflowException("Expression must return a file object.")

        if schema["type"] == "File":
            if not r:
                raise WorkflowException("No matches for output file with glob: '{}'".format(bg))
            if len(r) > 1:
                raise WorkflowException("Multiple matches for output item that is a single file.")
            r = r[0]

        if schema["type"] == "File" and "secondaryFiles" in binding:
            r["secondaryFiles"] = []
            for sf in aslist(binding["secondaryFiles"]):
                if isinstance(sf, dict):
                    sfpath = builder.do_eval(sf, context=r["path"])
                else:
                    sfpath = {"path": substitute(r["path"], sf), "class": "File"}
                if isinstance(sfpath, list):
                    r["secondaryFiles"].extend(sfpath)
                else:
                    r["secondaryFiles"].append(sfpath)

            for sf in r["secondaryFiles"]:
                if not builder.fs_access.exists(sf["path"]):
                    raise WorkflowException(
                        "Missing secondary file of '%s' of primary file '%s'" % (sf["path"], r["path"])
                    )

    if not r and schema["type"] == "record":
        r = {}
        for f in schema["fields"]:
            r[f["name"]] = self.collect_output(f, builder, outdir)

    return r
Example 4: exeval
def exeval(ex, jobinput, requirements, outdir, tmpdir, context, pull_image):
    if ex["engine"] == "https://w3id.org/cwl/cwl#JsonPointer":
        try:
            obj = {"job": jobinput, "context": context, "outdir": outdir, "tmpdir": tmpdir}
            return schema_salad.ref_resolver.resolve_json_pointer(obj, ex["script"])
        except ValueError as v:
            raise WorkflowException("%s in %s" % (v, obj))

    for r in reversed(requirements):
        if r["class"] == "ExpressionEngineRequirement" and r["id"] == ex["engine"]:
            runtime = []

            class DR(object):
                pass
            dr = DR()
            dr.requirements = r.get("requirements", [])
            dr.hints = r.get("hints", [])

            (docker_req, docker_is_req) = process.get_feature(dr, "DockerRequirement")
            img_id = None
            if docker_req:
                img_id = docker.get_from_requirements(docker_req, docker_is_req, pull_image)
            if img_id:
                runtime = ["docker", "run", "-i", "--rm", img_id]

            exdefs = []
            for exdef in r.get("engineConfig", []):
                if isinstance(exdef, dict) and "ref" in exdef:
                    with open(exdef["ref"][7:]) as f:
                        exdefs.append(f.read())
                elif isinstance(exdef, basestring):
                    exdefs.append(exdef)

            inp = {
                "script": ex["script"],
                "engineConfig": exdefs,
                "job": jobinput,
                "context": context,
                "outdir": outdir,
                "tmpdir": tmpdir,
            }

            _logger.debug("Invoking expression engine %s with %s",
                          runtime + aslist(r["engineCommand"]),
                          json.dumps(inp, indent=4))

            sp = subprocess.Popen(runtime + aslist(r["engineCommand"]),
                                  shell=False,
                                  close_fds=True,
                                  stdin=subprocess.PIPE,
                                  stdout=subprocess.PIPE)

            (stdoutdata, stderrdata) = sp.communicate(json.dumps(inp) + "\n\n")
            if sp.returncode != 0:
                raise WorkflowException("Expression engine returned non-zero exit code on evaluation of\n%s" % json.dumps(inp, indent=4))

            return json.loads(stdoutdata)

    raise WorkflowException("Unknown expression engine '%s'" % ex["engine"])
Example 5: checkFormat
def checkFormat(actualFile, inputFormats, requirements, ontology):
    for af in aslist(actualFile):
        if "format" not in af:
            raise validate.ValidationException("Missing required 'format' for File %s" % af)
        for inpf in aslist(inputFormats):
            if af["format"] == inpf or formatSubclassOf(af["format"], inpf, ontology, set()):
                return
        raise validate.ValidationException("Incompatible file format %s required format(s) %s" % (af["format"], inputFormats))
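
A hedged usage sketch for Example 5 (the File object and format URIs are invented for illustration): because both arguments pass through aslist, a single File dict and a single format string are accepted just as well as lists of either.

# Hypothetical values, for illustration only.
f = {"class": "File", "path": "reads.fastq", "format": "http://example.com/fastq"}
checkFormat(f, "http://example.com/fastq", [], None)   # exact match: returns quietly
# checkFormat(f, "http://example.com/bam", [], None)   # would raise ValidationException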
Example 6: __init__
def __init__(self, toolpath_object, **kwargs):
    (_, self.names, _) = get_schema()
    self.tool = toolpath_object
    self.requirements = kwargs.get("requirements", []) + self.tool.get("requirements", [])
    self.hints = kwargs.get("hints", []) + self.tool.get("hints", [])
    if "loader" in kwargs:
        self.formatgraph = kwargs["loader"].graph
    else:
        self.formatgraph = None

    self.validate_hints(self.tool.get("hints", []), strict=kwargs.get("strict"))

    self.schemaDefs = {}

    sd, _ = self.get_requirement("SchemaDefRequirement")
    if sd:
        sdtypes = sd["types"]
        av = schema_salad.schema.make_valid_avro(sdtypes, {t["name"]: t for t in sdtypes}, set())
        for i in av:
            self.schemaDefs[i["name"]] = i
        avro.schema.make_avsc_object(av, self.names)

    # Build record schema from inputs
    self.inputs_record_schema = {"name": "input_record_schema", "type": "record", "fields": []}
    self.outputs_record_schema = {"name": "outputs_record_schema", "type": "record", "fields": []}

    for key in ("inputs", "outputs"):
        for i in self.tool[key]:
            c = copy.copy(i)
            doc_url, _ = urlparse.urldefrag(c['id'])
            c["name"] = shortname(c["id"])
            del c["id"]

            if "type" not in c:
                raise validate.ValidationException("Missing `type` in parameter `%s`" % c["name"])

            if "default" in c and "null" not in aslist(c["type"]):
                c["type"] = ["null"] + aslist(c["type"])

            if key == "inputs":
                self.inputs_record_schema["fields"].append(c)
            elif key == "outputs":
                self.outputs_record_schema["fields"].append(c)

    try:
        self.inputs_record_schema = schema_salad.schema.make_valid_avro(self.inputs_record_schema, {}, set())
        avro.schema.make_avsc_object(self.inputs_record_schema, self.names)
    except avro.schema.SchemaParseException as e:
        raise validate.ValidationException("Got error `%s` while processing inputs of %s:\n%s" % (str(e), self.tool["id"], json.dumps(self.inputs_record_schema, indent=4)))

    try:
        self.outputs_record_schema = schema_salad.schema.make_valid_avro(self.outputs_record_schema, {}, set())
        avro.schema.make_avsc_object(self.outputs_record_schema, self.names)
    except avro.schema.SchemaParseException as e:
        raise validate.ValidationException("Got error `%s` while processing outputs of %s:\n%s" % (str(e), self.tool["id"], json.dumps(self.outputs_record_schema, indent=4)))
Example 7: merge_properties
def merge_properties(a, b):
    c = {}
    for i in a:
        if i not in b:
            c[i] = a[i]
    for i in b:
        if i not in a:
            c[i] = b[i]
    for i in a:
        if i in b:
            c[i] = aslist(a[i]) + aslist(b[i])
    return c
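
In Example 7, keys unique to either dict are copied over unchanged, while keys present in both have their values concatenated as lists via aslist. An illustrative call (values invented):

# Invented values, for illustration only.
merge_properties({"x": 1, "y": [2]}, {"y": 3, "z": 4})
# -> {"x": 1, "z": 4, "y": [2, 3]}   (key order may vary)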
Example 8: object_from_state
def object_from_state(state, parms, frag_only, supportsMultipleInput):
    inputobj = {}
    for inp in parms:
        iid = inp["id"]
        if frag_only:
            iid = shortname(iid)
        if "source" in inp:
            if isinstance(inp["source"], list) and not supportsMultipleInput:
                raise WorkflowException("Workflow contains multiple inbound links to a single parameter but MultipleInputFeatureRequirement is not declared.")
            connections = aslist(inp["source"])
            for src in connections:
                if src in state and state[src] is not None:
                    if not match_types(inp["type"], state[src], iid, inputobj,
                                       inp.get("linkMerge", ("merge_nested" if len(connections) > 1 else None)),
                                       valueFrom=inp.get("valueFrom")):
                        raise WorkflowException("Type mismatch between source '%s' (%s) and sink '%s' (%s)" % (src, state[src].parameter["type"], inp["id"], inp["type"]))
                elif src not in state:
                    raise WorkflowException("Connect source '%s' on parameter '%s' does not exist" % (src, inp["id"]))
                else:
                    return None
        elif "default" in inp:
            inputobj[iid] = inp["default"]
        elif "valueFrom" in inp:
            inputobj[iid] = None
        else:
            raise WorkflowException("Value for %s not specified" % (inp["id"]))
    return inputobj
Example 9: adjust_for_scatter
def adjust_for_scatter(self, steps):
    (scatterSpec, _) = self.get_requirement("ScatterFeatureRequirement")
    for step in steps:
        if scatterSpec and "scatter" in step.tool:
            inputparms = copy.deepcopy(step.tool["inputs"])
            outputparms = copy.deepcopy(step.tool["outputs"])
            scatter = aslist(step.tool["scatter"])

            inp_map = {i["id"]: i for i in inputparms}
            for s in scatter:
                if s not in inp_map:
                    raise WorkflowException("Invalid Scatter parameter '%s'" % s)
                inp_map[s]["type"] = {"type": "array", "items": inp_map[s]["type"]}

            if step.tool.get("scatterMethod") == "nested_crossproduct":
                nesting = len(scatter)
            else:
                nesting = 1

            for r in xrange(0, nesting):
                for i in outputparms:
                    i["type"] = {"type": "array", "items": i["type"]}

            step.tool["inputs"] = inputparms
            step.tool["outputs"] = outputparms
Example 10: scandeps
def scandeps(base, doc, reffields, urlfields, loadref):
    r = []
    if isinstance(doc, dict):
        if "id" in doc:
            if doc["id"].startswith("file://"):
                df, _ = urlparse.urldefrag(doc["id"])
                if base != df:
                    r.append({
                        "class": "File",
                        "path": df
                    })
                    base = df

        for k, v in doc.iteritems():
            if k in reffields:
                for u in aslist(v):
                    if isinstance(u, dict):
                        r.extend(scandeps(base, u, reffields, urlfields, loadref))
                    else:
                        sub = loadref(base, u)
                        subid = urlparse.urljoin(base, u)
                        deps = {
                            "class": "File",
                            "path": subid
                        }
                        sf = scandeps(subid, sub, reffields, urlfields, loadref)
                        if sf:
                            deps["secondaryFiles"] = sf
                        r.append(deps)
            elif k in urlfields:
                for u in aslist(v):
                    r.append({
                        "class": "File",
                        "path": urlparse.urljoin(base, u)
                    })
            else:
                r.extend(scandeps(base, v, reffields, urlfields, loadref))
    elif isinstance(doc, list):
        for d in doc:
            r.extend(scandeps(base, d, reffields, urlfields, loadref))
    return r
Example 11: exeval
def exeval(ex, jobinput, requirements, docpath, context, pull_image):
    if ex["engine"] == "JsonPointer":
        return ref_resolver.resolve_pointer({"job": jobinput, "context": context}, ex["script"])

    for r in reversed(requirements):
        if r["class"] == "ExpressionEngineRequirement" and r["id"] == ex["engine"]:
            if r["id"][0] != "#":
                with open(os.path.join(docpath, r["id"])) as f:
                    ex_obj = yaml.load(f)
                sch = process.get_schema()
                validate.validate_ex(sch.get_name("ExpressionEngineRequirement", ""), ex_obj)
                r = ex_obj

            runtime = []
            img_id = docker.get_from_requirements(r.get("requirements"), r.get("hints"), pull_image)
            if img_id:
                runtime = ["docker", "run", "-i", "--rm", img_id]

            exdefs = []
            for exdef in r.get("expressionDefs", []):
                if isinstance(exdef, dict) and "ref" in exdef:
                    with open(os.path.join(r["_docpath"], exdef["ref"])) as f:
                        exdefs.append(f.read())
                elif isinstance(exdef, basestring):
                    exdefs.append(exdef)

            inp = {
                "script": ex["script"],
                "expressionDefs": exdefs,
                "job": jobinput,
                "context": context
            }

            _logger.debug(json.dumps(inp))

            sp = subprocess.Popen(runtime + aslist(r["engineCommand"]),
                                  shell=False,
                                  close_fds=True,
                                  stdin=subprocess.PIPE,
                                  stdout=subprocess.PIPE)

            (stdoutdata, stderrdata) = sp.communicate(json.dumps(inp) + "\n\n")
            if sp.returncode != 0:
                raise WorkflowException("Expression engine returned non-zero exit code.")

            return json.loads(stdoutdata)

    raise WorkflowException("Unknown expression engine '%s'" % ex["engine"])
Example 12: add_schemas
def add_schemas(self, ns, base_url):
    for sch in aslist(ns):
        self.graph.parse(urlparse.urljoin(base_url, sch))

    for s, _, _ in self.graph.triples((None, RDF.type, RDF.Property)):
        self._add_properties(s)
    for s, _, o in self.graph.triples((None, RDFS.subPropertyOf, None)):
        self._add_properties(s)
        self._add_properties(o)
    for s, _, _ in self.graph.triples((None, RDFS.range, None)):
        self._add_properties(s)
    for s, _, _ in self.graph.triples((None, RDF.type, OWL.ObjectProperty)):
        self._add_properties(s)

    for s, _, _ in self.graph.triples((None, None, None)):
        self.idx[str(s)] = True
Example 13: try_make_job
def try_make_job(self, step, basedir, **kwargs):
    inputparms = step.tool["inputs"]
    outputparms = step.tool["outputs"]

    supportsMultipleInput = bool(self.workflow.get_requirement("MultipleInputFeatureRequirement")[0])

    try:
        inputobj = object_from_state(self.state, inputparms, False, supportsMultipleInput)
        if inputobj is None:
            _logger.debug("[workflow %s] job step %s not ready", id(self), step.id)
            return

        _logger.debug("[step %s] starting job step %s of workflow %s", id(step), step.id, id(self))

        if step.submitted:
            return

        callback = functools.partial(self.receive_output, step, outputparms)

        if "scatter" in step.tool:
            scatter = aslist(step.tool["scatter"])
            method = step.tool.get("scatterMethod")
            if method is None and len(scatter) != 1:
                raise WorkflowException("Must specify scatterMethod when scattering over multiple inputs")

            if method == "dotproduct" or method is None:
                jobs = dotproduct_scatter(step, inputobj, basedir, scatter, callback, **kwargs)
            elif method == "nested_crossproduct":
                jobs = nested_crossproduct_scatter(step, inputobj, basedir, scatter, callback, **kwargs)
            elif method == "flat_crossproduct":
                jobs = flat_crossproduct_scatter(step, inputobj, basedir, scatter, callback, 0, **kwargs)
        else:
            jobs = step.job(inputobj, basedir, callback, **kwargs)

        step.submitted = True

        for j in jobs:
            yield j
    except WorkflowException:
        raise
    except Exception as e:
        _logger.exception("Unhandled exception")
        self.processStatus = "permanentFail"
        step.completed = True
Example 14: add_schemas
def add_schemas(self, ns, base_url):
    for sch in aslist(ns):
        try:
            self.graph.parse(urlparse.urljoin(base_url, sch), format="xml")
        except xml.sax.SAXParseException:
            self.graph.parse(urlparse.urljoin(base_url, sch), format="turtle")

    for s, _, _ in self.graph.triples((None, RDF.type, RDF.Property)):
        self._add_properties(s)
    for s, _, o in self.graph.triples((None, RDFS.subPropertyOf, None)):
        self._add_properties(s)
        self._add_properties(o)
    for s, _, _ in self.graph.triples((None, RDFS.range, None)):
        self._add_properties(s)
    for s, _, _ in self.graph.triples((None, RDF.type, OWL.ObjectProperty)):
        self._add_properties(s)

    for s, _, _ in self.graph.triples((None, None, None)):
        self.idx[str(s)] = True
Example 15: try_make_job
def try_make_job(self, step, basedir, **kwargs):
    _logger.debug("Try to make job %s", step.id)

    inputparms = step.tool["inputs"]
    outputparms = step.tool["outputs"]

    try:
        inputobj = self.object_from_state(inputparms, False)
        if inputobj is None:
            return

        if step.submitted:
            return

        callback = functools.partial(self.receive_output, step, outputparms)

        (scatterSpec, _) = self.get_requirement("ScatterFeatureRequirement")
        if scatterSpec and "scatter" in step.tool:
            scatter = aslist(step.tool["scatter"])
            method = step.tool.get("scatterMethod")
            if method is None and len(scatter) != 1:
                raise WorkflowException("Must specify scatterMethod when scattering over multiple inputs")

            if method == "dotproduct" or method is None:
                jobs = dotproduct_scatter(step, inputobj, basedir, scatter, callback, **kwargs)
            elif method == "nested_crossproduct":
                jobs = nested_crossproduct_scatter(step, inputobj, basedir, scatter, callback, **kwargs)
            elif method == "flat_crossproduct":
                jobs = flat_crossproduct_scatter(step, inputobj, basedir, scatter, callback, 0, **kwargs)
        else:
            jobs = step.job(inputobj, basedir, callback, **kwargs)

        step.submitted = True

        for j in jobs:
            yield j
    except Exception as e:
        _logger.error(e)
        self.processStatus = "permanentFail"
        step.completed = True