This article collects typical usage examples of the Python method toposort.toposort_flatten. If you are wondering what exactly toposort.toposort_flatten does, how to call it, or what real code that uses it looks like, the curated examples here should help. You can also explore the toposort module, which provides this function, for further usage.
The following presents 10 code examples of toposort.toposort_flatten, sorted by popularity by default.
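All of the examples below share the same core API: the input to toposort is a dict mapping each item to the set of items it depends on, toposort() yields successive dependency levels as sets, and toposort_flatten() returns a single flat list with dependencies first. A minimal sketch:

from toposort import toposort, toposort_flatten

# Each key depends on everything in its value set.
data = {
    'b': {'a'},       # 'b' depends on 'a'
    'c': {'a', 'b'},  # 'c' depends on 'a' and 'b'
}

print(list(toposort(data)))    # [{'a'}, {'b'}, {'c'}] -- levels; no ordering inside a level
print(toposort_flatten(data))  # ['a', 'b', 'c'] -- one flat, dependencies-first list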
Example 1: order_by_refs
# Required import: import toposort [as alias]
# Or: from toposort import toposort_flatten [as alias]
def order_by_refs(envs):
    """
    Return topologically sorted list of environments,
    i.e. all referenced environments are placed before their references.
    """
    topology = {
        env['name']: set(env['refs'])
        for env in envs
    }
    by_name = {
        env['name']: env
        for env in envs
    }

    return [
        by_name[name]
        for name in toposort_flatten(topology)
    ]
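A hedged usage sketch: with environment dicts shaped roughly like the ones below (the names and the exact shape of envs are made up for illustration), the referenced environment comes out before the environment that refers to it.

envs = [
    {'name': 'py36-lint', 'refs': ['py36']},
    {'name': 'py36', 'refs': []},
]
print([env['name'] for env in order_by_refs(envs)])  # ['py36', 'py36-lint']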
Example 2: sorted
# Required import: import toposort [as alias]
# Or: from toposort import toposort_flatten [as alias]
def sorted(self):
    if len(self.changes) == 0:
        return []

    changes = {}
    for change in self.changes:
        changes[change.id_] = set(change.requires)

    sorted_changes = toposort_flatten(changes)
    results = []
    for change_id in sorted_changes:
        for change in self.changes:
            if change_id == change.id_:
                results.append(change)
                break

    return results
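The essential pattern is mapping each change id to the set of ids it requires and letting toposort_flatten produce the order. A minimal sketch with a hypothetical Change tuple standing in for the project's real change objects:

from collections import namedtuple
from toposort import toposort_flatten

Change = namedtuple('Change', ['id_', 'requires'])
changes = [Change('create-index', ['create-table']), Change('create-table', [])]

order = toposort_flatten({c.id_: set(c.requires) for c in changes})
print(order)  # ['create-table', 'create-index']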
Example 3: expand
# Required import: import toposort [as alias]
# Or: from toposort import toposort_flatten [as alias]
def expand(self, states):
    seen = set()
    depends = defaultdict(list)
    queue = deque()
    for state in states:
        queue.append(state)
        seen.add(state)
    while len(queue):
        state = queue.popleft()
        for arc in self.get_arcs(state, EPSILON):
            depends[arc[1]].append((arc[0], arc[3]))
            if arc[1] in seen:
                continue
            queue.append(arc[1])
            seen.add(arc[1])

    depends_for_toposort = {key: {state for state, weight in value}
                            for key, value in depends.items()}
    order = toposort_flatten(depends_for_toposort)

    next_states = states
    for next_state in order:
        next_states[next_state] = self.combine_weights(
            *([next_states.get(next_state)] +
              [next_states[prev_state] + weight
               for prev_state, weight in depends[next_state]]))

    return next_states
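The step worth noting is the conversion from weighted arcs to bare dependency sets: toposort only needs to know which states a state depends on, so the weights are dropped before sorting and looked up again from depends once the order is known. A small sketch with made-up state ids and weights:

from collections import defaultdict
from toposort import toposort_flatten

# Hypothetical epsilon-arc dependencies: target state -> [(source state, weight), ...]
depends = defaultdict(list)
depends[2].append((1, 0.5))
depends[3].append((2, 0.25))
depends[3].append((1, 0.75))

depends_for_toposort = {key: {state for state, _ in value} for key, value in depends.items()}
print(toposort_flatten(depends_for_toposort))  # [1, 2, 3]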
Example 4: find_joins_for_tables
# Required import: import toposort [as alias]
# Or: from toposort import toposort_flatten [as alias]
def find_joins_for_tables(joins, base_table, required_tables):
    """
    Given a set of tables required for a dataset query, this function finds the joins required for the query
    and sorts them topologically.

    :return:
        A list of joins in the order that they must be joined to the query.
    :raises:
        MissingTableJoinException - If a table is required but there is no join for that table
        CircularJoinsException - If there is a circular dependency between two or more joins
    """
    dependencies = defaultdict(set)
    slicer_joins = {join.table: join for join in joins}

    while required_tables:
        table = required_tables.pop()

        if table not in slicer_joins:
            raise MissingTableJoinException(
                "Could not find a join for table {}".format(str(table))
            )

        join = slicer_joins[table]
        tables_required_for_join = set(join.criterion.tables_) - {
            base_table,
            join.table,
        }

        dependencies[join] |= {
            slicer_joins[table] for table in tables_required_for_join
        }
        required_tables += tables_required_for_join - {d.table for d in dependencies}

    try:
        return toposort_flatten(dependencies, sort=True)
    except CircularDependencyError as e:
        raise CircularJoinsException(str(e))
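Two details of the call are worth spelling out: sort=True sorts each dependency level before flattening, making the join order deterministic when several joins are independent of each other, and a dependency cycle surfaces as CircularDependencyError, which the function converts to CircularJoinsException. A sketch with hypothetical table names:

from toposort import toposort_flatten, CircularDependencyError

dependencies = {'orders': {'customers'}, 'customers': {'orders'}}
try:
    toposort_flatten(dependencies, sort=True)
except CircularDependencyError as e:
    print('circular joins:', e)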
Example 5: _apply_traits
# Required import: import toposort [as alias]
# Or: from toposort import toposort_flatten [as alias]
def _apply_traits(self, pipeline_def):
    transformers = [trait.transformer() for trait in pipeline_def._traits_dict.values()]
    transformers_dict = {t.name: t for t in transformers}
    transformer_names = set(transformers_dict.keys())

    for transformer in transformers:
        if not set(transformer.dependencies()).issubset(transformer_names):
            missing = set(transformer.dependencies()) - transformer_names
            raise ModelValidationError(
                f'{pipeline_def}: trait requires missing traits: ' + ', '.join(missing)
            )

    # order transformers according to dependencies
    transformer_dependencies = {
        t.name: t.order_dependencies() & transformer_names for t in transformers
    }

    ordered_transformers = []
    for name in toposort.toposort_flatten(transformer_dependencies):
        ordered_transformers.append(transformers_dict[name])

    # hardcode meta trait transformer
    ordered_transformers.append(MetaTraitTransformer())

    # inject new steps
    for transformer in ordered_transformers:
        for step in transformer.inject_steps():
            pipeline_def.add_step(step)

    # do remaining processing
    for transformer in ordered_transformers:
        transformer.process_pipeline_args(pipeline_def)
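Intersecting each transformer's declared dependencies with the set of known names (t.order_dependencies() & transformer_names) keeps names that are not actual transformers out of the ordering. A sketch with hypothetical transformer names:

import toposort

transformer_names = {'checkout', 'build', 'publish'}
order_deps = {
    'publish': {'build', 'notify'},  # 'notify' is not a known transformer here
    'build': {'checkout'},
    'checkout': set(),
}

transformer_dependencies = {name: deps & transformer_names for name, deps in order_deps.items()}
print(toposort.toposort_flatten(transformer_dependencies))  # ['checkout', 'build', 'publish']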
Example 6: correlate
# Required import: import toposort [as alias]
# Or: from toposort import toposort_flatten [as alias]
def correlate(self, project):
    # Add procedures, interfaces and types from parent to our lists
    self.all_types = {}
    for dt in self.types:
        self.all_types[dt.name.lower()] = dt
    self.all_vars = {}
    for var in self.variables:
        self.all_vars[var.name.lower()] = var
    self.all_absinterfaces = {}
    self.all_procs = {}

    # Add procedures and types from USED modules to our lists
    for mod, extra in self.uses:
        if type(mod) is str: continue
        procs, absints, types, variables = mod.get_used_entities(extra)
        self.all_procs.update(procs)
        self.all_absinterfaces.update(absints)
        self.all_types.update(types)
        self.all_vars.update(variables)
    self.uses = [m[0] for m in self.uses]

    typelist = {}
    for dtype in self.types:
        if dtype.extends and dtype.extends.lower() in self.all_types:
            dtype.extends = self.all_types[dtype.extends.lower()]
            typelist[dtype] = set([dtype.extends])
        else:
            typelist[dtype] = set([])
    typeorder = toposort.toposort_flatten(typelist)

    for dtype in typeorder:
        dtype.visible = True
        if dtype in self.types: dtype.correlate(project)
    for var in self.variables:
        var.correlate(project)
    for com in self.common:
        com.correlate(project)

    # Sort content
    self.sort()
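One thing to keep in mind when the toposort keys are arbitrary objects rather than strings: toposort_flatten sorts each dependency level by default, which requires the keys to be orderable, and passing sort=False skips that step. Whether the call above needs it depends on the type objects involved. A sketch with hypothetical type objects:

import toposort

class DerivedType(object):
    def __init__(self, name, extends=None):
        self.name, self.extends = name, extends

base = DerivedType('base_t')
child = DerivedType('child_t', extends=base)

typelist = {base: set(), child: {base}}
order = toposort.toposort_flatten(typelist, sort=False)
print([t.name for t in order])  # ['base_t', 'child_t']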
Example 7: ontologise
# Required import: import toposort [as alias]
# Or: from toposort import toposort_flatten [as alias]
def ontologise(self):
    '''
    Takes the TermsToGenes and GenesToTerms dictionaries and
    corrects them for an AnnotationSet with a hierarchical ontology.

    If a gene is associated with a term, e.g. gene ENSG00000144061 is
    associated with the HPO term Nephropathy, it is also associated with
    all the ancestors of that term, e.g. ENSG00000144061 must also be
    associated with Abnormality of the Kidney.

    This function deals with this by taking the TermsToGenes
    dictionary and, for each term, taking the descendant terms,
    looking up their associated genes and adding them to the TermsToGenes
    set for the original term.
    '''
    TermsToGenes = self.TermsToGenes
    TermsToOntP = copy.copy(self.TermsToOnt)
    TermsToOntC = self.reverseDict(TermsToOntP)
    Adict = dict()

    # topologically sort the terms in the ontology so that
    # every term is earlier in the list than all of its ancestors
    sortedterms = toposort_flatten(self.TermsToOnt)

    sortedterms_p = []
    for s in sortedterms:
        if s in TermsToGenes:
            sortedterms_p.append(s)

    for term in sortedterms_p:
        Adict[term] = set()
        if term in TermsToOntC:
            # return descendants
            allids = getAllAncestorsDescendants(term, TermsToOntC)
            allids.add(term)
            for term2 in allids:
                if term2 in TermsToGenes:
                    Adict[term] = Adict[term] | TermsToGenes[term2]

    self.TermsToGenes = Adict
    self.GenesToTerms = self.reverseDict(Adict)
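As a simplified illustration of the same goal (not the exact approach above), annotations can also be pushed up a hierarchy by walking the topological order in reverse: with a made-up child-to-parent map, every term's genes reach its ancestors.

from toposort import toposort_flatten

term_parents = {
    'Nephropathy': {'Abnormality of the kidney'},
    'Abnormality of the kidney': set(),
}
term_genes = {'Nephropathy': {'ENSG00000144061'}, 'Abnormality of the kidney': set()}

# Parents sort before their children here, so the reversed order visits children first
# and lets each one push its genes up to its parents.
for term in reversed(toposort_flatten(term_parents)):
    for parent in term_parents[term]:
        term_genes[parent] |= term_genes[term]

print(term_genes['Abnormality of the kidney'])  # {'ENSG00000144061'}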
Example 8: delete_instance_filtered
# Required import: import toposort [as alias]
# Or: from toposort import toposort_flatten [as alias]
def delete_instance_filtered(instance, model_class, delete_nullable, skip_transitive_deletes):
    """
    Deletes the DB instance recursively, skipping any models in the skip_transitive_deletes set.

    Callers *must* ensure that any models listed in skip_transitive_deletes are capable
    of being directly deleted when the instance is deleted (with automatic sorting handling
    dependency order) - for example, the Manifest and ManifestBlob tables for Repository will
    always refer to the *same* repository when Manifest references ManifestBlob, so we can safely
    skip transitive deletion for the Manifest table.

    Callers *must* catch any IntegrityError raised, as this method will *not* delete the instance
    under a transaction, to avoid locking the database.
    """
    # We need to sort the ops so that models get cleaned in order of their dependencies.
    ops = reversed(list(instance.dependencies(delete_nullable)))
    filtered_ops = []

    dependencies = defaultdict(set)

    for query, fk in ops:
        # We only want to skip transitive deletes, which are done using subqueries in the form of
        # DELETE FROM <table> in <subquery>. If an op is not using a subquery, we allow it to be
        # applied directly.
        if fk.model not in skip_transitive_deletes or query.op.lower() != "in":
            filtered_ops.append((query, fk))

        if query.op.lower() == "in":
            dependencies[fk.model.__name__].add(query.rhs.model.__name__)
        elif query.op == "=":
            dependencies[fk.model.__name__].add(model_class.__name__)
        else:
            raise RuntimeError("Unknown operator in recursive repository delete query")

    sorted_models = list(reversed(toposort.toposort_flatten(dependencies)))

    def sorted_model_key(query_fk_tuple):
        cmp_query, cmp_fk = query_fk_tuple
        if cmp_query.op.lower() == "in":
            return -1
        return sorted_models.index(cmp_fk.model.__name__)

    filtered_ops.sort(key=sorted_model_key)

    # NOTE: We do not use a transaction here, as it can be a VERY long transaction, potentially
    # locking up the database. Instead, we expect cleanup code to have run before this point, and
    # if this fails with an IntegrityError, callers are expected to catch and retry.
    for query, fk in filtered_ops:
        _model = fk.model
        if fk.null and not delete_nullable:
            _model.update(**{fk.name: None}).where(query).execute()
        else:
            _model.delete().where(query).execute()

    return instance.delete().where(instance._pk_expr()).execute()
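The key trick is the reversal: toposort_flatten puts referenced models first, so reversing the list gives an order in which dependent rows are deleted before the rows they point at. A sketch using the model names from the docstring:

import toposort

dependencies = {'Manifest': {'Repository'}, 'ManifestBlob': {'Repository', 'Manifest'}}
delete_order = list(reversed(toposort.toposort_flatten(dependencies)))
print(delete_order)  # ['ManifestBlob', 'Manifest', 'Repository']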
Example 9: get_relation_view
# Required import: import toposort [as alias]
# Or: from toposort import toposort_flatten [as alias]
def get_relation_view():
    _views = PreferenceRelationView.get_by(to_dict=True)
    views = []
    if current_app.config.get("USE_ACL"):
        for i in _views:
            try:
                if ACLManager().has_permission(i.get('name'),
                                               ResourceTypeEnum.RELATION_VIEW,
                                               PermEnum.READ):
                    views.append(i)
            except AbortException:
                pass
    else:
        views = _views

    view2cr_ids = dict()
    result = dict()
    name2id = list()
    for view in views:
        view2cr_ids.setdefault(view['name'], []).extend(json.loads(view['cr_ids']))
        name2id.append([view['name'], view['id']])

    id2type = dict()
    for view_name in view2cr_ids:
        for i in view2cr_ids[view_name]:
            id2type[i['parent_id']] = None
            id2type[i['child_id']] = None

        topo = {i['child_id']: {i['parent_id']} for i in view2cr_ids[view_name]}
        leaf = list(set(toposort.toposort_flatten(topo)) - set([j for i in topo.values() for j in i]))

        leaf2show_types = {i: [t['child_id'] for t in CITypeRelation.get_by(parent_id=i)] for i in leaf}

        node2show_types = copy.deepcopy(leaf2show_types)

        def _find_parent(_node_id):
            parents = topo.get(_node_id, {})
            for parent in parents:
                node2show_types.setdefault(parent, []).extend(node2show_types.get(_node_id, []))
                _find_parent(parent)
            if not parents:
                return

        for l in leaf:
            _find_parent(l)

        for node_id in node2show_types:
            node2show_types[node_id] = [CITypeCache.get(i).to_dict() for i in set(node2show_types[node_id])]

        result[view_name] = dict(topo=list(map(list, toposort.toposort(topo))),
                                 topo_flatten=list(toposort.toposort_flatten(topo)),
                                 leaf=leaf,
                                 leaf2show_types=leaf2show_types,
                                 node2show_types=node2show_types,
                                 show_types=[CITypeCache.get(j).to_dict()
                                             for i in leaf2show_types.values() for j in i])

    for type_id in id2type:
        id2type[type_id] = CITypeCache.get(type_id).to_dict()

    return result, id2type, sorted(name2id, key=lambda x: x[1])
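The topo dict maps each child type id to the set of its parent type ids, so toposort() yields the hierarchy level by level, toposort_flatten() gives one ancestors-first list, and the leaf nodes are simply the ids that never appear as a parent. A sketch with hypothetical CI type names in place of ids:

import toposort

topo = {'server': {'rack'}, 'rack': {'idc'}, 'app': {'server'}}

print(list(map(list, toposort.toposort(topo))))  # [['idc'], ['rack'], ['server'], ['app']]
print(toposort.toposort_flatten(topo))           # ['idc', 'rack', 'server', 'app']

leaf = list(set(toposort.toposort_flatten(topo)) - {p for ps in topo.values() for p in ps})
print(leaf)  # ['app']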
Example 10: __init__
# Required import: import toposort [as alias]
# Or: from toposort import toposort_flatten [as alias]
def __init__(self, thread_id, name, experiment, component_id, max_results, cache_results):
    threading.Thread.__init__(self)
    self.threadID = thread_id
    self.name = name
    self.experiment = experiment
    self.comp_id = component_id
    self.result = {}
    self.max_results = max_results
    self.cache_results = cache_results
    print "Submitting topology to storm. End component", self.comp_id

    exp = Experiment.objects.get(pk=self.experiment)
    graph = exp.workflow.graph_data
    graph_data = {}
    print graph
    tmp = graph.split(',')
    for elem in tmp:
        first_node = elem.split(":")[0]
        second_node = elem.split(":")[1]
        if second_node in graph_data:
            depend_nodes = graph_data[second_node]
            depend_nodes.add(first_node)
        else:
            graph_data[second_node] = set()
            graph_data[second_node].add(first_node)

    topological_graph = toposort_flatten(graph_data)
    print "Graph after topological sort", topological_graph

    message = {
        'exp_id': self.experiment, 'result': self.comp_id,
        'graph': topological_graph, 'components': defaultdict()}
    for data in topological_graph:
        component_id = int(data)
        comp = Component.objects.get(pk=component_id)
        if comp.operation_type.function_type == 'Create':
            if comp.operation_type.function_arg == 'Table':
                filename = comp.operation_type.function_subtype_arg
                input_data = read_csv(filename)
                message['input'] = {}
                for elem in list(input_data.columns):
                    message['input'][elem] = list(input_data[elem])
                message['cols'] = list(input_data.columns)
                # message['input'] = input_data.to_dict()
        serialized_obj = serializers.serialize('json', [comp.operation_type, ])
        print "Component_id", component_id, " ", comp.operation_type
        message['components'][data] = serialized_obj

    print "Message ", message
    r = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=0)
    self.pubsub = r.pubsub(ignore_subscribe_messages=True)
    self.pubsub.subscribe("Exp " + str(self.experiment))
    ret = r.publish('workflow', json.dumps(message))
    print "return", ret
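The graph string is a comma-separated list of "upstream:downstream" edges, which the loop above turns into the downstream -> {upstream} mapping that toposort expects. A minimal sketch with a made-up graph string (written with Python 3 print for brevity):

from toposort import toposort_flatten

graph = "1:2,2:3"  # component 1 feeds 2, and 2 feeds 3

graph_data = {}
for elem in graph.split(','):
    first_node, second_node = elem.split(":")
    graph_data.setdefault(second_node, set()).add(first_node)

print(toposort_flatten(graph_data))  # ['1', '2', '3']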