This article collects typical usage examples of the sqlalchemy.util.defaultdict function in Python. If you are wondering what exactly defaultdict does, how it is called, and where it is used, the hand-picked code examples below should help.
The sections that follow show 15 code examples of the defaultdict function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code samples.
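Before the dialect- and ORM-level examples, a minimal sketch of the underlying behaviour: in the SQLAlchemy versions these snippets come from, util.defaultdict is essentially the standard library's collections.defaultdict (re-exported, with a fallback only on very old Pythons), so the grouping idiom used throughout can be previewed with a plain import. The keys and values here are made up:

from collections import defaultdict   # util.defaultdict behaves the same way

grouped = defaultdict(list)            # missing keys start out as empty lists
for key, value in [("a", 1), ("b", 2), ("a", 3)]:
    grouped[key].append(value)         # no KeyError on first access

assert dict(grouped) == {"a": [1, 3], "b": [2]}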
Example 1: __init__
def __init__(self):
    from test.bootstrap.config import options
    self.write = options.write_profiles
    dirname, fname = os.path.split(__file__)
    self.short_fname = "profiles.txt"
    self.fname = os.path.join(dirname, self.short_fname)
    self.data = util.defaultdict(lambda: util.defaultdict(dict))
    self._read()
    if self.write:
        # rewrite for the case where features changed,
        # etc.
        self._write()
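The nested factory gives self.data the shape data[filename][test_key] -> dict without any explicit key checks. A standalone sketch of that shape, using collections.defaultdict directly and made-up file/test names:

from collections import defaultdict

# data[filename][test_key] is a plain dict, created on first access
data = defaultdict(lambda: defaultdict(dict))
data["profiles.txt"]["test_orm_query"]["calls"] = 1200     # hypothetical profile entry

assert data["profiles.txt"]["test_orm_query"] == {"calls": 1200}
assert data["unknown.txt"] == {}       # unseen file: empty inner defaultdict, no KeyError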
Example 2: _collect_delete_commands
def _collect_delete_commands(base_mapper, uowtransaction, table,
                             states_to_delete):
    """Identify values to use in DELETE statements for a list of
    states to be deleted."""

    delete = util.defaultdict(list)

    for state, state_dict, mapper, has_identity, connection \
            in states_to_delete:
        if not has_identity or table not in mapper._pks_by_table:
            continue

        params = {}
        delete[connection].append(params)
        for col in mapper._pks_by_table[table]:
            params[col.key] = \
                value = \
                mapper._get_state_attr_by_column(
                    state, state_dict, col)
            if value is None:
                raise sa_exc.FlushError(
                    "Can't delete from table "
                    "using NULL for primary "
                    "key value")

        if mapper.version_id_col is not None and \
                table.c.contains_column(mapper.version_id_col):
            params[mapper.version_id_col.key] = \
                mapper._get_committed_state_attr_by_column(
                    state, state_dict,
                    mapper.version_id_col)
    return delete
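The core idiom here is grouping one parameter dict per row under its connection, so flushes spanning several connections stay separated. A minimal standalone sketch with made-up connection labels and primary-key values:

from collections import defaultdict

# hypothetical stand-ins for connections and per-row primary-key params
delete = defaultdict(list)
rows = [("conn_a", {"id": 1}), ("conn_a", {"id": 2}), ("conn_b", {"id": 7})]
for connection, params in rows:
    delete[connection].append(params)   # first access creates the empty list

assert delete["conn_a"] == [{"id": 1}, {"id": 2}]
assert delete["conn_b"] == [{"id": 7}]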
Example 3: find_cycles
def find_cycles(tuples, allitems):
    # straight from gvr with some mods
    todo = set(allitems)

    edges = util.defaultdict(set)
    for parent, child in tuples:
        edges[parent].add(child)

    output = set()

    while todo:
        node = todo.pop()
        stack = [node]
        while stack:
            top = stack[-1]
            for node in edges[top]:
                if node in stack:
                    cyc = stack[stack.index(node):]
                    todo.difference_update(cyc)
                    output.update(cyc)

                if node in todo:
                    stack.append(node)
                    todo.remove(node)
                    break
            else:
                node = stack.pop()
    return output
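Given the function above, a small driver (the edges are illustrative, not taken from SQLAlchemy) shows which nodes it reports as participating in a cycle:

# assuming find_cycles above is in scope
tuples = [(1, 2), (2, 3), (3, 1), (3, 4)]    # 1 -> 2 -> 3 -> 1 forms a cycle
allitems = {1, 2, 3, 4}

assert find_cycles(tuples, allitems) == {1, 2, 3}    # node 4 is not on any cycle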
Example 4: get_indexes
def get_indexes(self, connection, table_name, schema=None, **kw):
    qry = """
    SELECT ix.rdb$index_name AS index_name,
           ix.rdb$unique_flag AS unique_flag,
           ic.rdb$field_name AS field_name
    FROM rdb$indices ix
         JOIN rdb$index_segments ic
              ON ix.rdb$index_name=ic.rdb$index_name
         LEFT OUTER JOIN rdb$relation_constraints
              ON rdb$relation_constraints.rdb$index_name =
                    ic.rdb$index_name
    WHERE ix.rdb$relation_name=? AND ix.rdb$foreign_key IS NULL
      AND rdb$relation_constraints.rdb$constraint_type IS NULL
    ORDER BY index_name, field_name
    """
    c = connection.execute(qry, [self.denormalize_name(table_name)])

    indexes = util.defaultdict(dict)
    for row in c:
        indexrec = indexes[row['index_name']]
        if 'name' not in indexrec:
            indexrec['name'] = self.normalize_name(row['index_name'])
            indexrec['column_names'] = []
            indexrec['unique'] = bool(row['unique_flag'])

        indexrec['column_names'].append(
            self.normalize_name(row['field_name']))

    return indexes.values()
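The defaultdict(dict) lets the loop treat the first row of an index and all later rows uniformly. A self-contained sketch of the same grouping with invented Firebird-style rows:

from collections import defaultdict

# made-up rows standing in for the result set: (index_name, unique_flag, field_name)
rows = [("IX_USER_NAME", 0, "FIRST_NAME"), ("IX_USER_NAME", 0, "LAST_NAME")]

indexes = defaultdict(dict)
for index_name, unique_flag, field_name in rows:
    rec = indexes[index_name]        # fresh empty dict the first time the index is seen
    if "name" not in rec:
        rec.update(name=index_name.lower(), column_names=[], unique=bool(unique_flag))
    rec["column_names"].append(field_name.lower())

assert indexes["IX_USER_NAME"]["column_names"] == ["first_name", "last_name"]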
Example 5: __init__
def __init__(self, session):
    self.session = session
    self.mapper_flush_opts = session._mapper_flush_opts

    # dictionary used by external actors to
    # store arbitrary state information.
    self.attributes = {}

    # dictionary of mappers to sets of
    # DependencyProcessors, which are also
    # set to be part of the sorted flush actions,
    # which have that mapper as a parent.
    self.deps = util.defaultdict(set)

    # dictionary of mappers to sets of InstanceState
    # items pending for flush which have that mapper
    # as a parent.
    self.mappers = util.defaultdict(set)

    # a dictionary of Preprocess objects, which gather
    # additional states impacted by the flush
    # and determine if a flush action is needed
    self.presort_actions = {}

    # dictionary of PostSortRec objects, each
    # one issues work during the flush within
    # a certain ordering.
    self.postsort_actions = {}

    # a set of 2-tuples, each containing two
    # PostSortRec objects where the second
    # is dependent on the first being executed
    # first
    self.dependencies = set()

    # dictionary of InstanceState-> (isdelete, listonly)
    # tuples, indicating if this state is to be deleted
    # or insert/updated, or just refreshed
    self.states = {}

    # tracks InstanceStates which will be receiving
    # a "post update" call.  Keys are mappers,
    # values are a set of states and a set of the
    # columns which should be included in the update.
    self.post_update_states = util.defaultdict(lambda: (set(), set()))
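The last attribute uses a lambda factory so that every mapper lazily gets a fresh (states, columns) pair of sets. A standalone sketch with a made-up mapper key:

from collections import defaultdict

# each mapper key lazily maps to a fresh (states, columns) pair of sets
post_update_states = defaultdict(lambda: (set(), set()))

states, cols = post_update_states["SomeMapper"]    # hypothetical mapper key
states.add("state-1")
cols.update(["col_x", "col_y"])

assert post_update_states["SomeMapper"] == ({"state-1"}, {"col_x", "col_y"})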
Example 6: get_foreign_keys
def get_foreign_keys(self, connection, tablename, schema=None, **kw):
    current_schema = schema or self.default_schema_name
    # Add constraints
    RR = ischema.ref_constraints    # information_schema.referential_constraints
    TC = ischema.constraints        # information_schema.table_constraints
    C = ischema.key_constraints.alias('C')   # information_schema.constraint_column_usage:
                                             # the constrained column
    R = ischema.key_constraints.alias('R')   # information_schema.constraint_column_usage:
                                             # the referenced column

    # Foreign key constraints
    s = sql.select([C.c.column_name,
                    R.c.table_schema, R.c.table_name, R.c.column_name,
                    RR.c.constraint_name, RR.c.match_option, RR.c.update_rule,
                    RR.c.delete_rule],
                   sql.and_(C.c.table_name == tablename,
                            C.c.table_schema == current_schema,
                            C.c.constraint_name == RR.c.constraint_name,
                            R.c.constraint_name == RR.c.unique_constraint_name,
                            C.c.ordinal_position == R.c.ordinal_position
                            ),
                   order_by=[RR.c.constraint_name, R.c.ordinal_position])

    # group rows by constraint ID, to handle multi-column FKs
    fkeys = []
    fknm, scols, rcols = (None, [], [])

    def fkey_rec():
        return {
            'name': None,
            'constrained_columns': [],
            'referred_schema': None,
            'referred_table': None,
            'referred_columns': []
        }

    fkeys = util.defaultdict(fkey_rec)

    for r in connection.execute(s).fetchall():
        scol, rschema, rtbl, rcol, rfknm, fkmatch, fkuprule, fkdelrule = r

        rec = fkeys[rfknm]
        rec['name'] = rfknm
        if not rec['referred_table']:
            rec['referred_table'] = rtbl

            if schema is not None or current_schema != rschema:
                rec['referred_schema'] = rschema

        local_cols, remote_cols = rec['constrained_columns'], rec['referred_columns']

        local_cols.append(scol)
        remote_cols.append(rcol)

    return fkeys.values()
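The fkey_rec factory pattern above recurs in most of the dialect examples below: one record per constraint name, with columns appended as multi-column foreign keys are walked row by row. A minimal sketch with invented rows for a two-column foreign key:

from collections import defaultdict

def fkey_rec():
    return {'name': None, 'constrained_columns': [], 'referred_schema': None,
            'referred_table': None, 'referred_columns': []}

fkeys = defaultdict(fkey_rec)

# two invented rows belonging to the same two-column foreign key
for scol, rtbl, rcol, rfknm in [("order_id", "orders", "id", "FK_ORDER"),
                                ("order_ver", "orders", "ver", "FK_ORDER")]:
    rec = fkeys[rfknm]                       # one shared record per constraint name
    rec['name'] = rfknm
    rec['referred_table'] = rec['referred_table'] or rtbl
    rec['constrained_columns'].append(scol)
    rec['referred_columns'].append(rcol)

assert len(fkeys) == 1
assert fkeys["FK_ORDER"]['constrained_columns'] == ["order_id", "order_ver"]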
Example 7: get_foreign_keys
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
    schema_sel = schema or self.default_schema_name
    c = connection.execute(
        """select t1.constrname as cons_name,
             t4.colname as local_column, t7.tabname as remote_table,
             t6.colname as remote_column, t7.owner as remote_owner
           from sysconstraints as t1 , systables as t2 ,
             sysindexes as t3 , syscolumns as t4 ,
             sysreferences as t5 , syscolumns as t6 , systables as t7 ,
             sysconstraints as t8 , sysindexes as t9
           where t1.tabid = t2.tabid and t2.tabname=? and t2.owner=? and t1.constrtype = 'R'
             and t3.tabid = t2.tabid and t3.idxname = t1.idxname
             and t4.tabid = t2.tabid and t4.colno in (t3.part1, t3.part2, t3.part3,
               t3.part4, t3.part5, t3.part6, t3.part7, t3.part8, t3.part9, t3.part10,
               t3.part11, t3.part12, t3.part13, t3.part14, t3.part15, t3.part16)
             and t5.constrid = t1.constrid and t8.constrid = t5.primary
             and t6.tabid = t5.ptabid and t6.colno in (t9.part1, t9.part2, t9.part3,
               t9.part4, t9.part5, t9.part6, t9.part7, t9.part8, t9.part9, t9.part10,
               t9.part11, t9.part12, t9.part13, t9.part14, t9.part15, t9.part16)
             and t9.idxname = t8.idxname
             and t7.tabid = t5.ptabid""", table_name, schema_sel)

    def fkey_rec():
        return {
            'name': None,
            'constrained_columns': [],
            'referred_schema': None,
            'referred_table': None,
            'referred_columns': []
        }

    fkeys = util.defaultdict(fkey_rec)

    rows = c.fetchall()
    for cons_name, local_column, \
            remote_table, remote_column, remote_owner in rows:
        rec = fkeys[cons_name]
        rec['name'] = cons_name
        local_cols, remote_cols = \
            rec['constrained_columns'], rec['referred_columns']

        if not rec['referred_table']:
            rec['referred_table'] = remote_table
            if schema is not None:
                rec['referred_schema'] = remote_owner

        if local_column not in local_cols:
            local_cols.append(local_column)
        if remote_column not in remote_cols:
            remote_cols.append(remote_column)

    return fkeys.values()
Example 8: conforms_partial_ordering
def conforms_partial_ordering(tuples, sorted_elements):
    """True if the given sorting conforms to the given partial ordering."""

    deps = defaultdict(set)
    for parent, child in tuples:
        deps[parent].add(child)

    for i, node in enumerate(sorted_elements):
        for n in sorted_elements[i:]:
            if node in deps[n]:
                return False
    else:
        return True
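A small driver for the function above (the edges are illustrative): an ordering that respects parent-before-child passes, while one that places a child before its parent does not.

# assuming conforms_partial_ordering above is in scope; (parent, child) means parent must come first
tuples = [(1, 2), (2, 3)]

assert conforms_partial_ordering(tuples, [1, 2, 3]) is True
assert conforms_partial_ordering(tuples, [3, 1, 2]) is False    # 3 appears before its parent 2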
Example 9: assert_sort
def assert_sort(self, tuples, allitems=None):
    if allitems is None:
        allitems = self._nodes_from_tuples(tuples)
    else:
        allitems = self._nodes_from_tuples(tuples).union(allitems)

    result = list(topological.sort(tuples, allitems))

    deps = util.defaultdict(set)
    for parent, child in tuples:
        deps[parent].add(child)

    assert len(result)
    for i, node in enumerate(result):
        for n in result[i:]:
            assert node not in deps[n]
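For context, the topological.sort being asserted against can also be exercised directly; this sketch assumes it is importable as sqlalchemy.util.topological.sort (true in recent releases), and the table names are made up:

from sqlalchemy.util import topological   # import path assumed; present in recent releases

tuples = [("users", "addresses"), ("addresses", "emails")]   # parent before child
allitems = ["users", "addresses", "emails"]

assert list(topological.sort(tuples, allitems)) == ["users", "addresses", "emails"]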
Example 10: get_foreign_keys
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
    schema_int = schema or connection.engine.url.database
    sql_stmnt = "SELECT constraint_name, column_name, referenced_schema, referenced_table, " \
                "referenced_column FROM SYS.EXA_ALL_CONSTRAINT_COLUMNS " \
                "WHERE constraint_type = 'FOREIGN KEY' AND constraint_table = :table_name " \
                "AND constraint_schema = "
    if schema_int is None:
        sql_stmnt += "CURRENT_SCHEMA "
    else:
        sql_stmnt += ":schema "
    sql_stmnt += "ORDER BY ordinal_position"
    rp = connection.execute(sql.text(sql_stmnt),
                            table_name=self.denormalize_name(table_name),
                            schema=self.denormalize_name(schema_int))
    constraint_data = rp.fetchall()

    def fkey_rec():
        return {
            'name': None,
            'constrained_columns': [],
            'referred_schema': None,
            'referred_table': None,
            'referred_columns': []
        }

    fkeys = util.defaultdict(fkey_rec)

    for row in constraint_data:
        (cons_name, local_column, remote_schema, remote_table, remote_column) = \
            (row[0], row[1], row[2], row[3], row[4])
        rec = fkeys[self.normalize_name(cons_name)]
        rec['name'] = self.normalize_name(cons_name)

        local_cols, remote_cols = rec['constrained_columns'], rec['referred_columns']

        if not rec['referred_table']:
            rec['referred_table'] = self.normalize_name(remote_table)
            # we need to take care of calls without schema. the sqla test suite
            # expects referred_schema to be None if None is passed in to this function
            if schema is None and schema_int == self.normalize_name(remote_schema):
                rec['referred_schema'] = None
            else:
                rec['referred_schema'] = self.normalize_name(remote_schema)

        local_cols.append(self.normalize_name(local_column))
        remote_cols.append(self.normalize_name(remote_column))

    return fkeys.values()
Example 11: all_partial_orderings
def all_partial_orderings(tuples, elements):
    edges = defaultdict(set)
    for parent, child in tuples:
        edges[child].add(parent)

    def _all_orderings(elements):
        if len(elements) == 1:
            yield list(elements)
        else:
            for elem in elements:
                subset = set(elements).difference([elem])
                if not subset.intersection(edges[elem]):
                    for sub_ordering in _all_orderings(subset):
                        yield [elem] + sub_ordering

    return iter(_all_orderings(elements))
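A quick driver for the generator above (edges chosen for illustration): with 1 required before both 2 and 3, exactly two orderings are valid.

# assuming all_partial_orderings above is in scope
tuples = [(1, 2), (1, 3)]         # 1 must precede both 2 and 3
orderings = list(all_partial_orderings(tuples, {1, 2, 3}))

assert sorted(orderings) == [[1, 2, 3], [1, 3, 2]]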
Example 12: get_foreign_keys
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
    # FK
    c = connection.execute(
        """select t1.constrname as cons_name , t1.constrtype as cons_type ,
             t4.colname as local_column , t7.tabname as remote_table ,
             t6.colname as remote_column
           from sysconstraints as t1 , systables as t2 ,
             sysindexes as t3 , syscolumns as t4 ,
             sysreferences as t5 , syscolumns as t6 , systables as t7 ,
             sysconstraints as t8 , sysindexes as t9
           where t1.tabid = t2.tabid and t2.tabname=? and t1.constrtype = 'R'
             and t3.tabid = t2.tabid and t3.idxname = t1.idxname
             and t4.tabid = t2.tabid and t4.colno = t3.part1
             and t5.constrid = t1.constrid and t8.constrid = t5.primary
             and t6.tabid = t5.ptabid and t6.colno = t9.part1 and t9.idxname =
             t8.idxname
             and t7.tabid = t5.ptabid""", table_name.lower())

    def fkey_rec():
        return {
            'name': None,
            'constrained_columns': [],
            'referred_schema': None,
            'referred_table': None,
            'referred_columns': []
        }

    fkeys = util.defaultdict(fkey_rec)

    rows = c.fetchall()
    for cons_name, cons_type, local_column, \
            remote_table, remote_column in rows:
        rec = fkeys[cons_name]
        rec['name'] = cons_name
        local_cols, remote_cols = \
            rec['constrained_columns'], rec['referred_columns']

        if not rec['referred_table']:
            rec['referred_table'] = remote_table

        local_cols.append(local_column)
        remote_cols.append(remote_column)

    return fkeys.values()
Example 13: get_foreign_keys
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
    # Query to extract the details of each UK/FK of the given table
    fkqry = """
    SELECT rc.rdb$constraint_name AS cname,
           cse.rdb$field_name AS fname,
           ix2.rdb$relation_name AS targetrname,
           se.rdb$field_name AS targetfname
    FROM rdb$relation_constraints rc
         JOIN rdb$indices ix1 ON ix1.rdb$index_name=rc.rdb$index_name
         JOIN rdb$indices ix2 ON ix2.rdb$index_name=ix1.rdb$foreign_key
         JOIN rdb$index_segments cse ON
                    cse.rdb$index_name=ix1.rdb$index_name
         JOIN rdb$index_segments se
              ON se.rdb$index_name=ix2.rdb$index_name
                 AND se.rdb$field_position=cse.rdb$field_position
    WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
    ORDER BY se.rdb$index_name, se.rdb$field_position
    """
    tablename = self.denormalize_name(table_name)

    c = connection.execute(fkqry, ["FOREIGN KEY", tablename])
    fks = util.defaultdict(
        lambda: {
            "name": None,
            "constrained_columns": [],
            "referred_schema": None,
            "referred_table": None,
            "referred_columns": [],
        }
    )

    for row in c:
        cname = self.normalize_name(row["cname"])
        fk = fks[cname]
        if not fk["name"]:
            fk["name"] = cname
            fk["referred_table"] = self.normalize_name(row["targetrname"])
        fk["constrained_columns"].append(self.normalize_name(row["fname"]))
        fk["referred_columns"].append(
            self.normalize_name(row["targetfname"])
        )
    return list(fks.values())
Example 14: sort_as_subsets
def sort_as_subsets(tuples, allitems):

    edges = util.defaultdict(set)
    for parent, child in tuples:
        edges[child].add(parent)

    todo = set(allitems)

    while todo:
        output = set()
        for node in list(todo):
            if not todo.intersection(edges[node]):
                output.add(node)

        if not output:
            raise CircularDependencyError(
                "Circular dependency detected: cycles: %r all edges: %s" %
                (find_cycles(tuples, allitems), _dump_edges(edges, True)))

        todo.difference_update(output)
        yield output
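A short driver for the generator above, using an illustrative diamond-shaped dependency graph; each yielded set is one "level" whose members have no remaining unprocessed parents.

# assuming sort_as_subsets above is in scope
tuples = [(1, 2), (1, 3), (2, 4), (3, 4)]    # 1 before 2 and 3, both before 4
allitems = {1, 2, 3, 4}

assert list(sort_as_subsets(tuples, allitems)) == [{1}, {2, 3}, {4}]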
Example 15: get_foreign_keys
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
    schema_int = schema or connection.engine.url.database
    if schema_int is None:
        schema_int = connection.execute("select CURRENT_SCHEMA from dual").scalar()
    table_name = self.denormalize_name(table_name)

    def fkey_rec():
        return {
            'name': None,
            'constrained_columns': [],
            'referred_schema': None,
            'referred_table': None,
            'referred_columns': []
        }

    fkeys = util.defaultdict(fkey_rec)

    for row in self._get_all_constraints(connection, schema=schema,
                                         info_cache=kw.get("info_cache")):
        if (row[5] != table_name and table_name is not None) or row[6] != 'FOREIGN KEY':
            continue
        (cons_name, local_column, remote_schema, remote_table, remote_column) = \
            (row[0], row[1], row[2], row[3], row[4])
        rec = fkeys[self.normalize_name(cons_name)]
        rec['name'] = self.normalize_name(cons_name)

        local_cols, remote_cols = rec['constrained_columns'], rec['referred_columns']

        if not rec['referred_table']:
            rec['referred_table'] = self.normalize_name(remote_table)
            # we need to take care of calls without schema. the sqla test suite
            # expects referred_schema to be None if None is passed in to this function
            if schema is None and self.normalize_name(schema_int) == self.normalize_name(remote_schema):
                rec['referred_schema'] = None
            else:
                rec['referred_schema'] = self.normalize_name(remote_schema)

        local_cols.append(self.normalize_name(local_column))
        remote_cols.append(self.normalize_name(remote_column))

    return list(fkeys.values())