本文整理汇总了Python中sqlalchemy.sql.insert函数的典型用法代码示例。如果您正苦于以下问题:Python insert函数的具体用法?Python insert怎么用?Python insert使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了insert函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_commit
async def test_commit(self, engines, binds):
    """Verify Session commit semantics across two binds.

    Inserts rows into tables bound to two different engines, re-reads
    them from fresh sessions, then checks that rows explicitly committed
    before a failure remain visible after the failed session exits.
    """
    test_table1 = self.test_models['db1'].test_table1
    test_table2 = self.test_models['db2'].test_table2
    async with Session(engines, binds) as session:
        q = sql.insert(test_table1).values(id=5, title='test_title')
        await session.execute(q)
        q = sql.insert(test_table2).values(id=10, title='test_title2')
        await session.execute(q)
    # A brand-new session must observe both rows.
    async with Session(engines, binds) as session:
        q = sql.select(test_table1.c).where(test_table1.c.id == 5)
        rows = await session.execute(q)
        self.assertEqual(rows.rowcount, 1)
        q = sql.select(test_table2.c).where(test_table2.c.id == 10)
        rows = await session.execute(q)
        self.assertEqual(rows.rowcount, 1)
    try:
        async with Session(engines, binds) as session:
            q = sql.insert(test_table1).values(id=5, title='test_title')
            await session.execute(q)
            # NOTE(review): commit() is not awaited here; confirm it is a
            # synchronous method on this Session implementation.
            session.commit()
            q = sql.insert(test_table2).values(id=10, title='test_title2')
            await session.execute(q)
            session.commit()
            # Force an abnormal exit from the session context.
            raise Exception
    except Exception:
        # Was a bare `except:`, which would also swallow SystemExit and
        # KeyboardInterrupt; Exception suffices for the raise above.
        pass
    # Rows committed before the failure must still be present.
    async with Session(engines, binds) as session:
        q = sql.select(test_table1.c).where(test_table1.c.id == 5)
        rows = await session.execute(q)
        self.assertEqual(rows.rowcount, 1)
        q = sql.select(test_table2.c).where(test_table2.c.id == 10)
        rows = await session.execute(q)
        self.assertEqual(rows.rowcount, 1)
示例2: fill_geo
def fill_geo(self, election, i=0, parent_container_id=None):
    """Recursively populate the geo/candidates/votes_check tables.

    Walks the nested `election` dict level by level (driven by
    self.levels / self.titles), inserting one `geo` row per named
    region, its candidates grouped by position, and its voter totals,
    then recursing into child regions.

    :param election: nested dict describing one level of the hierarchy.
    :param i: index into self.levels / self.titles for this depth.
    :param parent_container_id: geo id of the enclosing region, or None
        at the root.
    """
    level = self.levels[i]
    title = self.titles[i]
    # `dict.has_key()` was removed in Python 3; the `in` operator is
    # equivalent and works on both Python 2 and 3.
    if title in election:
        for name, contents in election[title].items():
            values = {'container_id': parent_container_id,
                      'name': name, 'type': level}
            r1 = self.session.execute(sql.insert(self.tables['geo'], values))
            my_id = r1.inserted_primary_key[0]
            if 'candidates' in contents:
                for position, candidates in contents['candidates'].items():
                    for candidate in candidates:
                        values = {'name': candidate['name'],
                                  'party_id': self.parties[candidate['party']],
                                  'position': position,
                                  'container_id': my_id}
                        self.session.execute(sql.insert(self.tables['candidates'],
                                                        values))
            if 'voters' in contents:
                values = {'voting_center_id': my_id,
                          'total_votes': contents['voters']
                          }
                self.session.execute(sql.insert(self.tables['votes_check'], values))
            # Recurse into child regions until the deepest level.
            if i < len(self.levels) - 1:
                self.fill_geo(contents, i + 1, my_id)
示例3: __init__
def __init__(self):
    """Create the ORM engine and pre-build all reusable statements."""
    self._orm_engine = engine_from_config({
        'url': CONF.orm.url
    }, prefix='')

    md = MetaData()
    alarm_action = models.create_alarm_action_model(md).alias('aa')
    notification = models.create_notification_method_model(md).alias('nm')
    nmt_table = models.create_notification_method_type_model(md)
    method_type = nmt_table.alias('nmt')
    alarm = models.create_alarm_model(md).alias('a')

    # Notification methods wired to a given alarm definition and state.
    self._orm_query = (
        select([notification.c.id, notification.c.type, notification.c.name,
                notification.c.address, notification.c.period])
        .select_from(alarm_action.join(
            notification, alarm_action.c.action_id == notification.c.id))
        .where(and_(
            alarm_action.c.alarm_definition_id == bindparam('alarm_definition_id'),
            alarm_action.c.alarm_state == bindparam('alarm_state'))))

    # Current state of a single alarm by id.
    self._orm_get_alarm_state = select([alarm.c.state]).where(
        alarm.c.id == bindparam('alarm_id'))

    # All known notification method type names.
    self._orm_nmt_query = select([method_type.c.name])

    # Details of one notification method by id.
    self._orm_get_notification = select(
        [notification.c.name, notification.c.type,
         notification.c.address, notification.c.period]).where(
        notification.c.id == bindparam('notification_id'))

    # INSERT used to register a new notification method type.
    self._orm_add_notification_type = insert(nmt_table).values(
        name=bindparam('b_name'))

    self._orm = None
示例4: _upsert_generic
def _upsert_generic(self, table, items, annotations):
    """Upsert a batch of items one at a time, trying INSERT then UPDATE.

    This is a tremendously inefficient way to write a batch of items,
    but it's guaranteed to work without special cooperation from the
    database. For MySQL we use the much improved _upsert_onduplicatekey.

    :returns: the number of rows newly created (updated rows not counted).
    :raises ValueError: if an existing item lacks a primary-key column.
    """
    batch_userid = items[0].get("userid")
    created = 0
    for row in items:
        # The whole batch must belong to a single user.
        assert row.get("userid") == batch_userid
        try:
            # Optimistic INSERT; an existing row raises IntegrityError.
            stmt = insert(table).values(**row)
            self.execute(stmt, row, annotations).close()
            created += 1
        except IntegrityError:
            # Fall back to UPDATE: primary-key columns go into the WHERE
            # clause, every remaining column into the SET clause.
            remaining = row.copy()
            stmt = update(table)
            for pk_col in table.primary_key:
                try:
                    stmt = stmt.where(pk_col == remaining.pop(pk_col.name))
                except KeyError:
                    raise ValueError(
                        "Item is missing primary key column %r" % (pk_col.name,))
            stmt = stmt.values(**remaining)
            self.execute(stmt, remaining, annotations).close()
    return created
示例5: _test_execute
async def _test_execute(self, engines, binds):
    """Exercise INSERT/SELECT/UPDATE/DELETE through a single Session."""
    table1 = self.test_models['db1'].test_table1
    table2 = self.test_models['db2'].test_table2  # unused here; kept for parity
    async with Session(engines, binds) as session:
        # INSERT a row and check the resulting row id.
        res = await session.execute(
            sql.insert(table1).values(id=5, title='test_title'))
        self.assertEqual(res.lastrowid, 5)
        # SELECT it back and verify both columns.
        res = await session.execute(
            sql.select(table1.c).where(table1.c.id == 5))
        self.assertEqual(res.rowcount, 1)
        rows = list(res)
        self.assertEqual(rows[0]['id'], 5)
        self.assertEqual(rows[0]['title'], 'test_title')
        # UPDATE the title; exactly one row must change.
        res = await session.execute(
            sql.update(table1).where(table1.c.id == 5).values(
                title='test_title2'))
        self.assertEqual(res.rowcount, 1)
        res = await session.execute(
            sql.select(table1.c).where(table1.c.id == 5))
        self.assertEqual(res.rowcount, 1)
        rows = list(res)
        self.assertEqual(rows[0]['id'], 5)
        self.assertEqual(rows[0]['title'], 'test_title2')
        # DELETE the row and verify it is gone.
        res = await session.execute(
            sql.delete(table1).where(table1.c.id == 5))
        self.assertEqual(res.rowcount, 1)
        res = await session.execute(
            sql.select(table1.c).where(table1.c.id == 5))
        self.assertEqual(res.rowcount, 0)
示例6: update_suites
def update_suites(status, conf, session, mirror):
    """update stage: sweep and recreate suite mappings

    For each suite in the mirror, deletes the old Suite rows and bulk
    re-inserts package->suite mappings (flushing every
    BULK_FLUSH_THRESHOLD rows), refreshes suite info/aliases, and
    rewrites the sources.txt cache file.
    """
    logging.info('update suites mappings...')
    insert_q = sql.insert(Suite.__table__)
    insert_params = []
    # load suites aliases
    suites_aliases = mirror.ls_suites_with_aliases()
    if not conf['dry_run'] and 'db' in conf['backends']:
        session.query(SuiteAlias).delete()
    for (suite, pkgs) in six.iteritems(mirror.suites):
        if not conf['dry_run'] and 'db' in conf['backends']:
            # Sweep old mappings for this suite before re-inserting.
            session.query(Suite).filter_by(suite=suite).delete()
        for pkg_id in pkgs:
            (pkg, version) = pkg_id
            db_package = db_storage.lookup_package(session, pkg, version)
            if not db_package:
                # logging.warn is a deprecated alias of logging.warning
                logging.warning('package %s/%s not found in suite %s, skipping'
                                % (pkg, version, suite))
            else:
                logging.debug('add suite mapping: %s/%s -> %s'
                              % (pkg, version, suite))
                params = {'package_id': db_package.id,
                          'suite': suite}
                insert_params.append(params)
                if pkg_id in status.sources:
                    # fill-in incomplete suite information in status
                    status.sources[pkg_id][-1].append(suite)
                else:
                    # defensive measure to make update_suites() more reusable
                    logging.warning('cannot find %s/%s during suite update'
                                    % (pkg, version))
            if not conf['dry_run'] and 'db' in conf['backends'] \
                    and len(insert_params) >= BULK_FLUSH_THRESHOLD:
                # Bulk-flush accumulated mappings to keep memory bounded.
                session.execute(insert_q, insert_params)
                session.flush()
                insert_params = []
        if not conf['dry_run'] and 'db' in conf['backends']:
            session.query(SuiteInfo).filter_by(name=suite).delete()
            _add_suite(conf, session, suite, aliases=suites_aliases[suite])
    if not conf['dry_run'] and 'db' in conf['backends'] \
            and insert_params:
        # Flush whatever is left after the loop.
        session.execute(insert_q, insert_params)
        session.flush()
    # update sources.txt, now that we know the suite mappings
    src_list_path = os.path.join(conf['cache_dir'], 'sources.txt')
    with open(src_list_path + '.new', 'w') as src_list:
        for pkg_id, src_entry in six.iteritems(status.sources):
            fields = list(pkg_id)
            fields.extend(src_entry[:-1])  # all except suites
            # string.join(seq, sep) is Python-2-only; sep.join(seq) is
            # equivalent and works on both Python 2 and 3.
            fields.append(','.join(src_entry[-1]))
            src_list.write('\t'.join(fields) + '\n')
    # Atomic replace of the old sources.txt.
    os.rename(src_list_path + '.new', src_list_path)
示例7: subscribe_to_level
def subscribe_to_level():
    """Subscribe the user identified by form field `token` to level `id`.

    Returns a success status payload, or an error payload when a form
    field is missing or the token does not resolve to a user.
    """
    try:
        # request.form["key"] raises on a missing key, so the original
        # `is None` checks (and MissingInformation) were unreachable;
        # .get() returns None and makes the error path work as intended.
        token = request.form.get("token")
        level_id = request.form.get("id")
        if token is None:
            raise MissingInformation("token")
        if level_id is None:
            raise MissingInformation("id")
    except MissingInformation as e:
        return make_error(e.message)
    try:
        user_id = get_user_id_from_token(token)
    except InvalidInformation as e:
        return make_error(e.message)
    print(level_id, user_id)
    conn = engine.connect()
    try:
        query = sql.insert(
            Subscription,
            values={Subscription.level_id: level_id,
                    Subscription.user_id: user_id}
        )
        conn.execute(query)
    finally:
        # Always return the connection to the pool.
        conn.close()
    return make_status("success", "Subscribed to level")
示例8: create_user
def create_user(self, username, password, email, **extra_fields):
    """Create a new user account.

    Returns a populated User object on success, or False when the
    username already exists or the row was not inserted.
    (NOTE(review): the old docstring claimed "Returns True on success";
    callers actually receive a User mapping or False.)

    :param username: login name; must be unique.
    :param password: clear-text password, hashed with sscrypt before storage.
    :param email: contact address, stored in the `mail` column.
    :param extra_fields: optional overrides; only userid, accountStatus,
        mailVerified and syncNode are honoured, others are ignored.
    :raises BackendError: when creation of new users is disabled.
    """
    if not self.allow_new_users:
        raise BackendError("Creation of new users is disabled")
    # Never store the clear-text password.
    password_hash = sscrypt(password)
    values = {
        'username': username,
        'password': password_hash,
        'mail': email,
    }
    # Only a whitelisted subset of extra fields may be set explicitly.
    for field in ('userid', 'accountStatus', 'mailVerified', 'syncNode'):
        if field in extra_fields:
            values[field] = extra_fields[field]
    query = insert(users).values(**values)
    try:
        res = safe_execute(self._engine, query)
    except IntegrityError:
        # Name already exists
        return False
    if res.rowcount != 1:
        return False
    # need a copy with some of the info for the return value
    userobj = User()
    userobj['username'] = username
    userobj['userid'] = res.lastrowid
    userobj['mail'] = email
    return userobj
示例9: _get_or_create_nevra
def _get_or_create_nevra(self, nevra):
    """Return the Dependency entry for `nevra`, creating it if needed.

    `nevra` is a (name, epoch, version, release, arch) tuple.  Lookup
    order: in-memory cache (self.nevras), then the database, then an
    INSERT ... RETURNING.  The hits/misses/inserts counters are updated
    accordingly and the result is (re)registered in the cache.
    """
    dep = self.nevras.get(nevra)
    if dep is None:
        # Cache miss: look the row up in the database first.
        dep = self.db.query(*Dependency.inevra)\
            .filter((Dependency.name == nevra[0]) &
                    (Dependency.epoch == nevra[1]) &
                    (Dependency.version == nevra[2]) &
                    (Dependency.release == nevra[3]) &
                    (Dependency.arch == nevra[4]))\
            .first()
        if dep is None:
            # Not in the DB either: insert it and fetch the generated id.
            kwds = dict(name=nevra[0], epoch=nevra[1], version=nevra[2],
                        release=nevra[3], arch=nevra[4])
            dep_id = (
                self.db.execute(
                    insert(
                        Dependency,
                        [kwds],
                        returning=(Dependency.id,)
                    )
                )
                .fetchone().id
            )
            dep = DepTuple(id=dep_id, **kwds)
            self.inserts += 1
        else:
            self.misses += 1
        # Register the freshly loaded/created entry in the cache.
        self._add(dep)
    else:
        self.hits += 1
        # Refresh the entry's recency in the cache.
        self._access(dep)
    return dep
示例10: save_logbook
def save_logbook(self, book):
    """Insert or update the given logbook and its flow details.

    If a row with the book's uuid already exists, the stored book is
    loaded, merged with `book`, and updated in place; otherwise a new
    row is inserted.  Flow details are inserted/updated either way.

    :returns: the merged persisted book (update path) or `book` itself
        (insert path).
    :raises exc.StorageFailure: chained from any underlying DBAPI error.
    """
    try:
        logbooks = self._tables.logbooks
        # One transaction covers the book row and all flow details.
        with self._engine.begin() as conn:
            q = (sql.select([logbooks]).
                 where(logbooks.c.uuid == book.uuid))
            row = conn.execute(q).first()
            if row:
                # Existing book: merge the incoming state into it.
                e_lb = self._converter.convert_book(row)
                self._converter.populate_book(conn, e_lb)
                e_lb.merge(book)
                conn.execute(sql.update(logbooks)
                             .where(logbooks.c.uuid == e_lb.uuid)
                             .values(e_lb.to_dict()))
                for fd in book:
                    e_fd = e_lb.find(fd.uuid)
                    if e_fd is None:
                        # Flow detail not yet stored for this book.
                        e_lb.add(fd)
                        self._insert_flow_details(conn, fd, e_lb.uuid)
                    else:
                        self._update_flow_details(conn, fd, e_fd)
                return e_lb
            else:
                # New book: plain insert plus its flow details.
                conn.execute(sql.insert(logbooks, book.to_dict()))
                for fd in book:
                    self._insert_flow_details(conn, fd, book.uuid)
                return book
    except sa_exc.DBAPIError:
        exc.raise_with_cause(
            exc.StorageFailure,
            "Failed saving logbook '%s'" % book.uuid)
示例11: set_collection
def set_collection(self, user_id, collection_name, **values):
    """Creates a collection.

    No-op when the collection already exists; otherwise computes the
    next free collectionid for the user and inserts the row.

    :returns: the new collectionid, or None when the collection existed.
    """
    # XXX values is not used for now because there are no values besides
    # the name
    if self.collection_exists(user_id, collection_name):
        return
    values['userid'] = user_id
    values['name'] = collection_name
    if self.standard_collections:
        # Reserve ids below min_id for the standard collections.
        ids = _STANDARD_COLLECTIONS.keys()
        min_id = max(ids) + 1
    else:
        min_id = 0
    # getting the max collection_id
    # XXX why don't we have an autoinc here ?
    # see https://bugzilla.mozilla.org/show_bug.cgi?id=579096
    # NOTE(review): this max+1 scheme is not concurrency-safe — two
    # writers can pick the same next_id; see the bug link above.
    next_id = -1
    while next_id < min_id:
        query = self._get_query('COLLECTION_NEXTID', user_id)
        max_ = self._do_query_fetchone(query, user_id=user_id)
        if max_[0] is None:
            # No collections yet for this user.
            next_id = min_id
        else:
            next_id = max_[0] + 1
    # insertion
    values['collectionid'] = next_id
    query = insert(collections).values(**values)
    self._do_query(query, **values)
    return next_id
示例12: upgrade
def upgrade(migrate_engine):
    """Migration: add discussions to artworks and users.

    Creates the Discussion and Comment tables, adds a (temporarily
    nullable) discussion_id column to Artwork and User, backfills one
    new Discussion row per existing artwork/user inside a transaction,
    then makes the column NOT NULL.
    """
    TableBase.metadata.bind = migrate_engine
    Discussion.__table__.create()
    Comment.__table__.create()
    # Column must start nullable: existing rows have no discussion yet.
    Artwork.__table__.c.discussion_id.nullable = True
    Artwork.__table__.c.discussion_id.create()
    User.__table__.c.discussion_id.nullable = True
    User.__table__.c.discussion_id.create()
    # Create a new discussion for each artwork and user
    conn = migrate_engine.connect()
    tr = conn.begin()
    for table in Artwork, User:
        for id, in conn.execute( sql.select([table.id]) ):
            # Empty insert: all Discussion columns take their defaults.
            res = conn.execute( sql.insert(Discussion.__table__) )
            discussion_id = res.inserted_primary_key[0]
            conn.execute(sql.update(
                table.__table__,
                table.__table__.c.id == id,
                dict(discussion_id=discussion_id),
            ))
    tr.commit()
    # Every row now has a discussion; enforce NOT NULL from here on.
    Artwork.__table__.c.discussion_id.alter(nullable=False)
    User.__table__.c.discussion_id.alter(nullable=False)
示例13: add_as_notified
def add_as_notified(self, url_id):
    """Record that the URL identified by `url_id` was notified today."""
    self.md.clear()
    meta = MetaData(self.engine)
    # Reflect the existing table definition from the database.
    notification = Table('notification', meta, autoload=True)
    stmt = insert(notification).values(
        url_id=url_id,
        notified_date=datetime.now().strftime('%Y%m%d'))
    stmt.execute()
示例14: _append_user
def _append_user(self):
    """Add new recommend user."""
    self.md.clear()
    # Reflect the user table and insert a row for this user's name.
    user_table = Table('user', self.md, autoload=True)
    insert(user_table).values(name=self.name).execute()
    # TODO: Change logic.
    new_id = self._load_user_no()
    logging.info('Add new user(id={}, name={}).'.format(new_id, self.name))
    return new_id
示例15: test_get_engine
async def test_get_engine(self, engines, binds):
    """get_engine must route every statement type to its bound engine.

    Builds insert/update/delete/select statements against tables bound
    to two different engines, checks each resolves to the right engine,
    and verifies that a non-statement argument raises OrmError.
    """
    test_table1 = self.test_models['db1'].test_table1
    test_table2 = self.test_models['db2'].test_table2
    async with Session(engines, binds) as session:
        i1 = sql.insert(test_table1)
        i2 = sql.insert(test_table2)
        u1 = sql.update(test_table1)
        u2 = sql.update(test_table2)
        d1 = sql.delete(test_table1)
        d2 = sql.delete(test_table2)
        # BUG FIX: s1/s2 were built with sql.insert(), duplicating i1/i2
        # and leaving SELECT routing untested; use sql.select() as the
        # i/u/d/s naming pattern intends.
        s1 = sql.select(test_table1.c)
        s2 = sql.select(test_table2.c)
        for q1, q2 in [(i1, i2), (u1, u2), (d1, d2), (s1, s2)]:
            engine1 = session.get_engine(q1)
            engine2 = session.get_engine(q2)
            self.assertEqual(engine1, engines['db1'])
            self.assertEqual(engine2, engines['db2'])
        # Anything that is not a SQL statement must be rejected.
        with self.assertRaises(exc.OrmError):
            session.get_engine('error query')