本文整理汇总了Python中sqlalchemy.sql.expression.bindparam函数的典型用法代码示例。如果您正苦于以下问题:Python bindparam函数的具体用法?Python bindparam怎么用?Python bindparam使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了bindparam函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: add_father_ids
def add_father_ids(engine):
    """Populate the fatherid column of the categories table.

    For every category row (except the "" and "Top" pseudo-rows) the father
    topic is derived by stripping the category's own title (plus separator)
    from the end of its Topic path; the matching father row's catid is then
    written back as this row's fatherid.

    :param engine: SQLAlchemy engine bound to the categories database.
    """
    ct = table_scheme.categories_t
    connection = engine.connect()
    # prepared statements: look a category up by topic; update a child's
    # fatherid by its catid
    selection = ct.select().where(ct.c.Topic == bindparam('f_topic'))
    fid_update = ct.update().where(ct.c.catid == bindparam('child_id')).values(fatherid=bindparam('fatherid_'))
    all_categories = connection.execute('SELECT * FROM categories')
    counter = 0
    sys.stdout.write('\n')
    for row in all_categories:
        counter += 1
        topic = row.Topic
        title = row.Title
        catid = row.catid
        if catid < 3:  # ignore "" and "Top"
            continue
        # Father topic is the full topic minus the trailing "/<title>".
        index = len(topic) - (len(title) + 1)
        father_topic = topic[:index]
        father_selection = connection.execute(selection, f_topic=father_topic)
        father = father_selection.first()
        if father is None:  # fixed: identity check instead of '== None'
            LOG.debug('Found no father for "{0}", searched for "{1}"'.format(topic, father_topic))
            continue
        father_id = father[ct.c.catid]
        connection.execute(fid_update, child_id=catid, fatherid_=father_id)
        # Lightweight progress indicator for long-running imports.
        if counter % 10000 == 0:
            sys.stdout.write('.')
        if counter % 200000 == 0:
            sys.stdout.write(' - {0} ids generated\n'.format(counter))
        sys.stdout.flush()
    # fixed: bare 'print' was a Python-2 leftover (a no-op expression on
    # Python 3); print() emits the intended terminating newline
    print()
示例2: list_expired_dids
def list_expired_dids(worker_number=None, total_workers=None, limit=None, session=None):
    """
    List expired data identifiers.

    :param worker_number: id of this worker, used to shard the expired DIDs
        across workers by hashing the DID name. NOTE(review): presumably
        1-based, since 0 is falsy and would disable sharding below —
        confirm against callers.
    :param total_workers: total number of workers sharing the scan.
    :param limit: limit number.
    :param session: The database session in use.
    :returns: list of dicts with 'scope', 'name' and 'did_type' keys.
    """
    query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\
        filter(models.DataIdentifier.expired_at < datetime.utcnow()).\
        order_by(models.DataIdentifier.expired_at).\
        with_hint(models.DataIdentifier, "index(DIDS DIDS_EXPIRED_AT_IDX)", 'oracle')
    # Shard the name space across workers with a dialect-specific hash
    # expression; each backend needs its own SQL.
    if worker_number and total_workers and total_workers - 1 > 0:
        if session.bind.dialect.name == 'oracle':
            bindparams = [bindparam('worker_number', worker_number-1), bindparam('total_workers', total_workers-1)]
            query = query.filter(text('ORA_HASH(name, :total_workers) = :worker_number', bindparams=bindparams))
        elif session.bind.dialect.name == 'mysql':
            query = query.filter('mod(md5(name), %s) = %s' % (total_workers - 1, worker_number - 1))
        elif session.bind.dialect.name == 'postgresql':
            query = query.filter('mod(abs((\'x\'||md5(name))::bit(32)::int), %s) = %s' % (total_workers-1, worker_number-1))
        elif session.bind.dialect.name == 'sqlite':
            # SQLite has no suitable SQL hash function, so hash in Python
            # while streaming rows and return directly from this branch.
            row_count = 0
            dids = list()
            for scope, name, did_type in query.yield_per(10):
                if int(md5(name).hexdigest(), 16) % total_workers == worker_number-1:
                    dids.append({'scope': scope, 'name': name, 'did_type': did_type})
                    row_count += 1
                if limit and row_count >= limit:
                    return dids
            return dids
    if limit:
        query = query.limit(limit)
    return [{'scope': scope, 'name': name, 'did_type': did_type} for scope, name, did_type in query]
示例3: update_item_saved_info
def update_item_saved_info(item):
    """Fan an item out to the feed of every follower of its owner.

    Looks up all users who follow item['owner_id'] in user_following and
    bulk-inserts one row per follower into the user_feed table.

    :param item: mapping with at least 'owner_id' and 'item_id' keys.
    """
    engine = get_onitsuka_db_engine()
    item_owner_id = item['owner_id']
    item_id = item['item_id']
    user_following = Table('user_following', metaData, autoload=True, autoload_with=engine)
    s = select([user_following.c.user_id], (user_following.c.following_id == item_owner_id))
    result = engine.execute(s)
    user_feed_update_list = list()
    for follower in result:
        item_owner_follower_id = follower['user_id']
        # fixed: 'print x' statement was Python-2-only syntax
        print(item_owner_follower_id)
        user_feed_update_item = {}
        user_feed_update_item['user_id'] = item_owner_follower_id
        user_feed_update_item['owner_id'] = item_owner_id
        user_feed_update_item['item_id'] = item_id
        user_feed_update_list.append(user_feed_update_item)
    result.close()
    # Single executemany insert driven by bound parameters.
    user_feed_table = Table('user_feed', metaData, autoload=True, autoload_with=engine)
    ins = user_feed_table.insert().values(user_id=bindparam('user_id'), owner_id=bindparam('owner_id'), item_id=bindparam('item_id'))
    engine.execute(ins, user_feed_update_list)
示例4: run
def run(self):
    """Insert SNP-to-phenotype links read from a tab-separated input file.

    Each input line holds an rsid and a phenotype name; the snp and
    phenotype primary keys are resolved with scalar sub-selects, and the
    (snp_id, phenotype_id) pairs are bulk inserted using the backend's
    "ignore duplicates" variant.
    """
    session = self.session()
    engine = session._WopMarsSession__session.bind
    conn = engine.connect()
    #
    snp2phenotype_path = self.input_file(InsertSNP2Phenotype.__input_file_snp2phenotype)
    snp_model = self.input_table(InsertSNP2Phenotype.__input_table_snp)
    phenotype_model = self.input_table(InsertSNP2Phenotype.__input_table_phenotype)
    snp2phenotype_model = self.output_table(InsertSNP2Phenotype.__output_table_snp2phenotype)
    #
    # read input file
    # fixed: dropped an unused pandas.read_table call that parsed the same
    # file a second time for no effect; the file is now also closed
    # deterministically via a context manager
    input_file_obj_list = []
    with open(snp2phenotype_path, 'r', encoding='utf-8') as input_file:
        for line in csv.reader(input_file, delimiter="\t"):
            snp_rsid = int(line[0])
            phenotype_name = line[1]
            input_file_obj_list.append({'snp_rsid': snp_rsid, 'phenotype_name': phenotype_name})
    #
    # create insert: scalar sub-selects resolve the two foreign keys per row
    snp_select = select([snp_model.id]).where(snp_model.rsid == bindparam('snp_rsid'))
    phenotype_select = select([phenotype_model.id]).where(phenotype_model.name == bindparam('phenotype_name'))
    output_table_insert = insert(table=snp2phenotype_model.__table__, values={'snp_id': snp_select, 'phenotype_id': phenotype_select})
    #
    if len(input_file_obj_list) > 0:
        # hoisted: the dialect name was recomputed in every elif branch
        dialect = str(engine.__dict__['url']).split("://")[0]
        if dialect == 'sqlite':
            engine.execute(output_table_insert.prefix_with("OR IGNORE", dialect='sqlite'), input_file_obj_list)
        elif dialect == 'mysql':
            from warnings import filterwarnings  # three lines to suppress mysql warnings
            import MySQLdb as Database
            filterwarnings('ignore', category=Database.Warning)
            engine.execute(output_table_insert.prefix_with("IGNORE", dialect='mysql'), input_file_obj_list)
        elif dialect == 'postgresql':
            from sqlalchemy.dialects.postgresql import insert as pg_insert
            output_table_insert_pg = pg_insert(table=snp2phenotype_model.__table__, values={'snp_id': snp_select, 'phenotype_id': phenotype_select}).on_conflict_do_nothing(index_elements=['snp_id', 'phenotype_id'])
            engine.execute(output_table_insert_pg, input_file_obj_list)
示例5: visit_idea
def visit_idea(self, idea, level, prev_result):
    """Accumulate word counts for an idea's titles and definition and, at
    the tree root, for the bodies and subjects of its related posts.

    :param idea: the Idea node being visited.
    :param level: depth in the idea tree; 0 is the root.
    :param prev_result: visitor-protocol argument, unused here.
    """
    if idea.short_title:
        # Short titles are passed with weight 2 — presumably counted as
        # twice as relevant; verify against add_text's contract.
        self.counter.add_text(self.cleantext(idea.short_title), 2)
    if idea.long_title:
        self.counter.add_text(self.cleantext(idea.long_title))
    if idea.definition:
        self.counter.add_text(self.cleantext(idea.definition))
    if self.count_posts and level == 0:
        from .generic import Content
        # Raw SQL selecting ids of posts related to this idea, exposed as
        # a joinable aliased select with a single post_id column.
        related = text(
            Idea._get_related_posts_statement(),
            bindparams=[bindparam('root_idea_id', idea.id),
                        bindparam('discussion_id', idea.discussion_id)]
        ).columns(column('post_id')).alias('related')
        titles = set()
        # TODO maparent: Reoptimize
        for content in idea.db.query(
                Content).join(
                related, related.c.post_id == Content.id):
            body = content.body.first_original().value
            # Post bodies are down-weighted to 0.5.
            self.counter.add_text(self.cleantext(body), 0.5)
            title = content.subject.first_original().value
            title = self.cleantext(title)
            # Count each distinct subject line only once.
            if title not in titles:
                self.counter.add_text(title)
                titles.add(title)
示例6: handle
def handle(self, *args, **options):
    """Run the enrollment ETL: copy observation periods from the PEDSnet
    database into the PCORnet Enrollment table.

    :raises CommandError: when the p_to_p.yml configuration cannot be read.
    """
    # set up
    config = get_config()
    if config is None:
        raise CommandError('Unable to process configuration file p_to_p.yml')
    connection = get_connection(config)
    pedsnet_session = init_pedsnet(connection)
    init_pcornet(connection)
    # The two bindparam() columns attach the constants chart='Y' and
    # enr_basis='E' to every selected row; only persons that also have a
    # visit (PersonVisit) are included.
    observation_period = pedsnet_session.query(ObservationPeriod.person_id,
                                               ObservationPeriod.observation_period_start_date,
                                               ObservationPeriod.observation_period_end_date,
                                               ObservationPeriod.site,
                                               bindparam("chart", 'Y'),
                                               bindparam("enr_basis", 'E')
                                               ).filter(
        exists().where(ObservationPeriod.person_id == PersonVisit.person_id)).all()
    # Bulk-load the rows into Enrollment via odo, with an explicit dshape
    # describing the target columns.
    odo(observation_period, Enrollment.__table__,
        dshape='var * {patid: string, enr_start_date: date, enr_end_date: date, site: string, chart: String, '
               'enr_basis: String} '
        )
    # close session
    pedsnet_session.close()
    # output result without an automatic trailing newline
    self.stdout.ending = ''
    print('Enrollment ETL completed successfully', end='', file=self.stdout)
示例7: demographic_etl
def demographic_etl(config):
    """Extract person rows from PEDSnet, map gender/ethnicity/race codes to
    PCORnet target concepts, and load the result into Demographic.

    :param config: configuration used to open the database connection.
    """
    # set up
    connection = get_connection(config)
    pedsnet_session = init_pedsnet(connection)
    init_pcornet(connection)
    # multiple aliases for pedsnet_pcornet_valueset_map
    # to allow the three named joins
    gender_value_map = aliased(ValueSetMap)
    ethnicity_value_map = aliased(ValueSetMap)
    race_value_map = aliased(ValueSetMap)
    # extract the data from the person table
    # Unmapped codes fall back to 'OT'; bindparam() columns attach constant
    # values (biobank_flag='N', the rest NULL) to every row.
    person = pedsnet_session.query(Person.person_id,
                                   Person.birth_date,
                                   Person.birth_time,
                                   coalesce(gender_value_map.target_concept, 'OT'),
                                   coalesce(ethnicity_value_map.target_concept, 'OT'),
                                   coalesce(race_value_map.target_concept, 'OT'),
                                   bindparam("biobank_flag", "N"),
                                   Person.gender_source_value,
                                   Person.ethnicity_source_value,
                                   Person.race_source_value,
                                   Person.site,
                                   bindparam("gender_identity", None),
                                   bindparam("raw_gender_identity", None),
                                   bindparam("sexual_orientation", None),
                                   bindparam("raw_sexual_orientation", None)
                                   ). \
        outerjoin(gender_value_map,
                  # Join matches when both concept ids are SQL NULL, or when
                  # the person's concept id (cast to string) equals the
                  # map's source concept id.  '== None' is intentional
                  # SQLAlchemy syntax for IS NULL.
                  and_(gender_value_map.source_concept_class == 'Gender',
                       case([(and_(Person.gender_concept_id == None,
                                   gender_value_map.source_concept_id == None), True)],
                            else_=cast(Person.gender_concept_id, String(200)) ==
                            gender_value_map.source_concept_id))). \
        outerjoin(ethnicity_value_map,
                  and_(ethnicity_value_map.source_concept_class == 'Hispanic',
                       case([(and_(Person.ethnicity_concept_id == None,
                                   ethnicity_value_map.source_concept_id == None), True)],
                            else_=cast(Person.ethnicity_concept_id, String(200)) ==
                            ethnicity_value_map.source_concept_id))). \
        outerjoin(race_value_map,
                  and_(race_value_map.source_concept_class == 'Race',
                       case([(and_(Person.race_concept_id == None,
                                   race_value_map.source_concept_id == None), True)],
                            else_=cast(Person.race_concept_id, String(200)) ==
                            race_value_map.source_concept_id))).all()
    # transform data to pcornet names and types
    # load to demographic table
    odo(person, Demographic.__table__,
        dshape='var * {patid: string, birth_date: date, birth_time: string, sex: string,'
               'hispanic: string, race: string, biobank_flag: string, raw_sex: string,'
               'raw_hispanic: string, raw_race:string, site: string, gender_identity: string,'
               'raw_gender_identity: string, sexual_orientation: string, raw_sexual_orientation: string}'
        )
    # close session
    pedsnet_session.close()
示例8: insert_stock_data
def insert_stock_data(country, market_gsi):
    """Load daily stock prices for one market from '<country>.csv'.

    :param country: base name of the CSV file to read (without extension).
    :param market_gsi: GSI code used to resolve the market id from the
        module-level `markets` list of (id, gsi) pairs.
    :raises IndexError: when market_gsi is not present in `markets`.
    """
    market_id = [ct[0] for ct in markets if ct[1] == market_gsi][0]
    # One executemany insert; column values come from the per-row dicts via
    # bound parameters named after the CSV headers.
    insert_stmt = tc_company_stock_prices.insert().values(for_date=bindparam('DailyDate'),
                                                          market_id=bindparam('market_id'),
                                                          company_id=bindparam('CompanyID'),
                                                          open=bindparam('Open'),
                                                          max=bindparam('Max'),
                                                          min=bindparam('Min'),
                                                          close=bindparam('Close'),
                                                          volume=bindparam('Volume'),
                                                          amount=bindparam('Amount'))
    print(insert_stmt)
    result = _read_data(country + '.csv')
    # Coerce the CSV's string fields to their database types in place and
    # attach the resolved market id to every row.
    for item in result:
        item['DailyDate'] = datetime.strptime(item['DailyDate'], "%Y-%m-%d").date()
        item['CompanyID'] = int(item['CompanyID'])
        item['Open'] = float(item['Open'])
        item['Close'] = float(item['Close'])
        item['Min'] = float(item['Min'])
        item['Max'] = float(item['Max'])
        item['Volume'] = int(item['Volume'])
        item['Amount'] = int(item['Amount'])
        item['market_id'] = market_id
    # fixed: removed a commented-out debug loop that previewed rows
    with engine.connect() as conn:
        conn.execute(insert_stmt, result)
示例9: run_letter
def run_letter(letter, session, doctype='grant'):
    """Block and disambiguate all lawyers whose organization or first name
    starts with the given letter, then rebuild the lawyer table.

    :param letter: initial letter to select records by (upper-cased here).
    :param session: SQLAlchemy session used for querying and table creation.
    :param doctype: 'grant' (default) or 'application'; picks the schema.
    """
    schema = App_RawLawyer if doctype == 'application' else RawLawyer
    letter = letter.upper()
    # Match on either the organization name or the first name.
    prefix_filter = or_(
        schema.organization.startswith(bindparam('letter', letter)),
        schema.name_first.startswith(bindparam('letter', letter)))
    matching = (record for record in session.query(schema).filter(prefix_filter))
    block = clean_lawyers(matching)
    create_jw_blocks(block)
    create_lawyer_table(session)
示例10: run_letter
def run_letter(letter, session, doctype='grant'):
    """Block and disambiguate all assignees whose organization or first
    name starts with the given letter, then rebuild the assignee table.

    :param letter: initial letter to select records by (upper-cased here).
    :param session: SQLAlchemy session used for querying and table creation.
    :param doctype: 'grant' (default) or 'application'; picks the schema.
    """
    schema = App_RawAssignee if doctype == 'application' else RawAssignee
    letter = letter.upper()
    # Match on either the organization name or the first name.
    prefix_filter = or_(
        schema.organization.startswith(bindparam('letter', letter)),
        schema.name_first.startswith(bindparam('letter', letter)))
    matching = (record for record in session.query(schema).filter(prefix_filter))
    block = clean_assignees(matching)
    create_jw_blocks(block)
    create_assignee_table(session)
示例11: get_context_data
def get_context_data(self, **kwargs):
    """Build the template context for the species progress page.

    Reads the requested conclusion type from the filter form, picks the
    matching conclusion column/label, and merges EU-level species with the
    member-state species not already covered at EU level.

    :raises ValueError: for an unrecognised conclusion type.
    """
    filter_form = ProgressFilterForm(request.args)
    conclusion_type = filter_form.conclusion.data
    dataset = filter_form.dataset
    status_level = self.model_eu_cls.conclusion_status_level2
    label_type = self.TREND_LABEL
    species = []
    if conclusion_type:
        if conclusion_type == 'bs':
            status_level = self.model_eu_cls.conclusion_status_level1
            conclusion_value = self.model_eu_cls.conclusion_status_label
            label_type = self.STATUS_LABEL
        elif conclusion_type == 'stbp':
            conclusion_value = self.model_eu_cls.br_population_trend
        elif conclusion_type == 'ltbp':
            conclusion_value = self.model_eu_cls.br_population_trend_long
        elif conclusion_type == 'stwp':
            conclusion_value = self.model_eu_cls.wi_population_trend
        elif conclusion_type == 'ltwp':
            conclusion_value = self.model_eu_cls.wi_population_trend_long
        else:
            raise ValueError('Unknown conclusion type')
        eu_species = self.get_species_qs(dataset,
                                         conclusion_value,
                                         status_level)
        # Species already present at EU level are excluded from the
        # member-state list.
        ignore_species = (
            self.model_eu_cls.query
            .with_entities(self.model_eu_cls.speciescode)
        )
        # Member-state species get empty conclusion/status placeholders via
        # bind params.  NOTE(review): the bind param is spelled 'conclution'
        # — likely a typo, but the consuming template may depend on that
        # exact name; confirm before renaming.
        ms_species = (
            LuDataBird.query
            .filter(~LuDataBird.speciescode.in_(ignore_species))
            .filter_by(dataset=dataset)
            .with_entities(LuDataBird.speciescode.label('code'),
                           LuDataBird.speciesname.label('name'),
                           bindparam('conclution', ''),
                           bindparam('status', ''),
                           bindparam('additional_record', 0))
        )
        species = sorted(eu_species.union(ms_species),
                         key=lambda x: x.name)
    return {
        'filter_form': filter_form,
        'species': species,
        'current_selection': filter_form.get_selection(),
        'dataset': dataset,
        'label_type': label_type,
    }
示例12: upgrade_severity_levels
def upgrade_severity_levels(session, severity_map):
    """
    Updates the potentially changed severities at the reports.

    :param session: database session.
    :param severity_map: mapping of checker id -> severity name.
    """
    LOG.debug("Upgrading severity levels started...")
    # Create a sql query from the severity map.
    # Each map entry becomes one SELECT of two cast literals
    # (checker_id, severity); UNION ALL stitches them into an inline table.
    severity_map_q = union_all(*[
        select([cast(bindparam('checker_id' + str(i), str(checker_id))
                     .label('checker_id'), sqlalchemy.String),
                cast(bindparam('severity' + str(i), Severity._NAMES_TO_VALUES[
                    severity_map[checker_id]])
                     .label('severity'), sqlalchemy.Integer)])
        for i, checker_id in enumerate(severity_map)]) \
        .alias('new_severities')
    checker_ids = severity_map.keys()
    # Get the (checker_id, severity) pairs stored on reports that differ
    # from the new severity map.
    changed_checker_q = select([Report.checker_id, Report.severity]) \
        .group_by(Report.checker_id, Report.severity) \
        .where(Report.checker_id.in_(checker_ids)) \
        .except_(session.query(severity_map_q)).alias('changed_severites')
    changed_checkers = session.query(changed_checker_q.c.checker_id,
                                     changed_checker_q.c.severity)
    # Update severity levels of checkers.
    # NOTE(review): `changed_checkers` is a Query object, so this truth
    # test is always True; the loop below is what actually filters work.
    if changed_checkers:
        updated_checker_ids = set()
        for checker_id, severity_old in changed_checkers:
            severity_new = severity_map.get(checker_id, 'UNSPECIFIED')
            severity_id = Severity._NAMES_TO_VALUES[severity_new]
            LOG.info("Upgrading severity level of '%s' checker from %s to %s",
                     checker_id,
                     Severity._VALUES_TO_NAMES[severity_old],
                     severity_new)
            # A checker may appear with several old severities; one bulk
            # UPDATE per checker is enough.
            if checker_id in updated_checker_ids:
                continue
            session.query(Report) \
                .filter(Report.checker_id == checker_id) \
                .update({Report.severity: severity_id})
            updated_checker_ids.add(checker_id)
        session.commit()
    LOG.debug("Upgrading of severity levels finished...")
示例13: db_operates
def db_operates(action, conn, tbl, rows, pk=['id']):
    """Apply a batch insert ('add'), update ('mod') or delete ('del').

    :param action: one of 'add', 'mod', 'del'.
    :param conn: SQLAlchemy connection to execute against.
    :param tbl: SQLAlchemy Table object.
    :param rows: list of row dicts; for 'mod'/'del' each must carry the
        primary-key columns. None or empty is a no-op.
    :param pk: list of primary-key column names.  NOTE(review): mutable
        default kept for interface compatibility — it is never mutated.
    :return: number of affected rows.
    """
    if rows is None or len(rows) == 0:
        return 0
    cnt = 0
    if action in ('del', 'mod'):
        # Build the WHERE clause over the pk columns.  For 'mod' the bind
        # params are '_'-prefixed so they don't collide with the SET
        # values of the same column names.
        u_where_params = []
        for o in pk:
            if action == 'mod':
                u_where_params.append(tbl.c[o] == bindparam('_' + o))
            else:
                u_where_params.append(tbl.c[o] == bindparam(o))
        u_where_clause = and_(*u_where_params)
    if action == 'add':
        if len(rows) == 1:  # fixed: was len(rows)==-1, an impossible branch
            # Single-row insert: write the generated primary key back into
            # the caller's row dict.
            respxy = conn.execute(tbl.insert(), rows[0])
            for idx in range(len(pk)):  # fixed: Python-2 xrange
                rows[0][pk[idx]] = respxy.inserted_primary_key[idx]
            cnt = respxy.rowcount  # fixed: cnt was never set on this path
        else:
            respxy = conn.execute(tbl.insert(), rows)
            cnt = respxy.rowcount
    elif action == 'mod':
        # Cache one bindparam per column so repeated rows reuse them.
        u_value_keys = {}

        def prepare_columns(t_k, row_):
            # Collect bind params for every non-pk column present in both
            # the row and the table.  (fixed: Python-2-only dict.has_key)
            for k in row_:
                if k in tbl.columns and k not in pk:
                    if k in u_value_keys:
                        t_k[k] = u_value_keys[k]
                    else:
                        t_k[k] = u_value_keys[k] = bindparam(k)

        t_u_value_keys = {}
        for row in rows:
            prepare_columns(t_u_value_keys, row)
            # Alias pk values under '_'-prefixed keys for the WHERE clause.
            # (fixed: the original mutated the dict while iterating
            # row.keys(), a RuntimeError on Python 3)
            for o in pk:
                if o in row:
                    row['_' + o] = row[o]
            st = tbl.update().where(u_where_clause).values(**t_u_value_keys)
            respxy = conn.execute(st, [row])
            cnt += respxy.rowcount
            t_u_value_keys.clear()
            del st
        # Strip the temporary '_'-prefixed pk aliases again.
        for row in rows:
            for o in pk:
                row.pop('_' + o, None)
    elif action == 'del':
        respxy = conn.execute(tbl.delete().where(u_where_clause), rows)
        cnt = respxy.rowcount
    return cnt
示例14: get_user_id
def get_user_id(email=None, session_id=None):
    """Return the user_id for the given email address or, failing that,
    for the given session id; None when neither key is supplied."""
    def _lookup_scalar(statement, param_name, param_value):
        # Execute a single-value SELECT and unwrap the scalar result.
        rows = db.session.execute(
            text(statement,
                 bindparams=[bindparam(param_name, param_value)]))
        return rows.first()[0]

    if email is not None:
        return _lookup_scalar("SELECT aaa.get_user_id_by_email(:email)",
                              'email', email)
    if session_id is not None:
        return _lookup_scalar("SELECT aaa.get_user_id_by_session_id(:session)",
                              'session', session_id)
    return None
示例15: _update
def _update(self, type, offset, values):
    """
    :param type: The type prefix to use
    :param offset: The address offset to start at
    :param values: The values to set
    :returns: True when exactly one row was updated per value
    """
    # Per-row parameter dicts come from _build_set; the 'x_' prefix keeps
    # the WHERE bind params distinct from the SET value.
    context = self._build_set(type, offset, values, prefix='x_')
    # NOTE(review): values(value='value') creates a bind parameter named
    # 'value' whose default is the literal string — presumably the per-row
    # dicts in `context` supply the real value at executemany time; confirm
    # each context row carries a 'value' key.
    query = self._table.update().values(value='value')
    query = query.where(and_(
        self._table.c.type == bindparam('x_type'),
        self._table.c.index == bindparam('x_index')))
    result = self._connection.execute(query, context)
    # Success means one updated row per requested value.
    return result.rowcount == len(values)