This article collects typical usage examples of the sqlalchemy.DDL class in Python. If you are wondering what the Python DDL class does, how to use it, or what working examples look like, the curated class code examples below may help.
The following presents 15 code examples of the DDL class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
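Before the individual examples, here is a minimal, self-contained sketch of the pattern nearly all of them share: wrap raw SQL in a DDL construct, restrict it to one dialect with execute_if(), and attach it to a table's after_create event. The table, column, and index names below (widgets, ix_widgets_name_lower) are placeholders invented for illustration and do not come from the examples.

from sqlalchemy import Column, DDL, Integer, MetaData, String, Table, event

metadata = MetaData()

# A throwaway table used only to demonstrate the pattern.
widgets = Table(
    "widgets", metadata,
    Column("id", Integer, primary_key=True),
    Column("name", String(100), nullable=False),
)

# Raw SQL wrapped in a DDL construct; %(table)s is substituted by SQLAlchemy
# with the name of the table the event fired for.
create_name_index = DDL("CREATE INDEX ix_widgets_name_lower ON %(table)s (lower(name))")

# execute_if() limits the statement to PostgreSQL; on other dialects the
# after_create event still fires but this DDL is skipped.
event.listen(widgets, "after_create", create_name_index.execute_if(dialect="postgresql"))

# metadata.create_all(engine) on a PostgreSQL engine then emits CREATE TABLE
# followed by the index DDL.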
Example 1: initialize_db
def initialize_db(db):
    from sqlalchemy import Table, Column, Integer, String, Sequence

    if 'scheduled_jobs' in db['metadata'].tables:
        # Table already exists. Nothing to do.
        return

    scheduled_jobs = Table('scheduled_jobs', db['metadata'],
        Column("id", Integer,
               Sequence('scheduled_jobs_id_seq', start=1000), primary_key=True),
        Column("owner", String(50), nullable=False, index=True),
        Column("name", String(100), nullable=False, unique=True),
        Column("timeout_minutes", Integer, nullable=False),
        Column("code_uri", String(300), nullable=False),
        Column("commandline", String, nullable=False),
        Column("data_bucket", String(200), nullable=False),
        Column("num_workers", Integer, nullable=True),
        Column("output_dir", String(100), nullable=False),
        Column("output_visibility", String(10), nullable=False),
        Column("schedule_minute", String(20), nullable=False),
        Column("schedule_hour", String(20), nullable=False),
        Column("schedule_day_of_month", String(20), nullable=False),
        Column("schedule_month", String(20), nullable=False),
        Column("schedule_day_of_week", String(20), nullable=False)
    )

    # Postgres-specific stuff
    seq_default = DDL("ALTER TABLE scheduled_jobs ALTER COLUMN id SET DEFAULT nextval('scheduled_jobs_id_seq');")
    event.listen(scheduled_jobs, "after_create", seq_default.execute_if(dialect='postgresql'))

    # Create the table
    db['metadata'].create_all(tables=[scheduled_jobs])
Example 2: attach_triggers
def attach_triggers():
    """ Attach some database triggers to the File table """
    function_snippet = DDL("""
        CREATE OR REPLACE FUNCTION update_file_search_text_vector() RETURNS TRIGGER AS $$
        BEGIN
            IF TG_OP = 'INSERT' THEN
                NEW.search_text = to_tsvector('pg_catalog.english', NEW.tags) || to_tsvector('pg_catalog.english', translate(NEW.path, '/.', ' '));
            END IF;
            IF TG_OP = 'UPDATE' THEN
                IF NEW.tags <> OLD.tags OR NEW.path <> OLD.path THEN
                    NEW.search_text = to_tsvector('pg_catalog.english', NEW.tags) || to_tsvector('pg_catalog.english', translate(NEW.path, '/.', ' '));
                END IF;
            END IF;
            RETURN NEW;
        END
        $$ LANGUAGE 'plpgsql';
    """)

    trigger_snippet = DDL("""
        CREATE TRIGGER search_text_update BEFORE INSERT OR UPDATE
        ON files
        FOR EACH ROW EXECUTE PROCEDURE
        update_file_search_text_vector()
    """)

    event.listen(File.__table__, 'after_create',
                 function_snippet.execute_if(dialect='postgresql'))
    event.listen(File.__table__, 'after_create',
                 trigger_snippet.execute_if(dialect='postgresql'))
Example 3: after_table
def after_table(self):
    statement = self.statement
    if hasattr(statement, '__call__'):
        statement = statement()
    if not isinstance(statement, list):
        statement = [statement]
    for s in statement:
        ddl = DDL(s, self.on, self.context)
        ddl.execute_at(self.when, self.entity.table)
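Note that this example uses the pre-0.7 SQLAlchemy API: the DDL constructor's positional on/context arguments and DDL.execute_at() were deprecated long ago and are removed in current releases. A rough sketch of the same method against the modern event.listen()/execute_if() API follows; it assumes self.when already holds a modern event name such as 'after_create' and simply carries the other self.* attributes over from the example above.

from sqlalchemy import DDL, event

def after_table(self):
    statement = self.statement
    if callable(statement):
        statement = statement()
    if not isinstance(statement, list):
        statement = [statement]
    for s in statement:
        # context supplies extra %(key)s substitutions for the statement;
        # execute_if(dialect=...) replaces the old positional "on" argument.
        ddl = DDL(s, context=self.context)
        event.listen(self.entity.table, self.when,
                     ddl.execute_if(dialect=self.on))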
Example 4: create_session
def create_session(metadata, autoincrement=True, session_id_start=1000):
    """Create Session table.

    This function creates the Session table for tracking the various simulations run. For MySQL, it adds
    a post-create command to set the lower limit of the auto increment value.

    Table Description:

    This table contains the log of all simulations (MySQL) or a single simulation (SQLite). Simulation
    runs are identified by the combination of the hostname and session Id: *sessionHost_sessionId*.

    Parameters
    ----------
    metadata : sqlalchemy.MetaData
        The database object that collects the tables.
    autoincrement : bool
        A flag to set auto incrementing on the sessionID column.
    session_id_start : int
        A new starting session Id for counting new simulations.

    Returns
    -------
    sqlalchemy.Table
        The Session table object.
    """
    table = Table("Session", metadata,
                  Column("sessionId", Integer, primary_key=True, autoincrement=autoincrement, nullable=False,
                         doc="Numeric identifier for the current simulation instance."),
                  Column("sessionUser", String(80), nullable=False,
                         doc="Computer username of the simulation runner."),
                  Column("sessionHost", String(80), nullable=False,
                         doc="Computer hostname where the simulation was run."),
                  Column("sessionDate", DATETIME, nullable=False,
                         doc="The UTC date/time of the simulation start."),
                  Column("version", String(25), nullable=True, doc="The version number of the SOCS code."),
                  Column("runComment", String(200), nullable=True,
                         doc="A description of the simulation setup."))

    Index("s_host_user_date_idx", table.c.sessionUser, table.c.sessionHost, table.c.sessionDate, unique=True)

    alter_table = DDL("ALTER TABLE %(table)s AUTO_INCREMENT={};".format(session_id_start))
    event.listen(table, 'after_create', alter_table.execute_if(dialect='mysql'))

    return table
Example 5: cached
    code = db.Column(db.Unicode(100), nullable=False, default=generate_coupon_code)
    usage_limit = db.Column(db.Integer, nullable=False, default=1)
    used_count = cached(db.Column(db.Integer, nullable=False, default=0))

    discount_policy_id = db.Column(None, db.ForeignKey('discount_policy.id'), nullable=False)
    discount_policy = db.relationship(DiscountPolicy, backref=db.backref('discount_coupons', cascade='all, delete-orphan'))

    @classmethod
    def is_signed_code_usable(cls, policy, code):
        obj = cls.query.filter(cls.discount_policy == policy, cls.code == code, cls.used_count == cls.usage_limit).one_or_none()
        if obj:
            return False
        return True

    def update_used_count(self):
        from ..models import LineItem, LINE_ITEM_STATUS

        self.used_count = db.select([db.func.count()]).where(LineItem.discount_coupon == self).where(LineItem.status == LINE_ITEM_STATUS.CONFIRMED).as_scalar()


create_title_trgm_trigger = DDL(
    '''
    CREATE EXTENSION IF NOT EXISTS pg_trgm;
    CREATE INDEX idx_discount_policy_title_trgm on discount_policy USING gin (title gin_trgm_ops);
    ''')

event.listen(DiscountPolicy.__table__, 'after_create',
             create_title_trgm_trigger.execute_if(dialect='postgresql'))
Example 6: load_ddl
def load_ddl():
    for script in ('triggers.sql', 'rpmvercmp.sql'):
        with open(os.path.join(get_config('directories.datadir'), script)) as ddl_script:
            ddl = DDL(ddl_script.read())
        listen(Base.metadata, 'after_create', ddl.execute_if(dialect='postgresql'))
Example 7: ForeignKey
                   ForeignKey('entities.entity_id'), default=None),
                   Column('version', Integer, nullable=False),
                   Column('deleted_at_version', Integer, default=None),
                   mysql_engine='InnoDB'
                   )

Index('idx_attrs_entity_version',
      ATTR_TABLE.c.entity_id,
      ATTR_TABLE.c.version,
      ATTR_TABLE.c.deleted_at_version)
Index('idx_attrs_key', ATTR_TABLE.c.key)
Index('idx_attrs_subkey', ATTR_TABLE.c.subkey)

create_index = DDL('CREATE INDEX idx_attrs_str_value on %(table)s (string_value(20))')
event.listen(ATTR_TABLE, 'after_create', create_index.execute_if(dialect='mysql'))

create_index = DDL('CREATE INDEX idx_attrs_str_value on %(table)s ((substring(string_value,0,20)))')
event.listen(ATTR_TABLE, 'after_create', create_index.execute_if(dialect='postgresql'))

create_index = DDL('CREATE INDEX idx_attrs_str_value on %(table)s (string_value)')
event.listen(ATTR_TABLE, 'after_create', create_index.execute_if(dialect='sqlite'))

COUNTER_TABLE = Table('counters', METADATA,
                      Column('counter_id', Integer, primary_key=True),
                      Column('entity_id', Integer, ForeignKey('entities.entity_id'), nullable=False),
                      Column('attr_key', String(256, convert_unicode=True)),
                      Column('value', Integer, default=0),
                      mysql_engine='InnoDB'
                      )
Example 8: getattr
            organization_dict[key] = getattr(self, key)()

        if include_extras:
            for key in ('current_events', 'current_projects', 'current_stories'):
                organization_dict[key] = getattr(self, key)()

        return organization_dict


tbl = Organization.__table__

# Index the tsvector column
db.Index('index_org_tsv_body', tbl.c.tsv_body, postgresql_using='gin')

# Trigger to populate the search index column
trig_ddl = DDL("""
    CREATE TRIGGER tsvupdate_orgs_trigger BEFORE INSERT OR UPDATE ON organization FOR EACH ROW EXECUTE PROCEDURE tsvector_update_trigger(tsv_body, 'pg_catalog.english', name);
""")

# Initialize the trigger after table is created
event.listen(tbl, 'after_create', trig_ddl.execute_if(dialect='postgresql'))


class Story(db.Model):
    '''
    Blog posts from a Brigade.
    '''
    # Columns
    id = db.Column(db.Integer(), primary_key=True)
    title = db.Column(db.Unicode())
    link = db.Column(db.Unicode())
    type = db.Column(db.Unicode())
    keep = db.Column(db.Boolean())
Example 9: dictify
    def dictify(self):
        artist_dict = dict()
        artist_dict['artist_id'] = self.artist_id
        artist_dict['name'] = self.name
        artist_dict['num_followers'] = self.num_followers
        artist_dict['image_url'] = self.image_url
        artist_dict['popularity'] = self.popularity
        artist_dict['charted_songs'] = [
            song.song_name for song in self.charted_songs]
        artist_dict['genres'] = [genre.name for genre in self.genres]
        return artist_dict


# Create a trigger to check for updates to Artist and update the TsVector
# accordingly.
ARTIST_VECTOR_TRIGGER = DDL("""
    CREATE TRIGGER artist_tsvector_update BEFORE INSERT OR UPDATE ON "Artist" FOR EACH ROW EXECUTE PROCEDURE
    tsvector_update_trigger(tsvector_col, 'pg_catalog.english', 'name')
    """)
event.listen(Artist.__table__, 'after_create',
             ARTIST_VECTOR_TRIGGER.execute_if(dialect='postgresql'))


class Year(BASE):
    """
    Database model of table 'Year', which stores:
        year: the year's number
        top_album_name: the name of the top album
        top_album_id: the Spotify id of the top album
        top_genre_name: the name of the genre of the year's top album
        top_album_artist_id: the id of the artist who made the top album
        top_genre: the genre of the year's top album
Example 10: Column
    addrloc_ip = Column(Text)
    addr_act = Column(Text)
    addr_obj = Column(Text)
    ogrn = Column(Text, index=True)
    inn = Column(Text, index=True)
    goal = Column(Text)
    osn_datestart = Column(Text)
    osn_dateend = Column(Text)
    osn_datestart2 = Column(Text)
    osn_other = Column(Text)
    check_month = Column(Text)
    check_days = Column(Text)
    check_hours = Column(Text)
    check_form = Column(Text)
    check_org = Column(Text)
    details_tsvector = Column(TsVector)


# Trigger on the genproc table
trigger_snippet = DDL(
    """
    CREATE TRIGGER details_tsvector_update BEFORE INSERT OR UPDATE
    ON genproc
    FOR EACH ROW EXECUTE PROCEDURE
    tsvector_update_trigger(details_tsvector, 'pg_catalog.russian', 'name', 'inn', 'ogrn')
    """
)
event.listen(Genproc.__table__, "after_create", trigger_snippet.execute_if(dialect="postgresql"))
Example 11: __repr__
    def __repr__(self):
        return "<Monkey #{0}>".format(self.id)


change_monkey_friends_count_trigger_ddl = DDL(
    """
    CREATE OR REPLACE FUNCTION process_change_monkey_friends_count()
    RETURNS TRIGGER AS $change_monkey_friends_count$
    BEGIN
        IF (TG_OP = 'DELETE') THEN
            UPDATE monkeys SET friends_count = friends_count - 1
                WHERE id = OLD.monkey_id;
            RETURN OLD;
        ELSIF (TG_OP = 'INSERT') THEN
            UPDATE monkeys SET friends_count = friends_count + 1
                WHERE id = NEW.monkey_id;
            RETURN NEW;
        END IF;
        RETURN NULL;
    END;
    $change_monkey_friends_count$ LANGUAGE plpgsql;

    CREATE TRIGGER change_monkey_friends_count
    AFTER INSERT OR DELETE ON friends
    FOR EACH ROW EXECUTE PROCEDURE process_change_monkey_friends_count();
    """
)

event.listen(
    friends_relationships, "after_create", change_monkey_friends_count_trigger_ddl.execute_if(dialect="postgresql")
)
Example 12: __repr__
    __tablename__ = 'geo_alt_name'

    geonameid = db.Column(None, db.ForeignKey('geo_name.id'), nullable=False)
    geoname = db.relationship(GeoName, backref=db.backref('alternate_titles', cascade='all, delete-orphan'))
    lang = db.Column(db.Unicode(7), nullable=True, index=True)
    title = db.Column(db.Unicode(200), nullable=False)
    is_preferred_name = db.Column(db.Boolean, nullable=False)
    is_short_name = db.Column(db.Boolean, nullable=False)
    is_colloquial = db.Column(db.Boolean, nullable=False)
    is_historic = db.Column(db.Boolean, nullable=False)

    def __repr__(self):
        return '<GeoAltName %s "%s" of %s>' % (self.lang, self.title, repr(self.geoname)[1:-1] if self.geoname else None)


create_geo_country_info_index = DDL(
    "CREATE INDEX ix_geo_country_info_title ON geo_country_info (lower(title) varchar_pattern_ops);")
event.listen(GeoCountryInfo.__table__, 'after_create',
             create_geo_country_info_index.execute_if(dialect='postgresql'))

create_geo_name_index = DDL(
    "CREATE INDEX ix_geo_name_title ON geo_name (lower(title) varchar_pattern_ops); "
    "CREATE INDEX ix_geo_name_ascii_title ON geo_name (lower(ascii_title) varchar_pattern_ops);")
event.listen(GeoName.__table__, 'after_create',
             create_geo_name_index.execute_if(dialect='postgresql'))

create_geo_alt_name_index = DDL(
    "CREATE INDEX ix_geo_alt_name_title ON geo_alt_name (lower(title) varchar_pattern_ops);")
event.listen(GeoAltName.__table__, 'after_create',
             create_geo_alt_name_index.execute_if(dialect='postgresql'))
Example 13: EnumIntType
)

title_table = sa.Table(
    "title",
    meta.metadata,
    sa.Column("title_id", sa.types.Integer, primary_key=True),
    sa.Column("name", sa.types.Unicode(255), nullable=False),
    sa.Column("year", sa.types.SmallInteger, nullable=False),
    sa.Column("type", EnumIntType(config.TITLE_TYPES), nullable=False),
    sa.Column("created", sa.types.DateTime(), nullable=False, default=datetime.datetime.now),
    sa.Column(
        "modified", sa.types.DateTime(), nullable=False, default=datetime.datetime.now, onupdate=datetime.datetime.now
    ),
    sa.UniqueConstraint("name", "year", "type", name="title_info_unq"),
)

title_lower_index = DDL("create index title_name_lower_idx on title ((lower(name)))")
title_trgm_index = DDL("create index title_name_trgm_idx " "on title using gin (name gin_trgm_ops)")

event.listen(title_table, "after_create", title_lower_index.execute_if(dialect="postgresql"))
event.listen(title_table, "after_create", title_trgm_index.execute_if(dialect="postgresql"))

aka_title_table = sa.Table(
    "aka_title",
    meta.metadata,
    sa.Column("aka_title_id", sa.types.Integer, primary_key=True),
    sa.Column("title_id", sa.types.Integer, sa.ForeignKey("title.title_id")),
    sa.Column("name", sa.types.Unicode(511), nullable=False),
    sa.Column("year", sa.types.SmallInteger, nullable=False),
    sa.Column("region", sa.types.Unicode(100), nullable=False),
    sa.Column("created", sa.types.DateTime(), nullable=False, default=datetime.datetime.now),
    sa.Column(
        "modified", sa.types.DateTime(), nullable=False, default=datetime.datetime.now, onupdate=datetime.datetime.now
Example 14: DDL
            # Add Twitter/GitHub accounts to the head of results
            users = cls.query.filter(cls.status == USER_STATUS.ACTIVE, cls.id.in_(
                db.session.query(UserExternalId.user_id).filter(
                    UserExternalId.service.in_(UserExternalId.__at_username_services__),
                    db.func.lower(UserExternalId.username).like(db.func.lower(query[1:]))
                ).subquery())).options(*cls._defercols).limit(100).all() + users
        elif '@' in query:
            users = cls.query.filter(cls.status == USER_STATUS.ACTIVE, cls.id.in_(
                db.session.query(UserEmail.user_id).filter(UserEmail.user_id != None).filter(  # NOQA
                    db.func.lower(UserEmail.email).like(db.func.lower(query))
                ).subquery())).options(*cls._defercols).limit(100).all() + users
        return users


create_user_index = DDL(
    'CREATE INDEX ix_user_username_lower ON "user" (lower(username) varchar_pattern_ops); '
    'CREATE INDEX ix_user_fullname_lower ON "user" (lower(fullname) varchar_pattern_ops);')
event.listen(User.__table__, 'after_create',
             create_user_index.execute_if(dialect='postgresql'))


class UserOldId(TimestampMixin, db.Model):
    __tablename__ = 'useroldid'
    __bind_key__ = 'lastuser'
    query_class = CoasterQuery

    # userid here is NOT a foreign key since it has to continue to exist
    # even if the User record is removed
    userid = db.Column(db.String(22), nullable=False, primary_key=True)
    olduser = db.relationship(User, primaryjoin=foreign(userid) == remote(User.userid),
                              backref=db.backref('oldid', uselist=False))
Example 15: random_long_key
        hashid = random_long_key()
        if not hashid.isdigit() and model.query.filter_by(hashid=hashid).isempty():
            break
    return hashid


create_jobpost_search_trigger = DDL(
    '''
    CREATE FUNCTION jobpost_search_vector_update() RETURNS TRIGGER AS $$
    BEGIN
        IF TG_OP = 'INSERT' THEN
            NEW.search_vector = to_tsvector('english', COALESCE(NEW.company_name, '') || ' ' || COALESCE(NEW.headline, '') || ' ' || COALESCE(NEW.headlineb, '') || ' ' || COALESCE(NEW.description, '') || ' ' || COALESCE(NEW.perks, ''));
        END IF;
        IF TG_OP = 'UPDATE' THEN
            IF NEW.headline <> OLD.headline OR COALESCE(NEW.headlineb, '') <> COALESCE(OLD.headlineb, '') OR NEW.description <> OLD.description OR NEW.perks <> OLD.perks THEN
                NEW.search_vector = to_tsvector('english', COALESCE(NEW.company_name, '') || ' ' || COALESCE(NEW.headline, '') || ' ' || COALESCE(NEW.headlineb, '') || ' ' || COALESCE(NEW.description, '') || ' ' || COALESCE(NEW.perks, ''));
            END IF;
        END IF;
        RETURN NEW;
    END
    $$ LANGUAGE 'plpgsql';

    CREATE TRIGGER jobpost_search_vector_trigger BEFORE INSERT OR UPDATE ON jobpost
    FOR EACH ROW EXECUTE PROCEDURE jobpost_search_vector_update();

    CREATE INDEX ix_jobpost_search_vector ON jobpost USING gin(search_vector);
    ''')

event.listen(JobPost.__table__, 'after_create',
             create_jobpost_search_trigger.execute_if(dialect='postgresql'))