This page collects and summarizes typical usage examples of the sqlalchemy.DATE attribute in Python. If you have been wondering what sqlalchemy.DATE is for, how to use it, or what it looks like in practice, the curated examples here may help. You can also explore further usage examples from the sqlalchemy module that the attribute belongs to.
The sections below present 15 code examples of sqlalchemy.DATE, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
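Before the examples, here is a minimal sketch of the two patterns that recur throughout this page: declaring a DATE column and casting a timestamp column down to its calendar date in a filter. All identifiers below (events, created_at) are illustrative, not taken from the examples.

import datetime
import sqlalchemy as sa

metadata = sa.MetaData()
events = sa.Table(
    'events', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('created_at', sa.TIMESTAMP, nullable=False),
    sa.Column('day', sa.DATE, nullable=False),
)

# cast() truncates the timestamp to a date, so this filter matches every row
# created on that calendar day regardless of its time component.
stmt = sa.select([events.c.id]).where(
    sa.cast(events.c.created_at, sa.DATE) == datetime.date(2020, 4, 9)
)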
Example 1: downgrade
# Required module: import sqlalchemy [as an alias]
# Or: from sqlalchemy import DATE [as an alias]
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('services_history', sa.Column('sms_sender', sa.VARCHAR(length=11), autoincrement=False, nullable=True))
    op.add_column('services', sa.Column('sms_sender', sa.VARCHAR(length=11), autoincrement=False, nullable=True))
    op.create_table('notification_statistics',
                    sa.Column('id', postgresql.UUID(), autoincrement=False, nullable=False),
                    sa.Column('service_id', postgresql.UUID(), autoincrement=False, nullable=False),
                    sa.Column('emails_requested', sa.BIGINT(), autoincrement=False, nullable=False),
                    sa.Column('emails_delivered', sa.BIGINT(), autoincrement=False, nullable=False),
                    sa.Column('emails_failed', sa.BIGINT(), autoincrement=False, nullable=False),
                    sa.Column('sms_requested', sa.BIGINT(), autoincrement=False, nullable=False),
                    sa.Column('sms_delivered', sa.BIGINT(), autoincrement=False, nullable=False),
                    sa.Column('sms_failed', sa.BIGINT(), autoincrement=False, nullable=False),
                    sa.Column('day', sa.DATE(), autoincrement=False, nullable=False),
                    sa.ForeignKeyConstraint(['service_id'], ['services.id'], name='notification_statistics_service_id_fkey'),
                    sa.PrimaryKeyConstraint('id', name='notification_statistics_pkey'),
                    sa.UniqueConstraint('service_id', 'day', name='uix_service_to_day')
                    )
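Alembic downgrades mirror their upgrades, so the upgrade() this example reverses presumably dropped the two sms_sender columns and the notification_statistics table. A hedged sketch of that inverse, which is not shown in the source:

def upgrade():
    op.drop_column('services_history', 'sms_sender')
    op.drop_column('services', 'sms_sender')
    op.drop_table('notification_statistics')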
Example 2: downgrade
# Required module: import sqlalchemy [as an alias]
# Or: from sqlalchemy import DATE [as an alias]
def downgrade():
    op.create_table('dm_datetime',
                    sa.Column('bst_date', sa.DATE(), autoincrement=False, nullable=False),
                    sa.Column('year', sa.INTEGER(), autoincrement=False, nullable=False),
                    sa.Column('month', sa.INTEGER(), autoincrement=False, nullable=False),
                    sa.Column('month_name', sa.VARCHAR(), autoincrement=False, nullable=False),
                    sa.Column('day', sa.INTEGER(), autoincrement=False, nullable=False),
                    sa.Column('bst_day', sa.INTEGER(), autoincrement=False, nullable=False),
                    sa.Column('day_of_year', sa.INTEGER(), autoincrement=False, nullable=False),
                    sa.Column('week_day_name', sa.VARCHAR(), autoincrement=False, nullable=False),
                    sa.Column('calendar_week', sa.INTEGER(), autoincrement=False, nullable=False),
                    sa.Column('quartal', sa.VARCHAR(), autoincrement=False, nullable=False),
                    sa.Column('year_quartal', sa.VARCHAR(), autoincrement=False, nullable=False),
                    sa.Column('year_month', sa.VARCHAR(), autoincrement=False, nullable=False),
                    sa.Column('year_calendar_week', sa.VARCHAR(), autoincrement=False, nullable=False),
                    sa.Column('financial_year', sa.INTEGER(), autoincrement=False, nullable=False),
                    sa.Column('utc_daytime_start', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
                    sa.Column('utc_daytime_end', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
                    sa.PrimaryKeyConstraint('bst_date', name='dm_datetime_pkey')
                    )
    op.create_index('ix_dm_datetime_yearmonth', 'dm_datetime', ['year', 'month'], unique=False)
    op.create_index('ix_dm_datetime_bst_date', 'dm_datetime', ['bst_date'], unique=False)
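dm_datetime is a classic date-dimension table: one row per calendar day, with the day pre-decomposed into the columns a report would group by. As a purely hypothetical illustration (not part of the migration), the calendar-derived columns of one row could be computed from a Python date as below; the BST and financial-year columns are omitted because their rules are not shown here.

import datetime

def dimension_row(day: datetime.date) -> dict:
    iso_year, iso_week, _ = day.isocalendar()
    quarter = (day.month - 1) // 3 + 1
    return {
        'bst_date': day,
        'year': day.year,
        'month': day.month,
        'month_name': day.strftime('%B'),
        'day': day.day,
        'day_of_year': int(day.strftime('%j')),
        'week_day_name': day.strftime('%A'),
        'calendar_week': iso_week,
        'quartal': 'Q%d' % quarter,
        'year_quartal': '%d-Q%d' % (day.year, quarter),
        'year_month': day.strftime('%Y-%m'),
        'year_calendar_week': '%d-%02d' % (iso_year, iso_week),
    }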
Example 3: downgrade
# Required module: import sqlalchemy [as an alias]
# Or: from sqlalchemy import DATE [as an alias]
def downgrade():
    op.add_column('service_permissions',
                  sa.Column('updated_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=True))
    op.create_table('template_statistics',
                    sa.Column('id', postgresql.UUID(), autoincrement=False, nullable=False),
                    sa.Column('service_id', postgresql.UUID(), autoincrement=False, nullable=False),
                    sa.Column('template_id', postgresql.UUID(), autoincrement=False, nullable=False),
                    sa.Column('usage_count', sa.BIGINT(), autoincrement=False, nullable=False),
                    sa.Column('day', sa.DATE(), autoincrement=False, nullable=False),
                    sa.Column('updated_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
                    sa.ForeignKeyConstraint(['service_id'], ['services.id'],
                                            name='template_statistics_service_id_fkey'),
                    sa.ForeignKeyConstraint(['template_id'], ['templates.id'],
                                            name='template_statistics_template_id_fkey'),
                    sa.PrimaryKeyConstraint('id', name='template_statistics_pkey')
                    )
Example 4: test_reflect_select
# Required module: import sqlalchemy [as an alias]
# Or: from sqlalchemy import DATE [as an alias]
def test_reflect_select(table, table_using_test_dataset):
    for table in [table, table_using_test_dataset]:
        assert len(table.c) == 18
        assert isinstance(table.c.integer, Column)
        assert isinstance(table.c.integer.type, types.Integer)
        assert isinstance(table.c.timestamp.type, types.TIMESTAMP)
        assert isinstance(table.c.string.type, types.String)
        assert isinstance(table.c.float.type, types.Float)
        assert isinstance(table.c.boolean.type, types.Boolean)
        assert isinstance(table.c.date.type, types.DATE)
        assert isinstance(table.c.datetime.type, types.DATETIME)
        assert isinstance(table.c.time.type, types.TIME)
        assert isinstance(table.c.bytes.type, types.BINARY)
        assert isinstance(table.c['record.age'].type, types.Integer)
        assert isinstance(table.c['record.name'].type, types.String)
        assert isinstance(table.c['nested_record.record.age'].type, types.Integer)
        assert isinstance(table.c['nested_record.record.name'].type, types.String)
        assert isinstance(table.c.array.type, types.ARRAY)

        rows = table.select().execute().fetchall()
        assert len(rows) == 1000
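The table fixtures here come from reflecting an existing BigQuery table through the pybigquery dialect. A minimal sketch of such a reflection, assuming an existing dataset and table (both names are illustrative):

from sqlalchemy import MetaData, Table, create_engine

engine = create_engine('bigquery://my-project')
table = Table('my_dataset.my_table', MetaData(bind=engine), autoload=True)
print(table.c.date.type)  # DATE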
Example 5: downgrade
# Required module: import sqlalchemy [as an alias]
# Or: from sqlalchemy import DATE [as an alias]
def downgrade():
    op.create_table('provider_statistics',
                    sa.Column('id', postgresql.UUID(), autoincrement=False, nullable=False),
                    sa.Column('day', sa.DATE(), autoincrement=False, nullable=False),
                    sa.Column('service_id', postgresql.UUID(), autoincrement=False, nullable=False),
                    sa.Column('unit_count', sa.BIGINT(), autoincrement=False, nullable=False),
                    sa.Column('provider_id', postgresql.UUID(), autoincrement=False, nullable=False),
                    sa.ForeignKeyConstraint(['provider_id'], ['provider_details.id'], name='provider_stats_to_provider_fk'),
                    sa.ForeignKeyConstraint(['service_id'], ['services.id'], name='provider_statistics_service_id_fkey'),
                    sa.PrimaryKeyConstraint('id', name='provider_statistics_pkey')
                    )
    op.create_index('ix_provider_statistics_service_id', 'provider_statistics', ['service_id'], unique=False)
    op.create_index('ix_provider_statistics_provider_id', 'provider_statistics', ['provider_id'], unique=False)
Example 6: _get_oracle_cdc_client_origin
# Required module: import sqlalchemy [as an alias]
# Or: from sqlalchemy import DATE [as an alias]
def _get_oracle_cdc_client_origin(connection, database, sdc_builder, pipeline_builder, buffer_locally,
                                  src_table_name=None, batch_size=BATCH_SIZE, **kwargs):
    kwargs.setdefault('dictionary_source', 'DICT_FROM_ONLINE_CATALOG')
    kwargs.setdefault('logminer_session_window', '${10 * MINUTES}')
    kwargs.setdefault('db_time_zone', 'UTC')
    kwargs.setdefault('maximum_transaction_length', '${1 * MINUTES}')
    kwargs.setdefault('initial_change', 'DATE')

    if Version('3.14.0') <= Version(sdc_builder.version) < Version('3.16.0'):
        # In versions < 3.16 the user has to define a maximum time to look back for a
        # valid dictionary. From 3.16 onward this is no longer required. By default,
        # avoid setting a specific duration and use all the redo logs instead.
        kwargs.setdefault('duration_of_directory_extraction', -1)

    if src_table_name is not None:
        if Version(sdc_builder.version) >= Version('3.1.0.0'):
            tables = [{'schema': database.username.upper(), 'table': src_table_name, 'excludePattern': ''}]
            kwargs.setdefault('tables', tables)
        else:
            kwargs.setdefault('schema_name', database.username.upper())
            kwargs.setdefault('tables', [src_table_name])
    elif 'tables' not in kwargs:
        raise Exception("Either 'tables' or 'src_table_name' must be passed as argument.")

    start = _get_current_oracle_time(connection=connection)
    kwargs.setdefault('start_date', start.strftime('%d-%m-%Y %H:%M:%S'))

    # The Oracle db and the node executing the test may not have exactly the same time.
    # So wait until this node reaches that time (including the timezone offset),
    # otherwise validation will fail because the origin thinks the start time is
    # in the future.
    _wait_until_time(time=start)
    logger.info('Start Date is %s', kwargs['start_date'])

    oracle_cdc_client = pipeline_builder.add_stage('Oracle CDC Client')
    return oracle_cdc_client.set_attributes(buffer_changes_locally=buffer_locally,
                                            max_batch_size_in_records=batch_size,
                                            **kwargs)
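_get_current_oracle_time and _wait_until_time are helpers referenced but not shown on this page. Hedged sketches of what they plausibly do, inferred only from how they are called above (the real implementations may differ):

import datetime
from time import sleep

from sqlalchemy import text

def _get_current_oracle_time(connection):
    # SYSDATE is the Oracle server's current time.
    return connection.execute(text('SELECT SYSDATE FROM DUAL')).fetchall()[0][0]

def _wait_until_time(time):
    # The parameter is named 'time' to match the keyword call above. Comparing
    # against utcnow() assumes the database clock runs in UTC, as the
    # db_time_zone='UTC' setting suggests.
    while datetime.datetime.utcnow() < time:
        sleep(1)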
Example 7: get_audit_ids_by_date
# Required module: import sqlalchemy [as an alias]
# Or: from sqlalchemy import DATE [as an alias]
def get_audit_ids_by_date(cls, target_type, target_id, date):
    db = DBSession()
    rs = db.query(cls.audit_id).filter(
        cls.target_id == target_id,
        cls.target_type == target_type,
        cast(cls.created_at, DATE) == date
    )
    return sorted((r[0] for r in rs), reverse=True)
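cast(cls.created_at, DATE) truncates each timestamp to its calendar date, so the query matches every audit record created on the given day; sorting in reverse returns the ids newest-first, assuming ids grow over time. A hypothetical call (the model name and arguments are illustrative):

import datetime

audit_ids = AuditLog.get_audit_ids_by_date('server', 42, datetime.date(2020, 4, 9))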
Example 8: get
# Required module: import sqlalchemy [as an alias]
# Or: from sqlalchemy import DATE [as an alias]
def get(self, *args, **kwargs):
    git_url = self.get_argument('git_url', default=None, strip=True)
    relative_path = self.get_argument('relative_path', default=None, strip=True)
    hook_name = self.get_argument('hook_name', default=None, strip=True)
    search_val = self.get_argument('search_val', default=None, strip=True)
    log_list = []
    with DBContext('r') as session:
        if git_url and relative_path and hook_name:
            offset = datetime.timedelta(days=-3)
            re_date = (datetime.datetime.now() + offset).strftime('%Y-%m-%d')
            hooks_log = session.query(HooksLog.logs_info).filter(HooksLog.git_url == git_url,
                                                                 HooksLog.relative_path == relative_path,
                                                                 HooksLog.hook_name == hook_name).filter(
                cast(HooksLog.create_time, DATE) > cast(re_date, DATE)).order_by(-HooksLog.id).first()
            if hooks_log:
                return self.write(dict(code=0, msg='获取成功', data=hooks_log[0]))  # msg: "fetched successfully"
            else:
                return self.write(dict(code=-2, msg='未找到最近触发的任务', data=''))  # msg: "no recently triggered task found"
        elif search_val:
            hooks_log_info = session.query(HooksLog).filter(
                or_(HooksLog.git_url.like('{}%'.format(search_val)),
                    HooksLog.relative_path.like('{}%'.format(search_val)),
                    HooksLog.logs_info.like('{}%'.format(search_val)))).order_by(-HooksLog.id).all()
        else:
            hooks_log_info = session.query(HooksLog).order_by(-HooksLog.id).limit(200).all()

        for msg in hooks_log_info:
            data_dict = model_to_dict(msg)
            data_dict['create_time'] = str(data_dict['create_time'])
            log_list.append(data_dict)
    return self.write(dict(code=0, msg='获取成功', data=log_list))  # msg: "fetched successfully"
Example 9: delete
# Required module: import sqlalchemy [as an alias]
# Or: from sqlalchemy import DATE [as an alias]
def delete(self, *args, **kwargs):
    data = json.loads(self.request.body.decode("utf-8"))
    day_ago = int(data.get('day_ago', 15))
    offset = datetime.timedelta(days=-day_ago)
    re_date = (datetime.datetime.now() + offset).strftime('%Y-%m-%d')
    with DBContext('w', None, True) as session:
        session.query(HooksLog).filter(cast(HooksLog.create_time, DATE) < cast(re_date, DATE)).delete(
            synchronize_session=False)
    return self.write(dict(code=0, msg='删除{}天前的数据成功'.format(day_ago)))  # msg: "data older than {} days deleted"
Example 10: downgrade
# Required module: import sqlalchemy [as an alias]
# Or: from sqlalchemy import DATE [as an alias]
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("user", "last_update")
    op.add_column(
        "poll",
        sa.Column(
            "current_date",
            sa.DATE(),
            server_default=sa.text("now()"),
            autoincrement=False,
            nullable=False,
        ),
    )
    # ### end Alembic commands ###
Author: Nukesor, project: ultimate-poll-bot, lines of code: 16, source file: 2020_04_09_f1c0140a53d4_add_last_updated_to_user.py
Example 11: upgrade
# Required module: import sqlalchemy [as an alias]
# Or: from sqlalchemy import DATE [as an alias]
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "notification",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("chat_id", sa.BigInteger(), nullable=False),
        sa.Column("select_message_id", sa.BigInteger(), nullable=True),
        sa.Column("poll_message_id", sa.BigInteger(), nullable=True),
        sa.Column(
            "created_at", sa.DateTime(), server_default=sa.text("now()"), nullable=False
        ),
        sa.Column(
            "updated_at", sa.DateTime(), server_default=sa.text("now()"), nullable=False
        ),
        sa.Column("poll_id", sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(["poll_id"], ["poll.id"], ondelete="cascade"),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint(
            "poll_id", "chat_id", name="one_notification_per_poll_and_chat"
        ),
    )
    op.create_index(
        op.f("ix_notification_poll_id"), "notification", ["poll_id"], unique=False
    )
    op.add_column("poll", sa.Column("next_notification", sa.DateTime(), nullable=True))
    op.alter_column(
        "poll",
        "due_date",
        existing_type=sa.DATE(),
        type_=sa.DateTime(),
        existing_nullable=True,
    )
    # ### end Alembic commands ###
Example 12: downgrade
# Required module: import sqlalchemy [as an alias]
# Or: from sqlalchemy import DATE [as an alias]
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column(
        "poll",
        "due_date",
        existing_type=sa.DateTime(),
        type_=sa.DATE(),
        existing_nullable=True,
    )
    op.drop_column("poll", "next_notification")
    op.drop_index(op.f("ix_notification_poll_id"), table_name="notification")
    op.drop_table("notification")
    # ### end Alembic commands ###
Example 13: test_reflect_dates
# Required module: import sqlalchemy [as an alias]
# Or: from sqlalchemy import DATE [as an alias]
def test_reflect_dates(self):
    metadata = self.metadata
    Table(
        "date_types",
        metadata,
        Column("d1", sqltypes.DATE),
        Column("d2", oracle.DATE),
        Column("d3", TIMESTAMP),
        Column("d4", TIMESTAMP(timezone=True)),
        Column("d5", oracle.INTERVAL(second_precision=5)),
    )
    metadata.create_all()
    m = MetaData(testing.db)
    t1 = Table("date_types", m, autoload=True)
    assert isinstance(t1.c.d1.type, oracle.DATE)
    assert isinstance(t1.c.d1.type, DateTime)
    assert isinstance(t1.c.d2.type, oracle.DATE)
    assert isinstance(t1.c.d2.type, DateTime)
    assert isinstance(t1.c.d3.type, TIMESTAMP)
    assert not t1.c.d3.type.timezone
    assert isinstance(t1.c.d4.type, TIMESTAMP)
    assert t1.c.d4.type.timezone
    assert isinstance(t1.c.d5.type, oracle.INTERVAL)
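Both d1 and d2 reflect back as oracle.DATE because Oracle's native DATE type stores a time component down to the second; SQLAlchemy's oracle.DATE accordingly subclasses DateTime, which is why the DateTime isinstance assertions also pass.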
Example 14: test_create_table
# Required module: import sqlalchemy [as an alias]
# Or: from sqlalchemy import DATE [as an alias]
def test_create_table(engine):
    meta = MetaData()
    table = Table(
        'test_pybigquery.test_table_create', meta,
        Column('integer_c', sqlalchemy.Integer, doc="column description"),
        Column('float_c', sqlalchemy.Float),
        Column('decimal_c', sqlalchemy.DECIMAL),
        Column('string_c', sqlalchemy.String),
        Column('text_c', sqlalchemy.Text),
        Column('boolean_c', sqlalchemy.Boolean),
        Column('timestamp_c', sqlalchemy.TIMESTAMP),
        Column('datetime_c', sqlalchemy.DATETIME),
        Column('date_c', sqlalchemy.DATE),
        Column('time_c', sqlalchemy.TIME),
        Column('binary_c', sqlalchemy.BINARY),
        bigquery_description="test table description",
        bigquery_friendly_name="test table name"
    )
    meta.create_all(engine)
    meta.drop_all(engine)

    # Test creating tables with declarative_base
    Base = declarative_base()

    class TableTest(Base):
        __tablename__ = 'test_pybigquery.test_table_create2'
        integer_c = Column(sqlalchemy.Integer, primary_key=True)
        float_c = Column(sqlalchemy.Float)

    Base.metadata.create_all(engine)
    Base.metadata.drop_all(engine)
Example 15: test_oracle_cdc_client_stop_pipeline_when_no_archived_logs
# Required module: import sqlalchemy [as an alias]
# Or: from sqlalchemy import DATE [as an alias]
def test_oracle_cdc_client_stop_pipeline_when_no_archived_logs(sdc_builder, sdc_executor, database, buffer_locally, use_pattern):
    """
    Test for SDC-8418. The pipeline should stop with RUN_ERROR when there are no archived log files.
    Runs oracle_cdc_client >> trash
    """
    db_engine = database.engine
    src_table_name = get_random_string(string.ascii_uppercase, 9)
    table = None  # so the finally block is safe if _setup_table() raises

    try:
        connection = database.engine.connect()
        table = _setup_table(database=database, table_name=src_table_name)
        logger.info('Using table pattern: %s', src_table_name)

        pipeline_builder = sdc_builder.get_pipeline_builder()
        oracle_cdc_client = pipeline_builder.add_stage('Oracle CDC Client')

        # Obviously in the past, so there are no archived redo logs for this date.
        start_date = '30-09-2017 10:10:10'
        tables = [{'schema': database.username.upper(), 'table': src_table_name, 'excludePattern': ''}]

        oracle_cdc_client.set_attributes(buffer_changes_locally=buffer_locally,
                                         db_time_zone='UTC',
                                         dictionary_source='DICT_FROM_ONLINE_CATALOG',
                                         initial_change='DATE',
                                         logminer_session_window='${10 * MINUTES}',
                                         max_batch_size_in_records=BATCH_SIZE,
                                         maximum_transaction_length='${1 * MINUTES}',
                                         start_date=start_date,
                                         tables=tables)

        trash = pipeline_builder.add_stage('Trash')
        _wait_until_time(_get_current_oracle_time(connection=connection))

        oracle_cdc_client >> trash
        pipeline = pipeline_builder.build('Oracle CDC Client Pipeline').configure_for_environment(database)
        pipeline.configuration["shouldRetry"] = False
        sdc_executor.add_pipeline(pipeline)

        # The pipeline should stop with a StageException.
        with pytest.raises(Exception):
            sdc_executor.start_pipeline(pipeline)
            sdc_executor.stop_pipeline(pipeline)

        status = sdc_executor.get_pipeline_status(pipeline).response.json().get('status')
        assert 'RUN_ERROR' == status
    finally:
        if table is not None:
            table.drop(db_engine)
            logger.info('Table: %s dropped.', src_table_name)