本文整理汇总了Python中sqlalchemy.dialects.mysql.DOUBLE属性的典型用法代码示例。如果您正苦于以下问题:Python mysql.DOUBLE属性的具体用法?Python mysql.DOUBLE怎么用?Python mysql.DOUBLE使用的例子?那么恭喜您,这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在类 sqlalchemy.dialects.mysql 的用法示例。
在下文中一共展示了mysql.DOUBLE属性的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: import_coin_info
# 需要导入模块: from sqlalchemy.dialects import mysql [as 别名]
# 或者: from sqlalchemy.dialects.mysql import DOUBLE [as 别名]
def import_coin_info(chain_param=None):
    """Fetch basic info for global crypto coins and upsert it into tushare_coin_info.

    On first creation of the table, converts it to MyISAM and adds a composite
    primary key on (coin, en_name).
    :param chain_param: only used to pass parameters along a task.chain
    :return: None
    """
    table_name = 'tushare_coin_info'
    table_existed_before = engine_md.has_table(table_name)
    # SQL column types for the upsert
    dtype = {
        'coin': String(60),
        'en_name': String(60),
        'cn_name': String(60),
        'issue_date': Date,
        'amount': DOUBLE,
    }
    coin_df = pro.coinlist(start_date='20170101', end_date=date_2_str(date.today(), DATE_FORMAT_STR))
    data_count = bunch_insert_on_duplicate_update(coin_df, table_name, engine_md, dtype)
    logging.info("更新 %s 完成 新增数据 %d 条", table_name, data_count)
    if not table_existed_before and engine_md.has_table(table_name):
        # table was just created: switch engine and install the primary key
        alter_table_2_myisam(engine_md, [table_name])
        create_pk_str = """ALTER TABLE {table_name}
            CHANGE COLUMN `coin` `coin` VARCHAR(60) NOT NULL FIRST,
            CHANGE COLUMN `en_name` `en_name` VARCHAR(60) NOT NULL AFTER `coin`,
            ADD PRIMARY KEY (`coin`, `en_name`)""".format(table_name=table_name)
        with with_db_session(engine_md) as session:
            session.execute(create_pk_str)
示例2: add_new_col_data
# 需要导入模块: from sqlalchemy.dialects import mysql [as 别名]
# 或者: from sqlalchemy.dialects.mysql import DOUBLE [as 别名]
def add_new_col_data(col_name, param, chain_param=None, db_col_name=None, col_type_str='DOUBLE', ths_code_set: set = None):
    """Add a column to ifind_stock_daily_ds and back-fill it from the ckdvp staging table.

    Steps: 1) add the column to the daily table if missing; 2) incrementally
    stage values into the ckdvp table (may take more than one day); 3) once
    staging is complete, copy the staged values into the daily table.
    :param col_name: indicator name to add
    :param param: indicator parameter
    :param chain_param: only used to pass parameters along a task.chain
    :param db_col_name: DB column name; defaults to col_name when None
    :param col_type_str: DOUBLE, VARCHAR(20), INTEGER, etc. (case-insensitive)
    :param ths_code_set: when given, only the listed ths_code are updated
    :return: None
    """
    table_name = 'ifind_stock_daily_ds'
    db_col_name = col_name if db_col_name is None else db_col_name
    # make sure the target column exists on the daily table
    add_col_2_table(engine_md, table_name, db_col_name, col_type_str)
    # incrementally stage the values into the ckdvp table
    all_finished = add_data_2_ckdvp(col_name, param, ths_code_set)
    if not all_finished:
        return
    # staging complete: propagate the values into the daily table
    sql_str = """update {table_name} daily, ifind_ckdvp_stock ckdvp
        set daily.{db_col_name} = ckdvp.value
        where daily.ths_code = ckdvp.ths_code
        and daily.time = ckdvp.time
        and ckdvp.key = '{db_col_name}'
        and ckdvp.param = '{param}'""".format(table_name=table_name, db_col_name=db_col_name, param=param)
    with with_db_session(engine_md) as session:
        session.execute(sql_str)
        session.commit()
    logger.info('更新 %s 字段 %s 表', db_col_name, table_name)
示例3: add_new_col_data_to_fin
# 需要导入模块: from sqlalchemy.dialects import mysql [as 别名]
# 或者: from sqlalchemy.dialects.mysql import DOUBLE [as 别名]
def add_new_col_data_to_fin(col_name, param, chain_param=None, db_col_name=None, col_type_str='DOUBLE', ths_code_set: set = None):
    """Add a column to ifind_stock_fin and back-fill it from the ckdvp staging table.

    Steps: 1) add the column to the fin table if missing; 2) incrementally
    stage values into the ckdvp table (may take more than one day); 3) once
    staging is complete, copy the staged values into the fin table.
    :param col_name: indicator name to add
    :param param: indicator parameter
    :param chain_param: only used to pass parameters along a task.chain
    :param db_col_name: DB column name; defaults to col_name when None
    :param col_type_str: DOUBLE, VARCHAR(20), INTEGER, etc. (case-insensitive)
    :param ths_code_set: when given, only the listed ths_code are updated
    :return: None
    """
    # fix: the original docstring documented a non-existent ``dtype`` parameter; removed.
    table_name = 'ifind_stock_fin'
    if db_col_name is None:
        # default: DB column name is the same as the indicator name
        db_col_name = col_name
    # make sure the target column exists on the fin table
    add_col_2_table(engine_md, table_name, db_col_name, col_type_str)
    # incrementally stage the values into the ckdvp table
    all_finished = add_data_fin_2_ckdvp(col_name, param, ths_code_set)
    # staging complete: propagate the values into the fin table
    if all_finished:
        sql_str = """update {table_name} daily, ifind_stock_ckdvp ckdvp
            set daily.{db_col_name} = ckdvp.value
            where daily.ths_code = ckdvp.ths_code
            and daily.time = ckdvp.time
            and ckdvp.key = '{db_col_name}'
            and ckdvp.param = '{param}'""".format(table_name=table_name, db_col_name=db_col_name, param=param)
        with with_db_session(engine_md) as session:
            session.execute(sql_str)
            session.commit()
        logger.info('更新 %s 字段 %s 表', db_col_name, table_name)
示例4: save_future_daily_df_list
# 需要导入模块: from sqlalchemy.dialects import mysql [as 别名]
# 或者: from sqlalchemy.dialects.mysql import DOUBLE [as 别名]
def save_future_daily_df_list(data_df_list):
    """Persist a list of futures daily-bar DataFrames into the ifind_future_daily table.

    :param data_df_list: list of DataFrames to concatenate and append
    :return: None
    """
    data_df_count = len(data_df_list)
    if data_df_count > 0:
        logger.info('merge data with %d df', data_df_count)
        data_df = pd.concat(data_df_list)
        data_count = data_df.shape[0]
        data_df.to_sql('ifind_future_daily', engine_md, if_exists='append', index=False,
                       dtype={
                           'ths_code': String(20),
                           'time': Date,
                           # NOTE(review): preClose is String(20) while every other price
                           # column is DOUBLE — looks suspicious; confirm before changing.
                           'preClose': String(20),
                           'open': DOUBLE,
                           'high': DOUBLE,
                           'low': DOUBLE,
                           'close': DOUBLE,
                           'volume': DOUBLE,
                           'amount': DOUBLE,
                           'avgPrice': DOUBLE,
                           'change': DOUBLE,
                           'changeRatio': DOUBLE,
                           'preSettlement': DOUBLE,
                           'settlement': DOUBLE,
                           'change_settlement': DOUBLE,
                           'chg_settlement': DOUBLE,
                           'openInterest': DOUBLE,
                           'positionChange': DOUBLE,
                           'amplitude': DOUBLE,
                       })
        # fix: the log previously named "wind_future_daily" but the data is written to ifind_future_daily
        logger.info("更新 ifind_future_daily 结束 %d 条记录被更新", data_count)
    else:
        logger.info("更新 ifind_future_daily 结束 0 条记录被更新")
示例5: add_new_col_data
# 需要导入模块: from sqlalchemy.dialects import mysql [as 别名]
# 或者: from sqlalchemy.dialects.mysql import DOUBLE [as 别名]
def add_new_col_data(col_name, param, chain_param=None, db_col_name=None, col_type_str='DOUBLE', ths_code_set: set = None):
    """Add a column to ifind_index_daily_ds and back-fill it from the ckdvp staging table.

    Steps: 1) add the column to the daily table if missing; 2) incrementally
    stage values into the ckdvp table (may take more than one day); 3) once
    staging is complete, copy the staged values into the daily table.
    :param col_name: indicator name to add
    :param param: indicator parameter
    :param chain_param: only used to pass parameters along a task.chain
    :param db_col_name: DB column name; defaults to col_name when None
    :param col_type_str: DOUBLE, VARCHAR(20), INTEGER, etc. (case-insensitive)
    :param ths_code_set: when given, only the listed ths_code are updated
    :return: None
    """
    if db_col_name is None:
        # default: DB column name is the same as the indicator name
        db_col_name = col_name
    table_name = 'ifind_index_daily_ds'
    # make sure the target column exists on the daily table
    add_col_2_table(engine_md, table_name, db_col_name, col_type_str)
    # incrementally stage the values into the ckdvp table
    all_finished = add_data_2_ckdvp(col_name, param, ths_code_set)
    # staging complete: propagate the values into the daily table
    if all_finished:
        # consistency fix: use the table_name variable instead of repeating the
        # literal table name in the SQL and the log (matches the sibling helpers).
        sql_str = """update {table_name} daily, ifind_ckdvp_index ckdvp
            set daily.{db_col_name} = ckdvp.value
            where daily.ths_code = ckdvp.ths_code
            and daily.time = ckdvp.time
            and ckdvp.key = '{db_col_name}'
            and ckdvp.param = '{param}'""".format(table_name=table_name, db_col_name=db_col_name, param=param)
        with with_db_session(engine_md) as session:
            session.execute(sql_str)
            session.commit()
        logger.info('更新 %s 字段 %s 表', db_col_name, table_name)
示例6: add_new_col_data
# 需要导入模块: from sqlalchemy.dialects import mysql [as 别名]
# 或者: from sqlalchemy.dialects.mysql import DOUBLE [as 别名]
def add_new_col_data(col_name, param, chain_param=None, db_col_name=None, col_type_str='DOUBLE',
                     wind_code_set: set = None):
    """Add a column to wind_stock_daily and back-fill it from wind_ckdvp_stock.

    Steps: 1) add the column to the daily table if missing; 2) incrementally
    stage values into wind_ckdvp_stock (may take more than one day); 3) once
    staging is complete, copy the staged values into the daily table.
    :param col_name: indicator name to add
    :param param: indicator parameter
    :param chain_param: passes the previous task's result along a celery chain
    :param db_col_name: DB column name; defaults to col_name when None
    :param col_type_str: DOUBLE, VARCHAR(20), INTEGER, etc. (case-insensitive)
    :param wind_code_set: when given, only the listed wind_code are updated
    :return: None
    """
    db_col_name = col_name if db_col_name is None else db_col_name
    # make sure the target column exists on the daily table
    add_col_2_table(engine_md, 'wind_stock_daily', db_col_name, col_type_str)
    # incrementally stage the values into wind_ckdvp_stock
    all_finished = add_data_2_ckdvp(col_name, param, wind_code_set)
    if not all_finished:
        return
    # staging complete: join daily against the staging table and copy the values
    sql_str = """
        update wind_stock_daily daily, wind_ckdvp_stock ckdvp
        set daily.{db_col_name} = ckdvp.value
        where daily.wind_code = ckdvp.wind_code
        and ckdvp.key = '{db_col_name}' and ckdvp.param = '{param}'
        and ckdvp.time = daily.trade_date""".format(db_col_name=db_col_name, param=param)
    # run the update inside one transaction
    with with_db_session(engine_md) as session:
        result = session.execute(sql_str)
        updated_count = result.rowcount
        session.commit()
    logger.info('更新 %s 字段 wind_stock_daily 表 %d 条记录', db_col_name, updated_count)
示例7: insert_into_db
# 需要导入模块: from sqlalchemy.dialects import mysql [as 别名]
# 或者: from sqlalchemy.dialects.mysql import DOUBLE [as 别名]
def insert_into_db(data_df_list, engine_md):
    """Bulk-upsert tick DataFrames into wind_stock_tick.

    :param data_df_list: list of tick DataFrames indexed by timestamp
    :param engine_md: SQLAlchemy engine for the market-data DB
    :return: number of DataFrames received
    """
    table_name = 'wind_stock_tick'
    data_count = len(data_df_list)
    table_existed_before = engine_md.has_table(table_name)
    # SQL column types for the upsert
    dtype = dict([
        ('datetime', DateTime),
        ('open', DOUBLE),
        ('high', DOUBLE),
        ('low', DOUBLE),
        ('close', DOUBLE),
        ('ask1', DOUBLE),
        ('bid1', DOUBLE),
        ('asize1', DOUBLE),
        ('bsize1', DOUBLE),
        ('volume', DOUBLE),
        ('amount', DOUBLE),
        ('preclose', DOUBLE),
    ])
    dtype['wind_code'] = String(20)
    if data_count > 0:
        merged_df = pd.concat(data_df_list)
        # turn the timestamp index into a regular 'datetime' column
        merged_df.index.rename('datetime', inplace=True)
        merged_df.reset_index(inplace=True)
        bunch_insert_on_duplicate_update(merged_df, table_name, engine_md, dtype=dtype)
        logger.info('%d data imported', merged_df.shape[0])
        if not table_existed_before and engine_md.has_table(table_name):
            # table was just created: switch engine and install the primary key
            alter_table_2_myisam(engine_md, [table_name])
            build_primary_key([table_name])
    return data_count
示例8: fund_nav_df_2_sql
# 需要导入模块: from sqlalchemy.dialects import mysql [as 别名]
# 或者: from sqlalchemy.dialects.mysql import DOUBLE [as 别名]
def fund_nav_df_2_sql(table_name, fund_nav_df, engine_md, is_append=True):
    """Normalize a fund NAV DataFrame and upsert it into *table_name*.

    :param table_name: target table
    :param fund_nav_df: NAV data indexed by trade date, with NAV_DATE/NAV/NAV_ACC columns
    :param engine_md: SQLAlchemy engine
    :param is_append: kept for interface compatibility; the upsert helper is used either way
    :return: latest trade date found in the data, or None when NAV_DATE parsing fails
    """
    col_name_param_list = [
        ('NAV_DATE', Date),
        ('NAV', DOUBLE),
        ('NAV_ACC', DOUBLE),
    ]
    # map the upper-case source columns to lower-case DB columns
    col_name_dic = {name.upper(): name.lower() for name, _ in col_name_param_list}
    dtype = {name.lower(): sql_type for name, sql_type in col_name_param_list}
    dtype['wind_code'] = String(200)
    dtype['trade_date'] = Date
    try:
        fund_nav_df['NAV_DATE'] = pd.to_datetime(fund_nav_df['NAV_DATE']).apply(lambda x: x.date())
    except Exception:
        # unparsable NAV_DATE: log the offending column and bail out
        logger.exception(str(fund_nav_df['NAV_DATE']))
        return None
    trade_date_s = pd.to_datetime(fund_nav_df.index)
    trade_date_latest = trade_date_s.max().date()
    fund_nav_df['trade_date'] = trade_date_s
    fund_nav_df.rename(columns=col_name_dic, inplace=True)
    # normalize column order so (wind_code, trade_date) come first
    fund_nav_df.set_index(['wind_code', 'trade_date'], inplace=True)
    fund_nav_df.reset_index(inplace=True)
    bunch_insert_on_duplicate_update(fund_nav_df, table_name, engine_md, dtype=dtype)
    logger.info('%d data inserted', fund_nav_df.shape[0])
    return trade_date_latest
示例9: add_new_col_data
# 需要导入模块: from sqlalchemy.dialects import mysql [as 别名]
# 或者: from sqlalchemy.dialects.mysql import DOUBLE [as 别名]
def add_new_col_data(col_name, param, chain_param=None, db_col_name=None, col_type_str='DOUBLE',
                     wind_code_set: set = None):
    """Add a column to wind_stock_daily_hk and back-fill it from wind_ckdvp_stock_hk.

    Steps: 1) add the column to the daily table if missing; 2) incrementally
    stage values into wind_ckdvp_stock_hk (may take more than one day); 3) once
    staging is complete, copy the staged values into the daily table.
    :param col_name: indicator name to add
    :param param: indicator parameter
    :param chain_param: passes the previous task's result along a celery chain
    :param db_col_name: DB column name; defaults to col_name when None
    :param col_type_str: DOUBLE, VARCHAR(20), INTEGER, etc. (case-insensitive)
    :param wind_code_set: when given, only the listed wind_code are updated
    :return: None
    """
    db_col_name = col_name if db_col_name is None else db_col_name
    # make sure the target column exists on the HK daily table
    add_col_2_table(engine_md, 'wind_stock_daily_hk', db_col_name, col_type_str)
    # incrementally stage the values into wind_ckdvp_stock_hk
    all_finished = add_data_2_ckdvp(col_name, param, wind_code_set)
    if not all_finished:
        return
    # staging complete: join daily against the staging table and copy the values
    sql_str = """
        update wind_stock_daily_hk daily, wind_ckdvp_stock_hk ckdvp
        set daily.{db_col_name} = ckdvp.value
        where daily.wind_code = ckdvp.wind_code
        and ckdvp.key = '{db_col_name}' and ckdvp.param = '{param}'
        and ckdvp.time = daily.trade_date""".format(db_col_name=db_col_name, param=param)
    # run the update inside one transaction
    with with_db_session(engine_md) as session:
        result = session.execute(sql_str)
        updated_count = result.rowcount
        session.commit()
    logger.info('更新 %s 字段 wind_stock_daily_hk 表 %d 条记录', db_col_name, updated_count)
示例10: import_index_info
# 需要导入模块: from sqlalchemy.dialects import mysql [as 别名]
# 或者: from sqlalchemy.dialects.mysql import DOUBLE [as 别名]
def import_index_info(chain_param=None, ths_code=None):
    """Import index metadata into the ifind_index_info table.

    :param chain_param: only used to pass parameters along a task.chain
    :param ths_code: comma-separated THS codes; when None, the full-market
        code set as of today is used
    :return: None
    """
    # fix: the original docstring documented a non-existent ``refresh`` parameter; removed.
    table_name = 'ifind_index_info'
    has_table = engine_md.has_table(table_name)
    logging.info("更新 ifind_index_info 开始")
    if ths_code is None:
        # collect the full-market code set and join it into the comma-separated
        # string expected by THS_BasicData
        date_end = date.today()
        stock_code_set = set()
        stock_code_set_sub = get_stock_code_set(date_end)
        if stock_code_set_sub is not None:
            stock_code_set |= stock_code_set_sub
        ths_code = ','.join(stock_code_set)
    # (indicator name, indicator param, SQL type)
    indicator_param_list = [
        ('ths_index_short_name_index', '', String(20)),
        ('ths_index_code_index', '', String(10)),
        ('ths_index_category_index', '', String(20)),
        ('ths_index_base_period_index', '', Date),
        ('ths_index_base_point_index', '', DOUBLE),
        ('ths_publish_org_index', '', String(20)),
    ]
    # produces e.g. indicator='ths_index_short_name_index;...', param=';;;;;'
    indicator, param = unzip_join([(key, val) for key, val, _ in indicator_param_list], sep=';')
    data_df = invoker.THS_BasicData(ths_code, indicator, param)
    if data_df is None or data_df.shape[0] == 0:
        logging.info("没有可用的 index info 可以更新")
        return
    dtype = {key: val for key, _, val in indicator_param_list}
    dtype['ths_code'] = String(20)
    data_count = bunch_insert_on_duplicate_update(data_df, table_name, engine_md, dtype)
    logging.info("更新 %s 完成 存量数据 %d 条", table_name, data_count)
    if not has_table and engine_md.has_table(table_name):
        # table was just created: switch engine and install the primary key
        alter_table_2_myisam(engine_md, [table_name])
        build_primary_key([table_name])
    # refresh the code_mapping table from the freshly updated info table
    update_from_info_table(table_name)
示例11: import_tushare_block_trade
# 需要导入模块: from sqlalchemy.dialects import mysql [as 别名]
# 或者: from sqlalchemy.dialects.mysql import DOUBLE [as 别名]
def import_tushare_block_trade(chain_param=None):
    """Incrementally import daily block-trade (大宗交易) records into tushare_block_trade.

    Determines the missing trade dates from the tushare_trade_date (SSE) calendar,
    fetches each day via invoke_block_trade and inserts the rows. When the current
    time is before 16:00, data is imported only up to yesterday.
    :param chain_param: only used to pass parameters along a task.chain
    :return: None
    """
    table_name = 'tushare_block_trade'
    logging.info("更新 %s 开始", table_name)
    # (column name, SQL type)
    param_list = [
        ('trade_date', Date),
        ('ts_code', String(20)),
        ('price', DOUBLE),
        ('vol', DOUBLE),
        ('amount', DOUBLE),
        ('buyer', String(100)),
        ('seller', String(100)),
    ]
    has_table = engine_md.has_table(table_name)
    # pick the date range: continue after the latest imported date, or start from
    # 2003-08-02 (when the block-trade system went live) on the first run
    if has_table:
        sql_str = f"""select cal_date
            FROM
            (
                select * from tushare_trade_date trddate
                where( cal_date>(SELECT max(trade_date) FROM {table_name}))
            )tt
            where (is_open=1
            and cal_date <= if(hour(now())<16, subdate(curdate(),1), curdate())
            and exchange='SSE') """
    else:
        sql_str = """SELECT cal_date FROM tushare_trade_date trddate WHERE (trddate.is_open=1
            AND cal_date <= if(hour(now())<16, subdate(curdate(),1), curdate())
            AND exchange='SSE' AND cal_date>='2003-08-02') ORDER BY cal_date"""
        logger.warning('%s 不存在,仅使用 tushare_trade_date 表进行计算日期范围', table_name)
    with with_db_session(engine_md) as session:
        # fetch the trade dates still missing from the target table
        table = session.execute(sql_str)
        trade_date_list = list(row[0] for row in table.fetchall())
    # SQL column types for the insert
    dtype = {key: val for key, val in param_list}
    try:
        trade_date_list_len = len(trade_date_list)
        for num, trade_date in enumerate(trade_date_list, start=1):
            trade_date = datetime_2_str(trade_date, STR_FORMAT_DATE_TS)
            data_df = invoke_block_trade(trade_date=trade_date)
            if len(data_df) > 0:
                # the table deliberately has no primary key: genuinely duplicated rows
                # exist (e.g. 002325.SZ 2014-11-17 had two identical block trades)
                data_count = bunch_insert(
                    data_df, table_name=table_name, dtype=dtype)
                logging.info("%d/%d) %s更新 %s 结束 %d 条信息被更新",
                             num, trade_date_list_len, trade_date, table_name, data_count)
            else:
                logging.info("%d/%d) %s 无数据信息可被更新", num, trade_date_list_len, trade_date)
    except Exception:
        # fix: was a bare `except:`, which would also swallow KeyboardInterrupt/SystemExit
        logger.exception('更新 %s 表异常', table_name)
示例12: import_trade_date
# 需要导入模块: from sqlalchemy.dialects import mysql [as 别名]
# 或者: from sqlalchemy.dialects.mysql import DOUBLE [as 别名]
def import_trade_date(chain_param=None):
    """Incrementally import trading-calendar data (next ~300 days) into the trade-date table.

    2018-01-17: HK trading days were added; to limit the impact on existing code a
    separate wind_trade_date_hk table was created, to be merged with this one later.
    :param chain_param: only used to pass parameters along a task.chain
    :return: None
    """
    table_name = TABLE_NAME
    exch_code_trade_date_dic = {}
    has_table = engine_md.has_table(table_name)
    if has_table:
        # latest imported calendar date per exchange, used as the incremental anchor
        with with_db_session(engine_md) as session:
            try:
                rows = session.execute('SELECT exchange,max(cal_date) FROM {table_name} GROUP BY exchange'.format(
                    table_name=table_name
                ))
                exch_code_trade_date_dic = dict(rows.fetchall())
            except Exception:
                logger.exception("交易日获取异常")
    exchange_code_dict = {
        "HKEX": "香港联合交易所",
        "SZSE": "深圳证券交易所",
        "SSE": "上海证券交易所",
    }
    for exchange_code, exchange_name in exchange_code_dict.items():
        if exchange_code in exch_code_trade_date_dic:
            # continue the day after the latest imported calendar date
            trade_date_max = exch_code_trade_date_dic[exchange_code]
            start_date_str = (trade_date_max + timedelta(days=1)).strftime(STR_FORMAT_DATE_TS)
        else:
            start_date_str = '19900101'
        end_date_str = (date.today() + timedelta(days=310)).strftime(STR_FORMAT_DATE_TS)
        trade_date_df = pro.trade_cal(exchange_id='', start_date=start_date_str, end_date=end_date_str)
        if trade_date_df is None or trade_date_df.shape[0] == 0:
            logger.warning('%s[%s] [%s - %s] 没有查询到交易日期',
                           exchange_name, exchange_code, start_date_str, end_date_str)
            continue
        date_count = trade_date_df.shape[0]
        logger.info("%s[%s] %d 条交易日数据将被导入 %s",
                    exchange_name, exchange_code, date_count, table_name)
        date_count = bunch_insert_on_duplicate_update(trade_date_df, table_name, engine_md, dtype={
            'exchange': String(10),
            'cal_date': Date,
            'is_open': DOUBLE,
        }, myisam_if_create_table=True, primary_keys=['exchange', 'cal_date'], schema=config.DB_SCHEMA_MD)
        logger.info('%s[%s] %d 条交易日数据导入 %s 完成',
                    exchange_name, exchange_code, date_count, table_name)
示例13: update_df_2_db
# 需要导入模块: from sqlalchemy.dialects import mysql [as 别名]
# 或者: from sqlalchemy.dialects.mysql import DOUBLE [as 别名]
def update_df_2_db(instrument_type, table_name, data_df):
    """Save a continuous-contract DataFrame to the given DB table.

    Deletes any existing rows for *instrument_type* first, then bulk-inserts
    the new data with (trade_date, Contract) as the primary key.
    :param instrument_type: instrument type whose history is being replaced
    :param table_name: target table
    :param data_df: data to persist
    :return: None
    """
    # SQL column types for the insert
    dtype = {
        'trade_date': Date,
        'Contract': String(20),
        'ContractNext': String(20),
        'Close': DOUBLE,
        'CloseNext': DOUBLE,
        'Volume': DOUBLE,
        'VolumeNext': DOUBLE,
        'OI': DOUBLE,
        'OINext': DOUBLE,
        'Open': DOUBLE,
        'OpenNext': DOUBLE,
        'High': DOUBLE,
        'HighNext': DOUBLE,
        'Low': DOUBLE,
        'LowNext': DOUBLE,
        'Amount': DOUBLE,
        'AmountNext': DOUBLE,
        'adj_factor_main': DOUBLE,
        'adj_factor_secondary': DOUBLE,
        'instrument_type': String(20),
    }
    # Convert to float to avoid: AttributeError: 'numpy.float64' object has no
    # attribute 'translate'. (Was 16 copy-pasted lines; now driven by one list.)
    float_col_names = [
        "Close", "CloseNext", "Volume", "VolumeNext", "OI", "OINext",
        "Open", "OpenNext", "High", "HighNext", "Low", "LowNext",
        "Amount", "AmountNext", "adj_factor_main", "adj_factor_secondary",
    ]
    for col_name in float_col_names:
        data_df[col_name] = data_df[col_name].apply(str_2_float)
    # remove the previous history for this instrument type before re-inserting
    with with_db_session(engine_md) as session:
        sql_str = """SELECT table_name FROM information_schema.TABLES
            WHERE table_name = :table_name and TABLE_SCHEMA=(select database())"""
        # only delete when the (adjusted-data) table already exists
        is_existed = session.execute(sql_str, params={"table_name": table_name}).fetchone()
        if is_existed is not None:
            session.execute("delete from %s where instrument_type = :instrument_type" % table_name,
                            params={"instrument_type": instrument_type})
            logger.debug("删除 %s 中的 %s 历史数据", table_name, instrument_type)
    # bulk-insert the new data
    bunch_insert(data_df, table_name=table_name, dtype=dtype, primary_keys=['trade_date', 'Contract'])
示例14: import_tushare_stock_fund_holdings
# 需要导入模块: from sqlalchemy.dialects import mysql [as 别名]
# 或者: from sqlalchemy.dialects.mysql import DOUBLE [as 别名]
def import_tushare_stock_fund_holdings():
    """Import quarterly fund-holdings data (2013-2018) into tushare_stock_fund_holdings.

    Data is batched and flushed to the DB once at least 50 rows have accumulated;
    any remainder is flushed in the ``finally`` block even if a quarter fails.
    :return: None
    """
    table_name = 'tushare_stock_fund_holdings'
    logging.info("更新 %s 开始", table_name)
    has_table = engine_md.has_table(table_name)
    # (column name, SQL type)
    tushare_fund_holdings_indicator_param_list = [
        ('ts_code', String(20)),
        ('sec_name', String(20)),
        ('end_date', Date),
        ('nums', DOUBLE),
        ('nlast', DOUBLE),
        ('count', DOUBLE),
        ('clast', DOUBLE),
        ('amount', DOUBLE),
        ('ratio', DOUBLE),
    ]
    tushare_fund_holdings_dtype = {key: val for key, val in tushare_fund_holdings_indicator_param_list}
    data_df_list, data_count, all_data_count = [], 0, 0
    years = list(range(2013, 2019))
    try:
        for year in years:
            for quarter in (1, 2, 3, 4):
                # fix: was a leftover debug print((year, quarter))
                logging.debug("%d Q%d", year, quarter)
                data_df = invoke_fund_holdings(year, quarter)
                # fix: the original checked `data_df is not None` only AFTER
                # iterating data_df.code, which would already have raised; guard first.
                if data_df is None or data_df.shape[0] == 0:
                    continue
                # normalize raw codes to ts_code format: 6xxxxx -> .SH, otherwise .SZ
                ts_code_list = []
                for code in data_df.code:
                    suffix = '.SH' if code[0] == '6' else '.SZ'
                    ts_code_list.append(code + suffix)
                data_df.code = ts_code_list
                data_df = data_df.rename(columns={'code': 'ts_code', 'name': 'sec_name', 'date': 'end_date'})
                # accumulate until the batch threshold is reached
                data_count += data_df.shape[0]
                data_df_list.append(data_df)
                if data_count >= 50:
                    data_df_all = pd.concat(data_df_list)
                    bunch_insert_on_duplicate_update(data_df_all, table_name, engine_md, tushare_fund_holdings_dtype)
                    all_data_count += data_count
                    data_df_list, data_count = [], 0
    finally:
        # flush whatever is still buffered, even when the loop above raised
        if len(data_df_list) > 0:
            data_df_all = pd.concat(data_df_list)
            data_count = bunch_insert_on_duplicate_update(data_df_all, table_name, engine_md,
                                                          tushare_fund_holdings_dtype)
            all_data_count = all_data_count + data_count
            logging.info("更新 %s 结束 %d 条信息被更新", table_name, all_data_count)
        if not has_table and engine_md.has_table(table_name):
            # table was just created: switch engine and install the primary key
            alter_table_2_myisam(engine_md, [table_name])
            build_primary_key([table_name])
示例15: merge_index_info
# 需要导入模块: from sqlalchemy.dialects import mysql [as 别名]
# 或者: from sqlalchemy.dialects.mysql import DOUBLE [as 别名]
def merge_index_info():
    """Merge index metadata from the wind and ifind source tables into index_info.

    Rows are outer-joined on ths_code == wind_code, column conflicts are resolved
    per-column (prefer the ifind value, fall back to wind), and the result is
    upserted. On first creation the table is converted to MyISAM and gets a
    primary key on unique_code.
    :return: the merged DataFrame
    """
    table_name = 'index_info'
    logging.info("更新 %s 开始", table_name)
    has_table = engine_md.has_table(table_name)
    ifind_table_name = 'ifind_{table_name}'.format(table_name=table_name)
    wind_table_name = 'wind_{table_name}'.format(table_name=table_name)
    # load both source tables and outer-join them on the shared code
    ifind_df = pd.read_sql("select * from {table_name}".format(table_name=ifind_table_name), engine_md)
    wind_df = pd.read_sql("select * from {table_name}".format(table_name=wind_table_name), engine_md)
    joined_df = pd.merge(ifind_df, wind_df, how='outer',
                         left_on='ths_code', right_on='wind_code', indicator='indicator_column')
    # target column -> (SQL type, merge function, merge-function kwargs)
    col_merge_dic = {
        'unique_code': (String(20), prefer_left, {'left_key': 'ths_code', 'right_key': 'wind_code'}),
        'sec_name': (String(20), prefer_left, {'left_key': 'ths_index_short_name_index', 'right_key': 'sec_name'}),
        'crm_issuer': (String(20), prefer_left, {'left_key': 'ths_publish_org_index', 'right_key': 'crm_issuer'}),
        'base_date': (
            Date, prefer_left, {'left_key': 'ths_index_base_period_index', 'right_key': 'basedate'}),
        'basevalue': (DOUBLE, prefer_left, {'left_key': 'ths_index_base_point_index', 'right_key': 'basevalue'}),
        'country': (String(20), get_value, {'key': 'country'}),
        'launchdate': (Date, get_value, {'key': 'launchdate'}),
        'index_code': (String(20), get_value, {'key': 'ths_index_code_index'}),
        'index_category': (String(10), get_value, {'key': 'ths_index_category_index'}),
    }
    # split the combined spec into the merge rules and the SQL dtypes
    col_merge_rule_dic = {name: (func, kwargs) for name, (_, func, kwargs) in col_merge_dic.items()}
    dtype = {name: sql_type for name, (sql_type, _, _) in col_merge_dic.items()}
    data_df = merge_data(joined_df, col_merge_rule_dic)
    data_count = bunch_insert_on_duplicate_update(data_df, table_name, engine_md, dtype)
    logger.info('%s 新增或更新记录 %d 条', table_name, data_count)
    if not has_table and engine_md.has_table(table_name):
        # table was just created: switch engine and install the primary key
        alter_table_2_myisam(engine_md, [table_name])
        create_pk_str = """ALTER TABLE {table_name}
            CHANGE COLUMN `unique_code` `unique_code` VARCHAR(20) NOT NULL ,
            ADD PRIMARY KEY (`unique_code`)""".format(table_name=table_name)
        with with_db_session(engine_md) as session:
            session.execute(create_pk_str)
    return data_df