This page collects typical usage examples of the Python attribute sqlalchemy.dialects.mysql.DOUBLE. If you are wondering what mysql.DOUBLE is for or how to use it, the curated examples below may help. You can also explore the containing module, sqlalchemy.dialects.mysql, for further usage examples.
The following shows 15 code examples of the mysql.DOUBLE attribute, sorted by popularity by default.
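Before the examples, here is a minimal, self-contained sketch of the pattern they all share: importing DOUBLE (MySQL's double-precision column type) and passing it in a dtype mapping to pandas DataFrame.to_sql so the created table gets explicit column types. The engine URL, table name, and DataFrame contents below are illustrative only.

import pandas as pd
from sqlalchemy import create_engine, String, Date
from sqlalchemy.dialects.mysql import DOUBLE

# Illustrative connection string; replace with a real MySQL URL.
engine = create_engine('mysql+pymysql://user:pwd@localhost/testdb')

df = pd.DataFrame({
    'ts_code': ['000001.SZ'],
    'trade_date': [pd.Timestamp('2020-01-02').date()],
    'close': [15.8],
})
# Map each column to an explicit SQLAlchemy/MySQL type so the created
# table uses DOUBLE instead of whatever to_sql would guess.
dtype = {
    'ts_code': String(20),
    'trade_date': Date,
    'close': DOUBLE,
}
df.to_sql('demo_daily', engine, if_exists='append', index=False, dtype=dtype)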
Example 1: import_coin_info
# Required import: from sqlalchemy.dialects import mysql [as alias]
# Or: from sqlalchemy.dialects.mysql import DOUBLE [as alias]
def import_coin_info(chain_param=None):
    """Fetch basic information on globally traded coins."""
    table_name = 'tushare_coin_info'
    has_table = engine_md.has_table(table_name)
    # Column types for the target table
    dtype = {
        'coin': String(60),
        'en_name': String(60),
        'cn_name': String(60),
        'issue_date': Date,
        'amount': DOUBLE,
    }
    coinlist_df = pro.coinlist(start_date='20170101', end_date=date_2_str(date.today(), DATE_FORMAT_STR))
    data_count = bunch_insert_on_duplicate_update(coinlist_df, table_name, engine_md, dtype)
    logging.info("Finished updating %s, %d new rows", table_name, data_count)
    if not has_table and engine_md.has_table(table_name):
        alter_table_2_myisam(engine_md, [table_name])
        create_pk_str = """ALTER TABLE {table_name}
            CHANGE COLUMN `coin` `coin` VARCHAR(60) NOT NULL FIRST,
            CHANGE COLUMN `en_name` `en_name` VARCHAR(60) NOT NULL AFTER `coin`,
            ADD PRIMARY KEY (`coin`, `en_name`)""".format(table_name=table_name)
        with with_db_session(engine_md) as session:
            session.execute(create_pk_str)
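The helper bunch_insert_on_duplicate_update used above (and throughout these examples) is project-specific and not reproduced on this page. A minimal sketch of the idea it appears to implement — writing a DataFrame with MySQL's INSERT ... ON DUPLICATE KEY UPDATE so re-runs upsert rather than fail — might look like the following; the name upsert_dataframe, the signature, and the behavior are assumptions, and the real helper also handles the dtype mapping and table creation, which are omitted here.

import pandas as pd
from sqlalchemy import text

def upsert_dataframe(df: pd.DataFrame, table_name: str, engine) -> int:
    """Hypothetical stand-in for bunch_insert_on_duplicate_update.
    Assumes the target table already exists and has a primary key."""
    cols = list(df.columns)
    sql = text(
        "INSERT INTO `{table}` ({cols}) VALUES ({vals}) "
        "ON DUPLICATE KEY UPDATE {updates}".format(
            table=table_name,
            cols=", ".join("`%s`" % c for c in cols),
            vals=", ".join(":%s" % c for c in cols),
            updates=", ".join("`{0}`=VALUES(`{0}`)".format(c) for c in cols),
        )
    )
    with engine.begin() as conn:  # one transaction for the whole batch
        result = conn.execute(sql, df.to_dict(orient="records"))
    # rowcount semantics for executemany are driver-dependent
    return result.rowcount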
Example 2: add_new_col_data
# Required import: from sqlalchemy.dialects import mysql [as alias]
# Or: from sqlalchemy.dialects.mysql import DOUBLE [as alias]
def add_new_col_data(col_name, param, chain_param=None, db_col_name=None, col_type_str='DOUBLE', ths_code_set: set = None):
    """
    1) Alter the daily table to add the new column
    2) Append the data to the ckdvp table
    3) Step 2 may take more than a day; once it finishes, merge the ckdvp data back into the daily table
    :param col_name: name of the column to add
    :param param: parameter value
    :param chain_param: only used to pass results between chained tasks (task.chain)
    :param db_col_name: defaults to None, in which case col_name is used
    :param col_type_str: DOUBLE, VARCHAR(20), INTEGER, etc. (case-insensitive)
    :param ths_code_set: defaults to None; otherwise only the given ths_code values are updated
    :return:
    """
    table_name = 'ifind_stock_daily_ds'
    if db_col_name is None:
        # Defaults to None, in which case it is the same as col_name
        db_col_name = col_name
    # Add the db_col_name column to the table if it does not exist yet
    add_col_2_table(engine_md, table_name, db_col_name, col_type_str)
    # Incrementally save the data into the ckdvp table
    all_finished = add_data_2_ckdvp(col_name, param, ths_code_set)
    # Merge the data back into the ds table
    if all_finished:
        sql_str = """update {table_name} daily, ifind_ckdvp_stock ckdvp
            set daily.{db_col_name} = ckdvp.value
            where daily.ths_code = ckdvp.ths_code
            and daily.time = ckdvp.time
            and ckdvp.key = '{db_col_name}'
            and ckdvp.param = '{param}'""".format(table_name=table_name, db_col_name=db_col_name, param=param)
        with with_db_session(engine_md) as session:
            session.execute(sql_str)
            session.commit()
        logger.info('Updated column %s in table %s', db_col_name, table_name)
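add_col_2_table is another project helper that is not listed on this page. A hedged sketch of what such a helper might do, assuming it checks information_schema and issues an ALTER TABLE only when the column is missing (the real implementation may differ):

from sqlalchemy import text

def add_col_2_table_sketch(engine, table_name, col_name, col_type_str):
    """Hypothetical sketch: add col_name to table_name if it is not already there."""
    check_sql = text(
        "SELECT COUNT(*) FROM information_schema.COLUMNS "
        "WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = :tbl AND COLUMN_NAME = :col")
    with engine.begin() as conn:
        exists = conn.execute(check_sql, {"tbl": table_name, "col": col_name}).scalar()
        if not exists:
            # Identifiers and type names cannot be bound parameters, so interpolate them here.
            conn.execute(text("ALTER TABLE `%s` ADD COLUMN `%s` %s" % (table_name, col_name, col_type_str)))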
Example 3: add_new_col_data_to_fin
# Required import: from sqlalchemy.dialects import mysql [as alias]
# Or: from sqlalchemy.dialects.mysql import DOUBLE [as alias]
def add_new_col_data_to_fin(col_name, param, chain_param=None, db_col_name=None, col_type_str='DOUBLE', ths_code_set: set = None):
    """
    1) Alter the fin table to add the new column
    2) Append the data to the ckdvp table
    3) Step 2 may take more than a day; once it finishes, merge the ckdvp data back into the fin table
    :param col_name: name of the column to add
    :param param: parameter value
    :param chain_param: only used to pass results between chained tasks (task.chain)
    :param db_col_name: defaults to None, in which case col_name is used
    :param col_type_str: DOUBLE, VARCHAR(20), INTEGER, etc. (case-insensitive)
    :param ths_code_set: defaults to None; otherwise only the given ths_code values are updated
    :return:
    """
    table_name = 'ifind_stock_fin'
    if db_col_name is None:
        # Defaults to None, in which case it is the same as col_name
        db_col_name = col_name
    # Add the db_col_name column to the table if it does not exist yet
    add_col_2_table(engine_md, table_name, db_col_name, col_type_str)
    # Incrementally save the data into the ckdvp table
    all_finished = add_data_fin_2_ckdvp(col_name, param, ths_code_set)
    # Merge the data back into the fin table
    if all_finished:
        sql_str = """update {table_name} daily, ifind_stock_ckdvp ckdvp
            set daily.{db_col_name} = ckdvp.value
            where daily.ths_code = ckdvp.ths_code
            and daily.time = ckdvp.time
            and ckdvp.key = '{db_col_name}'
            and ckdvp.param = '{param}'""".format(table_name=table_name, db_col_name=db_col_name, param=param)
        with with_db_session(engine_md) as session:
            session.execute(sql_str)
            session.commit()
        logger.info('Updated column %s in table %s', db_col_name, table_name)
Example 4: save_future_daily_df_list
# Required import: from sqlalchemy.dialects import mysql [as alias]
# Or: from sqlalchemy.dialects.mysql import DOUBLE [as alias]
def save_future_daily_df_list(data_df_list):
    """Save historical futures data to the database."""
    data_df_count = len(data_df_list)
    if data_df_count > 0:
        logger.info('merge data with %d df', data_df_count)
        data_df = pd.concat(data_df_list)
        data_count = data_df.shape[0]
        data_df.to_sql('ifind_future_daily', engine_md, if_exists='append', index=False,
                       dtype={
                           'ths_code': String(20),
                           'time': Date,
                           'preClose': String(20),
                           'open': DOUBLE,
                           'high': DOUBLE,
                           'low': DOUBLE,
                           'close': DOUBLE,
                           'volume': DOUBLE,
                           'amount': DOUBLE,
                           'avgPrice': DOUBLE,
                           'change': DOUBLE,
                           'changeRatio': DOUBLE,
                           'preSettlement': DOUBLE,
                           'settlement': DOUBLE,
                           'change_settlement': DOUBLE,
                           'chg_settlement': DOUBLE,
                           'openInterest': DOUBLE,
                           'positionChange': DOUBLE,
                           'amplitude': DOUBLE,
                       })
        logger.info("Finished updating ifind_future_daily, %d rows written", data_count)
    else:
        logger.info("Finished updating ifind_future_daily, 0 rows written")
Example 5: add_new_col_data
# Required import: from sqlalchemy.dialects import mysql [as alias]
# Or: from sqlalchemy.dialects.mysql import DOUBLE [as alias]
def add_new_col_data(col_name, param, chain_param=None, db_col_name=None, col_type_str='DOUBLE', ths_code_set: set = None):
    """
    1) Alter the daily table to add the new column
    2) Append the data to the ckdvp table
    3) Step 2 may take more than a day; once it finishes, merge the ckdvp data back into the daily table
    :param chain_param: only used to pass results between chained tasks (task.chain)
    :param col_name: name of the column to add
    :param param: parameter value
    :param db_col_name: defaults to None, in which case col_name is used
    :param col_type_str: DOUBLE, VARCHAR(20), INTEGER, etc. (case-insensitive)
    :param ths_code_set: defaults to None; otherwise only the given ths_code values are updated
    :return:
    """
    if db_col_name is None:
        # Defaults to None, in which case it is the same as col_name
        db_col_name = col_name
    table_name = 'ifind_index_daily_ds'
    # Add the db_col_name column to the table if it does not exist yet
    add_col_2_table(engine_md, table_name, db_col_name, col_type_str)
    # Incrementally save the data into the ckdvp table
    all_finished = add_data_2_ckdvp(col_name, param, ths_code_set)
    # Merge the data back into the ds table
    if all_finished:
        sql_str = """update ifind_index_daily_ds daily, ifind_ckdvp_index ckdvp
            set daily.{db_col_name} = ckdvp.value
            where daily.ths_code = ckdvp.ths_code
            and daily.time = ckdvp.time
            and ckdvp.key = '{db_col_name}'
            and ckdvp.param = '{param}'""".format(db_col_name=db_col_name, param=param)
        with with_db_session(engine_md) as session:
            session.execute(sql_str)
            session.commit()
        logger.info('Updated column %s in table ifind_index_daily_ds', db_col_name)
Example 6: add_new_col_data
# Required import: from sqlalchemy.dialects import mysql [as alias]
# Or: from sqlalchemy.dialects.mysql import DOUBLE [as alias]
def add_new_col_data(col_name, param, chain_param=None, db_col_name=None, col_type_str='DOUBLE',
                     wind_code_set: set = None):
    """
    1) Alter the daily table to add the new column
    2) Append the data to the wind_ckdvp_stock table
    3) Step 2 may take more than a day; once it finishes, merge the wind_ckdvp_stock data back into the daily table
    :param col_name: name of the column to add
    :param param: parameter value
    :param chain_param: used by celery to pass the previous task's result to the next one
    :param db_col_name: defaults to None, in which case col_name is used
    :param col_type_str: DOUBLE, VARCHAR(20), INTEGER, etc. (case-insensitive)
    :param wind_code_set: defaults to None; otherwise only the given wind_code values are updated
    :return:
    """
    if db_col_name is None:
        # Defaults to None, in which case it is the same as col_name
        db_col_name = col_name
    # Add the db_col_name column to the table if it does not exist yet
    add_col_2_table(engine_md, 'wind_stock_daily', db_col_name, col_type_str)
    # Incrementally save the data into the wind_ckdvp_stock table
    all_finished = add_data_2_ckdvp(col_name, param, wind_code_set)
    # Merge the data back into the daily table:
    # join wind_ckdvp_stock on code, key, param and date, and copy value into the daily column
    if all_finished:
        sql_str = """
            update wind_stock_daily daily, wind_ckdvp_stock ckdvp
            set daily.{db_col_name} = ckdvp.value
            where daily.wind_code = ckdvp.wind_code
            and ckdvp.key = '{db_col_name}' and ckdvp.param = '{param}'
            and ckdvp.time = daily.trade_date""".format(db_col_name=db_col_name, param=param)
        # Execute and commit the update
        with with_db_session(engine_md) as session:
            rst = session.execute(sql_str)
            data_count = rst.rowcount
            session.commit()
        logger.info('Updated column %s in table wind_stock_daily, %d rows affected', db_col_name, data_count)
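The ckdvp tables that recur in these examples (wind_ckdvp_stock, ifind_ckdvp_stock, etc.) act as a key-value staging area, but their DDL is not shown on this page. Judging only from the columns referenced by the UPDATE statements above (code, time, key, param, value), a plausible layout might look like the sketch below; the column sizes and primary key are assumptions, and the real schema may differ.

from sqlalchemy import create_engine, text

# Hypothetical DDL for the key-value staging table joined in the UPDATE above.
CKDVP_DDL = """
CREATE TABLE IF NOT EXISTS wind_ckdvp_stock (
    wind_code VARCHAR(20) NOT NULL,   -- instrument code
    `key`     VARCHAR(45) NOT NULL,   -- indicator name (matches db_col_name)
    param     VARCHAR(45) NOT NULL,   -- indicator parameter
    `time`    DATE        NOT NULL,   -- value date, joined to daily.trade_date
    value     DOUBLE,                 -- indicator value copied into the daily table
    PRIMARY KEY (wind_code, `key`, param, `time`)
)"""

engine = create_engine('mysql+pymysql://user:pwd@localhost/testdb')  # illustrative URL
with engine.begin() as conn:
    conn.execute(text(CKDVP_DDL))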
Example 7: insert_into_db
# Required import: from sqlalchemy.dialects import mysql [as alias]
# Or: from sqlalchemy.dialects.mysql import DOUBLE [as alias]
def insert_into_db(data_df_list, engine_md):
    data_count = len(data_df_list)
    table_name = 'wind_stock_tick'
    has_table = engine_md.has_table(table_name)
    param_list = [
        ('datetime', DateTime),
        ('open', DOUBLE),
        ('high', DOUBLE),
        ('low', DOUBLE),
        ('close', DOUBLE),
        ('ask1', DOUBLE),
        ('bid1', DOUBLE),
        ('asize1', DOUBLE),
        ('bsize1', DOUBLE),
        ('volume', DOUBLE),
        ('amount', DOUBLE),
        ('preclose', DOUBLE),
    ]
    dtype = {key: val for key, val in param_list}
    dtype['wind_code'] = String(20)
    if data_count > 0:
        data_df_all = pd.concat(data_df_list)
        data_df_all.index.rename('datetime', inplace=True)
        data_df_all.reset_index(inplace=True)
        bunch_insert_on_duplicate_update(data_df_all, table_name, engine_md, dtype=dtype)
        logger.info('%d data imported', data_df_all.shape[0])
        if not has_table and engine_md.has_table(table_name):
            alter_table_2_myisam(engine_md, [table_name])
            build_primary_key([table_name])
    return data_count
Example 8: fund_nav_df_2_sql
# Required import: from sqlalchemy.dialects import mysql [as alias]
# Or: from sqlalchemy.dialects.mysql import DOUBLE [as alias]
def fund_nav_df_2_sql(table_name, fund_nav_df, engine_md, is_append=True):
    col_name_param_list = [
        ('NAV_DATE', Date),
        ('NAV', DOUBLE),
        ('NAV_ACC', DOUBLE),
    ]
    col_name_dic = {col_name.upper(): col_name.lower() for col_name, _ in col_name_param_list}
    dtype = {col_name.lower(): val for col_name, val in col_name_param_list}
    dtype['wind_code'] = String(200)
    dtype['trade_date'] = Date
    # print('reorg dfnav data[%d, %d]' % fund_nav_df.shape)
    try:
        fund_nav_df['NAV_DATE'] = pd.to_datetime(fund_nav_df['NAV_DATE']).apply(lambda x: x.date())
    except Exception as exp:
        logger.exception(str(fund_nav_df['NAV_DATE']))
        return None
    trade_date_s = pd.to_datetime(fund_nav_df.index)
    trade_date_latest = trade_date_s.max().date()
    fund_nav_df['trade_date'] = trade_date_s
    fund_nav_df.rename(columns=col_name_dic, inplace=True)
    # fund_nav_df['trade_date'] = trade_date_s
    fund_nav_df.set_index(['wind_code', 'trade_date'], inplace=True)
    fund_nav_df.reset_index(inplace=True)
    # action_str = 'append' if is_append else 'replace'
    # print('df--> sql fundnav table if_exists="%s"' % action_str)
    bunch_insert_on_duplicate_update(fund_nav_df, table_name, engine_md, dtype=dtype)
    # fund_nav_df.to_sql(table_name, engine_md, if_exists=action_str, index_label=['wind_code', 'trade_date'],
    #                    dtype={
    #                        'wind_code': String(200),
    #                        'nav_date': Date,
    #                        'trade_date': Date,
    #                    })  # , index=False
    logger.info('%d data inserted', fund_nav_df.shape[0])
    return trade_date_latest
Example 9: add_new_col_data
# Required import: from sqlalchemy.dialects import mysql [as alias]
# Or: from sqlalchemy.dialects.mysql import DOUBLE [as alias]
def add_new_col_data(col_name, param, chain_param=None, db_col_name=None, col_type_str='DOUBLE',
                     wind_code_set: set = None):
    """
    1) Alter the daily table to add the new column
    2) Append the data to the wind_ckdvp_stock_hk table
    3) Step 2 may take more than a day; once it finishes, merge the wind_ckdvp_stock_hk data back into the daily table
    :param col_name: name of the column to add
    :param param: parameter value
    :param chain_param: used by celery to pass the previous task's result to the next one
    :param db_col_name: defaults to None, in which case col_name is used
    :param col_type_str: DOUBLE, VARCHAR(20), INTEGER, etc. (case-insensitive)
    :param wind_code_set: defaults to None; otherwise only the given wind_code values are updated
    :return:
    """
    if db_col_name is None:
        # Defaults to None, in which case it is the same as col_name
        db_col_name = col_name
    # Add the db_col_name column to the table if it does not exist yet
    add_col_2_table(engine_md, 'wind_stock_daily_hk', db_col_name, col_type_str)
    # Incrementally save the data into the wind_ckdvp_stock_hk table
    all_finished = add_data_2_ckdvp(col_name, param, wind_code_set)
    # Merge the data back into the daily table:
    # join wind_ckdvp_stock_hk on code, key, param and date, and copy value into the daily column
    if all_finished:
        sql_str = """
            update wind_stock_daily_hk daily, wind_ckdvp_stock_hk ckdvp
            set daily.{db_col_name} = ckdvp.value
            where daily.wind_code = ckdvp.wind_code
            and ckdvp.key = '{db_col_name}' and ckdvp.param = '{param}'
            and ckdvp.time = daily.trade_date""".format(db_col_name=db_col_name, param=param)
        # Execute and commit the update
        with with_db_session(engine_md) as session:
            rst = session.execute(sql_str)
            data_count = rst.rowcount
            session.commit()
        logger.info('Updated column %s in table wind_stock_daily_hk, %d rows affected', db_col_name, data_count)
Example 10: import_index_info
# Required import: from sqlalchemy.dialects import mysql [as alias]
# Or: from sqlalchemy.dialects.mysql import DOUBLE [as alias]
def import_index_info(chain_param=None, ths_code=None):
    """
    Import the index info table.
    :param chain_param: only used to pass results between chained tasks (task.chain)
    :param ths_code:
    :return:
    """
    table_name = 'ifind_index_info'
    has_table = engine_md.has_table(table_name)
    logging.info("Start updating ifind_index_info")
    if ths_code is None:
        # Get the codes and names for the whole market
        date_end = date.today()
        stock_code_set = set()
        stock_code_set_sub = get_stock_code_set(date_end)
        if stock_code_set_sub is not None:
            stock_code_set |= stock_code_set_sub
        ths_code = ','.join(stock_code_set)
    indicator_param_list = [
        ('ths_index_short_name_index', '', String(20)),
        ('ths_index_code_index', '', String(10)),
        ('ths_index_category_index', '', String(20)),
        ('ths_index_base_period_index', '', Date),
        ('ths_index_base_point_index', '', DOUBLE),
        ('ths_publish_org_index', '', String(20)),
    ]
    # indicator = 'ths_index_short_name_index;ths_index_code_index;ths_thscode_index;ths_index_category_index;
    #              ths_index_base_period_index;ths_index_base_point_index;ths_publish_org_index'
    # param = ';;;;;;'
    indicator, param = unzip_join([(key, val) for key, val, _ in indicator_param_list], sep=';')
    data_df = invoker.THS_BasicData(ths_code, indicator, param)
    if data_df is None or data_df.shape[0] == 0:
        logging.info("No index info available to update")
        return
    dtype = {key: val for key, _, val in indicator_param_list}
    dtype['ths_code'] = String(20)
    data_count = bunch_insert_on_duplicate_update(data_df, table_name, engine_md, dtype)
    logging.info("Finished updating %s, %d rows in total", table_name, data_count)
    if not has_table and engine_md.has_table(table_name):
        alter_table_2_myisam(engine_md, [table_name])
        build_primary_key([table_name])
    # Refresh the code_mapping table
    update_from_info_table(table_name)
Example 11: import_tushare_block_trade
# Required import: from sqlalchemy.dialects import mysql [as alias]
# Or: from sqlalchemy.dialects.mysql import DOUBLE [as alias]
def import_tushare_block_trade(chain_param=None):
    """
    Insert daily block-trade data up to the most recent business day minus one.
    If the current time is past BASE_LINE_HOUR, the current day's data is fetched as well.
    :return:
    """
    table_name = 'tushare_block_trade'
    logging.info("Start updating %s", table_name)
    param_list = [
        ('trade_date', Date),
        ('ts_code', String(20)),
        ('price', DOUBLE),
        ('vol', DOUBLE),
        ('amount', DOUBLE),
        ('buyer', String(100)),
        ('seller', String(100)),
    ]
    has_table = engine_md.has_table(table_name)
    # Check whether table_name already exists and build the date-range query accordingly
    if has_table:
        sql_str = f"""select cal_date
            FROM
            (
                select * from tushare_trade_date trddate
                where( cal_date>(SELECT max(trade_date) FROM {table_name}))
            )tt
            where (is_open=1
            and cal_date <= if(hour(now())<16, subdate(curdate(),1), curdate())
            and exchange='SSE') """
    else:
        # Block trading started on 2003-08-02
        sql_str = """SELECT cal_date FROM tushare_trade_date trddate WHERE (trddate.is_open=1
            AND cal_date <= if(hour(now())<16, subdate(curdate(),1), curdate())
            AND exchange='SSE' AND cal_date>='2003-08-02') ORDER BY cal_date"""
        logger.warning('%s does not exist; the date range is computed from tushare_trade_date only', table_name)
    with with_db_session(engine_md) as session:
        # Fetch the trade dates
        table = session.execute(sql_str)
        trade_date_list = list(row[0] for row in table.fetchall())
    # Column types
    dtype = {key: val for key, val in param_list}
    try:
        trade_date_list_len = len(trade_date_list)
        for num, trade_date in enumerate(trade_date_list, start=1):
            trade_date = datetime_2_str(trade_date, STR_FORMAT_DATE_TS)
            data_df = invoke_block_trade(trade_date=trade_date)
            if len(data_df) > 0:
                # No primary key is set on this table: duplicate records exist and would violate it.
                # Example: on 2014-11-17, 002325.SZ had two identical block trades through the same
                # Huatai Securities Shenyang Guangrong Street branch.
                data_count = bunch_insert(
                    data_df, table_name=table_name, dtype=dtype)
                logging.info("%d/%d) %s: finished updating %s, %d rows updated",
                             num, trade_date_list_len, trade_date, table_name, data_count)
            else:
                logging.info("%d/%d) %s: no data to update", num, trade_date_list_len, trade_date)
    except:
        logger.exception('Exception while updating table %s', table_name)
Example 12: import_trade_date
# Required import: from sqlalchemy.dialects import mysql [as alias]
# Or: from sqlalchemy.dialects.mysql import DOUBLE [as alias]
def import_trade_date(chain_param=None):
    """
    Incrementally import the trade calendar into the wind_trade_date table; by default about 300 days of
    future trade dates are imported.
    2018-01-17: Hong Kong trading days were added. To limit the impact on existing code, a separate
    wind_trade_date_hk table was created; the two tables may be merged later.
    :return:
    """
    table_name = TABLE_NAME
    exch_code_trade_date_dic = {}
    has_table = engine_md.has_table(table_name)
    if has_table:
        with with_db_session(engine_md) as session:
            try:
                table = session.execute('SELECT exchange,max(cal_date) FROM {table_name} GROUP BY exchange'.format(
                    table_name=table_name
                ))
                exch_code_trade_date_dic = {exch_code: trade_date for exch_code, trade_date in table.fetchall()}
            except Exception as exp:
                logger.exception("Failed to fetch the latest trade dates")
    exchange_code_dict = {
        "HKEX": "Hong Kong Stock Exchange",
        "SZSE": "Shenzhen Stock Exchange",
        "SSE": "Shanghai Stock Exchange",
    }
    exchange_code_list = list(exchange_code_dict.keys())
    for exchange_code in exchange_code_list:
        if exchange_code in exch_code_trade_date_dic:
            trade_date_max = exch_code_trade_date_dic[exchange_code]
            start_date_str = (trade_date_max + timedelta(days=1)).strftime(STR_FORMAT_DATE_TS)
        else:
            start_date_str = '19900101'
        end_date_str = (date.today() + timedelta(days=310)).strftime(STR_FORMAT_DATE_TS)
        trade_date_df = pro.trade_cal(exchange_id='', start_date=start_date_str, end_date=end_date_str)
        if trade_date_df is None or trade_date_df.shape[0] == 0:
            logger.warning('%s[%s] [%s - %s] no trade dates found',
                           exchange_code_dict[exchange_code], exchange_code, start_date_str, end_date_str)
            continue
        date_count = trade_date_df.shape[0]
        logger.info("%s[%s] %d trade dates will be imported into %s",
                    exchange_code_dict[exchange_code], exchange_code, date_count, table_name)
        date_count = bunch_insert_on_duplicate_update(trade_date_df, table_name, engine_md, dtype={
            'exchange': String(10),
            'cal_date': Date,
            'is_open': DOUBLE,
        }, myisam_if_create_table=True, primary_keys=['exchange', 'cal_date'], schema=config.DB_SCHEMA_MD)
        logger.info('%s[%s] %d trade dates imported into %s',
                    exchange_code_dict[exchange_code], exchange_code, date_count, table_name)
Example 13: update_df_2_db
# Required import: from sqlalchemy.dialects import mysql [as alias]
# Or: from sqlalchemy.dialects.mysql import DOUBLE [as alias]
def update_df_2_db(instrument_type, table_name, data_df):
    """Save the DataFrame to the corresponding database table."""
    dtype = {
        'trade_date': Date,
        'Contract': String(20),
        'ContractNext': String(20),
        'Close': DOUBLE,
        'CloseNext': DOUBLE,
        'Volume': DOUBLE,
        'VolumeNext': DOUBLE,
        'OI': DOUBLE,
        'OINext': DOUBLE,
        'Open': DOUBLE,
        'OpenNext': DOUBLE,
        'High': DOUBLE,
        'HighNext': DOUBLE,
        'Low': DOUBLE,
        'LowNext': DOUBLE,
        'Amount': DOUBLE,
        'AmountNext': DOUBLE,
        'adj_factor_main': DOUBLE,
        'adj_factor_secondary': DOUBLE,
        'instrument_type': String(20),
    }
    # Convert the numeric columns to plain floats to avoid
    # "AttributeError: 'numpy.float64' object has no attribute 'translate'"
    for col in ['Close', 'CloseNext', 'Volume', 'VolumeNext', 'OI', 'OINext',
                'Open', 'OpenNext', 'High', 'HighNext', 'Low', 'LowNext',
                'Amount', 'AmountNext', 'adj_factor_main', 'adj_factor_secondary']:
        data_df[col] = data_df[col].apply(str_2_float)
    # Clear out the historical records for this instrument type
    with with_db_session(engine_md) as session:
        sql_str = """SELECT table_name FROM information_schema.TABLES
            WHERE table_name = :table_name and TABLE_SCHEMA=(select database())"""
        # Check whether the adjusted-price table exists
        is_existed = session.execute(sql_str, params={"table_name": table_name}).fetchone()
        if is_existed is not None:
            session.execute("delete from %s where instrument_type = :instrument_type" % table_name,
                            params={"instrument_type": instrument_type})
            logger.debug("Deleted historical %s data from %s", instrument_type, table_name)
    # Insert into the database
    bunch_insert(data_df, table_name=table_name, dtype=dtype, primary_keys=['trade_date', 'Contract'])
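str_2_float is a small project utility that is not reproduced on this page. Given how it is used above (coercing possibly-string or numpy values to plain Python floats so the MySQL driver does not choke), a minimal sketch of what it might do is shown below; the actual implementation may handle more cases.

def str_2_float(value):
    """Hypothetical sketch: convert a string or number to a plain float, or None if it cannot be parsed."""
    if value is None:
        return None
    try:
        return float(value)
    except (TypeError, ValueError):
        return None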
Example 14: import_tushare_stock_fund_holdings
# Required import: from sqlalchemy.dialects import mysql [as alias]
# Or: from sqlalchemy.dialects.mysql import DOUBLE [as alias]
def import_tushare_stock_fund_holdings():
    table_name = 'tushare_stock_fund_holdings'
    logging.info("Start updating %s", table_name)
    has_table = engine_md.has_table(table_name)
    tushare_fund_holdings_indicator_param_list = [
        ('ts_code', String(20)),
        ('sec_name', String(20)),
        ('end_date', Date),
        ('nums', DOUBLE),
        ('nlast', DOUBLE),
        ('count', DOUBLE),
        ('clast', DOUBLE),
        ('amount', DOUBLE),
        ('ratio', DOUBLE),
    ]
    tushare_fund_holdings_dtype = {key: val for key, val in tushare_fund_holdings_indicator_param_list}
    data_df_list, data_count, all_data_count = [], 0, 0
    years = list(range(2013, 2019))
    try:
        for year in years:
            for quarter in [1, 2, 3, 4]:
                print((year, quarter))
                data_df = invoke_fund_holdings(year, quarter)
                ts_code_list = []
                for i in data_df.code:
                    if i[0] == '6':
                        sh = i + '.SH'
                        ts_code_list.append(sh)
                    else:
                        sz = i + '.SZ'
                        ts_code_list.append(sz)
                data_df.code = ts_code_list
                data_df = data_df.rename(columns={'code': 'ts_code', 'name': 'sec_name', 'date': 'end_date'})
                # Accumulate the data
                if data_df is not None and data_df.shape[0] > 0:
                    data_count += data_df.shape[0]
                    data_df_list.append(data_df)
                # Flush to the database once the threshold is reached
                if data_count >= 50:
                    data_df_all = pd.concat(data_df_list)
                    bunch_insert_on_duplicate_update(data_df_all, table_name, engine_md, tushare_fund_holdings_dtype)
                    all_data_count += data_count
                    data_df_list, data_count = [], 0
    finally:
        if len(data_df_list) > 0:
            data_df_all = pd.concat(data_df_list)
            data_count = bunch_insert_on_duplicate_update(data_df_all, table_name, engine_md,
                                                          tushare_fund_holdings_dtype)
            all_data_count = all_data_count + data_count
        logging.info("Finished updating %s, %d rows updated", table_name, all_data_count)
        if not has_table and engine_md.has_table(table_name):
            alter_table_2_myisam(engine_md, [table_name])
            build_primary_key([table_name])
Example 15: merge_index_info
# Required import: from sqlalchemy.dialects import mysql [as alias]
# Or: from sqlalchemy.dialects.mysql import DOUBLE [as alias]
def merge_index_info():
    """
    Merge the wind and ifind data into the combined table of the same name.
    :return:
    """
    table_name = 'index_info'
    logging.info("Start updating %s", table_name)
    has_table = engine_md.has_table(table_name)
    ifind_table_name = 'ifind_{table_name}'.format(table_name=table_name)
    wind_table_name = 'wind_{table_name}'.format(table_name=table_name)
    # ifind_model = TABLE_MODEL_DIC[ifind_table_name]
    # wind_model = TABLE_MODEL_DIC[wind_table_name]
    # with with_db_session(engine_md) as session:
    #     session.query(ifind_model, wind_model).filter(ifind_model.c.ths_code == wind_model.c.wind_code)
    ifind_sql_str = "select * from {table_name}".format(table_name=ifind_table_name)
    wind_sql_str = "select * from {table_name}".format(table_name=wind_table_name)
    ifind_df = pd.read_sql(ifind_sql_str, engine_md)  # , index_col='ths_code'
    wind_df = pd.read_sql(wind_sql_str, engine_md)  # , index_col='wind_code'
    joined_df = pd.merge(ifind_df, wind_df, how='outer',
                         left_on='ths_code', right_on='wind_code', indicator='indicator_column')
    col_merge_dic = {
        'unique_code': (String(20), prefer_left, {'left_key': 'ths_code', 'right_key': 'wind_code'}),
        'sec_name': (String(20), prefer_left, {'left_key': 'ths_index_short_name_index', 'right_key': 'sec_name'}),
        'crm_issuer': (String(20), prefer_left, {'left_key': 'ths_publish_org_index', 'right_key': 'crm_issuer'}),
        'base_date': (
            Date, prefer_left, {'left_key': 'ths_index_base_period_index', 'right_key': 'basedate'}),
        'basevalue': (DOUBLE, prefer_left, {'left_key': 'ths_index_base_point_index', 'right_key': 'basevalue'}),
        'country': (String(20), get_value, {'key': 'country'}),
        'launchdate': (Date, get_value, {'key': 'launchdate'}),
        'index_code': (String(20), get_value, {'key': 'ths_index_code_index'}),
        'index_category': (String(10), get_value, {'key': 'ths_index_category_index'}),
    }
    col_merge_rule_dic = {
        key: (val[1], val[2]) for key, val in col_merge_dic.items()
    }
    dtype = {
        key: val[0] for key, val in col_merge_dic.items()
    }
    data_df = merge_data(joined_df, col_merge_rule_dic)
    data_count = bunch_insert_on_duplicate_update(data_df, table_name, engine_md, dtype)
    logger.info('%s: %d rows inserted or updated', table_name, data_count)
    if not has_table and engine_md.has_table(table_name):
        alter_table_2_myisam(engine_md, [table_name])
        # build_primary_key([table_name])
        create_pk_str = """ALTER TABLE {table_name}
            CHANGE COLUMN `unique_code` `unique_code` VARCHAR(20) NOT NULL ,
            ADD PRIMARY KEY (`unique_code`)""".format(table_name=table_name)
        with with_db_session(engine_md) as session:
            session.execute(create_pk_str)
    return data_df
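The column-merge helpers prefer_left, get_value, and merge_data used in Example 15 are not shown on this page. A hedged sketch of what such row-level rules might look like, assuming merge_data applies each (function, kwargs) rule to every row of the joined DataFrame — the real helpers may differ in signature and behavior:

import pandas as pd

def prefer_left(row: pd.Series, left_key: str, right_key: str):
    """Hypothetical sketch: prefer the ifind (left) value, fall back to the wind (right) value."""
    left_value = row[left_key]
    return left_value if pd.notna(left_value) else row[right_key]

def get_value(row: pd.Series, key: str):
    """Hypothetical sketch: simply pick one column of the joined row."""
    return row[key]

def merge_data(joined_df: pd.DataFrame, col_merge_rule_dic: dict) -> pd.DataFrame:
    """Hypothetical sketch: build each output column by applying its (func, kwargs) rule row by row."""
    data = {
        col_name: joined_df.apply(lambda row, f=func, kw=kwargs: f(row, **kw), axis=1)
        for col_name, (func, kwargs) in col_merge_rule_dic.items()
    }
    return pd.DataFrame(data)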