This page collects typical usage examples of the Python method pandas.read_sql_query. If you are wondering what pandas.read_sql_query does, how to call it, or how it is used in practice, the curated code samples below may help. You can also explore further usage examples for the containing module, pandas.
The following 15 code examples of pandas.read_sql_query are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
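Before the examples, here is a minimal, self-contained sketch of pandas.read_sql_query against an in-memory SQLite database (the demo table, its contents, and the query below are illustrative assumptions, not taken from the examples that follow):

import sqlite3
import pandas as pd

# build a throwaway in-memory SQLite database with a small table to query
conn = sqlite3.connect(':memory:')
pd.DataFrame({'id': [1, 2, 3], 'name': ['a', 'b', 'c']}).to_sql('demo', conn, index=False)

# read_sql_query executes the SQL text on the connection and returns the
# result set as a DataFrame; params fills the '?' placeholder used by sqlite3
df = pd.read_sql_query('SELECT id, name FROM demo WHERE id > ?', conn, params=(1,))
print(df)

read_sql_query accepts a plain DBAPI connection (as above) as well as a SQLAlchemy engine or connection, which several of the examples below rely on.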
Example 1: test_datetime
# Required import: import pandas [as alias]
# Or: from pandas import read_sql_query [as alias]
def test_datetime(self):
    df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3),
                    'B': np.arange(3.0)})
    df.to_sql('test_datetime', self.conn)

    # with read_sql_table -> type information from the schema is used
    result = sql.read_sql_table('test_datetime', self.conn)
    result = result.drop('index', axis=1)
    tm.assert_frame_equal(result, df)

    # with read_sql_query -> no type information -> sqlite has no native datetime type
    result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn)
    result = result.drop('index', axis=1)
    if self.flavor == 'sqlite':
        assert isinstance(result.loc[0, 'A'], string_types)
        result['A'] = to_datetime(result['A'])
        tm.assert_frame_equal(result, df)
    else:
        tm.assert_frame_equal(result, df)
Example 2: test_datetime_NaT
# Required import: import pandas [as alias]
# Or: from pandas import read_sql_query [as alias]
def test_datetime_NaT(self):
    df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3),
                    'B': np.arange(3.0)})
    df.loc[1, 'A'] = np.nan
    df.to_sql('test_datetime', self.conn, index=False)

    # with read_sql_table -> type information from the schema is used
    result = sql.read_sql_table('test_datetime', self.conn)
    tm.assert_frame_equal(result, df)

    # with read_sql_query -> no type information -> sqlite has no native datetime type
    result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn)
    if self.flavor == 'sqlite':
        assert isinstance(result.loc[0, 'A'], string_types)
        result['A'] = to_datetime(result['A'], errors='coerce')
        tm.assert_frame_equal(result, df)
    else:
        tm.assert_frame_equal(result, df)
Example 3: test_datetime_time
# Required import: import pandas [as alias]
# Or: from pandas import read_sql_query [as alias]
def test_datetime_time(self):
    # test support for datetime.time
    df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
    df.to_sql('test_time', self.conn, index=False)
    res = read_sql_table('test_time', self.conn)
    tm.assert_frame_equal(res, df)

    # GH8341
    # first, use the fallback to have the sqlite adapter put in place
    sqlite_conn = TestSQLiteFallback.connect()
    sql.to_sql(df, "test_time2", sqlite_conn, index=False)
    res = sql.read_sql_query("SELECT * FROM test_time2", sqlite_conn)
    ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
    tm.assert_frame_equal(ref, res)  # check that the adapter is in place

    # then test that sqlalchemy is unaffected by the sqlite adapter
    sql.to_sql(df, "test_time3", self.conn, index=False)
    if self.flavor == 'sqlite':
        res = sql.read_sql_query("SELECT * FROM test_time3", self.conn)
        ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
        tm.assert_frame_equal(ref, res)
    res = sql.read_sql_table("test_time3", self.conn)
    tm.assert_frame_equal(df, res)
Example 4: test_connectable_issue_example
# Required import: import pandas [as alias]
# Or: from pandas import read_sql_query [as alias]
def test_connectable_issue_example(self):
    # This tests the example raised in issue
    # https://github.com/pandas-dev/pandas/issues/10104

    def foo(connection):
        query = 'SELECT test_foo_data FROM test_foo_data'
        return sql.read_sql_query(query, con=connection)

    def bar(connection, data):
        data.to_sql(name='test_foo_data',
                    con=connection, if_exists='append')

    def main(connectable):
        with connectable.connect() as conn:
            with conn.begin():
                foo_data = conn.run_callable(foo)
                conn.run_callable(bar, foo_data)

    DataFrame({'test_foo_data': [0, 1, 2]}).to_sql(
        'test_foo_data', self.conn)
    main(self.conn)
Example 5: test_temporary_table
# Required import: import pandas [as alias]
# Or: from pandas import read_sql_query [as alias]
def test_temporary_table(self):
    test_data = u'Hello, World!'
    expected = DataFrame({'spam': [test_data]})
    Base = declarative.declarative_base()

    class Temporary(Base):
        __tablename__ = 'temp_test'
        __table_args__ = {'prefixes': ['TEMPORARY']}
        id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
        spam = sqlalchemy.Column(sqlalchemy.Unicode(30), nullable=False)

    Session = sa_session.sessionmaker(bind=self.conn)
    session = Session()
    with session.transaction:
        conn = session.connection()
        Temporary.__table__.create(conn)
        session.add(Temporary(spam=test_data))
        session.flush()
        df = sql.read_sql_query(
            sql=sqlalchemy.select([Temporary.spam]),
            con=conn,
        )
    tm.assert_frame_equal(df, expected)
Example 6: add_to_charge
# Required import: import pandas [as alias]
# Or: from pandas import read_sql_query [as alias]
def add_to_charge():
    engine = grizli_db.get_db_engine()

    p = pd.read_sql_query('select distinct p_root from photometry_apcorr', engine)
    f = pd.read_sql_query('select distinct field_root from charge_fields', engine)

    # collect fields that have photometry but no entry yet in charge_fields
    new_fields = []
    for root in p['p_root'].values:
        if root not in f['field_root'].values:
            print(root)
            new_fields.append(root)

    df = pd.DataFrame()
    df['field_root'] = new_fields
    df['comment'] = 'CANDELS'

    ix = df['field_root'] == 'j214224m4420'
    df['comment'][ix] = 'Rafelski UltraDeep'

    df.to_sql('charge_fields', engine, index=False, if_exists='append', method='multi')
Example 7: count_sources_for_bad_persistence
# Required import: import pandas [as alias]
# Or: from pandas import read_sql_query [as alias]
def count_sources_for_bad_persistence():
    """
    Count the number of extracted objects for each id and look for fields
    with few objects, which are usually problems with the persistence mask
    """
    import pandas as pd
    import numpy as np  # needed for np.argsort below
    from grizli.aws import db as grizli_db
    from grizli import utils

    engine = grizli_db.get_db_engine(echo=False)

    # Number of matches per field
    counts = pd.read_sql_query("select root, COUNT(root) as n from redshift_fit, photometry_apcorr where phot_root = p_root AND id = p_id AND bic_diff > 5 AND mag_auto < 24 group by root;", engine)

    counts = utils.GTable.from_pandas(counts)
    so = np.argsort(counts['n'])

    sh = """
BUCKET=grizli-v
root=j113812m1134
aws s3 rm --recursive s3://grizli-v1/Pipeline/${root}/ --include "*"
grism_run_single.sh ${root} --run_fine_alignment=True --extra_filters=g800l --bucket=grizli-v1 --preprocess_args.skip_single_optical_visits=True --mask_spikes=True --persistence_args.err_threshold=1
    """
Example 8: test_read_sql_iris
# Required import: import pandas [as alias]
# Or: from pandas import read_sql_query [as alias]
def test_read_sql_iris(self):
    iris_frame = sql.read_sql_query(
        "SELECT * FROM iris", self.conn)
    self._check_iris_loaded_frame(iris_frame)
Example 9: test_read_sql_view
# Required import: import pandas [as alias]
# Or: from pandas import read_sql_query [as alias]
def test_read_sql_view(self):
    iris_frame = sql.read_sql_query(
        "SELECT * FROM iris_view", self.conn)
    self._check_iris_loaded_frame(iris_frame)
Example 10: test_to_sql_series
# Required import: import pandas [as alias]
# Or: from pandas import read_sql_query [as alias]
def test_to_sql_series(self):
    s = Series(np.arange(5, dtype='int64'), name='series')
    sql.to_sql(s, "test_series", self.conn, index=False)
    s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
    tm.assert_frame_equal(s.to_frame(), s2)
Example 11: test_roundtrip
# Required import: import pandas [as alias]
# Or: from pandas import read_sql_query [as alias]
def test_roundtrip(self):
    sql.to_sql(self.test_frame1, 'test_frame_roundtrip',
               con=self.conn)
    result = sql.read_sql_query(
        'SELECT * FROM test_frame_roundtrip',
        con=self.conn)

    # HACK! restore the index written out as the 'level_0' column so the
    # frames compare equal
    result.index = self.test_frame1.index
    result.set_index('level_0', inplace=True)
    result.index.astype(int)
    result.index.name = None
    tm.assert_frame_equal(result, self.test_frame1)
Example 12: test_roundtrip_chunksize
# Required import: import pandas [as alias]
# Or: from pandas import read_sql_query [as alias]
def test_roundtrip_chunksize(self):
    sql.to_sql(self.test_frame1, 'test_frame_roundtrip', con=self.conn,
               index=False, chunksize=2)
    result = sql.read_sql_query(
        'SELECT * FROM test_frame_roundtrip',
        con=self.conn)
    tm.assert_frame_equal(result, self.test_frame1)
Example 13: test_date_and_index
# Required import: import pandas [as alias]
# Or: from pandas import read_sql_query [as alias]
def test_date_and_index(self):
    # Test case where the same column appears in parse_dates and index_col
    df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
                            index_col='DateCol',
                            parse_dates=['DateCol', 'IntDateCol'])

    assert issubclass(df.index.dtype.type, np.datetime64)
    assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
Example 14: test_timedelta
# Required import: import pandas [as alias]
# Or: from pandas import read_sql_query [as alias]
def test_timedelta(self):
    # see GH#6921: timedelta64 columns are written as integer nanoseconds
    df = to_timedelta(
        Series(['00:00:01', '00:00:03'], name='foo')).to_frame()
    with tm.assert_produces_warning(UserWarning):
        df.to_sql('test_timedelta', self.conn)
    result = sql.read_sql_query('SELECT * FROM test_timedelta', self.conn)
    tm.assert_series_equal(result['foo'], df['foo'].astype('int64'))
Example 15: test_to_sql_index_label
# Required import: import pandas [as alias]
# Or: from pandas import read_sql_query [as alias]
def test_to_sql_index_label(self):
    temp_frame = DataFrame({'col1': range(4)})

    # no index name, defaults to 'index'
    sql.to_sql(temp_frame, 'test_index_label', self.conn)
    frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
    assert frame.columns[0] == 'index'

    # specifying index_label
    sql.to_sql(temp_frame, 'test_index_label', self.conn,
               if_exists='replace', index_label='other_label')
    frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
    assert frame.columns[0] == "other_label"

    # using the index name
    temp_frame.index.name = 'index_name'
    sql.to_sql(temp_frame, 'test_index_label', self.conn,
               if_exists='replace')
    frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
    assert frame.columns[0] == "index_name"

    # has index name, but specifying index_label
    sql.to_sql(temp_frame, 'test_index_label', self.conn,
               if_exists='replace', index_label='other_label')
    frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
    assert frame.columns[0] == "other_label"

    # index name is integer
    temp_frame.index.name = 0
    sql.to_sql(temp_frame, 'test_index_label', self.conn,
               if_exists='replace')
    frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
    assert frame.columns[0] == "0"

    temp_frame.index.name = None
    sql.to_sql(temp_frame, 'test_index_label', self.conn,
               if_exists='replace', index_label=0)
    frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
    assert frame.columns[0] == "0"