本文整理汇总了Python中tools.rows_to_list函数的典型用法代码示例。如果您正苦于以下问题:Python rows_to_list函数的具体用法?Python rows_to_list怎么用?Python rows_to_list使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了rows_to_list函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _test_bulk_round_trip
def _test_bulk_round_trip(self, nodes, partitioner, num_records):
    """
    Test exporting a large number of rows into a csv file.

    Round-trip check: write `num_records` rows with cassandra-stress,
    COPY TO a temporary csv, TRUNCATE the table, COPY FROM the csv, and
    verify the row count is intact at every step.
    """
    self.prepare(nodes=nodes, partitioner=partitioner)
    # populate keyspace1.standard1 via cassandra-stress
    self.node1.stress(['write', 'n={}'.format(num_records), '-rate', 'threads=50'])
    stress_table = 'keyspace1.standard1'
    self.assertEqual([[num_records]], rows_to_list(self.session.execute("SELECT COUNT(*) FROM {}".format(stress_table))))
    # delete=False so the file survives for the COPY FROM step below
    self.tempfile = NamedTemporaryFile(delete=False)
    debug('Exporting to csv file: {}'.format(self.tempfile.name))
    start = datetime.datetime.now()
    self.node1.run_cqlsh(cmds="COPY {} TO '{}'".format(stress_table, self.tempfile.name))
    debug("COPY TO took {} to export {} records".format(datetime.datetime.now() - start, num_records))
    # check all records were exported (cqlsh COPY writes one csv line per row)
    self.assertEqual(num_records, sum(1 for line in open(self.tempfile.name)))
    # empty the table so the re-import is verified from a clean slate
    self.session.execute("TRUNCATE {}".format(stress_table))
    debug('Importing from csv file: {}'.format(self.tempfile.name))
    start = datetime.datetime.now()
    self.node1.run_cqlsh(cmds="COPY {} FROM '{}'".format(stress_table, self.tempfile.name))
    debug("COPY FROM took {} to import {} records".format(datetime.datetime.now() - start, num_records))
    self.assertEqual([[num_records]], rows_to_list(self.session.execute("SELECT COUNT(*) FROM {}".format(stress_table))))
示例2: table_test
def table_test(self):
    """
    CREATE TABLE, ALTER TABLE, TRUNCATE TABLE, DROP TABLE statements

    Creates one regular and one COMPACT STORAGE table, alters the regular
    one, inserts/selects/truncates both, then drops them and checks that
    further SELECTs raise InvalidRequest.
    """
    session = self.prepare()
    session.execute("CREATE TABLE test1 (k int PRIMARY KEY, v1 int)")
    session.execute("CREATE TABLE test2 (k int, c1 int, v1 int, PRIMARY KEY (k, c1)) WITH COMPACT STORAGE")
    session.execute("ALTER TABLE test1 ADD v2 int")
    # range()/str.format instead of the Python-2-only xrange() and
    # %-interpolation; the executed CQL strings are byte-identical
    for i in range(0, 10):
        session.execute("INSERT INTO test1 (k, v1, v2) VALUES ({i}, {i}, {i})".format(i=i))
        session.execute("INSERT INTO test2 (k, c1, v1) VALUES ({i}, {i}, {i})".format(i=i))
    res = sorted(session.execute("SELECT * FROM test1"))
    assert rows_to_list(res) == [[i, i, i] for i in range(0, 10)], res
    res = sorted(session.execute("SELECT * FROM test2"))
    assert rows_to_list(res) == [[i, i, i] for i in range(0, 10)], res
    session.execute("TRUNCATE test1")
    session.execute("TRUNCATE test2")
    # both tables must be empty after TRUNCATE
    res = session.execute("SELECT * FROM test1")
    assert rows_to_list(res) == [], res
    res = session.execute("SELECT * FROM test2")
    assert rows_to_list(res) == [], res
    session.execute("DROP TABLE test1")
    session.execute("DROP TABLE test2")
    # querying a dropped table is a CQL error
    assert_invalid(session, "SELECT * FROM test1", expected=InvalidRequest)
    assert_invalid(session, "SELECT * FROM test2", expected=InvalidRequest)
示例3: test_query_indexes_with_vnodes
def test_query_indexes_with_vnodes(self):
    """
    Verifies correct query behaviour in the presence of vnodes
    @jira_ticket CASSANDRA-11104
    """
    cluster = self.cluster
    cluster.populate(2).start()
    node1, node2 = cluster.nodelist()
    session = self.patient_cql_connection(node1)
    session.execute("CREATE KEYSPACE ks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': '1'};")
    # one COMPACT STORAGE table (KEYS-style index) and one regular table
    # (composites index) so both index code paths are exercised
    session.execute("CREATE TABLE ks.compact_table (a int PRIMARY KEY, b int) WITH COMPACT STORAGE;")
    session.execute("CREATE INDEX keys_index ON ks.compact_table (b);")
    session.execute("CREATE TABLE ks.regular_table (a int PRIMARY KEY, b int)")
    session.execute("CREATE INDEX composites_index on ks.regular_table (b)")
    # 100 rows with b alternating 0/1 -> exactly 50 rows per indexed value
    insert_args = [(i, i % 2) for i in xrange(100)]
    execute_concurrent_with_args(session,
                                 session.prepare("INSERT INTO ks.compact_table (a, b) VALUES (?, ?)"),
                                 insert_args)
    execute_concurrent_with_args(session,
                                 session.prepare("INSERT INTO ks.regular_table (a, b) VALUES (?, ?)"),
                                 insert_args)
    # an index lookup must cover every vnode range and return all matches
    res = session.execute("SELECT * FROM ks.compact_table WHERE b = 0")
    self.assertEqual(len(rows_to_list(res)), 50)
    res = session.execute("SELECT * FROM ks.regular_table WHERE b = 0")
    self.assertEqual(len(rows_to_list(res)), 50)
示例4: drop_column_and_restart_test
def drop_column_and_restart_test(self):
    """
    Insert a row, drop one of the inserted columns, then restart the node.

    Guards against CASSANDRA-11050: the dropped_columns system table must
    be flushed by the ALTER, otherwise the restart fails.
    @jira_ticket CASSANDRA-11050
    """
    session = self.prepare()
    session.execute("USE ks")
    session.execute("CREATE TABLE t (k int PRIMARY KEY, c1 int, c2 int)")
    session.execute("INSERT INTO t (k, c1, c2) VALUES (0, 0, 0)")
    session.execute("ALTER TABLE t DROP c2")

    # after the drop only (k, c1) remain visible
    result = session.execute("SELECT * FROM t")
    self.assertEqual([[0, 0]], rows_to_list(result))

    # bounce the whole cluster and re-check the same row survives
    self.cluster.stop()
    self.cluster.start()
    surviving_node = self.cluster.nodelist()[0]
    session = self.patient_cql_connection(surviving_node)
    session.execute("USE ks")
    result = session.execute("SELECT * FROM t")
    self.assertEqual([[0, 0]], rows_to_list(result))
示例5: query_user
def query_user(self, session, userid, age, consistency, check_ret=True):
    """
    Query the users table for `userid` at the given consistency level.

    `age` is the expected age for that user; pass age=None to assert the
    row is absent. Returns True when the result matched; when check_ret
    is True a mismatch raises AssertionError instead.
    """
    statement = SimpleStatement("SELECT userid, age FROM users where userid = %d" % (userid,), consistency_level=consistency)
    res = session.execute(statement)
    # Compare against None explicitly: plain truthiness (`if age`) would
    # wrongly treat a legitimate age of 0 as "row should be absent".
    expected = [[userid, age]] if age is not None else []
    ret = rows_to_list(res) == expected
    if check_ret:
        assert ret, "Got %s from %s, expected %s at %s" % (rows_to_list(res), session.cluster.contact_points, expected, self._name(consistency))
    return ret
示例6: query_user
def query_user(self, session, userid, age, consistency, check_ret=True):
    """
    Read back the row for `userid` at the requested consistency level and
    compare it to the expected (userid, age) pair; an age of None means
    the row is expected to be missing. Returns whether the result
    matched; when check_ret is True a mismatch fails the test instead.
    """
    statement = SimpleStatement("SELECT userid, age FROM users where userid = {}".format(userid), consistency_level=consistency)
    result = session.execute(statement)
    expected = [[userid, age]] if age else []
    matched = rows_to_list(result) == expected
    if check_ret:
        self.assertTrue(matched, "Got {} from {}, expected {} at {}".format(
            rows_to_list(result), session.cluster.contact_points, expected,
            consistency_value_to_name(consistency)))
    return matched
示例7: test_commitlog_replay_on_startup
def test_commitlog_replay_on_startup(self):
    """
    Test commit log replay.

    Inserts a row with batch commitlog enabled, kills the node before any
    memtable flush, then verifies on restart that mutations are replayed
    from the commit log and the row is queryable again.
    """
    node1 = self.node1
    # batch mode syncs the commitlog before acknowledging writes, so the
    # row is guaranteed to be on disk before the abrupt stop below
    node1.set_configuration_options(batch_commitlog=True)
    node1.start()

    debug("Insert data")
    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'Test', 1)
    session.execute("""
        CREATE TABLE users (
            user_name varchar PRIMARY KEY,
            password varchar,
            gender varchar,
            state varchar,
            birth_year bigint
        );
    """)
    session.execute("INSERT INTO Test. users (user_name, password, gender, state, birth_year) "
                    "VALUES('gandalf', '[email protected]$$', 'male', 'WA', 1955);")

    debug("Verify data is present")
    session = self.patient_cql_connection(node1)
    res = session.execute("SELECT * FROM Test. users")
    self.assertItemsEqual(rows_to_list(res),
                          [[u'gandalf', 1955, u'male', u'[email protected]$$', u'WA']])

    debug("Stop node abruptly")
    node1.stop(gently=False)

    debug("Verify commitlog was written before abrupt stop")
    commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
    commitlog_files = os.listdir(commitlog_dir)
    self.assertTrue(len(commitlog_files) > 0)

    debug("Verify no SSTables were flushed before abrupt stop")
    self.assertEqual(0, len(node1.get_sstables('test', 'users')))

    debug("Verify commit log was replayed on startup")
    node1.start()
    node1.watch_log_for("Log replay complete")
    # Verify from the logs that some mutations were replayed. Raw string
    # so \d is a regex digit class, not a (deprecated) string escape.
    replays = [match_tuple[0] for match_tuple in node1.grep_log(r" \d+ replayed mutations")]
    debug('The following log lines indicate that mutations were replayed: {msgs}'.format(msgs=replays))
    num_replayed_mutations = [
        parse('{} {num_mutations:d} replayed mutations{}', line).named['num_mutations']
        for line in replays
    ]
    # assert there were some lines where more than zero mutations were replayed
    self.assertNotEqual([m for m in num_replayed_mutations if m > 0], [])

    debug("Make query and ensure data is present")
    session = self.patient_cql_connection(node1)
    res = session.execute("SELECT * FROM Test. users")
    self.assertItemsEqual(rows_to_list(res),
                          [[u'gandalf', 1955, u'male', u'[email protected]$$', u'WA']])
示例8: test_commitlog_replay_on_startup
def test_commitlog_replay_on_startup(self):
    """ Test commit log replay """
    node1 = self.node1
    # batch commitlog syncs before acking writes, so the insert below is
    # durable in the commit log even with no memtable flush
    node1.set_configuration_options(batch_commitlog=True)
    node1.start()
    debug("Insert data")
    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'Test', 1)
    session.execute("""
        CREATE TABLE users (
            user_name varchar PRIMARY KEY,
            password varchar,
            gender varchar,
            state varchar,
            birth_year bigint
        );
    """)
    session.execute("INSERT INTO Test. users (user_name, password, gender, state, birth_year) "
                    "VALUES('gandalf', '[email protected]$$', 'male', 'WA', 1955);")
    debug("Verify data is present")
    session = self.patient_cql_connection(node1)
    res = session.execute("SELECT * FROM Test. users")
    self.assertItemsEqual(rows_to_list(res),
                          [[u'gandalf', 1955, u'male', u'[email protected]$$', u'WA']])
    debug("Stop node abruptly")
    node1.stop(gently=False)
    debug("Verify commitlog was written before abrupt stop")
    commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
    commitlog_files = os.listdir(commitlog_dir)
    self.assertTrue(len(commitlog_files) > 0)
    debug("Verify no SSTables were flushed before abrupt stop")
    # walk every configured data directory and check the table's data dir
    # holds nothing except (possibly) the "backups" subdirectory
    for x in xrange(0, self.cluster.data_dir_count):
        data_dir = os.path.join(node1.get_path(), 'data{0}'.format(x))
        # table directories are named "<table>-<cf_id>", hence the prefix match
        cf_id = [s for s in os.listdir(os.path.join(data_dir, "test")) if s.startswith("users")][0]
        cf_data_dir = glob.glob("{data_dir}/test/{cf_id}".format(**locals()))[0]
        cf_data_dir_files = os.listdir(cf_data_dir)
        if "backups" in cf_data_dir_files:
            cf_data_dir_files.remove("backups")
        self.assertEqual(0, len(cf_data_dir_files))
    debug("Verify commit log was replayed on startup")
    node1.start()
    node1.watch_log_for("Log replay complete")
    # Here we verify there was more than 0 replayed mutations: no log line
    # may report exactly " 0 replayed mutations"
    zero_replays = node1.grep_log(" 0 replayed mutations")
    self.assertEqual(0, len(zero_replays))
    debug("Make query and ensure data is present")
    session = self.patient_cql_connection(node1)
    res = session.execute("SELECT * FROM Test. users")
    self.assertItemsEqual(rows_to_list(res),
                          [[u'gandalf', 1955, u'male', u'[email protected]$$', u'WA']])
示例9: table_test
def table_test(self):
    """
    Smoke test that basic table operations work:
    - create 2 tables, one with and one without COMPACT STORAGE
    - ALTER the table without COMPACT STORAGE, adding a column
    For each of those tables:
    - insert 10 values
    - SELECT * and assert the values are there
    - TRUNCATE the table
    - SELECT * and assert there are no values
    - DROP the table
    - SELECT * and assert the statement raises an InvalidRequest
    Driver metadata (keyspace tables / table columns) is checked after
    each schema change.
    # TODO run SELECTs to make sure each statement works
    """
    session = self.prepare()
    ks_meta = UpdatingKeyspaceMetadataWrapper(session.cluster, ks_name='ks')

    session.execute("CREATE TABLE test1 (k int PRIMARY KEY, v1 int)")
    self.assertIn('test1', ks_meta.tables)
    session.execute("CREATE TABLE test2 (k int, c1 int, v1 int, PRIMARY KEY (k, c1)) WITH COMPACT STORAGE")
    self.assertIn('test2', ks_meta.tables)

    t1_meta = UpdatingTableMetadataWrapper(session.cluster, ks_name='ks', table_name='test1')
    session.execute("ALTER TABLE test1 ADD v2 int")
    self.assertIn('v2', t1_meta.columns)

    for idx in range(0, 10):
        session.execute("INSERT INTO test1 (k, v1, v2) VALUES ({i}, {i}, {i})".format(i=idx))
        session.execute("INSERT INTO test2 (k, c1, v1) VALUES ({i}, {i}, {i})".format(i=idx))

    expected = [[value, value, value] for value in range(0, 10)]
    self.assertEqual(rows_to_list(sorted(session.execute("SELECT * FROM test1"))), expected)
    self.assertEqual(rows_to_list(sorted(session.execute("SELECT * FROM test2"))), expected)

    session.execute("TRUNCATE test1")
    session.execute("TRUNCATE test2")
    self.assertEqual(rows_to_list(session.execute("SELECT * FROM test1")), [])
    self.assertEqual(rows_to_list(session.execute("SELECT * FROM test2")), [])

    session.execute("DROP TABLE test1")
    self.assertNotIn('test1', ks_meta.tables)
    session.execute("DROP TABLE test2")
    self.assertNotIn('test2', ks_meta.tables)
示例10: alter_rf_and_run_read_repair_test
def alter_rf_and_run_read_repair_test(self):
    """
    @jira_ticket CASSANDRA-10655
    Data responses may skip values for columns not selected by the column filter. This can lead to empty values being
    erroneously included in repair mutations sent out by the coordinator.
    """
    session = self.patient_cql_connection(self.cluster.nodelist()[0])
    # start at RF=1 so exactly one node holds the row
    session.execute("""CREATE KEYSPACE alter_rf_test
                       WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};""")
    session.execute("CREATE TABLE alter_rf_test.t1 (k int PRIMARY KEY, a int, b int);")
    session.execute("INSERT INTO alter_rf_test.t1 (k, a, b) VALUES (1, 1, 1);")
    cl_one_stmt = SimpleStatement("SELECT * FROM alter_rf_test.t1 WHERE k=1",
                                  consistency_level=ConsistencyLevel.ONE)
    # identify the initial replica and trigger a flush to ensure reads come from sstables
    initial_replica, non_replicas = self.identify_initial_placement('alter_rf_test', 't1', 1)
    debug("At RF=1 replica for data is " + initial_replica.name)
    initial_replica.flush()
    # At RF=1, it shouldn't matter which node we query, as the actual data should always come from the
    # initial replica when reading at CL ONE
    for n in self.cluster.nodelist():
        debug("Checking " + n.name)
        # exclusive connection pins the coordinator to node n
        session = self.patient_exclusive_cql_connection(n)
        res = rows_to_list(session.execute(cl_one_stmt))
        assert res == [[1, 1, 1]], res
    # Alter so RF=n but don't repair, then execute a query which selects only a subset of the columns. Run this at
    # CL ALL on one of the nodes which doesn't currently have the data, triggering a read repair. Although we're
    # only selecting a single column, the expectation is that the entire row is read on each replica to construct
    # the digest responses as well as the full data reads for repair. So we expect that after the read repair, all
    # replicas will have the entire row
    debug("Changing RF from 1 to 3")
    session.execute("""ALTER KEYSPACE alter_rf_test
                       WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};""")
    cl_all_stmt = SimpleStatement("SELECT a FROM alter_rf_test.t1 WHERE k=1",
                                  consistency_level=ConsistencyLevel.ALL)
    debug("Executing SELECT on non-initial replica to trigger read repair " + non_replicas[0].name)
    read_repair_session = self.patient_exclusive_cql_connection(non_replicas[0])
    res = read_repair_session.execute(cl_all_stmt)
    # result of the CL ALL query contains only the selected column
    assert rows_to_list(res) == [[1]], res
    # Now check the results of the read repair by querying each replica again at CL ONE
    debug("Re-running SELECTs at CL ONE to verify read repair")
    for n in self.cluster.nodelist():
        debug("Checking " + n.name)
        session = self.patient_exclusive_cql_connection(n)
        res = rows_to_list(session.execute(cl_one_stmt))
        # the full row, not just column 'a', must have been repaired
        assert res == [[1, 1, 1]], res
示例11: table_test
def table_test(self):
    """
    Smoke test that basic table operations work:
    - create 2 tables, one with and one without COMPACT STORAGE
    - ALTER the table without COMPACT STORAGE, adding a column
    For each of those tables:
    - insert 10 values
    - SELECT * and assert the values are there
    - TRUNCATE the table
    - SELECT * and assert there are no values
    - DROP the table
    - SELECT * and assert the statement raises an InvalidRequest
    # TODO run SELECTs to make sure each statement works
    """
    session = self.prepare()
    session.execute("CREATE TABLE test1 (k int PRIMARY KEY, v1 int)")
    session.execute("CREATE TABLE test2 (k int, c1 int, v1 int, PRIMARY KEY (k, c1)) WITH COMPACT STORAGE")
    session.execute("ALTER TABLE test1 ADD v2 int")

    # seed both tables with ten identical-valued rows
    for row_id in range(0, 10):
        session.execute("INSERT INTO test1 (k, v1, v2) VALUES ({i}, {i}, {i})".format(i=row_id))
        session.execute("INSERT INTO test2 (k, c1, v1) VALUES ({i}, {i}, {i})".format(i=row_id))

    expected_rows = [[n, n, n] for n in range(0, 10)]
    rows1 = sorted(session.execute("SELECT * FROM test1"))
    self.assertEqual(rows_to_list(rows1), expected_rows)
    rows2 = sorted(session.execute("SELECT * FROM test2"))
    self.assertEqual(rows_to_list(rows2), expected_rows)

    # truncation leaves both tables empty
    session.execute("TRUNCATE test1")
    session.execute("TRUNCATE test2")
    self.assertEqual(rows_to_list(session.execute("SELECT * FROM test1")), [])
    self.assertEqual(rows_to_list(session.execute("SELECT * FROM test2")), [])

    # dropped tables reject further queries
    session.execute("DROP TABLE test1")
    session.execute("DROP TABLE test2")
    assert_invalid(session, "SELECT * FROM test1", expected=InvalidRequest)
    assert_invalid(session, "SELECT * FROM test2", expected=InvalidRequest)
示例12: drop_column_compaction_test
def drop_column_compaction_test(self):
    """
    After a column is dropped and re-added, compaction must purge the
    values written before the drop; only rows inserted afterwards keep a
    value for that column.
    """
    session = self.prepare()
    session.execute("USE ks")
    session.execute("CREATE TABLE cf (key int PRIMARY KEY, c1 int, c2 int)")

    # seed three rows with c1 populated
    for insert in ("INSERT INTO cf (key, c1, c2) VALUES (0, 1, 2)",
                   "INSERT INTO cf (key, c1, c2) VALUES (1, 2, 3)",
                   "INSERT INTO cf (key, c1, c2) VALUES (2, 3, 4)"):
        session.execute(insert)

    # drop c1 and immediately re-add it, orphaning the old values
    session.execute("ALTER TABLE cf DROP c1")
    session.execute("ALTER TABLE cf ADD c1 int")
    # one more row written after the re-add
    session.execute("INSERT INTO cf (key, c1, c2) VALUES (3, 4, 5)")

    node = self.cluster.nodelist()[0]
    node.flush()
    node.compact()

    # the pre-drop c1 values must be gone; only the last row retains c1
    session = self.patient_cql_connection(node)
    fetched = session.execute("SELECT c1 FROM ks.cf")
    self.assertEqual([[None], [None], [None], [4]], sorted(rows_to_list(fetched)))
示例13: replace_with_reset_resume_state_test
def replace_with_reset_resume_state_test(self):
    """Test replace with resetting bootstrap progress"""
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()
    node1.stress(['write', 'n=100K', 'no-warmup', '-schema', 'replication(factor=3)'])
    session = self.patient_cql_connection(node1)
    stress_table = 'keyspace1.standard1'
    # snapshot one row at CL THREE so it can be compared after the replace
    query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)
    initial_data = rows_to_list(session.execute(query))
    node3.stop(gently=False)
    # kill node1 in the middle of streaming to let it fail
    t = InterruptBootstrap(node1)
    t.start()
    # replace node 3 with node 4
    debug("Starting node 4 to replace node 3")
    node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),
                 storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',
                 initial_token=None, binary_interface=('127.0.0.4', 9042))
    # keep timeout low so that test won't hang
    node4.set_configuration_options(values={'streaming_socket_timeout_in_ms': 1000})
    cluster.add(node4, False)
    try:
        node4.start(jvm_args=["-Dcassandra.replace_address_first_boot=127.0.0.3"], wait_other_notice=False)
    except NodeError:
        pass  # node doesn't start as expected
    t.join()
    node1.start()
    # restart node4 bootstrap with resetting bootstrap state
    node4.stop()
    mark = node4.mark_log()
    node4.start(jvm_args=[
        "-Dcassandra.replace_address_first_boot=127.0.0.3",
        "-Dcassandra.reset_bootstrap_progress=true"
    ])
    # check if we reset bootstrap state
    node4.watch_log_for("Resetting bootstrap progress to start fresh", from_mark=mark)
    # wait for node3 ready to query
    node4.watch_log_for("Listening for thrift clients...", from_mark=mark)
    # check if 2nd bootstrap succeeded
    assert_bootstrap_state(self, node4, 'COMPLETED')
    # query should work again
    debug("Stopping old nodes")
    node1.stop(gently=False, wait_other_notice=True)
    node2.stop(gently=False, wait_other_notice=True)
    # with node1/node2 down, node4 is the only node that can serve the row
    debug("Verifying data on new node.")
    session = self.patient_exclusive_cql_connection(node4)
    assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),
               expected=initial_data,
               cl=ConsistencyLevel.ONE)
示例14: drop_column_compaction_test
def drop_column_compaction_test(self):
    """
    Drop and re-add a column, compact, then erase the dropped-column
    schema record and restart; verifies the pre-drop values stayed
    compacted away even without the dropped_columns bookkeeping.
    """
    cursor = self.prepare()
    cursor.execute("USE ks")
    cursor.execute("CREATE TABLE cf (key int PRIMARY KEY, c1 int, c2 int)")
    # insert some data.
    cursor.execute("INSERT INTO cf (key, c1, c2) VALUES (0, 1, 2)")
    cursor.execute("INSERT INTO cf (key, c1, c2) VALUES (1, 2, 3)")
    cursor.execute("INSERT INTO cf (key, c1, c2) VALUES (2, 3, 4)")
    # drop and readd c1.
    cursor.execute("ALTER TABLE cf DROP c1")
    cursor.execute("ALTER TABLE cf ADD c1 int")
    # add another row.
    cursor.execute("INSERT INTO cf (key, c1, c2) VALUES (3, 4, 5)")
    node = self.cluster.nodelist()[0]
    node.flush()
    node.compact()
    # erase info on dropped 'c1' column and restart.
    cursor.execute("""UPDATE system.schema_columnfamilies
                      SET dropped_columns = null
                      WHERE keyspace_name = 'ks' AND columnfamily_name = 'cf'""")
    node.stop(gently=False)
    node.start()
    # brief pause to let the node come up before reconnecting
    time.sleep(.5)
    # test that c1 values have been compacted away.
    # version pin presumably matches the legacy schema tables queried
    # above — TODO confirm against the suite's supported versions
    cursor = self.patient_cql_connection(node, version='3.0.10')
    rows = cursor.execute("SELECT c1 FROM ks.cf")
    self.assertEqual([[None], [None], [None], [4]], sorted(rows_to_list(rows)))
示例15: compact_counter_cluster_test
def compact_counter_cluster_test(self):
    """
    @jira_ticket CASSANDRA-12219
    This test will fail on 3.0.0 - 3.0.8, and 3.1 - 3.8
    """
    cluster = self.cluster
    cluster.populate(3).start()
    node1 = cluster.nodelist()[0]
    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'counter_tests', 1)
    session.execute("""
        CREATE TABLE IF NOT EXISTS counter_cs (
            key bigint PRIMARY KEY,
            data counter
        ) WITH COMPACT STORAGE
    """)

    # five passes, each incrementing every key once -> each counter ends at 5
    for _ in range(0, 5):
        for key in range(0, 5):
            session.execute("UPDATE counter_cs SET data = data + 1 WHERE key = {k}".format(k=key))

    # every counter on the COMPACT STORAGE table must read back as 5
    for key in range(0, 5):
        row = list(session.execute("SELECT data from counter_cs where key = {k}".format(k=key)))
        self.assertEqual(rows_to_list(row)[0][0], 5)