This article collects typical usage examples of the Python method cassandra.query.SimpleStatement. If you are wondering what query.SimpleStatement does, how to call it, or what working code using it looks like, the curated examples below may help. You can also explore further usage examples from its containing module, cassandra.query.
15 code examples of the query.SimpleStatement method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
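Before the examples, here is a minimal, self-contained sketch of the pattern they all share: wrap a CQL string in a SimpleStatement, attach per-query options such as consistency_level or fetch_size, and hand it to Session.execute. The contact point, keyspace, and table names here are illustrative assumptions, not taken from the examples below.

from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement

# '127.0.0.1' and the keyspace/table names are assumptions for illustration.
cluster = Cluster(['127.0.0.1'])
session = cluster.connect('demo_ks')

# A SimpleStatement wraps a raw CQL string plus per-query options.
stmt = SimpleStatement(
    "SELECT id, value FROM demo_table",
    consistency_level=ConsistencyLevel.QUORUM,
    fetch_size=100,  # page size used when paging results
)
for row in session.execute(stmt):
    print(row.id, row.value)

# SimpleStatement also accepts %s placeholders, bound at execute() time,
# as several examples below demonstrate:
insert = SimpleStatement("INSERT INTO demo_table (id, value) VALUES (%s, %s)",
                         consistency_level=ConsistencyLevel.QUORUM)
session.execute(insert, (1, 'hello'))

cluster.shutdown()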
Example 1: test_with_no_results
# Required import: from cassandra import query [as alias]
# Or: from cassandra.query import SimpleStatement [as alias]
def test_with_no_results(self):
    """
    No errors when a page is requested and the query has no results.
    """
    cursor = self.prepare()
    cursor.execute("CREATE TABLE paging_test ( id int PRIMARY KEY, value text )")

    for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
        logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))

        # run a query that has no results and make sure it's exhausted
        future = cursor.execute_async(
            SimpleStatement("select * from paging_test", fetch_size=100, consistency_level=CL.ALL)
        )
        pf = PageFetcher(future)
        pf.request_all()

        assert [] == pf.all_data()
        assert not pf.has_more_pages
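PageFetcher is a dtest helper around the driver's ResponseFuture. With the plain driver, no helper is needed, since a ResultSet fetches further pages transparently as it is iterated; a sketch of the equivalent check, assuming an existing session and the paging_test table above:

stmt = SimpleStatement("SELECT * FROM paging_test", fetch_size=100,
                       consistency_level=ConsistencyLevel.ALL)
result = session.execute(stmt)  # ResultSet pages on demand during iteration
assert list(result) == []       # draining an empty result yields no rows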
Example 2: test_ttl_deletions
# Required import: from cassandra import query [as alias]
# Or: from cassandra.query import SimpleStatement [as alias]
def test_ttl_deletions(self):
    """Test TTL deletions. Paging over a query that has only tombstones."""
    cursor = self.prepare()
    self.setup_schema(cursor)

    for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
        logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
        cursor.execute("TRUNCATE paging_test")
        data = self.setup_data(cursor)

        # set a TTL on every row
        for row in data:
            s = ("insert into paging_test (id, mytext, col1, col2, col3) "
                 "values ({}, '{}', {}, {}, {}) using ttl 3;").format(
                     row['id'], row['mytext'], row['col1'],
                     row['col2'], row['col3'])
            cursor.execute(SimpleStatement(s, consistency_level=CL.ALL))

        time.sleep(5)
        self.check_all_paging_results(cursor, [], 0, [])
Example 3: _check_counters
# Required import: from cassandra import query [as alias]
# Or: from cassandra.query import SimpleStatement [as alias]
def _check_counters(self):
    logger.debug("Checking counter values...")
    session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
    session.execute("use upgrade;")

    for key1 in list(self.expected_counts.keys()):
        for key2 in list(self.expected_counts[key1].keys()):
            expected_value = self.expected_counts[key1][key2]

            query = SimpleStatement("SELECT c from countertable where k1='{key1}' and k2={key2};".format(key1=key1, key2=key2),
                                    consistency_level=ConsistencyLevel.ONE)
            results = session.execute(query)

            if results is not None:
                actual_value = results[0][0]
            else:
                # counter wasn't found
                actual_value = None

            assert actual_value == expected_value
Example 4: assert_one
# Required import: from cassandra import query [as alias]
# Or: from cassandra.query import SimpleStatement [as alias]
def assert_one(session, query, expected, cl=None):
    """
    Assert that the query returns exactly one row.
    @param session Session to use
    @param query Query to run
    @param expected Expected result of the query
    @param cl Optional consistency level setting. Defaults to ONE.

    Examples:
    assert_one(session, "LIST USERS", ['cassandra', True])
    assert_one(session, query, [0, 0])
    """
    simple_query = SimpleStatement(query, consistency_level=cl)
    res = session.execute(simple_query)
    list_res = _rows_to_list(res)
    assert list_res == [expected], "Expected {} from {}, but got {}".format([expected], query, list_res)
Example 5: assert_all
# Required import: from cassandra import query [as alias]
# Or: from cassandra.query import SimpleStatement [as alias]
def assert_all(session, query, expected, cl=None, ignore_order=False, timeout=None):
    """
    Assert that the query returns all the expected items, optionally in order.
    @param session Session in use
    @param query Query to run
    @param expected Expected results of the query
    @param cl Optional consistency level setting. Defaults to ONE.
    @param ignore_order Optional boolean flag; if True, the order of the rows is ignored
    @param timeout Optional query timeout, in seconds

    Examples:
    assert_all(session, "LIST USERS", [['aleksey', False], ['cassandra', True]])
    assert_all(self.session1, "SELECT * FROM ttl_table;", [[1, 42, 1, 1]])
    """
    simple_query = SimpleStatement(query, consistency_level=cl)
    res = session.execute(simple_query) if timeout is None else session.execute(simple_query, timeout=timeout)
    list_res = _rows_to_list(res)
    if ignore_order:
        expected = list_to_hashed_dict(expected)
        list_res = list_to_hashed_dict(list_res)
    assert list_res == expected, "Expected {} from {}, but got {}".format(expected, query, list_res)
Example 6: _put_with_overwrite
# Required import: from cassandra import query [as alias]
# Or: from cassandra.query import SimpleStatement [as alias]
def _put_with_overwrite(cluster, session, nb_keys, cl=ConsistencyLevel.QUORUM):
    for k in range(0, nb_keys):
        kvs = ["UPDATE cf SET v='value%d' WHERE key='k%s' AND c='c%02d'" % (i, k, i) for i in range(0, 100)]
        query = SimpleStatement('BEGIN BATCH %s APPLY BATCH' % '; '.join(kvs), consistency_level=cl)
        session.execute(query)
        time.sleep(.01)
    cluster.flush()

    for k in range(0, nb_keys):
        kvs = ["UPDATE cf SET v='value%d' WHERE key='k%s' AND c='c%02d'" % (i * 4, k, i * 2) for i in range(0, 50)]
        query = SimpleStatement('BEGIN BATCH %s APPLY BATCH' % '; '.join(kvs), consistency_level=cl)
        session.execute(query)
        time.sleep(.01)
    cluster.flush()

    for k in range(0, nb_keys):
        kvs = ["UPDATE cf SET v='value%d' WHERE key='k%s' AND c='c%02d'" % (i * 20, k, i * 5) for i in range(0, 20)]
        query = SimpleStatement('BEGIN BATCH %s APPLY BATCH' % '; '.join(kvs), consistency_level=cl)
        session.execute(query)
        time.sleep(.01)
    cluster.flush()
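Concatenating statements into a BEGIN BATCH string works, but the driver also offers a native BatchStatement API. A minimal sketch of one round of the loop above, under the same cf schema assumptions:

from cassandra import ConsistencyLevel
from cassandra.query import BatchStatement, SimpleStatement

batch = BatchStatement(consistency_level=ConsistencyLevel.QUORUM)
for i in range(100):
    # parameters are bound per statement as each is added to the batch
    batch.add(SimpleStatement("UPDATE cf SET v=%s WHERE key=%s AND c=%s"),
              ('value%d' % i, 'k0', 'c%02d' % i))
session.execute(batch)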
Example 7: insert_row
# Required import: from cassandra import query [as alias]
# Or: from cassandra.query import SimpleStatement [as alias]
def insert_row(cls, *args, **kwds):
    def to_db(datum, typ):
        # render a Python value as a CQL literal for the given column type
        mapper = {
            'text': lambda x: "'%s'" % x,
            'list<bigint>': lambda x: str(x).replace('L', ''),
            'list<int>': lambda x: str(x).replace('L', ''),
            'int': lambda x: str(x),
            'ascii': lambda x: "'%s'" % x,
        }
        return mapper[typ](datum)

    data = kwds['data']
    data_keys = [attr.split()[0] for attr in cls.attrs if attr.split()[0] in data.keys()]
    data_typs = [attr.split()[1] for attr in cls.attrs if attr.split()[0] in data.keys()]
    data_vals = ', '.join([to_db(data[data_keys[k]], data_typs[k]) for k in range(len(data_keys))])
    qstring = 'INSERT INTO %s (%s) VALUES (%s)' % (cls.__name__, ', '.join(data_keys), data_vals)
    query = SimpleStatement(qstring, consistency_level=ConsistencyLevel.QUORUM)
    session.execute(query)
Example 8: test_simple
# Required import: from cassandra import query [as alias]
# Or: from cassandra.query import SimpleStatement [as alias]
def test_simple(self):
    """
    Test the SimpleStrategy on a 3-node cluster
    """
    self.cluster.populate(3).start(wait_for_binary_proto=True, wait_other_notice=True)
    node1 = self.cluster.nodelist()[0]
    session = self.patient_exclusive_cql_connection(node1, consistency_level=ConsistencyLevel.ALL)
    session.max_trace_wait = 120
    replication_factor = 3
    create_ks(session, 'test', replication_factor)
    session.execute('CREATE TABLE test.test (id int PRIMARY KEY, value text)', trace=False)

    for key, token in list(murmur3_hashes.items()):
        logger.debug('murmur3 hash key={key},token={token}'.format(key=key, token=token))
        query = SimpleStatement("INSERT INTO test (id, value) VALUES ({}, 'asdf')".format(key),
                                consistency_level=ConsistencyLevel.ALL)
        future = session.execute_async(query, trace=True)
        future.result()
        block_on_trace(session)

        trace = future.get_query_trace(max_wait=120)
        self.pprint_trace(trace)

        stats = self.get_replicas_from_trace(trace)
        replicas_should_be = set(self.get_replicas_for_token(token, replication_factor))
        logger.debug('\nreplicas should be: %s' % replicas_should_be)
        logger.debug('replicas were: %s' % stats['replicas'])

        # make sure the correct nodes are replicas
        assert stats['replicas'] == replicas_should_be
        # make sure each replica node was contacted and acknowledged the write
        assert stats['nodes_sent_write'] == stats['nodes_responded_write']
Example 9: test_simple_increment
# Required import: from cassandra import query [as alias]
# Or: from cassandra.query import SimpleStatement [as alias]
def test_simple_increment(self):
    """ Simple increment test (created for #3465, which turned out not to be a bug) """
    cluster = self.cluster
    cluster.populate(3).start()
    nodes = cluster.nodelist()

    session = self.patient_cql_connection(nodes[0])
    create_ks(session, 'ks', 3)
    create_cf(session, 'cf', validation="CounterColumnType", columns={'c': 'counter'})

    sessions = [self.patient_cql_connection(node, 'ks') for node in nodes]
    nb_increment = 50
    nb_counter = 10

    for i in range(0, nb_increment):
        # spread the increments across the nodes
        for c in range(0, nb_counter):
            session = sessions[(i + c) % len(nodes)]
            query = SimpleStatement("UPDATE cf SET c = c + 1 WHERE key = 'counter%i'" % c,
                                    consistency_level=ConsistencyLevel.QUORUM)
            session.execute(query)

        # then check the values after each round of increments
        session = sessions[i % len(nodes)]
        keys = ",".join(["'counter%i'" % c for c in range(0, nb_counter)])
        query = SimpleStatement("SELECT key, c FROM cf WHERE key IN (%s)" % keys,
                                consistency_level=ConsistencyLevel.QUORUM)
        res = list(session.execute(query))
        assert_length_equal(res, nb_counter)
        for c in range(0, nb_counter):
            assert len(res[c]) == 2, "Expecting key and counter for counter {}, got {}".format(c, str(res[c]))
            assert res[c][1] == i + 1, "Expecting counter {} = {}, got {}".format(c, i + 1, res[c][1])
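create_ks and create_cf are dtest helpers; judging from the arguments passed here, the schema they build corresponds roughly to the following CQL (an assumption about the helpers' defaults, not their actual source):

session.execute("CREATE KEYSPACE ks WITH replication = "
                "{'class': 'SimpleStrategy', 'replication_factor': 3}")
session.execute("CREATE TABLE ks.cf (key text PRIMARY KEY, c counter)")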
Example 10: test_decommission
# Required import: from cassandra import query [as alias]
# Or: from cassandra.query import SimpleStatement [as alias]
def test_decommission(self):
    """ Test that repaired data remains in sync after a decommission """
    self.fixture_dtest_setup.setup_overrides.cluster_options = ImmutableMapping({'hinted_handoff_enabled': 'false',
                                                                                 'commitlog_sync_period_in_ms': 500})
    self.init_default_config()
    self.cluster.populate(4).start()
    node1, node2, node3, node4 = self.cluster.nodelist()

    session = self.patient_exclusive_cql_connection(node3)
    session.execute("CREATE KEYSPACE ks WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor': 2}")
    session.execute("CREATE TABLE ks.tbl (k INT PRIMARY KEY, v INT)")

    # insert some data
    stmt = SimpleStatement("INSERT INTO ks.tbl (k,v) VALUES (%s, %s)")
    for i in range(1000):
        session.execute(stmt, (i, i))

    node1.repair(options=['ks'])

    for i in range(1000):
        v = i + 1000
        session.execute(stmt, (v, v))

    # everything should be in sync
    for node in self.cluster.nodelist():
        result = node.repair(options=['ks', '--validate'])
        assert "Repaired data is in sync" in result.stdout

    node2.nodetool('decommission')

    # everything should still be in sync
    for node in [node1, node3, node4]:
        result = node.repair(options=['ks', '--validate'])
        assert "Repaired data is in sync" in result.stdout
Example 11: test_bootstrap
# Required import: from cassandra import query [as alias]
# Or: from cassandra.query import SimpleStatement [as alias]
def test_bootstrap(self):
    """ Test that repaired data remains in sync after a bootstrap """
    self.fixture_dtest_setup.setup_overrides.cluster_options = ImmutableMapping({'hinted_handoff_enabled': 'false',
                                                                                 'commitlog_sync_period_in_ms': 500})
    self.init_default_config()
    self.cluster.populate(3).start()
    node1, node2, node3 = self.cluster.nodelist()

    session = self.patient_exclusive_cql_connection(node3)
    session.execute("CREATE KEYSPACE ks WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor': 2}")
    session.execute("CREATE TABLE ks.tbl (k INT PRIMARY KEY, v INT)")

    # insert some data
    stmt = SimpleStatement("INSERT INTO ks.tbl (k,v) VALUES (%s, %s)")
    for i in range(1000):
        session.execute(stmt, (i, i))

    node1.repair(options=['ks'])

    for i in range(1000):
        v = i + 1000
        session.execute(stmt, (v, v))

    # everything should be in sync
    for node in [node1, node2, node3]:
        result = node.repair(options=['ks', '--validate'])
        assert "Repaired data is in sync" in result.stdout

    node4 = new_node(self.cluster)
    node4.start(wait_for_binary_proto=True)
    assert len(self.cluster.nodelist()) == 4

    # everything should still be in sync
    for node in self.cluster.nodelist():
        result = node.repair(options=['ks', '--validate'])
        assert "Repaired data is in sync" in result.stdout
Example 12: test_force
# Required import: from cassandra import query [as alias]
# Or: from cassandra.query import SimpleStatement [as alias]
def test_force(self):
    """
    Forcing an incremental repair should incrementally repair any nodes
    that are up, but should not promote the sstables to repaired.
    """
    self.fixture_dtest_setup.setup_overrides.cluster_options = ImmutableMapping({'hinted_handoff_enabled': 'false',
                                                                                 'num_tokens': 1,
                                                                                 'commitlog_sync_period_in_ms': 500})
    self.init_default_config()
    self.cluster.populate(3).start()
    node1, node2, node3 = self.cluster.nodelist()

    session = self.patient_exclusive_cql_connection(node3)
    session.execute("CREATE KEYSPACE ks WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor': 3}")
    session.execute("CREATE TABLE ks.tbl (k INT PRIMARY KEY, v INT)")

    stmt = SimpleStatement("INSERT INTO ks.tbl (k,v) VALUES (%s, %s)")
    stmt.consistency_level = ConsistencyLevel.ALL
    for i in range(10):
        session.execute(stmt, (i, i))

    node2.stop(wait_other_notice=True)

    # repair should fail because node2 is down
    with pytest.raises(ToolError):
        node1.repair(options=['ks'])

    # run with the force flag
    node1.repair(options=['ks', '--force'])

    # ... and verify nothing was promoted to repaired
    self.assertNoRepairedSSTables(node1, 'ks')
    self.assertNoRepairedSSTables(node2, 'ks')
Example 13: test_force_with_none_down
# Required import: from cassandra import query [as alias]
# Or: from cassandra.query import SimpleStatement [as alias]
def test_force_with_none_down(self):
    """
    If we force an incremental repair but all the involved nodes are up,
    it should run normally and promote sstables afterwards.
    """
    self.fixture_dtest_setup.setup_overrides.cluster_options = ImmutableMapping({'hinted_handoff_enabled': 'false',
                                                                                 'num_tokens': 1,
                                                                                 'commitlog_sync_period_in_ms': 500})
    self.init_default_config()
    self.cluster.populate(3).start()
    node1, node2, node3 = self.cluster.nodelist()

    session = self.patient_exclusive_cql_connection(node3)
    session.execute("CREATE KEYSPACE ks WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor': 3}")
    session.execute("CREATE TABLE ks.tbl (k INT PRIMARY KEY, v INT)")

    stmt = SimpleStatement("INSERT INTO ks.tbl (k,v) VALUES (%s, %s)")
    stmt.consistency_level = ConsistencyLevel.ALL
    for i in range(10):
        session.execute(stmt, (i, i))

    # run with the force flag
    node1.repair(options=['ks', '--force'])

    # ... and verify everything was still promoted
    self.assertAllRepairedSSTables(node1, 'ks')
    self.assertAllRepairedSSTables(node2, 'ks')
    self.assertAllRepairedSSTables(node3, 'ks')
Example 14: test_hosts
# Required import: from cassandra import query [as alias]
# Or: from cassandra.query import SimpleStatement [as alias]
def test_hosts(self):
    """
    Running an incremental repair with hosts specified should incrementally
    repair the given nodes, but should not promote the sstables to repaired.
    """
    self.fixture_dtest_setup.setup_overrides.cluster_options = ImmutableMapping({'hinted_handoff_enabled': 'false',
                                                                                 'num_tokens': 1,
                                                                                 'commitlog_sync_period_in_ms': 500})
    self.init_default_config()
    self.cluster.populate(3).start()
    node1, node2, node3 = self.cluster.nodelist()

    session = self.patient_exclusive_cql_connection(node3)
    session.execute("CREATE KEYSPACE ks WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor': 3}")
    session.execute("CREATE TABLE ks.tbl (k INT PRIMARY KEY, v INT)")

    stmt = SimpleStatement("INSERT INTO ks.tbl (k,v) VALUES (%s, %s)")
    stmt.consistency_level = ConsistencyLevel.ALL
    for i in range(10):
        session.execute(stmt, (i, i))

    # run the repair against only node1 and node2
    node1.repair(options=['ks', '-hosts', ','.join([node1.address(), node2.address()])])

    # ... and verify nothing was promoted to repaired
    self.assertNoRepairedSSTables(node1, 'ks')
    self.assertNoRepairedSSTables(node2, 'ks')
Example 15: test_repaired_tracking_with_partition_deletes
# Required import: from cassandra import query [as alias]
# Or: from cassandra.query import SimpleStatement [as alias]
def test_repaired_tracking_with_partition_deletes(self):
    """
    Check that when tracking repaired data status following a digest mismatch,
    repaired data mismatches are marked as unconfirmed, since sstables may be
    skipped once the partition delete is encountered.
    @jira_ticket CASSANDRA-14145
    """
    session, node1, node2 = self.setup_for_repaired_data_tracking()
    stmt = SimpleStatement("INSERT INTO ks.tbl (k, c, v) VALUES (%s, %s, %s)")
    stmt.consistency_level = ConsistencyLevel.ALL
    for i in range(10):
        session.execute(stmt, (i, i, i))

    for node in self.cluster.nodelist():
        node.flush()
        self.assertNoRepairedSSTables(node, 'ks')

    node1.repair(options=['ks'])
    node2.stop(wait_other_notice=True)

    session.execute("delete from ks.tbl where k = 5")

    node1.flush()
    node2.start(wait_other_notice=True)

    # expect unconfirmed inconsistencies, as the partition deletes cause some sstables to be skipped
    with JolokiaAgent(node1) as jmx:
        self.query_and_check_repaired_mismatches(jmx, session, "SELECT * FROM ks.tbl WHERE k = 5",
                                                 expect_unconfirmed_inconsistencies=True)
        self.query_and_check_repaired_mismatches(jmx, session, "SELECT * FROM ks.tbl WHERE k = 5 AND c = 5",
                                                 expect_unconfirmed_inconsistencies=True)
        # there are no digest reads for range queries, so the blocking read repair metric isn't incremented;
        # *all* sstables are read for partition ranges too, and as the repaired set is still in sync there
        # should be no inconsistencies
        self.query_and_check_repaired_mismatches(jmx, session, "SELECT * FROM ks.tbl", expect_read_repair=False)