This page collects typical usage examples of the Python function tools.data.query_c1c2. If you are unsure how to call query_c1c2, or want to see how it is used in practice, the hand-picked examples below should help.
Fifteen code examples of the query_c1c2 function are shown below, sorted by popularity by default.
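All of the examples come from the cassandra-dtest suite and follow the same pattern: create a keyspace and a c1/c2 table, write keys with insert_c1c2, then read them back with query_c1c2 at a chosen consistency level, optionally tolerating or requiring missing rows. For orientation, here is a minimal sketch of what a query_c1c2-style helper could look like. It is an assumption inferred from how the function is called in the examples, not the actual tools.data source: the table name 'cf', the key format, and the expected column values are all hypothetical.

# Hedged sketch of a query_c1c2-style read helper (assumed shape, not the real tools.data code).
# The calls in the examples only tell us it takes a session, a key, a consistency level,
# and optional tolerate_missing / must_be_missing flags.
from cassandra import ConsistencyLevel
from cassandra.query import SimpleStatement

def query_c1c2(session, key, consistency=ConsistencyLevel.QUORUM,
               tolerate_missing=False, must_be_missing=False):
    # Read back the row that an insert_c1c2-style helper would have written for this key.
    statement = SimpleStatement("SELECT c1, c2 FROM cf WHERE key = 'k{}'".format(key),
                                consistency_level=consistency)
    rows = list(session.execute(statement))
    if must_be_missing:
        # e.g. the hinted-handoff-disabled case below: the key must not be present
        assert len(rows) == 0, rows
    elif not tolerate_missing:
        # normal case: exactly one row with the expected (hypothetical) column values
        assert len(rows) == 1, rows
        c1, c2 = rows[0]
        assert (c1, c2) == ('value1', 'value2'), rows[0]

With a helper shaped like this, the read-repair example below simply loops over every key at QUORUM so each read touches both replicas and repairs any stale one.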
Example 1: readrepair_test
def readrepair_test(self):
    cluster = self.cluster
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    if DISABLE_VNODES:
        cluster.populate(2).start()
    else:
        tokens = cluster.balanced_tokens(2)
        cluster.populate(2, tokens=tokens).start()
    node1, node2 = cluster.nodelist()

    session = self.patient_cql_connection(node1)
    create_ks(session, 'ks', 2)
    create_c1c2_table(self, session, read_repair=1.0)

    node2.stop(wait_other_notice=True)
    insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ONE)
    node2.start(wait_for_binary_proto=True, wait_other_notice=True)

    # query everything to cause RR
    for n in xrange(0, 10000):
        query_c1c2(session, n, ConsistencyLevel.QUORUM)

    node1.stop(wait_other_notice=True)

    # Check node2 for all the keys that should have been repaired
    session = self.patient_cql_connection(node2, keyspace='ks')
    for n in xrange(0, 10000):
        query_c1c2(session, n, ConsistencyLevel.ONE)
Example 2: test_move_single_node
def test_move_single_node(self):
    """ Test moving a node in a single-node cluster (#4200) """
    cluster = self.cluster

    # Create an unbalanced ring
    cluster.populate(1, tokens=[0]).start()
    node1 = cluster.nodelist()[0]
    time.sleep(0.2)

    session = self.patient_cql_connection(node1)
    create_ks(session, 'ks', 1)
    create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})

    insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ONE)
    cluster.flush()

    node1.move(2**25)
    time.sleep(1)
    cluster.cleanup()

    # Check we can get all the keys
    for n in range(0, 10000):
        query_c1c2(session, n, ConsistencyLevel.ONE)
Example 3: _do_hinted_handoff
def _do_hinted_handoff(self, node1, node2, enabled, keyspace='ks'):
    """
    Test that if we stop one node the other one
    will store hints only when hinted handoff is enabled
    """
    session = self.patient_exclusive_cql_connection(node1)
    create_ks(session, keyspace, 2)
    create_c1c2_table(self, session)

    node2.stop(wait_other_notice=True)
    insert_c1c2(session, n=100, consistency=ConsistencyLevel.ONE)

    log_mark = node1.mark_log()
    node2.start(wait_other_notice=True)

    if enabled:
        node1.watch_log_for(["Finished hinted"], from_mark=log_mark, timeout=120)

    node1.stop(wait_other_notice=True)

    # Check node2: every key should have been delivered via hinted handoff if it
    # was enabled, and should be missing if it was not
    session = self.patient_exclusive_cql_connection(node2, keyspace=keyspace)
    for n in xrange(0, 100):
        if enabled:
            query_c1c2(session, n, ConsistencyLevel.ONE)
        else:
            query_c1c2(session, n, ConsistencyLevel.ONE, tolerate_missing=True, must_be_missing=True)
Example 4: test_decommission
def test_decommission(self):
    cluster = self.cluster

    tokens = cluster.balanced_tokens(4)
    cluster.populate(4, tokens=tokens).start()
    node1, node2, node3, node4 = cluster.nodelist()

    session = self.patient_cql_connection(node1)
    create_ks(session, 'ks', 2)
    create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})

    insert_c1c2(session, n=30000, consistency=ConsistencyLevel.QUORUM)
    cluster.flush()

    sizes = [node.data_size() for node in cluster.nodelist() if node.is_running()]
    init_size = sizes[0]
    assert_almost_equal(*sizes)

    time.sleep(.5)
    node4.decommission()
    node4.stop()
    cluster.cleanup()
    time.sleep(.5)

    # Check we can get all the keys
    for n in range(0, 30000):
        query_c1c2(session, n, ConsistencyLevel.QUORUM)

    sizes = [node.data_size() for node in cluster.nodelist() if node.is_running()]
    logger.debug(sizes)
    assert_almost_equal(sizes[0], sizes[1])
    assert_almost_equal((2.0 / 3.0) * sizes[0], sizes[2])
    assert_almost_equal(sizes[2], init_size)
Example 5: quorum_available_during_failure_test
def quorum_available_during_failure_test(self):
    CL = ConsistencyLevel.QUORUM
    RF = 3

    debug("Creating a ring")
    cluster = self.cluster
    if DISABLE_VNODES:
        cluster.populate(3).start()
    else:
        tokens = cluster.balanced_tokens(3)
        cluster.populate(3, tokens=tokens).start()
    node1, node2, node3 = cluster.nodelist()

    debug("Set to talk to node 2")
    session = self.patient_cql_connection(node2)
    create_ks(session, 'ks', RF)
    create_c1c2_table(self, session)

    debug("Generating some data")
    insert_c1c2(session, n=100, consistency=CL)

    debug("Taking down node1")
    node1.stop(wait_other_notice=True)

    debug("Reading back data.")
    for n in xrange(100):
        query_c1c2(session, n, CL)
Example 6: test_resumable_decommission
def test_resumable_decommission(self):
    """
    @jira_ticket CASSANDRA-12008

    Test that the decommission operation is resumable
    """
    self.fixture_dtest_setup.ignore_log_patterns = [r'Streaming error occurred',
                                                    r'Error while decommissioning node',
                                                    r'Remote peer 127.0.0.2 failed stream session',
                                                    r'Remote peer 127.0.0.2:7000 failed stream session']
    cluster = self.cluster
    cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
    cluster.populate(3, install_byteman=True).start(wait_other_notice=True)
    node1, node2, node3 = cluster.nodelist()

    session = self.patient_cql_connection(node2)
    # reduce system_distributed RF to 2 so we don't require forceful decommission
    session.execute("ALTER KEYSPACE system_distributed WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'2'};")
    create_ks(session, 'ks', 2)
    create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
    insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ALL)

    # Execute the first decommission; the injected byteman failure should make it fail
    with pytest.raises(ToolError):
        if cluster.version() >= '4.0':
            script = ['./byteman/4.0/decommission_failure_inject.btm']
        else:
            script = ['./byteman/pre4.0/decommission_failure_inject.btm']
        node2.byteman_submit(script)
        node2.nodetool('decommission')

    # Make sure the previous ToolError is due to decommission
    node2.watch_log_for('Error while decommissioning node')

    # Decommission again
    mark = node2.mark_log()
    node2.nodetool('decommission')

    # Check that the decommission is done and that already-transferred ranges were skipped
    node2.watch_log_for('DECOMMISSIONED', from_mark=mark)
    node2.grep_log("Skipping transferred range .* of keyspace ks, endpoint {}".format(node2.address_for_current_version_slashy()), filename='debug.log')

    # Check data is correctly forwarded to node1 and node3
    cluster.remove(node2)
    node3.stop(gently=False)
    session = self.patient_exclusive_cql_connection(node1)
    session.execute('USE ks')
    for i in range(0, 10000):
        query_c1c2(session, i, ConsistencyLevel.ONE)

    node1.stop(gently=False)
    node3.start()
    session.shutdown()
    mark = node3.mark_log()
    node3.watch_log_for('Starting listening for CQL clients', from_mark=mark)
    session = self.patient_exclusive_cql_connection(node3)
    session.execute('USE ks')
    for i in range(0, 10000):
        query_c1c2(session, i, ConsistencyLevel.ONE)
Example 7: resumable_decommission_test
def resumable_decommission_test(self):
    """
    @jira_ticket CASSANDRA-12008

    Test that the decommission operation is resumable
    """
    self.ignore_log_patterns = [r'Streaming error occurred', r'Error while decommissioning node', r'Remote peer 127.0.0.2 failed stream session']
    cluster = self.cluster
    cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
    cluster.populate(3, install_byteman=True).start(wait_other_notice=True)
    node1, node2, node3 = cluster.nodelist()

    session = self.patient_cql_connection(node2)
    create_ks(session, 'ks', 2)
    create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
    insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ALL)

    # Execute the first decommission; the injected byteman failure should make it fail
    with self.assertRaises(ToolError):
        script = ['./byteman/decommission_failure_inject.btm']
        node2.byteman_submit(script)
        node2.nodetool('decommission')

    # Make sure the previous ToolError is due to decommission
    node2.watch_log_for('Error while decommissioning node')

    # Decommission again
    mark = node2.mark_log()
    node2.nodetool('decommission')

    # Check that the decommission is done and that already-transferred ranges were skipped
    node2.watch_log_for('DECOMMISSIONED', from_mark=mark)
    node2.grep_log("Skipping transferred range .* of keyspace ks, endpoint /127.0.0.3", filename='debug.log')

    # Check data is correctly forwarded to node1 and node3
    cluster.remove(node2)
    node3.stop(gently=False)
    session = self.patient_exclusive_cql_connection(node1)
    session.execute('USE ks')
    for i in xrange(0, 10000):
        query_c1c2(session, i, ConsistencyLevel.ONE)

    node1.stop(gently=False)
    node3.start()
    session.shutdown()
    mark = node3.mark_log()
    node3.watch_log_for('Starting listening for CQL clients', from_mark=mark)
    session = self.patient_exclusive_cql_connection(node3)
    session.execute('USE ks')
    for i in xrange(0, 10000):
        query_c1c2(session, i, ConsistencyLevel.ONE)
Example 8: hintedhandoff_decom_test
def hintedhandoff_decom_test(self):
    self.cluster.populate(4).start(wait_for_binary_proto=True)
    [node1, node2, node3, node4] = self.cluster.nodelist()
    session = self.patient_cql_connection(node1)
    create_ks(session, 'ks', 2)
    create_c1c2_table(self, session)

    # Write while node4 is down so hints are stored for it
    node4.stop(wait_other_notice=True)
    insert_c1c2(session, n=100, consistency=ConsistencyLevel.ONE)

    node1.decommission()
    node4.start(wait_for_binary_proto=True)

    node2.decommission()
    node3.decommission()

    time.sleep(5)

    # All keys should still be readable after the other nodes are decommissioned
    for x in xrange(0, 100):
        query_c1c2(session, x, ConsistencyLevel.ONE)
Example 9: test_non_local_read
def test_non_local_read(self):
    """ This test reads from a coordinator we know has no copy of the data """
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    session = self.patient_cql_connection(node1)
    create_ks(session, 'ks', 2)
    create_c1c2_table(self, session)

    # insert and get at CL.QUORUM (since RF=2, node1 won't have all keys locally)
    insert_c1c2(session, n=1000, consistency=ConsistencyLevel.QUORUM)
    for n in range(0, 1000):
        query_c1c2(session, n, ConsistencyLevel.QUORUM)
Example 10: test_movement
def test_movement(self):
    cluster = self.cluster

    # Create an unbalanced ring
    cluster.populate(3, tokens=[0, 2**48, 2**62]).start()
    node1, node2, node3 = cluster.nodelist()

    session = self.patient_cql_connection(node1)
    create_ks(session, 'ks', 1)
    create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})

    insert_c1c2(session, n=30000, consistency=ConsistencyLevel.ONE)
    cluster.flush()

    # Move nodes to balance the cluster
    def move_node(node, token):
        mark = node.mark_log()
        node.move(token)  # can't assume 0 is balanced with m3p
        node.watch_log_for('{} state jump to NORMAL'.format(node.address_for_current_version()), from_mark=mark, timeout=180)
        time.sleep(3)

    balancing_tokens = cluster.balanced_tokens(3)
    move_node(node1, balancing_tokens[0])
    move_node(node2, balancing_tokens[1])
    move_node(node3, balancing_tokens[2])

    time.sleep(1)
    cluster.cleanup()

    for node in cluster.nodelist():
        # after moving nodes we need to relocate any sstables that ended up in the wrong places;
        # since that can leave overlapping data on the disks, run a major compaction so the load evens out
        if cluster.version() >= '3.2':
            node.nodetool("relocatesstables")
        node.nodetool("compact")

    # Check we can get all the keys
    for n in range(0, 30000):
        query_c1c2(session, n, ConsistencyLevel.ONE)

    # Now the load should be basically even
    sizes = [node.data_size() for node in [node1, node2, node3]]
    assert_almost_equal(sizes[0], sizes[1], error=0.05)
    assert_almost_equal(sizes[0], sizes[2], error=0.05)
    assert_almost_equal(sizes[1], sizes[2], error=0.05)
Example 11: test_consistent_reads_after_bootstrap
def test_consistent_reads_after_bootstrap(self):
    logger.debug("Creating a ring")
    cluster = self.cluster
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False,
                                              'write_request_timeout_in_ms': 60000,
                                              'read_request_timeout_in_ms': 60000,
                                              'dynamic_snitch_badness_threshold': 0.0})
    cluster.set_batch_commitlog(enabled=True)

    cluster.populate(2)
    node1, node2 = cluster.nodelist()
    cluster.start(wait_for_binary_proto=True, wait_other_notice=True)

    logger.debug("Set to talk to node 2")
    n2session = self.patient_cql_connection(node2)
    create_ks(n2session, 'ks', 2)
    create_c1c2_table(self, n2session)

    logger.debug("Generating some data for all nodes")
    insert_c1c2(n2session, keys=list(range(10, 20)), consistency=ConsistencyLevel.ALL)

    node1.flush()
    logger.debug("Taking down node1")
    node1.stop(wait_other_notice=True)

    logger.debug("Writing data to only node2")
    insert_c1c2(n2session, keys=list(range(30, 1000)), consistency=ConsistencyLevel.ONE)
    node2.flush()

    logger.debug("Restart node1")
    node1.start(wait_other_notice=True)

    logger.debug("Bootstrapping node3")
    node3 = new_node(cluster)
    node3.start(wait_for_binary_proto=True)

    n3session = self.patient_cql_connection(node3)
    n3session.execute("USE ks")
    logger.debug("Checking that no data was lost")
    for n in range(10, 20):
        query_c1c2(n3session, n, ConsistencyLevel.ALL)
    for n in range(30, 1000):
        query_c1c2(n3session, n, ConsistencyLevel.ALL)
Example 12: test_movement
def test_movement(self):
    cluster = self.cluster

    # Create an unbalanced ring
    cluster.populate(3, tokens=[0, 2**48, 2**62]).start()
    node1, node2, node3 = cluster.nodelist()

    session = self.patient_cql_connection(node1)
    create_ks(session, 'ks', 1)
    create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})

    insert_c1c2(session, n=30000, consistency=ConsistencyLevel.ONE)
    cluster.flush()

    # Move nodes to balance the cluster
    def move_node(node, token):
        mark = node.mark_log()
        node.move(token)  # can't assume 0 is balanced with m3p
        node.watch_log_for('{} state jump to NORMAL'.format(node.address_for_current_version()), from_mark=mark, timeout=180)
        time.sleep(3)

    balancing_tokens = cluster.balanced_tokens(3)
    move_node(node1, balancing_tokens[0])
    move_node(node2, balancing_tokens[1])
    move_node(node3, balancing_tokens[2])

    time.sleep(1)
    cluster.cleanup()

    # Check we can get all the keys
    for n in range(0, 30000):
        query_c1c2(session, n, ConsistencyLevel.ONE)

    # Now the load should be basically even
    sizes = [node.data_size() for node in [node1, node2, node3]]
    assert_almost_equal(sizes[0], sizes[1])
    assert_almost_equal(sizes[0], sizes[2])
    assert_almost_equal(sizes[1], sizes[2])
Example 13: blacklisted_directory_test
def blacklisted_directory_test(self):
    cluster = self.cluster
    cluster.set_datadir_count(3)
    cluster.populate(1)
    [node] = cluster.nodelist()
    remove_perf_disable_shared_mem(node)
    cluster.start(wait_for_binary_proto=True)

    session = self.patient_cql_connection(node)
    create_ks(session, 'ks', 1)
    create_c1c2_table(self, session)

    insert_c1c2(session, n=10000)
    node.flush()
    for k in xrange(0, 10000):
        query_c1c2(session, k)

    node.compact()
    mbean = make_mbean('db', type='BlacklistedDirectories')
    with JolokiaAgent(node) as jmx:
        jmx.execute_method(mbean, 'markUnwritable', [os.path.join(node.get_path(), 'data0')])

    for k in xrange(0, 10000):
        query_c1c2(session, k)

    node.nodetool('relocatesstables')

    for k in xrange(0, 10000):
        query_c1c2(session, k)
Example 14: consistent_reads_after_move_test
def consistent_reads_after_move_test(self):
    debug("Creating a ring")
    cluster = self.cluster
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False,
                                              'write_request_timeout_in_ms': 60000,
                                              'read_request_timeout_in_ms': 60000,
                                              'dynamic_snitch_badness_threshold': 0.0})
    cluster.set_batch_commitlog(enabled=True)

    cluster.populate(3, tokens=[0, 2**48, 2**62]).start()
    node1, node2, node3 = cluster.nodelist()

    debug("Set to talk to node 2")
    n2session = self.patient_cql_connection(node2)
    create_ks(n2session, 'ks', 2)
    create_c1c2_table(self, n2session)

    debug("Generating some data for all nodes")
    insert_c1c2(n2session, keys=range(10, 20), consistency=ConsistencyLevel.ALL)

    node1.flush()
    debug("Taking down node1")
    node1.stop(wait_other_notice=True)

    debug("Writing data to node2")
    insert_c1c2(n2session, keys=range(30, 1000), consistency=ConsistencyLevel.ONE)
    node2.flush()

    debug("Restart node1")
    node1.start(wait_other_notice=True)

    debug("Move token on node3")
    node3.move(2)

    debug("Checking that no data was lost")
    for n in xrange(10, 20):
        query_c1c2(n2session, n, ConsistencyLevel.ALL)
    for n in xrange(30, 1000):
        query_c1c2(n2session, n, ConsistencyLevel.ALL)
Example 15: test_concurrent_decommission_not_allowed
def test_concurrent_decommission_not_allowed(self):
    """
    Test that concurrent decommission is not allowed
    """
    cluster = self.cluster
    cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
    cluster.populate(2).start(wait_other_notice=True)
    node1, node2 = cluster.nodelist()

    session = self.patient_cql_connection(node2)
    create_ks(session, 'ks', 1)
    create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
    insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ALL)

    mark = node2.mark_log()

    def decommission():
        node2.nodetool('decommission')

    # Launch the first decommission in an external thread
    t = Thread(target=decommission)
    t.start()

    # Make sure the first decommission is initialized before starting the second one
    node2.watch_log_for('DECOMMISSIONING', filename='debug.log')

    # Launch a second decommission; it should fail
    with pytest.raises(ToolError):
        node2.nodetool('decommission')

    # Check data is correctly forwarded to node1 after node2 is decommissioned
    t.join()
    node2.watch_log_for('DECOMMISSIONED', from_mark=mark)
    session = self.patient_cql_connection(node1)
    session.execute('USE ks')
    for n in range(0, 10000):
        query_c1c2(session, n, ConsistencyLevel.ONE)