This page collects typical usage examples of the Python function tools.data.insert_c1c2. If you are unsure what insert_c1c2 does or how to call it, the curated code samples below should help.
Fifteen code examples of insert_c1c2 are shown below, sorted by popularity by default.
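Before looking at the full tests, here is a minimal sketch of the call pattern that recurs in all of the examples on this page: create a keyspace and the standard c1c2 table, write rows with insert_c1c2, then read each key back with query_c1c2. This is only an orientation sketch; it assumes the usual cassandra-dtest fixtures (a base class providing self.cluster and patient_cql_connection) and that the helpers are imported as in the examples below, so it is not a complete standalone test.

def insert_c1c2_smoke_test(self):
    # Sketch only: assumes the standard cassandra-dtest helpers
    # (create_ks, create_c1c2_table, insert_c1c2, query_c1c2) are imported
    # from tools.data / dtest, and ConsistencyLevel from the Python driver.
    cluster = self.cluster
    cluster.populate(2).start(wait_for_binary_proto=True)
    node1, _ = cluster.nodelist()

    session = self.patient_cql_connection(node1)
    create_ks(session, 'ks', 2)           # keyspace 'ks' with RF=2
    create_c1c2_table(self, session)      # table with columns c1, c2

    # Write 100 rows at QUORUM, then read each key back at QUORUM.
    insert_c1c2(session, n=100, consistency=ConsistencyLevel.QUORUM)
    for k in range(100):
        query_c1c2(session, k, ConsistencyLevel.QUORUM)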
Example 1: blacklisted_directory_test
def blacklisted_directory_test(self):
    cluster = self.cluster
    cluster.set_datadir_count(3)
    cluster.populate(1)
    [node] = cluster.nodelist()
    remove_perf_disable_shared_mem(node)
    cluster.start(wait_for_binary_proto=True)

    session = self.patient_cql_connection(node)
    create_ks(session, 'ks', 1)
    create_c1c2_table(self, session)
    insert_c1c2(session, n=10000)
    node.flush()
    for k in xrange(0, 10000):
        query_c1c2(session, k)

    node.compact()
    mbean = make_mbean('db', type='BlacklistedDirectories')
    with JolokiaAgent(node) as jmx:
        jmx.execute_method(mbean, 'markUnwritable', [os.path.join(node.get_path(), 'data0')])

    for k in xrange(0, 10000):
        query_c1c2(session, k)

    node.nodetool('relocatesstables')

    for k in xrange(0, 10000):
        query_c1c2(session, k)
Example 2: _do_hinted_handoff
def _do_hinted_handoff(self, node1, node2, enabled, keyspace='ks'):
    """
    Test that if we stop one node the other one
    will store hints only when hinted handoff is enabled
    """
    session = self.patient_exclusive_cql_connection(node1)
    create_ks(session, keyspace, 2)
    create_c1c2_table(self, session)

    node2.stop(wait_other_notice=True)

    insert_c1c2(session, n=100, consistency=ConsistencyLevel.ONE)

    log_mark = node1.mark_log()
    node2.start(wait_other_notice=True)

    if enabled:
        node1.watch_log_for(["Finished hinted"], from_mark=log_mark, timeout=120)

    node1.stop(wait_other_notice=True)

    # Check node2 for all the keys that should have been delivered via HH if enabled or not if not enabled
    session = self.patient_exclusive_cql_connection(node2, keyspace=keyspace)
    for n in xrange(0, 100):
        if enabled:
            query_c1c2(session, n, ConsistencyLevel.ONE)
        else:
            query_c1c2(session, n, ConsistencyLevel.ONE, tolerate_missing=True, must_be_missing=True)
Example 3: test_decommission
def test_decommission(self):
    cluster = self.cluster

    tokens = cluster.balanced_tokens(4)
    cluster.populate(4, tokens=tokens).start()
    node1, node2, node3, node4 = cluster.nodelist()

    session = self.patient_cql_connection(node1)
    create_ks(session, 'ks', 2)
    create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})

    insert_c1c2(session, n=30000, consistency=ConsistencyLevel.QUORUM)

    cluster.flush()
    sizes = [node.data_size() for node in cluster.nodelist() if node.is_running()]
    init_size = sizes[0]
    assert_almost_equal(*sizes)

    time.sleep(.5)
    node4.decommission()
    node4.stop()
    cluster.cleanup()
    time.sleep(.5)

    # Check we can get all the keys
    for n in range(0, 30000):
        query_c1c2(session, n, ConsistencyLevel.QUORUM)

    sizes = [node.data_size() for node in cluster.nodelist() if node.is_running()]
    logger.debug(sizes)
    assert_almost_equal(sizes[0], sizes[1])
    assert_almost_equal((2.0 / 3.0) * sizes[0], sizes[2])
    assert_almost_equal(sizes[2], init_size)
Example 4: test_move_single_node
def test_move_single_node(self):
    """ Test moving a node in a single-node cluster (#4200) """
    cluster = self.cluster

    # Create an unbalanced ring
    cluster.populate(1, tokens=[0]).start()
    node1 = cluster.nodelist()[0]
    time.sleep(0.2)

    session = self.patient_cql_connection(node1)
    create_ks(session, 'ks', 1)
    create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})

    insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ONE)

    cluster.flush()

    node1.move(2**25)
    time.sleep(1)

    cluster.cleanup()

    # Check we can get all the keys
    for n in range(0, 10000):
        query_c1c2(session, n, ConsistencyLevel.ONE)
Example 5: readrepair_test
def readrepair_test(self):
    cluster = self.cluster
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})

    if DISABLE_VNODES:
        cluster.populate(2).start()
    else:
        tokens = cluster.balanced_tokens(2)
        cluster.populate(2, tokens=tokens).start()
    node1, node2 = cluster.nodelist()

    session = self.patient_cql_connection(node1)
    create_ks(session, 'ks', 2)
    create_c1c2_table(self, session, read_repair=1.0)

    node2.stop(wait_other_notice=True)

    insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ONE)

    node2.start(wait_for_binary_proto=True, wait_other_notice=True)

    # query everything to cause RR
    for n in xrange(0, 10000):
        query_c1c2(session, n, ConsistencyLevel.QUORUM)

    node1.stop(wait_other_notice=True)

    # Check node2 for all the keys that should have been repaired
    session = self.patient_cql_connection(node2, keyspace='ks')
    for n in xrange(0, 10000):
        query_c1c2(session, n, ConsistencyLevel.ONE)
Example 6: quorum_available_during_failure_test
def quorum_available_during_failure_test(self):
    CL = ConsistencyLevel.QUORUM
    RF = 3

    debug("Creating a ring")
    cluster = self.cluster
    if DISABLE_VNODES:
        cluster.populate(3).start()
    else:
        tokens = cluster.balanced_tokens(3)
        cluster.populate(3, tokens=tokens).start()
    node1, node2, node3 = cluster.nodelist()

    debug("Set to talk to node 2")
    session = self.patient_cql_connection(node2)
    create_ks(session, 'ks', RF)
    create_c1c2_table(self, session)

    debug("Generating some data")
    insert_c1c2(session, n=100, consistency=CL)

    debug("Taking down node1")
    node1.stop(wait_other_notice=True)

    debug("Reading back data.")
    for n in xrange(100):
        query_c1c2(session, n, CL)
Example 7: test_resumable_decommission
def test_resumable_decommission(self):
    """
    @jira_ticket CASSANDRA-12008

    Test decommission operation is resumable
    """
    self.fixture_dtest_setup.ignore_log_patterns = [r'Streaming error occurred',
                                                    r'Error while decommissioning node',
                                                    r'Remote peer 127.0.0.2 failed stream session',
                                                    r'Remote peer 127.0.0.2:7000 failed stream session']
    cluster = self.cluster
    cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
    cluster.populate(3, install_byteman=True).start(wait_other_notice=True)
    node1, node2, node3 = cluster.nodelist()

    session = self.patient_cql_connection(node2)
    # reduce system_distributed RF to 2 so we don't require forceful decommission
    session.execute("ALTER KEYSPACE system_distributed WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'2'};")
    create_ks(session, 'ks', 2)
    create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
    insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ALL)

    # Execute first rebuild, should fail
    with pytest.raises(ToolError):
        if cluster.version() >= '4.0':
            script = ['./byteman/4.0/decommission_failure_inject.btm']
        else:
            script = ['./byteman/pre4.0/decommission_failure_inject.btm']
        node2.byteman_submit(script)
        node2.nodetool('decommission')

    # Make sure previous ToolError is due to decommission
    node2.watch_log_for('Error while decommissioning node')

    # Decommission again
    mark = node2.mark_log()
    node2.nodetool('decommission')

    # Check decommission is done and we skipped transferred ranges
    node2.watch_log_for('DECOMMISSIONED', from_mark=mark)
    node2.grep_log("Skipping transferred range .* of keyspace ks, endpoint {}".format(node2.address_for_current_version_slashy()), filename='debug.log')

    # Check data is correctly forwarded to node1 and node3
    cluster.remove(node2)
    node3.stop(gently=False)
    session = self.patient_exclusive_cql_connection(node1)
    session.execute('USE ks')
    for i in range(0, 10000):
        query_c1c2(session, i, ConsistencyLevel.ONE)
    node1.stop(gently=False)
    node3.start()
    session.shutdown()
    mark = node3.mark_log()
    node3.watch_log_for('Starting listening for CQL clients', from_mark=mark)
    session = self.patient_exclusive_cql_connection(node3)
    session.execute('USE ks')
    for i in range(0, 10000):
        query_c1c2(session, i, ConsistencyLevel.ONE)
Example 8: _deprecated_repair_jmx
def _deprecated_repair_jmx(self, method, arguments):
    """
    * Launch a two node, two DC cluster
    * Create a keyspace and table
    * Insert some data
    * Call the deprecated repair JMX API based on the arguments passed into this method
    * Check the node log to see if the correct repair was performed based on the jmx args
    """
    cluster = self.cluster

    logger.debug("Starting cluster..")
    cluster.populate([1, 1])
    node1, node2 = cluster.nodelist()
    remove_perf_disable_shared_mem(node1)
    cluster.start()
    supports_pull_repair = cluster.version() >= LooseVersion('3.10')

    session = self.patient_cql_connection(node1)
    create_ks(session, 'ks', 2)
    create_cf(session, 'cf', read_repair=0.0, columns={'c1': 'text', 'c2': 'text'})
    insert_c1c2(session, n=1000, consistency=ConsistencyLevel.ALL)

    # Run repair
    mbean = make_mbean('db', 'StorageService')
    with JolokiaAgent(node1) as jmx:
        # assert repair runs and returns valid cmd number
        assert jmx.execute_method(mbean, method, arguments) == 1
    # wait for log to start
    node1.watch_log_for("Starting repair command")
    # get repair parameters from the log
    line = node1.grep_log(("Starting repair command #1" + (" \([^\)]+\)" if cluster.version() >= LooseVersion("3.10") else "") +
                           ", repairing keyspace ks with repair options \(parallelism: (?P<parallelism>\w+), primary range: (?P<pr>\w+), "
                           "incremental: (?P<incremental>\w+), job threads: (?P<jobs>\d+), ColumnFamilies: (?P<cfs>.+), dataCenters: (?P<dc>.+), "
                           "hosts: (?P<hosts>.+), # of ranges: (?P<ranges>\d+)(, pull repair: (?P<pullrepair>true|false))?\)"))

    assert_length_equal(line, 1)
    line, m = line[0]

    if supports_pull_repair:
        assert m.group("pullrepair") == "false", "Pull repair cannot be enabled through the deprecated API so the pull repair option should always be false."

    return {"parallelism": m.group("parallelism"),
            "primary_range": m.group("pr"),
            "incremental": m.group("incremental"),
            "job_threads": m.group("jobs"),
            "column_families": m.group("cfs"),
            "data_centers": m.group("dc"),
            "hosts": m.group("hosts"),
            "ranges": m.group("ranges")}
Example 9: resumable_decommission_test
def resumable_decommission_test(self):
    """
    @jira_ticket CASSANDRA-12008

    Test decommission operation is resumable
    """
    self.ignore_log_patterns = [r'Streaming error occurred', r'Error while decommissioning node', r'Remote peer 127.0.0.2 failed stream session']
    cluster = self.cluster
    cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
    cluster.populate(3, install_byteman=True).start(wait_other_notice=True)
    node1, node2, node3 = cluster.nodelist()

    session = self.patient_cql_connection(node2)
    create_ks(session, 'ks', 2)
    create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
    insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ALL)

    # Execute first rebuild, should fail
    with self.assertRaises(ToolError):
        script = ['./byteman/decommission_failure_inject.btm']
        node2.byteman_submit(script)
        node2.nodetool('decommission')

    # Make sure previous ToolError is due to decommission
    node2.watch_log_for('Error while decommissioning node')

    # Decommission again
    mark = node2.mark_log()
    node2.nodetool('decommission')

    # Check decommission is done and we skipped transferred ranges
    node2.watch_log_for('DECOMMISSIONED', from_mark=mark)
    node2.grep_log("Skipping transferred range .* of keyspace ks, endpoint /127.0.0.3", filename='debug.log')

    # Check data is correctly forwarded to node1 and node3
    cluster.remove(node2)
    node3.stop(gently=False)
    session = self.patient_exclusive_cql_connection(node1)
    session.execute('USE ks')
    for i in xrange(0, 10000):
        query_c1c2(session, i, ConsistencyLevel.ONE)
    node1.stop(gently=False)
    node3.start()
    session.shutdown()
    mark = node3.mark_log()
    node3.watch_log_for('Starting listening for CQL clients', from_mark=mark)
    session = self.patient_exclusive_cql_connection(node3)
    session.execute('USE ks')
    for i in xrange(0, 10000):
        query_c1c2(session, i, ConsistencyLevel.ONE)
Example 10: hintedhandoff_decom_test
def hintedhandoff_decom_test(self):
    self.cluster.populate(4).start(wait_for_binary_proto=True)
    [node1, node2, node3, node4] = self.cluster.nodelist()
    session = self.patient_cql_connection(node1)
    create_ks(session, 'ks', 2)
    create_c1c2_table(self, session)
    node4.stop(wait_other_notice=True)
    insert_c1c2(session, n=100, consistency=ConsistencyLevel.ONE)
    node1.decommission()
    node4.start(wait_for_binary_proto=True)

    node2.decommission()
    node3.decommission()

    time.sleep(5)
    for x in xrange(0, 100):
        query_c1c2(session, x, ConsistencyLevel.ONE)
Example 11: test_non_local_read
def test_non_local_read(self):
    """ This test reads from a coordinator we know has no copy of the data """
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    session = self.patient_cql_connection(node1)
    create_ks(session, 'ks', 2)
    create_c1c2_table(self, session)

    # insert and get at CL.QUORUM (since RF=2, node1 won't have all keys locally)
    insert_c1c2(session, n=1000, consistency=ConsistencyLevel.QUORUM)
    for n in range(0, 1000):
        query_c1c2(session, n, ConsistencyLevel.QUORUM)
Example 12: test_movement
def test_movement(self):
    cluster = self.cluster

    # Create an unbalanced ring
    cluster.populate(3, tokens=[0, 2**48, 2**62]).start()
    node1, node2, node3 = cluster.nodelist()

    session = self.patient_cql_connection(node1)
    create_ks(session, 'ks', 1)
    create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})

    insert_c1c2(session, n=30000, consistency=ConsistencyLevel.ONE)

    cluster.flush()

    # Move nodes to balance the cluster
    def move_node(node, token):
        mark = node.mark_log()
        node.move(token)  # can't assume 0 is balanced with m3p
        node.watch_log_for('{} state jump to NORMAL'.format(node.address_for_current_version()), from_mark=mark, timeout=180)
        time.sleep(3)

    balancing_tokens = cluster.balanced_tokens(3)

    move_node(node1, balancing_tokens[0])
    move_node(node2, balancing_tokens[1])
    move_node(node3, balancing_tokens[2])

    time.sleep(1)
    cluster.cleanup()

    for node in cluster.nodelist():
        # after moving nodes we need to relocate any tokens in the wrong places, and after doing that
        # we might have overlapping tokens on the disks, so run a major compaction to get balance even
        if cluster.version() >= '3.2':
            node.nodetool("relocatesstables")
        node.nodetool("compact")

    # Check we can get all the keys
    for n in range(0, 30000):
        query_c1c2(session, n, ConsistencyLevel.ONE)

    # Now the load should be basically even
    sizes = [node.data_size() for node in [node1, node2, node3]]

    assert_almost_equal(sizes[0], sizes[1], error=0.05)
    assert_almost_equal(sizes[0], sizes[2], error=0.05)
    assert_almost_equal(sizes[1], sizes[2], error=0.05)
Example 13: test_consistent_reads_after_bootstrap
def test_consistent_reads_after_bootstrap(self):
    logger.debug("Creating a ring")
    cluster = self.cluster
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False,
                                              'write_request_timeout_in_ms': 60000,
                                              'read_request_timeout_in_ms': 60000,
                                              'dynamic_snitch_badness_threshold': 0.0})
    cluster.set_batch_commitlog(enabled=True)

    cluster.populate(2)
    node1, node2 = cluster.nodelist()
    cluster.start(wait_for_binary_proto=True, wait_other_notice=True)

    logger.debug("Set to talk to node 2")
    n2session = self.patient_cql_connection(node2)
    create_ks(n2session, 'ks', 2)
    create_c1c2_table(self, n2session)

    logger.debug("Generating some data for all nodes")
    insert_c1c2(n2session, keys=list(range(10, 20)), consistency=ConsistencyLevel.ALL)

    node1.flush()
    logger.debug("Taking down node1")
    node1.stop(wait_other_notice=True)

    logger.debug("Writing data to only node2")
    insert_c1c2(n2session, keys=list(range(30, 1000)), consistency=ConsistencyLevel.ONE)
    node2.flush()

    logger.debug("Restart node1")
    node1.start(wait_other_notice=True)

    logger.debug("Bootstrapping node3")
    node3 = new_node(cluster)
    node3.start(wait_for_binary_proto=True)

    n3session = self.patient_cql_connection(node3)
    n3session.execute("USE ks")
    logger.debug("Checking that no data was lost")

    for n in range(10, 20):
        query_c1c2(n3session, n, ConsistencyLevel.ALL)

    for n in range(30, 1000):
        query_c1c2(n3session, n, ConsistencyLevel.ALL)
Example 14: _test_streaming
def _test_streaming(self, op_zerocopy, op_partial, num_partial, num_zerocopy,
                    compaction_strategy='LeveledCompactionStrategy', num_keys=1000, rf=3, num_nodes=3):
    keys = num_keys
    cluster = self.cluster
    tokens = cluster.balanced_tokens(num_nodes)

    cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
    cluster.set_configuration_options(values={'num_tokens': 1})

    cluster.populate(num_nodes)
    nodes = cluster.nodelist()

    for i in range(0, len(nodes)):
        nodes[i].set_configuration_options(values={'initial_token': tokens[i]})

    cluster.start(wait_for_binary_proto=True)

    session = self.patient_cql_connection(nodes[0])

    create_ks(session, name='ks2', rf=rf)
    create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'},
              compaction_strategy=compaction_strategy)

    insert_c1c2(session, n=keys, consistency=ConsistencyLevel.ALL)

    session_n2 = self.patient_exclusive_cql_connection(nodes[1])
    session_n2.execute("TRUNCATE system.available_ranges;")

    mark = nodes[1].mark_log()
    nodes[1].nodetool('rebuild -ks ks2')

    nodes[1].watch_log_for('Completed submission of build tasks', filename='debug.log', timeout=120)

    zerocopy_streamed_sstable = len(
        nodes[1].grep_log('.*CassandraEntireSSTableStreamReader.*?Finished receiving Data.*', filename='debug.log',
                          from_mark=mark))
    partial_streamed_sstable = len(
        nodes[1].grep_log('.*CassandraStreamReader.*?Finished receiving file.*', filename='debug.log',
                          from_mark=mark))

    assert op_zerocopy(zerocopy_streamed_sstable, num_zerocopy), "%s %s %s" % (num_zerocopy, opmap.get(op_zerocopy),
                                                                               zerocopy_streamed_sstable)
    assert op_partial(partial_streamed_sstable, num_partial), "%s %s %s" % (num_partial, op_partial,
                                                                            partial_streamed_sstable)
Example 15: test_movement
def test_movement(self):
    cluster = self.cluster

    # Create an unbalanced ring
    cluster.populate(3, tokens=[0, 2**48, 2**62]).start()
    node1, node2, node3 = cluster.nodelist()

    session = self.patient_cql_connection(node1)
    create_ks(session, 'ks', 1)
    create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})

    insert_c1c2(session, n=30000, consistency=ConsistencyLevel.ONE)

    cluster.flush()

    # Move nodes to balance the cluster
    def move_node(node, token):
        mark = node.mark_log()
        node.move(token)  # can't assume 0 is balanced with m3p
        node.watch_log_for('{} state jump to NORMAL'.format(node.address_for_current_version()), from_mark=mark, timeout=180)
        time.sleep(3)

    balancing_tokens = cluster.balanced_tokens(3)

    move_node(node1, balancing_tokens[0])
    move_node(node2, balancing_tokens[1])
    move_node(node3, balancing_tokens[2])

    time.sleep(1)
    cluster.cleanup()

    # Check we can get all the keys
    for n in range(0, 30000):
        query_c1c2(session, n, ConsistencyLevel.ONE)

    # Now the load should be basically even
    sizes = [node.data_size() for node in [node1, node2, node3]]

    assert_almost_equal(sizes[0], sizes[1])
    assert_almost_equal(sizes[0], sizes[2])
    assert_almost_equal(sizes[1], sizes[2])