This article collects typical usage examples of the Node.start method from Python's ccmlib.node module. If you are wondering exactly what Node.start does or how to use it, the curated code examples below should help. You can also explore further usage of the containing class, ccmlib.node.Node.
The following shows 15 code examples of the Node.start method, sorted by popularity by default.
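All fifteen examples follow the same three-step pattern: construct a Node, register it with cluster.add(), then call Node.start(). Here is a minimal sketch of that pattern (the Node constructor signature varies across ccmlib versions; the positional form mirrors the examples below, while the cluster path and Cassandra version are illustrative only):

# A minimal sketch of the shared pattern; path and version are hypothetical.
# Positional Node arguments, as used in the examples below: name, cluster,
# auto_bootstrap, thrift_interface, storage_interface, jmx_port,
# remote_debug_port, initial_token, plus a binary_interface keyword.
from ccmlib.cluster import Cluster
from ccmlib.node import Node

cluster = Cluster('/tmp/ccm', 'demo', cassandra_version='2.1.16')
cluster.populate(1).start()
node2 = Node('node2', cluster, True, ('127.0.0.2', 9160), ('127.0.0.2', 7000),
             '7200', '0', None, binary_interface=('127.0.0.2', 9042))
cluster.add(node2, False)
# Keyword options used throughout the examples: wait_for_binary_proto=True
# blocks until the native protocol port is up; replace_address=... and
# jvm_args=[...] drive the node-replacement scenarios shown below.
node2.start(wait_for_binary_proto=True)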
Example 1: replace_active_node_test
# Required import: from ccmlib.node import Node [as alias]
# Or: from ccmlib.node.Node import start [as alias]
def replace_active_node_test(self):
    debug("Starting cluster with 3 nodes.")
    cluster = self.cluster
    cluster.populate(3).start()
    [node1, node2, node3] = cluster.nodelist()
    debug("Inserting Data...")
    if cluster.version() < "2.1":
        node1.stress(['-o', 'insert', '--num-keys=10000', '--replication-factor=3'])
    else:
        node1.stress(['write', 'n=10000', '-schema', 'replication(factor=3)'])
    cursor = self.patient_cql_connection(node1)
    stress_table = 'keyspace1.standard1' if self.cluster.version() >= '2.1' else '"Keyspace1"."Standard1"'
    query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)
    initialData = cursor.execute(query)
    # replace active node 3 with node 4
    debug("Starting node 4 to replace active node 3")
    node4 = Node('node4', cluster, True, ('127.0.0.4', 9160), ('127.0.0.4', 7000), '7400', '0', None, ('127.0.0.4', 9042))
    cluster.add(node4, False)
    with self.assertRaises(NodeError):
        try:
            node4.start(replace_address='127.0.0.3', wait_for_binary_proto=True)
        except (NodeError, TimeoutError):
            raise NodeError("Node could not start.")
    checkError = node4.grep_log("java.lang.UnsupportedOperationException: Cannot replace a live node...")
    self.assertEqual(len(checkError), 1)
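Example 1 passes replace_address directly to Node.start, while Examples 6, 7, 8, 11, and 12 achieve replacement by passing a JVM system property through jvm_args. A side-by-side sketch of the two spellings (assuming, as these examples suggest, that ccmlib forwards replace_address to Cassandra's replace_address system property; the _first_boot variant only takes effect on a node's first start):

# Spelling 1: ccmlib keyword argument, as used in this example.
node4.start(replace_address='127.0.0.3', wait_for_binary_proto=True)

# Spelling 2: explicit JVM system property, as used in Examples 6-8.
node4.start(jvm_args=['-Dcassandra.replace_address_first_boot=127.0.0.3'],
            wait_other_notice=False)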
Example 2: decommission_node_test
# Required import: from ccmlib.node import Node [as alias]
# Or: from ccmlib.node.Node import start [as alias]
def decommission_node_test(self):
    debug("decommission_node_test()")
    cluster = self.cluster
    cluster.populate(1)
    # create and add a new node; it must not be a seed, otherwise
    # we get schema disagreement issues for a while after decommissioning it.
    node2 = Node("node2", cluster, True, ("127.0.0.2", 9160), ("127.0.0.2", 7000), "7200", None)
    cluster.add(node2, False)
    [node1, node2] = cluster.nodelist()
    node1.start()
    node2.start()
    wait(2)
    cursor = self.cql_connection(node1).cursor()
    self.prepare_for_changes(cursor)
    node2.decommission()
    wait(30)
    self.validate_schema_consistent(node1)
    self.make_schema_changes(cursor, namespace="ns1")
    # create and add a new node
    node3 = Node("node3", cluster, True, ("127.0.0.3", 9160), ("127.0.0.3", 7000), "7300", None)
    cluster.add(node3, True)
    node3.start()
    wait(30)
    self.validate_schema_consistent(node1)
Example 3: test_upgrade_legacy_table
# Required import: from ccmlib.node import Node [as alias]
# Or: from ccmlib.node.Node import start [as alias]
def test_upgrade_legacy_table(self):
    """
    Upgrade with bringing up the legacy tables after the newer nodes (without legacy tables)
    were started.
    @jira_ticket CASSANDRA-12813
    """
    cluster = self.cluster
    # Forcing cluster version on purpose
    cluster.set_install_dir(version="2.1.16")
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()
    # Wait for the default user to get created on one of the nodes
    time.sleep(15)
    # Upgrade to the current version
    for node in [node1, node2, node3]:
        node.drain()
        node.watch_log_for("DRAINED")
        node.stop(gently=True)
        self.set_node_to_current_version(node)
    cluster.start()
    # Make sure the system_auth table will get replicated to the node that we're going to replace
    session = self.patient_cql_connection(node1, user='cassandra', password='cassandra')
    session.execute("ALTER KEYSPACE system_auth WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 3 };")
    cluster.repair()
    cluster.stop()
    # Replace the node
    cluster.seeds.remove(node1)
    cluster.remove(node1)
    replacement_address = node1.address()
    replacement_node = Node('replacement', cluster=self.cluster, auto_bootstrap=True,
                            thrift_interface=(replacement_address, 9160), storage_interface=(replacement_address, 7000),
                            jmx_port='7400', remote_debug_port='0', initial_token=None, binary_interface=(replacement_address, 9042))
    self.set_node_to_current_version(replacement_node)
    cluster.add(replacement_node, True)
    replacement_node.start(wait_for_binary_proto=True)
    node2.start(wait_for_binary_proto=True)
    node3.start(wait_for_binary_proto=True)
    replacement_node.watch_log_for('Initializing system_auth.credentials')
    replacement_node.watch_log_for('Initializing system_auth.permissions')
    replacement_node.watch_log_for('Initializing system_auth.users')
    cluster.repair()
    replacement_node.watch_log_for('Repair command')
    # Should succeed. Will throw an NPE on pre-12813 code.
    self.patient_cql_connection(replacement_node, user='cassandra', password='cassandra')
Example 4: _init_new_loading_node
# Required import: from ccmlib.node import Node [as alias]
# Or: from ccmlib.node.Node import start [as alias]
def _init_new_loading_node(self, ks_name, create_stmt, use_thrift=False):
    loading_node = Node(
        name='node2',
        cluster=self.cluster,
        auto_bootstrap=False,
        thrift_interface=('127.0.0.2', 9160) if use_thrift else None,
        storage_interface=('127.0.0.2', 7000),
        jmx_port='7400',
        remote_debug_port='0',
        initial_token=None,
        binary_interface=('127.0.0.2', 9042)
    )
    logger.debug('adding node')
    self.cluster.add(loading_node, is_seed=True)
    logger.debug('starting new node')
    loading_node.start(wait_for_binary_proto=True)
    logger.debug('recreating ks and table')
    loading_session = self.patient_exclusive_cql_connection(loading_node)
    create_ks(loading_session, ks_name, rf=1)
    logger.debug('creating new table')
    loading_session.execute(create_stmt)
    logger.debug('stopping new node')
    loading_session.cluster.shutdown()
    loading_node.stop()
    return loading_node
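Note the conditional thrift_interface=('127.0.0.2', 9160) if use_thrift else None: like Example 7 below, this makes the Thrift port opt-in, which matters once the targeted Cassandra version no longer serves Thrift clients.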
Example 5: replace_nonexistent_node_test
# Required import: from ccmlib.node import Node [as alias]
# Or: from ccmlib.node.Node import start [as alias]
def replace_nonexistent_node_test(self):
    debug("Starting cluster with 3 nodes.")
    cluster = self.cluster
    cluster.populate(3).start()
    [node1, node2, node3] = cluster.nodelist()
    debug("Inserting Data...")
    if cluster.version() < "2.1":
        node1.stress(['-o', 'insert', '--num-keys=10000', '--replication-factor=3'])
    else:
        node1.stress(['write', 'n=10000', '-schema', 'replication(factor=3)'])
    cursor = self.patient_cql_connection(node1)
    stress_table = 'keyspace1.standard1' if self.cluster.version() >= '2.1' else '"Keyspace1"."Standard1"'
    query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)
    initialData = cursor.execute(query)
    debug('Start node 4 and replace an address with no node')
    node4 = Node('node4', cluster, True, ('127.0.0.4', 9160), ('127.0.0.4', 7000), '7400', '0', None, ('127.0.0.4', 9042))
    cluster.add(node4, False)
    # try to replace an unassigned ip address
    with self.assertRaises(NodeError):
        try:
            node4.start(replace_address='127.0.0.5', wait_for_binary_proto=True)
        except (NodeError, TimeoutError):
            raise NodeError("Node could not start.")
Example 6: replace_with_reset_resume_state_test
# Required import: from ccmlib.node import Node [as alias]
# Or: from ccmlib.node.Node import start [as alias]
def replace_with_reset_resume_state_test(self):
    """Test replace with resetting bootstrap progress"""
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()
    node1.stress(['write', 'n=100K', 'no-warmup', '-schema', 'replication(factor=3)'])
    session = self.patient_cql_connection(node1)
    stress_table = 'keyspace1.standard1'
    query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)
    initial_data = rows_to_list(session.execute(query))
    node3.stop(gently=False)
    # kill node1 in the middle of streaming to let it fail
    t = InterruptBootstrap(node1)
    t.start()
    # replace node 3 with node 4
    debug("Starting node 4 to replace node 3")
    node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),
                 storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',
                 initial_token=None, binary_interface=('127.0.0.4', 9042))
    # keep the streaming timeout low so the test won't hang
    node4.set_configuration_options(values={'streaming_socket_timeout_in_ms': 1000})
    cluster.add(node4, False)
    try:
        node4.start(jvm_args=["-Dcassandra.replace_address_first_boot=127.0.0.3"], wait_other_notice=False)
    except NodeError:
        pass  # node doesn't start as expected
    t.join()
    node1.start()
    # restart node4's bootstrap, resetting its bootstrap state
    node4.stop()
    mark = node4.mark_log()
    node4.start(jvm_args=[
        "-Dcassandra.replace_address_first_boot=127.0.0.3",
        "-Dcassandra.reset_bootstrap_progress=true"
    ])
    # check that we reset bootstrap state
    node4.watch_log_for("Resetting bootstrap progress to start fresh", from_mark=mark)
    # wait for node4 to be ready to query
    node4.watch_log_for("Listening for thrift clients...", from_mark=mark)
    # check that the second bootstrap succeeded
    assert_bootstrap_state(self, node4, 'COMPLETED')
    # queries should work again
    debug("Stopping old nodes")
    node1.stop(gently=False, wait_other_notice=True)
    node2.stop(gently=False, wait_other_notice=True)
    debug("Verifying data on new node.")
    session = self.patient_exclusive_cql_connection(node4)
    assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),
               expected=initial_data,
               cl=ConsistencyLevel.ONE)
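Note the contrast with Example 8 below: both tests interrupt the first replacement attempt the same way, but Example 8 resumes the partial bootstrap with nodetool bootstrap resume, whereas this test discards the partial state via -Dcassandra.reset_bootstrap_progress=true and streams everything again from scratch.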
Example 7: _test_disk_balance_replace
# Required import: from ccmlib.node import Node [as alias]
# Or: from ccmlib.node.Node import start [as alias]
def _test_disk_balance_replace(self, same_address):
    logger.debug("Creating cluster")
    cluster = self.cluster
    if self.dtest_config.use_vnodes:
        cluster.set_configuration_options(values={'num_tokens': 256})
    # apparently we have legitimate errors in the log when bootstrapping (see bootstrap_test.py)
    self.fixture_dtest_setup.allow_log_errors = True
    cluster.populate(4).start(wait_for_binary_proto=True)
    node1 = cluster.nodes['node1']
    logger.debug("Populating")
    node1.stress(['write', 'n=50k', 'no-warmup', '-rate', 'threads=100', '-schema', 'replication(factor=3)', 'compaction(strategy=SizeTieredCompactionStrategy,enabled=false)'])
    cluster.flush()
    logger.debug("Stopping and removing node2")
    node2 = cluster.nodes['node2']
    node2.stop(gently=False)
    self.cluster.remove(node2)
    node5_address = node2.address() if same_address else '127.0.0.5'
    logger.debug("Starting replacement node")
    node5 = Node('node5', cluster=self.cluster, auto_bootstrap=True,
                 thrift_interface=None, storage_interface=(node5_address, 7000),
                 jmx_port='7500', remote_debug_port='0', initial_token=None,
                 binary_interface=(node5_address, 9042))
    self.cluster.add(node5, False)
    node5.start(jvm_args=["-Dcassandra.replace_address_first_boot={}".format(node2.address())],
                wait_for_binary_proto=True,
                wait_other_notice=True)
    logger.debug("Checking replacement node is balanced")
    self.assert_balanced(node5)
Example 8: resumable_replace_test
# Required import: from ccmlib.node import Node [as alias]
# Or: from ccmlib.node.Node import start [as alias]
def resumable_replace_test(self):
    """
    Test resumable bootstrap while replacing a node. Feature introduced in
    2.2 with ticket https://issues.apache.org/jira/browse/CASSANDRA-8838
    @jira_ticket https://issues.apache.org/jira/browse/CASSANDRA-8838
    """
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()
    node1.stress(['write', 'n=100K', 'no-warmup', '-schema', 'replication(factor=3)'])
    session = self.patient_cql_connection(node1)
    stress_table = 'keyspace1.standard1'
    query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)
    initial_data = rows_to_list(session.execute(query))
    node3.stop(gently=False)
    # kill node1 in the middle of streaming to let it fail
    t = InterruptBootstrap(node1)
    t.start()
    # replace node 3 with node 4
    debug("Starting node 4 to replace node 3")
    node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),
                 storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',
                 initial_token=None, binary_interface=('127.0.0.4', 9042))
    # keep the streaming timeout low so the test won't hang
    node4.set_configuration_options(values={'streaming_socket_timeout_in_ms': 1000})
    cluster.add(node4, False)
    try:
        node4.start(jvm_args=["-Dcassandra.replace_address_first_boot=127.0.0.3"], wait_other_notice=False)
    except NodeError:
        pass  # node doesn't start as expected
    t.join()
    # bring back node1 and invoke nodetool bootstrap to resume bootstrapping
    node1.start()
    node4.nodetool('bootstrap resume')
    # check that we skipped already-retrieved ranges
    node4.watch_log_for("already available. Skipping streaming.")
    # wait for node4 to be ready to query
    node4.watch_log_for("Listening for thrift clients...")
    # check that the second bootstrap succeeded
    assert_bootstrap_state(self, node4, 'COMPLETED')
    # queries should work again
    debug("Stopping old nodes")
    node1.stop(gently=False, wait_other_notice=True)
    node2.stop(gently=False, wait_other_notice=True)
    debug("Verifying data on new node.")
    session = self.patient_exclusive_cql_connection(node4)
    assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),
               expected=initial_data,
               cl=ConsistencyLevel.ONE)
Example 9: multiple_repair_test
# Required import: from ccmlib.node import Node [as alias]
# Or: from ccmlib.node.Node import start [as alias]
def multiple_repair_test(self):
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()
    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'ks', 3)
    self.create_cf(session, 'cf', read_repair=0.0, columns={'c1': 'text', 'c2': 'text'})
    debug("insert data")
    insert_c1c2(session, keys=range(1, 50), consistency=ConsistencyLevel.ALL)
    node1.flush()
    debug("bringing down node 3")
    node3.flush()
    node3.stop(gently=False)
    debug("inserting additional data into node 1 and 2")
    insert_c1c2(session, keys=range(50, 100), consistency=ConsistencyLevel.TWO)
    node1.flush()
    node2.flush()
    debug("restarting and repairing node 3")
    node3.start(wait_for_binary_proto=True)
    if cluster.version() >= "2.2":
        node3.repair()
    else:
        node3.nodetool("repair -par -inc")
    # wait for stream handlers to be closed on Windows
    # after the session is finished (see CASSANDRA-10644)
    if is_win:
        time.sleep(2)
    debug("stopping node 2")
    node2.stop(gently=False)
    debug("inserting data in nodes 1 and 3")
    insert_c1c2(session, keys=range(100, 150), consistency=ConsistencyLevel.TWO)
    node1.flush()
    node3.flush()
    debug("start and repair node 2")
    node2.start(wait_for_binary_proto=True)
    if cluster.version() >= "2.2":
        node2.repair()
    else:
        node2.nodetool("repair -par -inc")
    debug("replace node and check data integrity")
    node3.stop(gently=False)
    node5 = Node('node5', cluster, True, ('127.0.0.5', 9160), ('127.0.0.5', 7000), '7500', '0', None, ('127.0.0.5', 9042))
    cluster.add(node5, False)
    node5.start(replace_address='127.0.0.3', wait_other_notice=True)
    assert_one(session, "SELECT COUNT(*) FROM ks.cf LIMIT 200", [149])
Example 10: multiple_repair_test
# Required import: from ccmlib.node import Node [as alias]
# Or: from ccmlib.node.Node import start [as alias]
def multiple_repair_test(self):
    cluster = self.cluster
    cluster.populate(3).start()
    [node1, node2, node3] = cluster.nodelist()
    cursor = self.patient_cql_connection(node1)
    self.create_ks(cursor, 'ks', 3)
    self.create_cf(cursor, 'cf', read_repair=0.0, columns={'c1': 'text', 'c2': 'text'})
    debug("insert data")
    for x in range(1, 50):
        insert_c1c2(cursor, x, ConsistencyLevel.ALL)
    node1.flush()
    debug("bringing down node 3")
    node3.flush()
    node3.stop(gently=False)
    debug("inserting additional data into node 1 and 2")
    for y in range(50, 100):
        insert_c1c2(cursor, y, ConsistencyLevel.TWO)
    node1.flush()
    node2.flush()
    debug("restarting and repairing node 3")
    node3.start()
    if cluster.version() >= "3.0":
        node3.repair()
    else:
        node3.nodetool("repair -par -inc")
    debug("stopping node 2")
    node2.stop(gently=False)
    debug("inserting data in nodes 1 and 3")
    for z in range(100, 150):
        insert_c1c2(cursor, z, ConsistencyLevel.TWO)
    node1.flush()
    node3.flush()
    debug("start and repair node 2")
    node2.start()
    if cluster.version() >= "3.0":
        node2.repair()
    else:
        node2.nodetool("repair -par -inc")
    debug("replace node and check data integrity")
    node3.stop(gently=False)
    node5 = Node('node5', cluster, True, ('127.0.0.5', 9160), ('127.0.0.5', 7000), '7500', '0', None, ('127.0.0.5', 9042))
    cluster.add(node5, False)
    node5.start(replace_address='127.0.0.3', wait_other_notice=True)
    assert_one(cursor, "SELECT COUNT(*) FROM ks.cf LIMIT 200", [149])
Example 11: resumable_replace_test
# Required import: from ccmlib.node import Node [as alias]
# Or: from ccmlib.node.Node import start [as alias]
def resumable_replace_test(self):
    """
    Test resumable bootstrap while replacing a node. Feature introduced in
    2.2 with ticket https://issues.apache.org/jira/browse/CASSANDRA-8838
    @jira_ticket https://issues.apache.org/jira/browse/CASSANDRA-8838
    """
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()
    node1.stress(['write', 'n=100K', '-schema', 'replication(factor=3)'])
    session = self.patient_cql_connection(node1)
    stress_table = 'keyspace1.standard1'
    query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)
    initialData = list(session.execute(query))
    node3.stop(gently=False)
    # kill node1 in the middle of streaming to let it fail
    t = InterruptBootstrap(node1)
    t.start()
    # replace node 3 with node 4
    debug("Starting node 4 to replace node 3")
    node4 = Node('node4', cluster, True, ('127.0.0.4', 9160), ('127.0.0.4', 7000), '7400', '0', None, binary_interface=('127.0.0.4', 9042))
    # keep the streaming timeout low so the test won't hang
    node4.set_configuration_options(values={'streaming_socket_timeout_in_ms': 1000})
    cluster.add(node4, False)
    try:
        node4.start(jvm_args=["-Dcassandra.replace_address_first_boot=127.0.0.3"], wait_other_notice=False)
    except NodeError:
        pass  # node doesn't start as expected
    t.join()
    # bring back node1 and invoke nodetool bootstrap to resume bootstrapping
    node1.start()
    node4.nodetool('bootstrap resume')
    # check that we skipped already-retrieved ranges
    node4.watch_log_for("already available. Skipping streaming.")
    # wait for node4 to be ready to query
    node4.watch_log_for("Listening for thrift clients...")
    # check that the second bootstrap succeeded
    session = self.exclusive_cql_connection(node4)
    rows = list(session.execute("SELECT bootstrapped FROM system.local WHERE key='local'"))
    assert len(rows) == 1
    assert rows[0][0] == 'COMPLETED', rows[0][0]
    # queries should work again
    debug("Verifying querying works again.")
    finalData = list(session.execute(query))
    self.assertListEqual(initialData, finalData)
Example 12: replace_with_reset_resume_state_test
# Required import: from ccmlib.node import Node [as alias]
# Or: from ccmlib.node.Node import start [as alias]
def replace_with_reset_resume_state_test(self):
    """Test replace with resetting bootstrap progress"""
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()
    node1.stress(['write', 'n=100000', '-schema', 'replication(factor=3)'])
    session = self.patient_cql_connection(node1)
    stress_table = 'keyspace1.standard1'
    query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)
    initialData = list(session.execute(query))
    node3.stop(gently=False)
    # kill node1 in the middle of streaming to let it fail
    t = InterruptBootstrap(node1)
    t.start()
    # replace node 3 with node 4
    debug("Starting node 4 to replace node 3")
    node4 = Node('node4', cluster, True, ('127.0.0.4', 9160), ('127.0.0.4', 7000), '7400', '0', None, binary_interface=('127.0.0.4', 9042))
    # keep the streaming timeout low so the test won't hang
    node4.set_configuration_options(values={'streaming_socket_timeout_in_ms': 1000})
    cluster.add(node4, False)
    try:
        node4.start(jvm_args=["-Dcassandra.replace_address_first_boot=127.0.0.3"], wait_other_notice=False)
    except NodeError:
        pass  # node doesn't start as expected
    t.join()
    node1.start()
    # restart node4's bootstrap, resetting its bootstrap state
    node4.stop()
    mark = node4.mark_log()
    node4.start(jvm_args=[
        "-Dcassandra.replace_address_first_boot=127.0.0.3",
        "-Dcassandra.reset_bootstrap_progress=true"
    ])
    # check that we reset bootstrap state
    node4.watch_log_for("Resetting bootstrap progress to start fresh", from_mark=mark)
    # wait for node4 to be ready to query
    node4.watch_log_for("Listening for thrift clients...", from_mark=mark)
    # check that the second bootstrap succeeded
    session = self.exclusive_cql_connection(node4)
    rows = list(session.execute("SELECT bootstrapped FROM system.local WHERE key='local'"))
    assert len(rows) == 1
    assert rows[0][0] == 'COMPLETED', rows[0][0]
    # queries should work again
    debug("Verifying querying works again.")
    finalData = list(session.execute(query))
    self.assertListEqual(initialData, finalData)
Example 13: issue_150_test
# Required import: from ccmlib.node import Node [as alias]
# Or: from ccmlib.node.Node import start [as alias]
def issue_150_test(self):
    self.cluster = Cluster(CLUSTER_PATH, "150", cassandra_version='2.0.9')
    self.cluster.populate([1, 2], use_vnodes=True)
    self.cluster.start()
    dcs = [node.data_center for node in self.cluster.nodelist()]
    dcs.append('dc2')
    node4 = Node('node4', self.cluster, True, ('127.0.0.4', 9160), ('127.0.0.4', 7000),
                 '7400', '2000', None)
    self.cluster.add(node4, False, 'dc2')
    node4.start()
    dcs_2 = [node.data_center for node in self.cluster.nodelist()]
    self.assertItemsEqual(dcs, dcs_2)
    node4.nodetool('status')
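Note the three-argument form self.cluster.add(node4, False, 'dc2'): the trailing argument assigns the new node to a data center, and the assertItemsEqual check verifies that node4 actually came up in dc2.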
Example 14: replace_nonexistent_node_test
# Required import: from ccmlib.node import Node [as alias]
# Or: from ccmlib.node.Node import start [as alias]
def replace_nonexistent_node_test(self):
    debug("Starting cluster with 3 nodes.")
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()
    debug('Start node 4 and replace an address with no node')
    node4 = Node('node4', cluster, True, ('127.0.0.4', 9160), ('127.0.0.4', 7000), '7400', '0', None, binary_interface=('127.0.0.4', 9042))
    cluster.add(node4, False)
    # try to replace an unassigned ip address
    mark = node4.mark_log()
    node4.start(replace_address='127.0.0.5', wait_other_notice=False)
    node4.watch_log_for("java.lang.RuntimeException: Cannot replace_address /127.0.0.5 because it doesn't exist in gossip", from_mark=mark)
    self.check_not_running(node4)
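This is the same scenario as Example 5 with a different assertion style: Example 5 expects start() itself to raise a NodeError, while this version lets the start fail quietly, greps the log for the gossip error via watch_log_for, and then confirms the node is down.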
Example 15: test_decommission_node
# Required import: from ccmlib.node import Node [as alias]
# Or: from ccmlib.node.Node import start [as alias]
def test_decommission_node(self):
    logger.debug("decommission_node_test()")
    cluster = self.cluster
    cluster.populate(1)
    # create and add a new node; it must not be a seed, otherwise
    # we get schema disagreement issues for a while after decommissioning it.
    node2 = Node('node2',
                 cluster,
                 True,
                 ('127.0.0.2', 9160),
                 ('127.0.0.2', 7000),
                 '7200',
                 '0',
                 None,
                 binary_interface=('127.0.0.2', 9042))
    cluster.add(node2, False)
    node1, node2 = cluster.nodelist()
    node1.start(wait_for_binary_proto=True)
    node2.start(wait_for_binary_proto=True)
    wait(2)
    session = self.patient_cql_connection(node1)
    self.prepare_for_changes(session)
    node2.decommission()
    wait(30)
    self.validate_schema_consistent(node1)
    self.make_schema_changes(session, namespace='ns1')
    # create and add a new node
    node3 = Node('node3',
                 cluster,
                 True,
                 ('127.0.0.3', 9160),
                 ('127.0.0.3', 7000),
                 '7300',
                 '0',
                 None,
                 binary_interface=('127.0.0.3', 9042))
    cluster.add(node3, True)
    node3.start(wait_for_binary_proto=True)
    wait(30)
    self.validate_schema_consistent(node1)