This page collects and summarizes typical usage examples of the Node.grep_log method from the Python module ccmlib.node. If you are unsure what exactly Python's Node.grep_log does or how to use it, the curated code examples below may help. You can also explore further usage examples of the containing class, ccmlib.node.Node.
Shown below are 6 code examples of Node.grep_log, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
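Before diving into the examples, a minimal sketch of what grep_log does may help: it scans a node's log file for a regular expression and immediately returns whatever matches exist so far, without blocking. The node setup here is an assumption for illustration, not part of the examples below.

# A minimal sketch, assuming `node1` is a running ccmlib Node
# obtained elsewhere (e.g. node1 = cluster.nodelist()[0]).
# grep_log searches the node's system.log for a regex and returns
# the matches found so far; unlike watch_log_for it does not wait.
matches = node1.grep_log("Starting listening for CQL clients")  # pattern chosen for illustration
print("found {} matching line(s)".format(len(matches)))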
Example 1: replace_active_node_test
# Required import: from ccmlib.node import Node [as alias]
# Or: from ccmlib.node.Node import grep_log [as alias]
def replace_active_node_test(self):
    debug("Starting cluster with 3 nodes.")
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    debug("Inserting Data...")
    if cluster.version() < "2.1":
        node1.stress(['-o', 'insert', '--num-keys=10000', '--replication-factor=3'])
    else:
        node1.stress(['write', 'n=10000', '-schema', 'replication(factor=3)'])
    cursor = self.patient_cql_connection(node1)
    stress_table = 'keyspace1.standard1' if self.cluster.version() >= '2.1' else '"Keyspace1"."Standard1"'
    query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)
    initialData = cursor.execute(query)

    # replace active node 3 with node 4
    debug("Starting node 4 to replace active node 3")
    node4 = Node('node4', cluster, True, ('127.0.0.4', 9160), ('127.0.0.4', 7000), '7400', '0', None, ('127.0.0.4', 9042))
    cluster.add(node4, False)

    with self.assertRaises(NodeError):
        try:
            node4.start(replace_address='127.0.0.3', wait_for_binary_proto=True)
        except (NodeError, TimeoutError):
            raise NodeError("Node could not start.")

    checkError = node4.grep_log("java.lang.UnsupportedOperationException: Cannot replace a live node...")
    self.assertEqual(len(checkError), 1)
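Example 1 greps the entire log from the beginning. When a test performs several operations on the same node, it can be useful to scope the search to output produced after a known point. A hedged sketch, assuming ccmlib's mark_log() helper and grep_log's from_mark parameter behave as they commonly do in the dtest suite:

# Sketch: restrict grep_log to log lines written after a marker.
# mark_log() and from_mark are assumptions about the ccmlib API;
# verify them against the ccmlib version you are using.
mark = node1.mark_log()  # position of the current end of system.log
# ... perform an action expected to produce new log output ...
hits = node1.grep_log("Cannot replace a live node", from_mark=mark)
# only lines appended after `mark` are searched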
Example 2: replace_active_node_test
# Required import: from ccmlib.node import Node [as alias]
# Or: from ccmlib.node.Node import grep_log [as alias]
def replace_active_node_test(self):
    debug("Starting cluster with 3 nodes.")
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    # replace active node 3 with node 4
    debug("Starting node 4 to replace active node 3")
    node4 = Node('node4', cluster, True, ('127.0.0.4', 9160), ('127.0.0.4', 7000), '7400', '0', None, ('127.0.0.4', 9042))
    cluster.add(node4, False)

    with self.assertRaises(NodeError):
        try:
            node4.start(replace_address='127.0.0.3', wait_for_binary_proto=True)
        except (NodeError, TimeoutError):
            raise NodeError("Node could not start.")

    checkError = node4.grep_log("java.lang.UnsupportedOperationException: Cannot replace a live node...")
    self.assertEqual(len(checkError), 1)
Example 3: _replace_node_test
# Required import: from ccmlib.node import Node [as alias]
# Or: from ccmlib.node.Node import grep_log [as alias]
def _replace_node_test(self, gently):
    """
    Check that the replace address function correctly replaces a node that has failed in a cluster.
    Create a cluster, cause a node to fail, and bring up a new node with the replace_address parameter.
    Check that tokens are migrated and that data is replicated properly.
    """
    debug("Starting cluster with 3 nodes.")
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    if DISABLE_VNODES:
        numNodes = 1
    else:
        # a little hacky but grep_log returns the whole line...
        numNodes = int(node3.get_conf_option('num_tokens'))
    debug(numNodes)

    debug("Inserting Data...")
    node1.stress(['write', 'n=10K', '-schema', 'replication(factor=3)'])

    session = self.patient_cql_connection(node1)
    session.default_timeout = 45
    stress_table = 'keyspace1.standard1'
    query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)
    initialData = list(session.execute(query))

    # stop node; the query should not work at consistency level THREE
    debug("Stopping node 3.")
    node3.stop(gently=gently, wait_other_notice=True)

    debug("Testing node stoppage (query should fail).")
    with self.assertRaises(NodeUnavailable):
        try:
            query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)
            session.execute(query)
        except (Unavailable, ReadTimeout):
            raise NodeUnavailable("Node could not be queried.")

    # replace node 3 with node 4
    debug("Starting node 4 to replace node 3")
    node4 = Node('node4', cluster, True, ('127.0.0.4', 9160), ('127.0.0.4', 7000), '7400', '0', None, binary_interface=('127.0.0.4', 9042))
    cluster.add(node4, False)
    node4.start(replace_address='127.0.0.3', wait_for_binary_proto=True)

    # query should work again
    debug("Verifying querying works again.")
    query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)
    finalData = list(session.execute(query))
    self.assertListEqual(initialData, finalData)

    debug("Verifying tokens migrated successfully")
    movedTokensList = node4.grep_log("Token .* changing ownership from /127.0.0.3 to /127.0.0.4")
    debug(movedTokensList[0])
    self.assertEqual(len(movedTokensList), numNodes)

    # check that restarting node 3 doesn't work
    debug("Try to restart node 3 (should fail)")
    node3.start(wait_other_notice=False)
    checkCollision = node1.grep_log("between /127.0.0.3 and /127.0.0.4; /127.0.0.4 is the new owner")
    debug(checkCollision)
    self.assertEqual(len(checkCollision), 1)
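Note that grep_log returns immediately with whatever has been logged so far, so Example 3 works only because node4.start(..., wait_for_binary_proto=True) guarantees the replacement has finished before the log is grepped. When the timing is less certain, the blocking counterpart watch_log_for (used in Example 6) is safer; a brief sketch, with the timeout value as an assumption:

# Sketch: wait for a pattern instead of polling with grep_log.
# watch_log_for blocks until the regex appears in the node's log,
# raising a timeout error if it does not show up within `timeout` seconds.
node1.watch_log_for("is the new owner", timeout=60)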
Example 4: replace_first_boot_test
# Required import: from ccmlib.node import Node [as alias]
# Or: from ccmlib.node.Node import grep_log [as alias]
def replace_first_boot_test(self):
    debug("Starting cluster with 3 nodes.")
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    if DISABLE_VNODES:
        numNodes = 1
    else:
        # a little hacky but grep_log returns the whole line...
        numNodes = int(node3.get_conf_option('num_tokens'))
    debug(numNodes)

    debug("Inserting Data...")
    node1.stress(['write', 'n=10K', '-schema', 'replication(factor=3)'])

    session = self.patient_cql_connection(node1)
    stress_table = 'keyspace1.standard1'
    query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)
    initialData = list(session.execute(query))

    # stop node; the query should not work at consistency level THREE
    debug("Stopping node 3.")
    node3.stop(gently=False)

    debug("Testing node stoppage (query should fail).")
    with self.assertRaises(NodeUnavailable):
        try:
            session.execute(query, timeout=30)
        except (Unavailable, ReadTimeout):
            raise NodeUnavailable("Node could not be queried.")

    # replace node 3 with node 4
    debug("Starting node 4 to replace node 3")
    node4 = Node('node4', cluster, True, ('127.0.0.4', 9160), ('127.0.0.4', 7000), '7400', '0', None, binary_interface=('127.0.0.4', 9042))
    cluster.add(node4, False)
    node4.start(jvm_args=["-Dcassandra.replace_address_first_boot=127.0.0.3"], wait_for_binary_proto=True)

    # query should work again
    debug("Verifying querying works again.")
    finalData = list(session.execute(query))
    self.assertListEqual(initialData, finalData)

    debug("Verifying tokens migrated successfully")
    movedTokensList = node4.grep_log("Token .* changing ownership from /127.0.0.3 to /127.0.0.4")
    debug(movedTokensList[0])
    self.assertEqual(len(movedTokensList), numNodes)

    # check that restarting node 3 doesn't work
    debug("Try to restart node 3 (should fail)")
    node3.start(wait_other_notice=False)
    checkCollision = node1.grep_log("between /127.0.0.3 and /127.0.0.4; /127.0.0.4 is the new owner")
    debug(checkCollision)
    self.assertEqual(len(checkCollision), 1)

    # restart node4 (if this errors, num_tokens may need to change)
    node4.stop(gently=False)
    node4.start(wait_for_binary_proto=True, wait_other_notice=False)

    debug("Verifying querying works again.")
    finalData = list(session.execute(query))
    self.assertListEqual(initialData, finalData)

    # redo this check: restarting the node should not move tokens again,
    # i.e. the number of matching log lines should be unchanged
    debug("Verifying tokens migrated successfully")
    movedTokensList = node4.grep_log("Token .* changing ownership from /127.0.0.3 to /127.0.0.4")
    debug(movedTokensList[0])
    self.assertEqual(len(movedTokensList), numNodes)
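The final recheck in Example 4 is the interesting part: restarting the replacement node must not trigger another round of token ownership changes, so the grep_log match count must be unchanged. The same idea can be written more compactly; a sketch using only calls that appear in the example above:

# Sketch: assert that a restart adds no new ownership-change lines.
pattern = "Token .* changing ownership from /127.0.0.3 to /127.0.0.4"
before = len(node4.grep_log(pattern))
node4.stop(gently=False)
node4.start(wait_for_binary_proto=True, wait_other_notice=False)
self.assertEqual(len(node4.grep_log(pattern)), before)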
Example 5: _replace_node_test
# Required import: from ccmlib.node import Node [as alias]
# Or: from ccmlib.node.Node import grep_log [as alias]
def _replace_node_test(self, gently):
    """
    Check that the replace address function correctly replaces a node that has failed in a cluster.
    Create a cluster, cause a node to fail, and bring up a new node with the replace_address parameter.
    Check that tokens are migrated and that data is replicated properly.
    """
    debug("Starting cluster with 3 nodes.")
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    if DISABLE_VNODES:
        num_tokens = 1
    else:
        # a little hacky but grep_log returns the whole line...
        num_tokens = int(node3.get_conf_option('num_tokens'))
    debug("testing with num_tokens: {}".format(num_tokens))

    debug("Inserting Data...")
    node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=3)'])

    session = self.patient_cql_connection(node1)
    session.default_timeout = 45
    stress_table = 'keyspace1.standard1'
    query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)
    initial_data = rows_to_list(session.execute(query))

    # stop node; the query should not work at consistency level THREE
    debug("Stopping node 3.")
    node3.stop(gently=gently, wait_other_notice=True)

    debug("Testing node stoppage (query should fail).")
    with self.assertRaises(NodeUnavailable):
        try:
            query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)
            session.execute(query)
        except (Unavailable, ReadTimeout):
            raise NodeUnavailable("Node could not be queried.")

    # replace node 3 with node 4
    debug("Starting node 4 to replace node 3")
    node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),
                 storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',
                 initial_token=None, binary_interface=('127.0.0.4', 9042))
    cluster.add(node4, False)
    node4.start(replace_address='127.0.0.3', wait_for_binary_proto=True)

    debug("Verifying tokens migrated successfully")
    moved_tokens = node4.grep_log("Token .* changing ownership from /127.0.0.3 to /127.0.0.4")
    debug("number of moved tokens: {}".format(len(moved_tokens)))
    self.assertEqual(len(moved_tokens), num_tokens)

    # check that restarting node 3 doesn't work
    debug("Try to restart node 3 (should fail)")
    node3.start(wait_other_notice=False)
    collision_log = node1.grep_log("between /127.0.0.3 and /127.0.0.4; /127.0.0.4 is the new owner")
    debug(collision_log)
    self.assertEqual(len(collision_log), 1)
    node3.stop(gently=False)

    # query should work again
    debug("Stopping old nodes")
    node1.stop(gently=False, wait_other_notice=True)
    node2.stop(gently=False, wait_other_notice=True)

    debug("Verifying data on new node.")
    session = self.patient_exclusive_cql_connection(node4)
    assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),
               expected=initial_data,
               cl=ConsistencyLevel.ONE)
Example 6: BaseReplaceAddressTest
# Required import: from ccmlib.node import Node [as alias]
# Or: from ccmlib.node.Node import grep_log [as alias]
# ......... (part of the code omitted here) .........
        return rows_to_list(session.execute(query, timeout=20))

    def _verify_data(self, initial_data, table='keyspace1.standard1', cl=ConsistencyLevel.ONE, limit=10000,
                     restart_nodes=False):
        assert len(initial_data) > 0, "Initial data must be greater than 0"

        # query should work again
        logger.debug("Stopping old nodes")
        for node in self.cluster.nodelist():
            if node.is_running() and node != self.replacement_node:
                logger.debug("Stopping {}".format(node.name))
                node.stop(gently=False, wait_other_notice=True)

        logger.debug("Verifying {} on {} with CL={} and LIMIT={}".format(table, self.replacement_node.address(), cl, limit))
        session = self.patient_exclusive_cql_connection(self.replacement_node)
        assert_all(session, 'select * from {} LIMIT {}'.format(table, limit),
                   expected=initial_data,
                   cl=cl)

    def _verify_replacement(self, node, same_address):
        if not same_address:
            if self.cluster.cassandra_version() >= '2.2.7':
                address_prefix = '' if self.cluster.cassandra_version() >= '4.0' else '/'
                node.watch_log_for("Node {}{} is replacing {}{}"
                                   .format(address_prefix, self.replacement_node.address_for_current_version(),
                                           address_prefix, self.replaced_node.address_for_current_version()),
                                   timeout=60, filename='debug.log')
                node.watch_log_for("Node {}{} will complete replacement of {}{} for tokens"
                                   .format(address_prefix, self.replacement_node.address_for_current_version(),
                                           address_prefix, self.replaced_node.address_for_current_version()),
                                   timeout=10)
                node.watch_log_for("removing endpoint {}{}".format(address_prefix, self.replaced_node.address_for_current_version()),
                                   timeout=60, filename='debug.log')
            else:
                node.watch_log_for("between /{} and /{}; /{} is the new owner"
                                   .format(self.replaced_node.address(),
                                           self.replacement_node.address(),
                                           self.replacement_node.address()),
                                   timeout=60)

    def _verify_tokens_migrated_successfully(self, previous_log_size=None):
        if not self.dtest_config.use_vnodes:
            num_tokens = 1
        else:
            # a little hacky but grep_log returns the whole line...
            num_tokens = int(self.replacement_node.get_conf_option('num_tokens'))

        logger.debug("Verifying {} tokens migrated successfully".format(num_tokens))
        logs = self.replacement_node.grep_log(r"Token (.*?) changing ownership from /{} to /{}"
                                              .format(self.replaced_node.address(),
                                                      self.replacement_node.address()))
        if previous_log_size is not None:
            assert len(logs) == previous_log_size

        moved_tokens = set([l[1].group(1) for l in logs])
        logger.debug("number of moved tokens: {}".format(len(moved_tokens)))
        assert len(moved_tokens) == num_tokens

        return len(logs)

    def _test_insert_data_during_replace(self, same_address, mixed_versions=False):
        """
        @jira_ticket CASSANDRA-8523
        """
        default_install_dir = self.cluster.get_install_dir()
        self._setup(opts={'hinted_handoff_enabled': False}, mixed_versions=mixed_versions)
        self._insert_data(n='1k')
        initial_data = self._fetch_initial_data()
        self._stop_node_to_replace()

        if mixed_versions:
            logger.debug("Upgrading all except {} to current version".format(self.query_node.address()))
            self.cluster.set_install_dir(install_dir=default_install_dir)
            for node in self.cluster.nodelist():
                if node.is_running() and node != self.query_node:
                    logger.debug("Upgrading {} to current version".format(node.address()))
                    node.stop(gently=True, wait_other_notice=True)
                    node.start(wait_other_notice=True, wait_for_binary_proto=True)

        # start the replacement node in the current version, in write survey mode
        self._do_replace(same_address=same_address, extra_jvm_args=["-Dcassandra.write_survey=true"])

        # insert additional keys on the query node
        self._insert_data(n='2k', whitelist=True)

        # If the address differs and versions are not mixed, the query node forwards
        # writes to the replacement node, so we refresh the initial data to reflect
        # the additional keys inserted during the replace.
        if not same_address and not mixed_versions:
            initial_data = self._fetch_initial_data(cl=ConsistencyLevel.TWO)

        logger.debug("Joining replaced node")
        self.replacement_node.nodetool("join")

        if not same_address:
            for node in self.cluster.nodelist():
                # in mixed versions the query node is not upgraded, so it will not print the replacement log
                if node.is_running() and (not mixed_versions or node != self.query_node):
                    self._verify_replacement(node, same_address)

        self._verify_data(initial_data)
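One detail worth calling out from _verify_tokens_migrated_successfully above: the l[1].group(1) access works because, in the ccmlib versions recent dtests use, grep_log returns (line, match object) pairs when given a regular expression. A sketch of consuming those pairs, assuming such a ccmlib version and a running Node named `node`:

# Sketch: consuming grep_log results as (line, match) pairs.
# The pair-based return format is an assumption about the ccmlib
# version in use (it is what Example 6's l[1].group(1) relies on).
hits = node.grep_log(r"Token (\S+) changing ownership from /(\S+) to /(\S+)")
for line, match in hits:
    token, old_owner, new_owner = match.group(1), match.group(2), match.group(3)
    print("token {} moved from {} to {}".format(token, old_owner, new_owner))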