本文整理汇总了Python中tablet.kill_tablets函数的典型用法代码示例。如果您正苦于以下问题:Python kill_tablets函数的具体用法?Python kill_tablets怎么用?Python kill_tablets使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了kill_tablets函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: tearDownModule
def tearDownModule():
  """Module-level teardown: stop vtgate, all tablets and mysqld, then
  remove temporary files and each tablet's on-disk tree."""
  global vtgate_server
  logging.debug("in tearDownModule")
  if utils.options.skip_teardown:
    return
  logging.debug("Tearing down the servers and setup")
  utils.vtgate_kill(vtgate_server)
  all_tablets = [shard_0_master, shard_0_replica,
                 shard_1_master, shard_1_replica]
  tablet.kill_tablets(all_tablets)
  # Shut down every mysqld in parallel; teardown is best-effort.
  utils.wait_procs([t.teardown_mysql() for t in all_tablets],
                   raise_on_error=False)
  environment.topo_server_teardown()
  utils.kill_sub_processes()
  utils.remove_tmp_files()
  for t in all_tablets:
    t.remove_tree()
示例2: test_reparent_cross_cell
def test_reparent_cross_cell(self, shard_id="0"):
  """Reparent a shard whose tablets span two cells.

  Brings up a master plus replicas (tablet_31981 is in cell test_ny, the
  rest in test_nj), force-reparents to tablet_62344, then gracefully
  reparents to tablet_31981 in the other cell and checks that the
  topology stays valid and MasterCell in both cells' serving graphs
  follows the move.
  """
  utils.run_vtctl("CreateKeyspace test_keyspace")

  # create the database so vttablets start, as they are serving
  tablet_62344.create_db("vt_test_keyspace")
  tablet_62044.create_db("vt_test_keyspace")
  tablet_41983.create_db("vt_test_keyspace")
  tablet_31981.create_db("vt_test_keyspace")

  # Start up a master mysql and vttablet
  tablet_62344.init_tablet("master", "test_keyspace", shard_id, start=True)
  if environment.topo_server_implementation == "zookeeper":
    # Only the master is registered so far, so the shard should list
    # just the master's cell.
    shard = utils.run_vtctl_json(["GetShard", "test_keyspace/" + shard_id])
    self.assertEqual(shard["Cells"], ["test_nj"], "wrong list of cell in Shard: %s" % str(shard["Cells"]))

  # Create a few slaves for testing reparenting.
  tablet_62044.init_tablet("replica", "test_keyspace", shard_id, start=True, wait_for_start=False)
  tablet_41983.init_tablet("replica", "test_keyspace", shard_id, start=True, wait_for_start=False)
  tablet_31981.init_tablet("replica", "test_keyspace", shard_id, start=True, wait_for_start=False)
  for t in [tablet_62044, tablet_41983, tablet_31981]:
    t.wait_for_vttablet_state("SERVING")
  if environment.topo_server_implementation == "zookeeper":
    # tablet_31981 registered from test_ny, so the shard now spans both cells.
    shard = utils.run_vtctl_json(["GetShard", "test_keyspace/" + shard_id])
    self.assertEqual(
        shard["Cells"], ["test_nj", "test_ny"], "wrong list of cell in Shard: %s" % str(shard["Cells"])
    )

  # Recompute the shard layout node - until you do that, it might not be valid.
  utils.run_vtctl("RebuildShardGraph test_keyspace/" + shard_id)
  utils.validate_topology()

  # Force the slaves to reparent assuming that all the datasets are identical.
  for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
    t.reset_replication()
  utils.pause("force ReparentShard?")
  utils.run_vtctl("ReparentShard -force test_keyspace/%s %s" % (shard_id, tablet_62344.tablet_alias))
  utils.validate_topology(ping_tablets=True)
  self._check_db_addr(shard_id, "master", tablet_62344.port)

  # Verify MasterCell is properly set
  srvShard = utils.run_vtctl_json(["GetSrvShard", "test_nj", "test_keyspace/%s" % (shard_id)])
  self.assertEqual(srvShard["MasterCell"], "test_nj")
  srvShard = utils.run_vtctl_json(["GetSrvShard", "test_ny", "test_keyspace/%s" % (shard_id)])
  self.assertEqual(srvShard["MasterCell"], "test_nj")

  # Perform a graceful reparent operation to another cell.
  utils.pause("graceful ReparentShard?")
  utils.run_vtctl("ReparentShard test_keyspace/%s %s" % (shard_id, tablet_31981.tablet_alias), auto_log=True)
  utils.validate_topology()
  self._check_db_addr(shard_id, "master", tablet_31981.port, cell="test_ny")

  # Verify MasterCell is set to new cell.
  srvShard = utils.run_vtctl_json(["GetSrvShard", "test_nj", "test_keyspace/%s" % (shard_id)])
  self.assertEqual(srvShard["MasterCell"], "test_ny")
  srvShard = utils.run_vtctl_json(["GetSrvShard", "test_ny", "test_keyspace/%s" % (shard_id)])
  self.assertEqual(srvShard["MasterCell"], "test_ny")

  tablet.kill_tablets([tablet_62344, tablet_62044, tablet_41983, tablet_31981])
示例3: _teardown_shard_2
def _teardown_shard_2():
  """Kill shard 2's tablets, delete the shard, then wipe its databases."""
  tablet.kill_tablets(shard_2_tablets)
  utils.run_vtctl(
      ["DeleteShard", "-recursive", "test_keyspace/2"], auto_log=True)
  for t in shard_2_tablets:
    t.clean_dbs()
示例4: teardown
def teardown(self):
  """Tear down every tablet in the map: vttablet, mysqld, data tree."""
  tablets = self.tablet_map.values()
  tablet.kill_tablets(tablets)
  # Parallel mysqld shutdown; failures are tolerated during teardown.
  utils.wait_procs([t.teardown_mysql() for t in tablets],
                   raise_on_error=False)
  for t in tablets:
    t.remove_tree()
示例5: tearDownModule
def tearDownModule():
  """Stop all source/destination tablets and their mysqld, then clean up
  the topo server, subprocesses, temp files and tablet directories."""
  if utils.options.skip_teardown:
    return
  all_tablets = [src_master, src_replica, src_rdonly1, src_rdonly2,
                 dst_master, dst_replica]
  tablet.kill_tablets(all_tablets)
  # Shut down every mysqld in parallel; ignore errors on teardown.
  utils.wait_procs([t.teardown_mysql() for t in all_tablets],
                   raise_on_error=False)
  environment.topo_server().teardown()
  utils.kill_sub_processes()
  utils.remove_tmp_files()
  for t in all_tablets:
    t.remove_tree()
示例6: tearDownModule
def tearDownModule():
  """Module teardown: stop tablets and mysqld, remove all local state."""
  if utils.options.skip_teardown:
    return
  # NOTE(review): unsharded_master/unsharded_replica get their mysqld torn
  # down and trees removed below, but are not in this kill list — confirm
  # their vttablets are stopped elsewhere (or never started).
  tablet.kill_tablets([shard_0_master, shard_0_replica,
                       shard_1_master, shard_1_replica])
  teardown_procs = [
      shard_0_master.teardown_mysql(),
      shard_0_replica.teardown_mysql(),
      shard_1_master.teardown_mysql(),
      shard_1_replica.teardown_mysql(),
      unsharded_master.teardown_mysql(),
      unsharded_replica.teardown_mysql(),
  ]
  # Best-effort parallel mysqld shutdown.
  utils.wait_procs(teardown_procs, raise_on_error=False)
  environment.topo_server().teardown()
  utils.kill_sub_processes()
  utils.remove_tmp_files()
  shard_0_master.remove_tree()
  shard_0_replica.remove_tree()
  shard_1_master.remove_tree()
  shard_1_replica.remove_tree()
  unsharded_master.remove_tree()
  unsharded_replica.remove_tree()
示例7: tearDown
def tearDown(self):
  """Kill all source and destination tablets, then shut down vtgate."""
  sources = [source_master, source_replica,
             source_rdonly1, source_rdonly2]
  destinations = [destination_master, destination_replica,
                  destination_rdonly1, destination_rdonly2]
  tablet.kill_tablets(sources + destinations)
  utils.vtgate.kill()
示例8: test_no_mysql_healthcheck
def test_no_mysql_healthcheck(self):
  """This test starts a vttablet with no mysql port, while mysql is down.
  It makes sure vttablet will start properly and be unhealthy.
  Then we start mysql, and make sure vttablet becomes healthy.
  """
  # we need replication to be enabled, so the slave tablet can be healthy.
  for t in tablet_62344, tablet_62044:
    t.create_db("vt_test_keyspace")
  pos = mysql_flavor().master_position(tablet_62344)
  changeMasterCmds = mysql_flavor().change_master_commands(utils.hostname, tablet_62344.mysql_port, pos)
  # Point the slave at the master and start replicating from `pos`.
  tablet_62044.mquery("", ["RESET MASTER", "RESET SLAVE"] + changeMasterCmds + ["START SLAVE"])

  # now shutdown all mysqld
  shutdown_procs = [tablet_62344.shutdown_mysql(), tablet_62044.shutdown_mysql()]
  utils.wait_procs(shutdown_procs)

  # start the tablets, wait for them to be NOT_SERVING (mysqld not there)
  tablet_62344.init_tablet("master", "test_keyspace", "0")
  tablet_62044.init_tablet("spare", "test_keyspace", "0", include_mysql_port=False)
  for t in tablet_62344, tablet_62044:
    t.start_vttablet(
        wait_for_state=None, target_tablet_type="replica", full_mycnf_args=True, include_mysql_port=False
    )
  for t in tablet_62344, tablet_62044:
    t.wait_for_vttablet_state("NOT_SERVING")
    self.check_healthz(t, False)

  # restart mysqld
  start_procs = [tablet_62344.start_mysql(), tablet_62044.start_mysql()]
  utils.wait_procs(start_procs)

  # the master should still be healthy
  utils.run_vtctl(["RunHealthCheck", tablet_62344.tablet_alias, "replica"], auto_log=True)
  self.check_healthz(tablet_62344, True)

  # the slave won't be healthy at first, as replication is not running
  utils.run_vtctl(["RunHealthCheck", tablet_62044.tablet_alias, "replica"], auto_log=True)
  self.check_healthz(tablet_62044, False)
  tablet_62044.wait_for_vttablet_state("NOT_SERVING")

  # restart replication
  tablet_62044.mquery("", ["START SLAVE"])

  # wait for the tablet to become healthy and fix its mysql port
  utils.run_vtctl(["RunHealthCheck", tablet_62044.tablet_alias, "replica"], auto_log=True)
  tablet_62044.wait_for_vttablet_state("SERVING")
  self.check_healthz(tablet_62044, True)

  for t in tablet_62344, tablet_62044:
    # wait for mysql port to show up
    timeout = 10
    while True:
      ti = utils.run_vtctl_json(["GetTablet", t.tablet_alias])
      if "mysql" in ti["Portmap"]:
        break
      timeout = utils.wait_step("mysql port in tablet record", timeout)
    self.assertEqual(ti["Portmap"]["mysql"], t.mysql_port)

  # all done
  tablet.kill_tablets([tablet_62344, tablet_62044])
示例9: test_primecache
def test_primecache(self):
  """Exercise vtprimecache while a stopped slave catches up on replication.

  Sets up a master/replica pair, clones data without warming the slave's
  mysql cache, drops the OS page cache, changes data on the master while
  the slave is stopped, then restarts replication (optionally with
  vtprimecache running) and measures catch-up via self.catch_up().
  NOTE(review): runs `sudo` and writes /proc/sys/vm/drop_caches, so it
  needs root access and may prompt for a password.
  """
  utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
  master.init_tablet('master', 'test_keyspace', '0')
  replica.init_tablet('idle')
  utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
  master.create_db('vt_test_keyspace')
  master.start_vttablet(wait_for_state=None)
  replica.start_vttablet(wait_for_state=None)
  master.wait_for_vttablet_state('SERVING')
  replica.wait_for_vttablet_state('NOT_SERVING')  # DB doesn't exist

  self._create_data()

  # we use clone to not prime the mysql cache on the slave db
  utils.run_vtctl(['Clone', '-force', '-server-mode',
                   master.tablet_alias, replica.tablet_alias],
                  auto_log=True)

  # sync the buffer cache, and clear it. This will prompt for user's password
  utils.run(['sync'])
  utils.run(['sudo', 'bash', '-c', 'echo 1 > /proc/sys/vm/drop_caches'])

  # we can now change data on the master for 30s, while slave is stopped.
  # master's binlog will be in OS buffer cache now.
  replica.mquery('', 'slave stop')
  self._change_random_data()

  use_primecache = True  # easy to test without
  if use_primecache:
    # starting vtprimecache, sleeping for a couple seconds
    args = environment.binary_args('vtprimecache') + [
        '-db-config-dba-uname', 'vt_dba',
        '-db-config-dba-charset', 'utf8',
        '-db-config-dba-dbname', 'vt_test_keyspace',
        '-db-config-app-uname', 'vt_app',
        '-db-config-app-charset', 'utf8',
        '-db-config-app-dbname', 'vt_test_keyspace',
        '-relay_logs_path', replica.tablet_dir+'/relay-logs',
        '-mysql_socket_file', replica.tablet_dir+'/mysql.sock',
        '-log_dir', environment.vtlogroot,
        '-worker_count', '4',
        '-alsologtostderr',
    ]
    vtprimecache = utils.run_bg(args)
    time.sleep(2)

  # start slave, see how longs it takes to catch up on replication
  replica.mquery('', 'slave start')
  self.catch_up()

  if use_primecache:
    # TODO(alainjobart): read and check stats
    utils.kill_sub_process(vtprimecache)

  tablet.kill_tablets([master, replica])
示例10: test_health_check_worker_state_does_not_shutdown_query_service
def test_health_check_worker_state_does_not_shutdown_query_service(self):
  """Check that switching an rdonly tablet to 'worker' keeps it serving.

  The tablet becomes unhealthy once replication is stopped, but its
  query service must remain up; switching back to spare/rdonly and
  restarting replication makes it healthy again.
  """
  # This test is similar to test_health_check, but has the following
  # differences:
  # - the second tablet is an 'rdonly' and not a 'replica'
  # - the second tablet will be set to 'worker' and we expect that
  #   the query service won't be shutdown

  # Setup master and rdonly tablets.
  tablet_62344.init_tablet("master", "test_keyspace", "0")
  for t in tablet_62344, tablet_62044:
    t.create_db("vt_test_keyspace")
  tablet_62344.start_vttablet(wait_for_state=None, target_tablet_type="replica")
  tablet_62044.start_vttablet(
      wait_for_state=None, target_tablet_type="rdonly", init_keyspace="test_keyspace", init_shard="0"
  )
  tablet_62344.wait_for_vttablet_state("SERVING")
  tablet_62044.wait_for_vttablet_state("NOT_SERVING")
  self.check_healthz(tablet_62044, False)

  # Enable replication.
  utils.run_vtctl(["InitShardMaster", "test_keyspace/0", tablet_62344.tablet_alias])
  # Trigger healthcheck to save time waiting for the next interval.
  utils.run_vtctl(["RunHealthCheck", tablet_62044.tablet_alias, "rdonly"])
  utils.wait_for_tablet_type(tablet_62044.tablet_alias, "rdonly")
  self.check_healthz(tablet_62044, True)
  tablet_62044.wait_for_vttablet_state("SERVING")

  # Change from rdonly to worker and stop replication. (These
  # actions are similar to the SplitClone vtworker command
  # implementation.)  The tablet will become unhealthy, but the
  # query service is still running.
  utils.run_vtctl(["ChangeSlaveType", tablet_62044.tablet_alias, "worker"])
  utils.run_vtctl(["StopSlave", tablet_62044.tablet_alias])
  # Trigger healthcheck explicitly to avoid waiting for the next interval.
  utils.run_vtctl(["RunHealthCheck", tablet_62044.tablet_alias, "rdonly"])
  utils.wait_for_tablet_type(tablet_62044.tablet_alias, "worker")
  self.check_healthz(tablet_62044, False)
  # Make sure that replication got disabled.
  self.assertIn(
      ">unhealthy: replication_reporter: " "Replication is not running</span></div>", tablet_62044.get_status()
  )
  # Query service is still running.
  tablet_62044.wait_for_vttablet_state("SERVING")

  # Restart replication. Tablet will become healthy again.
  utils.run_vtctl(["ChangeSlaveType", tablet_62044.tablet_alias, "spare"])
  utils.wait_for_tablet_type(tablet_62044.tablet_alias, "spare")
  utils.run_vtctl(["StartSlave", tablet_62044.tablet_alias])
  utils.run_vtctl(["RunHealthCheck", tablet_62044.tablet_alias, "rdonly"])
  utils.wait_for_tablet_type(tablet_62044.tablet_alias, "rdonly")
  self.check_healthz(tablet_62044, True)
  tablet_62044.wait_for_vttablet_state("SERVING")

  # kill the tablets
  tablet.kill_tablets([tablet_62344, tablet_62044])
示例11: shutdown
def shutdown(self):
  """Stop the whole cluster: tablets, mysqld, topo server, local state."""
  tablet.kill_tablets(self.tablets)
  # Parallel best-effort mysqld shutdown.
  utils.wait_procs([t.teardown_mysql() for t in self.tablets],
                   raise_on_error=False)
  environment.topo_server().teardown()
  utils.kill_sub_processes()
  utils.remove_tmp_files()
  for t in self.tablets:
    t.remove_tree()
示例12: test_no_mysql_healthcheck
def test_no_mysql_healthcheck(self):
  """This test starts a vttablet with no mysql port, while mysql is down.
  It makes sure vttablet will start properly and be unhealthy.
  Then we start mysql, and make sure vttablet becomes healthy.
  """
  # we need replication to be enabled, so the slave tablet can be healthy.
  for t in tablet_62344, tablet_62044:
    t.create_db('vt_test_keyspace')
  pos = mysql_flavor().master_position(tablet_62344)
  changeMasterCmds = mysql_flavor().change_master_commands(
      utils.hostname,
      tablet_62344.mysql_port,
      pos)
  # Point the slave at the master and start replicating from `pos`.
  tablet_62044.mquery('', ['RESET MASTER', 'RESET SLAVE'] +
                      changeMasterCmds +
                      ['START SLAVE'])

  # now shutdown all mysqld
  shutdown_procs = [
      tablet_62344.shutdown_mysql(),
      tablet_62044.shutdown_mysql(),
  ]
  utils.wait_procs(shutdown_procs)

  # start the tablets, wait for them to be NOT_SERVING (mysqld not there)
  tablet_62344.init_tablet('master', 'test_keyspace', '0')
  tablet_62044.init_tablet('spare', 'test_keyspace', '0',
                           include_mysql_port=False)
  for t in tablet_62344, tablet_62044:
    t.start_vttablet(wait_for_state=None,
                     target_tablet_type='replica',
                     full_mycnf_args=True, include_mysql_port=False)
  for t in tablet_62344, tablet_62044:
    t.wait_for_vttablet_state('NOT_SERVING')

  # restart mysqld
  start_procs = [
      tablet_62344.start_mysql(),
      tablet_62044.start_mysql(),
  ]
  utils.wait_procs(start_procs)

  # wait for the tablets to become healthy and fix their mysql port
  for t in tablet_62344, tablet_62044:
    t.wait_for_vttablet_state('SERVING')
  for t in tablet_62344, tablet_62044:
    # wait for mysql port to show up
    timeout = 10
    while True:
      ti = utils.run_vtctl_json(['GetTablet', t.tablet_alias])
      if 'mysql' in ti['Portmap']:
        break
      timeout = utils.wait_step('mysql port in tablet record', timeout)
    self.assertEqual(ti['Portmap']['mysql'], t.mysql_port)

  # all done
  tablet.kill_tablets([tablet_62344, tablet_62044])
示例13: test_reparent_lag_slave
def test_reparent_lag_slave(self, shard_id='0'):
  """Reparent a shard while one slave (tablet_41983) is lagging.

  The lagging slave has replication stopped during the reparent; after
  restarting replication, ReparentTablet must re-point it at the new
  master and it must catch up with the missed inserts.
  """
  utils.run_vtctl('CreateKeyspace test_keyspace')

  # create the database so vttablets start, as they are serving
  tablet_62344.create_db('vt_test_keyspace')
  tablet_62044.create_db('vt_test_keyspace')
  tablet_41983.create_db('vt_test_keyspace')
  tablet_31981.create_db('vt_test_keyspace')

  # Start up a master mysql and vttablet
  tablet_62344.init_tablet('master', 'test_keyspace', shard_id, start=True, wait_for_start=False)

  # Create a few slaves for testing reparenting.
  tablet_62044.init_tablet('replica', 'test_keyspace', shard_id, start=True, wait_for_start=False)
  tablet_31981.init_tablet('replica', 'test_keyspace', shard_id, start=True, wait_for_start=False)
  tablet_41983.init_tablet('lag', 'test_keyspace', shard_id, start=True, wait_for_start=False)

  # wait for all tablets to start
  for t in [tablet_62344, tablet_62044, tablet_31981]:
    t.wait_for_vttablet_state("SERVING")
  tablet_41983.wait_for_vttablet_state("NOT_SERVING")

  # Recompute the shard layout node - until you do that, it might not be valid.
  utils.run_vtctl('RebuildShardGraph test_keyspace/' + shard_id)
  utils.validate_topology()

  # Force the slaves to reparent assuming that all the datasets are identical.
  for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
    t.reset_replication()
  utils.run_vtctl('ReparentShard -force test_keyspace/%s %s' % (shard_id, tablet_62344.tablet_alias))
  utils.validate_topology(ping_tablets=True)

  tablet_62344.create_db('vt_test_keyspace')
  tablet_62344.mquery('vt_test_keyspace', self._create_vt_insert_test)

  # Stop replication on the lag slave so it misses the writes below.
  tablet_41983.mquery('', 'stop slave')
  for q in self._populate_vt_insert_test:
    tablet_62344.mquery('vt_test_keyspace', q, write=True)

  # Perform a graceful reparent operation.
  utils.run_vtctl('ReparentShard test_keyspace/%s %s' % (shard_id, tablet_62044.tablet_alias))

  # Resume replication on the lag slave; it is still pointed at the old
  # master until ReparentTablet fixes it.
  tablet_41983.mquery('', 'start slave')
  time.sleep(1)
  utils.pause("check orphan")
  utils.run_vtctl('ReparentTablet %s' % tablet_41983.tablet_alias)

  # The lag slave should now have caught up on the missed insert.
  result = tablet_41983.mquery('vt_test_keyspace', 'select msg from vt_insert_test where id=1')
  if len(result) != 1:
    self.fail('expected 1 row from vt_insert_test: %s' % str(result))

  utils.pause("check lag reparent")

  tablet.kill_tablets([tablet_62344, tablet_62044, tablet_41983, tablet_31981])
示例14: test_health_check_drained_state_does_not_shutdown_query_service
def test_health_check_drained_state_does_not_shutdown_query_service(self):
  """Check that switching an rdonly tablet to 'drained' keeps it serving.

  Replication is stopped while the tablet is drained; the tablet stays
  healthy and its query service keeps running, and switching back to
  rdonly with replication restarted keeps it healthy.
  """
  # This test is similar to test_health_check, but has the following
  # differences:
  # - the second tablet is an 'rdonly' and not a 'replica'
  # - the second tablet will be set to 'drained' and we expect that
  #   the query service won't be shutdown

  # Setup master and rdonly tablets.
  tablet_62344.init_tablet('replica', 'test_keyspace', '0')
  for t in tablet_62344, tablet_62044:
    t.create_db('vt_test_keyspace')
  # Note we only have a master and a rdonly. So we can't enable
  # semi-sync in this case, as the rdonly slaves don't semi-sync ack.
  tablet_62344.start_vttablet(wait_for_state=None, enable_semi_sync=False)
  tablet_62044.start_vttablet(wait_for_state=None,
                              init_tablet_type='rdonly',
                              init_keyspace='test_keyspace',
                              init_shard='0',
                              enable_semi_sync=False)
  tablet_62344.wait_for_vttablet_state('NOT_SERVING')
  tablet_62044.wait_for_vttablet_state('NOT_SERVING')
  self.check_healthz(tablet_62044, False)

  # Enable replication.
  utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
                   tablet_62344.tablet_alias])
  # Trigger healthcheck to save time waiting for the next interval.
  utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias])
  tablet_62044.wait_for_vttablet_state('SERVING')
  self.check_healthz(tablet_62044, True)

  # Change from rdonly to drained and stop replication. (These
  # actions are similar to the SplitClone vtworker command
  # implementation.)  The tablet will stay healthy, and the
  # query service is still running.
  utils.run_vtctl(['ChangeSlaveType', tablet_62044.tablet_alias, 'drained'])
  utils.run_vtctl(['StopSlave', tablet_62044.tablet_alias])
  # Trigger healthcheck explicitly to avoid waiting for the next interval.
  utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias])
  utils.wait_for_tablet_type(tablet_62044.tablet_alias, 'drained')
  self.check_healthz(tablet_62044, True)
  # Query service is still running.
  tablet_62044.wait_for_vttablet_state('SERVING')

  # Restart replication. Tablet will become healthy again.
  utils.run_vtctl(['ChangeSlaveType', tablet_62044.tablet_alias, 'rdonly'])
  utils.run_vtctl(['StartSlave', tablet_62044.tablet_alias])
  utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias])
  self.check_healthz(tablet_62044, True)

  # kill the tablets
  tablet.kill_tablets([tablet_62344, tablet_62044])
示例15: _teardown_shard_2
def _teardown_shard_2():
  """Kill shard 2's tablets, drop the shard, and reset each tablet's
  replication / semi-sync state before wiping its databases."""
  tablet.kill_tablets(shard_2_tablets)
  utils.run_vtctl(['DeleteShard', '-recursive', 'test_keyspace/2'],
                  auto_log=True)
  for t in shard_2_tablets:
    t.reset_replication()
    t.set_semi_sync_enabled(master=False)
    t.clean_dbs()