本文整理汇总了Python中utils.run_vtctl_json函数的典型用法代码示例。如果您正苦于以下问题:Python run_vtctl_json函数的具体用法?Python run_vtctl_json怎么用?Python run_vtctl_json使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了run_vtctl_json函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _test_reparent_from_outside_check
def _test_reparent_from_outside_check(self, brutal, base_time):
    """Check topology and health state after an external reparent.

    Verifies the shard replication graph, the new master's status page,
    and the health record it streams.
    """
    # The shard replication graph should contain exactly the expected tablets.
    shard_replication = utils.run_vtctl_json(
        ['GetShardReplication', 'test_nj', 'test_keyspace/0'])
    hashed_nodes = {}
    for entry in shard_replication['nodes']:
        alias = entry['tablet_alias']
        hashed_nodes[alias['cell'] + '-' + str(alias['uid'])] = True
    logging.debug('Got shard replication nodes: %s', str(hashed_nodes))
    expected_nodes = {'test_nj-41983': True, 'test_nj-62044': True}
    if not brutal:
        # A non-brutal reparent keeps the demoted master in the graph.
        expected_nodes['test_nj-62344'] = True
    self.assertEqual(expected_nodes, hashed_nodes,
                     'Got unexpected nodes: %s != %s' % (str(expected_nodes),
                                                         str(hashed_nodes)))
    # The new master's status page should advertise it as master.
    tablet_62044_master_status = tablet_62044.get_status()
    self.assertIn('Serving graph: test_keyspace 0 master',
                  tablet_62044_master_status)
    # The health stream should agree it is the master; health checking is
    # disabled on these servers, so force a check first.
    utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias, 'replica'])
    health = utils.run_vtctl_json(['VtTabletStreamHealth', '-count', '1',
                                   tablet_62044.tablet_alias])
    self.assertEqual(health['target']['tablet_type'], topodata_pb2.MASTER)
    # Compare integer values; float comparison can break on rounding errors.
    self.assertTrue(
        health['tablet_externally_reparented_timestamp'] >= int(base_time))
示例2: test_scrap_and_reinit
def test_scrap_and_reinit(self):
    """Scrap a replica, re-init it, and verify the replication graph.

    Also injects a bogus replication-graph entry and checks that
    ShardReplicationFix removes it.
    """
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
    tablet_62344.create_db('vt_test_keyspace')
    tablet_62044.create_db('vt_test_keyspace')
    # one master one replica
    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    tablet_62044.init_tablet('replica', 'test_keyspace', '0')
    # make sure the replica is in the replication graph
    before_scrap = utils.run_vtctl_json(['GetShardReplication', 'test_nj',
                                         'test_keyspace/0'])
    self.assertEqual(1, len(before_scrap['ReplicationLinks']),
                     'wrong replication links before: %s' % str(before_scrap))
    # scrap and re-init
    utils.run_vtctl(['ScrapTablet', '-force', tablet_62044.tablet_alias])
    tablet_62044.init_tablet('replica', 'test_keyspace', '0')
    after_scrap = utils.run_vtctl_json(['GetShardReplication', 'test_nj',
                                        'test_keyspace/0'])
    self.assertEqual(1, len(after_scrap['ReplicationLinks']),
                     'wrong replication links after: %s' % str(after_scrap))
    # manually add a bogus entry to the replication graph, and check
    # it is removed by ShardReplicationFix
    utils.run_vtctl(['ShardReplicationAdd', 'test_keyspace/0',
                     'test_nj-0000066666', 'test_nj-0000062344'],
                    auto_log=True)
    with_bogus = utils.run_vtctl_json(['GetShardReplication', 'test_nj',
                                       'test_keyspace/0'])
    self.assertEqual(2, len(with_bogus['ReplicationLinks']),
                     'wrong replication links with bogus: %s' %
                     str(with_bogus))
    utils.run_vtctl(['ShardReplicationFix', 'test_nj', 'test_keyspace/0'],
                    auto_log=True)
    after_fix = utils.run_vtctl_json(['GetShardReplication', 'test_nj',
                                      'test_keyspace/0'])
    # BUG FIX: the original asserted on len(after_scrap[...]) here, so the
    # post-fix snapshot was never actually checked. Check after_fix instead.
    self.assertEqual(1, len(after_fix['ReplicationLinks']),
                     'wrong replication links after fix: %s' % str(after_fix))
示例3: test_vtctl_copyschemashard_different_dbs_should_fail
def test_vtctl_copyschemashard_different_dbs_should_fail(self):
    """CopySchemaShard must fail when source/dest db charsets differ."""
    # Apply initial schema to the whole keyspace before creating shard 2.
    self._apply_initial_schema()
    _setup_shard_2()
    try:
        # InitShardMaster creates the db, but there shouldn't be any tables
        # yet on either destination tablet.
        for dest_tablet in (shard_2_master, shard_2_replica1):
            self._check_tables(dest_tablet, 0)
        # Change the db charset on the destination shard from utf8 to latin1.
        # This will make CopySchemaShard fail during its final diff.
        # (The different charset won't be corrected on the destination shard
        # because we use "CREATE DATABASE IF NOT EXISTS" and this doesn't fail
        # if there are differences in the options e.g. the character set.)
        dest_schema = self._get_schema(shard_2_master.tablet_alias)
        self.assertIn('utf8', dest_schema['database_schema'])
        utils.run_vtctl_json(
            ['ExecuteFetchAsDba', '-json', shard_2_master.tablet_alias,
             'ALTER DATABASE vt_test_keyspace CHARACTER SET latin1'])
        _, stderr = utils.run_vtctl(
            ['CopySchemaShard', 'test_keyspace/0', 'test_keyspace/2'],
            expect_fail=True, auto_log=True)
        self.assertIn('source and dest don\'t agree on database creation command',
                      stderr)
        # shard_2_master should have the same number of tables. Only the db
        # character set is different.
        self._check_tables(shard_2_master, 4)
    finally:
        _teardown_shard_2()
示例4: test_shard_replication_fix
def test_shard_replication_fix(self):
    """ShardReplicationFix must remove a manually injected bogus node."""
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
    for tab in (tablet_62344, tablet_62044):
        tab.create_db('vt_test_keyspace')
    # One master, one replica.
    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    tablet_62044.init_tablet('replica', 'test_keyspace', '0')

    def _replication_graph():
        # Snapshot of the shard replication graph for this cell/shard.
        return utils.run_vtctl_json(['GetShardReplication', 'test_nj',
                                     'test_keyspace/0'])

    # The replica should already appear in the replication graph.
    before_bogus = _replication_graph()
    self.assertEqual(2, len(before_bogus['nodes']),
                     'wrong shard replication nodes before: %s' %
                     str(before_bogus))
    # Manually add a bogus entry to the replication graph, and check
    # it is removed by ShardReplicationFix.
    utils.run_vtctl(['ShardReplicationAdd', 'test_keyspace/0',
                     'test_nj-0000066666'], auto_log=True)
    with_bogus = _replication_graph()
    self.assertEqual(3, len(with_bogus['nodes']),
                     'wrong shard replication nodes with bogus: %s' %
                     str(with_bogus))
    utils.run_vtctl(['ShardReplicationFix', 'test_nj', 'test_keyspace/0'],
                    auto_log=True)
    after_fix = _replication_graph()
    self.assertEqual(2, len(after_fix['nodes']),
                     'wrong shard replication nodes after fix: %s' %
                     str(after_fix))
示例5: _test_reparent_from_outside_check
def _test_reparent_from_outside_check(self, brutal, base_time):
    """Validate the replication graph, master status page, and health
    stream after an external reparent to tablet 62044.
    """
    # Collect the shard replication graph as a set of "cell-uid" keys.
    shard_replication = utils.run_vtctl_json(
        ["GetShardReplication", "test_nj", "test_keyspace/0"])
    hashed_nodes = {
        node["tablet_alias"]["cell"] + "-" + str(node["tablet_alias"]["uid"]): True
        for node in shard_replication["nodes"]}
    logging.debug("Got shard replication nodes: %s", str(hashed_nodes))
    expected_nodes = {"test_nj-41983": True, "test_nj-62044": True}
    if not brutal:
        # A graceful reparent keeps the demoted master in the graph.
        expected_nodes["test_nj-62344"] = True
    self.assertEqual(
        expected_nodes, hashed_nodes,
        "Got unexpected nodes: %s != %s" % (str(expected_nodes),
                                            str(hashed_nodes)))
    # The master status page must report the tablet as master.
    tablet_62044_master_status = tablet_62044.get_status()
    self.assertIn("Serving graph: test_keyspace 0 master",
                  tablet_62044_master_status)
    # The health stream must agree. Health check is disabled on these
    # servers, so force one first.
    utils.run_vtctl(["RunHealthCheck", tablet_62044.tablet_alias, "replica"])
    health = utils.run_vtctl_json(
        ["VtTabletStreamHealth", "-count", "1", tablet_62044.tablet_alias])
    self.assertEqual(health["target"]["tablet_type"], topodata_pb2.MASTER)
    # Compare as ints; float rounding could otherwise break the comparison.
    self.assertTrue(
        health["tablet_externally_reparented_timestamp"] >= int(base_time))
示例6: test_master_restart_sets_ter_timestamp
def test_master_restart_sets_ter_timestamp(self):
    """Test that TER timestamp is set when we restart the MASTER vttablet.

    TER = TabletExternallyReparented.
    See StreamHealthResponse.tablet_externally_reparented_timestamp for
    details.
    """
    master, replica = tablet_62344, tablet_62044
    tablets = [master, replica]
    # Start vttablets. Our future master is initially a REPLICA.
    for t in tablets:
        t.create_db('vt_test_keyspace')
    for t in tablets:
        t.start_vttablet(wait_for_state='NOT_SERVING',
                         init_tablet_type='replica',
                         init_keyspace='test_keyspace',
                         init_shard='0')
    # Initialize tablet as MASTER.
    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
                     master.tablet_alias])
    master.wait_for_vttablet_state('SERVING')
    # Capture the current TER.
    health = utils.run_vtctl_json(['VtTabletStreamHealth',
                                   '-count', '1',
                                   master.tablet_alias])
    self.assertEqual(topodata_pb2.MASTER, health['target']['tablet_type'])
    self.assertIn('tablet_externally_reparented_timestamp', health)
    self.assertGreater(health['tablet_externally_reparented_timestamp'], 0,
                       'TER on MASTER must be set after InitShardMaster')
    # Restart the MASTER vttablet.
    master.kill_vttablet()
    # NOTE(review): restarted with init_tablet_type='replica' yet waited for
    # SERVING — presumably the topology record still marks it MASTER, so it
    # comes back as master. The assertion below relies on that.
    master.start_vttablet(wait_for_state='SERVING',
                          init_tablet_type='replica',
                          init_keyspace='test_keyspace',
                          init_shard='0')
    # Make sure that the TER increased i.e. it was set to the current time.
    health_after_restart = utils.run_vtctl_json(['VtTabletStreamHealth',
                                                 '-count', '1',
                                                 master.tablet_alias])
    self.assertEqual(topodata_pb2.MASTER,
                     health_after_restart['target']['tablet_type'])
    self.assertIn('tablet_externally_reparented_timestamp',
                  health_after_restart)
    self.assertGreater(
        health_after_restart['tablet_externally_reparented_timestamp'],
        health['tablet_externally_reparented_timestamp'],
        'When the MASTER vttablet was restarted, the TER timestamp must be set'
        ' to the current time.')
    # Shutdown.
    for t in tablets:
        t.kill_vttablet()
示例7: test_reparent_cross_cell
def test_reparent_cross_cell(self, shard_id="0"):
    """Planned reparent of a shard from cell test_nj to cell test_ny."""
    utils.run_vtctl(["CreateKeyspace", "test_keyspace"])
    # create the database so vttablets start, as they are serving
    tablet_62344.create_db("vt_test_keyspace")
    tablet_62044.create_db("vt_test_keyspace")
    tablet_41983.create_db("vt_test_keyspace")
    tablet_31981.create_db("vt_test_keyspace")
    # Start up a master mysql and vttablet
    tablet_62344.init_tablet("master", "test_keyspace", shard_id, start=True, wait_for_start=False)
    # Only the master's cell should be registered on the shard so far.
    shard = utils.run_vtctl_json(["GetShard", "test_keyspace/" + shard_id])
    self.assertEqual(shard["cells"], ["test_nj"], "wrong list of cell in Shard: %s" % str(shard["cells"]))
    # Create a few slaves for testing reparenting.
    tablet_62044.init_tablet("replica", "test_keyspace", shard_id, start=True, wait_for_start=False)
    tablet_41983.init_tablet("replica", "test_keyspace", shard_id, start=True, wait_for_start=False)
    tablet_31981.init_tablet("replica", "test_keyspace", shard_id, start=True, wait_for_start=False)
    for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
        t.wait_for_vttablet_state("SERVING")
    # Registering tablet_31981 (the test_ny tablet) should add its cell.
    shard = utils.run_vtctl_json(["GetShard", "test_keyspace/" + shard_id])
    self.assertEqual(
        shard["cells"], ["test_nj", "test_ny"], "wrong list of cell in Shard: %s" % str(shard["cells"])
    )
    # Recompute the shard layout node - until you do that, it might not be
    # valid.
    utils.run_vtctl(["RebuildShardGraph", "test_keyspace/" + shard_id])
    utils.validate_topology()
    # Force the slaves to reparent assuming that all the datasets are
    # identical.
    for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
        t.reset_replication()
    utils.run_vtctl(["InitShardMaster", "test_keyspace/" + shard_id, tablet_62344.tablet_alias], auto_log=True)
    utils.validate_topology(ping_tablets=True)
    self._check_db_addr(shard_id, "master", tablet_62344.port)
    # Verify MasterCell is properly set
    self._check_master_cell("test_nj", shard_id, "test_nj")
    self._check_master_cell("test_ny", shard_id, "test_nj")
    # Perform a graceful reparent operation to another cell.
    utils.pause("test_reparent_cross_cell PlannedReparentShard")
    utils.run_vtctl(["PlannedReparentShard", "test_keyspace/" + shard_id, tablet_31981.tablet_alias], auto_log=True)
    utils.validate_topology()
    self._check_db_addr(shard_id, "master", tablet_31981.port, cell="test_ny")
    # Verify MasterCell is set to new cell.
    self._check_master_cell("test_nj", shard_id, "test_ny")
    self._check_master_cell("test_ny", shard_id, "test_ny")
    tablet.kill_tablets([tablet_62344, tablet_62044, tablet_41983, tablet_31981])
示例8: test_reparent_cross_cell
def test_reparent_cross_cell(self, shard_id='0'):
    """Planned reparent of a shard from cell test_nj to cell test_ny."""
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
    all_tablets = [tablet_62344, tablet_62044, tablet_41983, tablet_31981]
    # Create the database so vttablets start, as they are serving.
    for tab in all_tablets:
        tab.create_db('vt_test_keyspace')
    # Start up a master mysql and vttablet.
    tablet_62344.init_tablet('master', 'test_keyspace', shard_id, start=True,
                             wait_for_start=False)
    shard = utils.run_vtctl_json(['GetShard', 'test_keyspace/' + shard_id])
    self.assertEqual(shard['cells'], ['test_nj'],
                     'wrong list of cell in Shard: %s' % str(shard['cells']))
    # Create a few slaves for testing reparenting. Won't be healthy
    # as replication is not running.
    for tab in all_tablets[1:]:
        tab.init_tablet('replica', 'test_keyspace', shard_id, start=True,
                        wait_for_start=False)
    tablet_62344.wait_for_vttablet_state('SERVING')
    for tab in all_tablets[1:]:
        tab.wait_for_vttablet_state('NOT_SERVING')
    # Registering the test_ny tablet should have added its cell.
    shard = utils.run_vtctl_json(['GetShard', 'test_keyspace/' + shard_id])
    self.assertEqual(
        shard['cells'], ['test_nj', 'test_ny'],
        'wrong list of cell in Shard: %s' % str(shard['cells']))
    utils.validate_topology()
    # Force the slaves to reparent assuming that all the datasets are
    # identical.
    for tab in all_tablets:
        tab.reset_replication()
    utils.run_vtctl(['InitShardMaster', 'test_keyspace/' + shard_id,
                     tablet_62344.tablet_alias], auto_log=True)
    utils.validate_topology(ping_tablets=True)
    self._check_master_tablet(tablet_62344)
    # Perform a graceful reparent operation to another cell.
    utils.pause('test_reparent_cross_cell PlannedReparentShard')
    utils.run_vtctl(['PlannedReparentShard', 'test_keyspace/' + shard_id,
                     tablet_31981.tablet_alias], auto_log=True)
    utils.validate_topology()
    self._check_master_tablet(tablet_31981)
    tablet.kill_tablets(all_tablets)
示例9: _check_master_tablet
def _check_master_tablet(self, t, port=None):
    """Makes sure the tablet type is master, and its health check agrees."""
    tablet_info = utils.run_vtctl_json(['GetTablet', t.tablet_alias])
    self.assertEqual(tablet_info['type'], topodata_pb2.MASTER)
    if port:
        # Optionally verify the tablet serves on the expected vt port.
        self.assertEqual(tablet_info['port_map']['vt'], port)
    # The streamed health record must also report MASTER.
    health = utils.run_vtctl_json(
        ['VtTabletStreamHealth', '-count', '1', t.tablet_alias])
    self.assertIn('serving', health)
    self.assertEqual(health['target']['tablet_type'], topodata_pb2.MASTER)
示例10: test_health_check
def test_health_check(self):
    """End-to-end health-check flow: spare promotion, replication-lag
    reporting, and propagation to the serving graph.
    """
    utils.run_vtctl('CreateKeyspace test_keyspace')
    # one master, one replica that starts in spare
    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    tablet_62044.init_tablet('spare', 'test_keyspace', '0')
    for t in tablet_62344, tablet_62044:
        t.create_db('vt_test_keyspace')
    tablet_62344.start_vttablet(wait_for_state=None, target_tablet_type='replica')
    tablet_62044.start_vttablet(wait_for_state=None, target_tablet_type='replica')
    tablet_62344.wait_for_vttablet_state('SERVING')
    tablet_62044.wait_for_vttablet_state('NOT_SERVING')
    utils.run_vtctl(['ReparentShard', '-force', 'test_keyspace/0', tablet_62344.tablet_alias])
    # make sure the 'spare' slave goes to 'replica'
    # Poll the tablet record; wait_step raises once the timeout is exhausted.
    timeout = 10
    while True:
        ti = utils.run_vtctl_json(['GetTablet', tablet_62044.tablet_alias])
        if ti['Type'] == "replica":
            logging.info("Slave tablet went to replica, good")
            break
        timeout = utils.wait_step('slave tablet going to replica', timeout)
    # make sure the master is still master
    ti = utils.run_vtctl_json(['GetTablet', tablet_62344.tablet_alias])
    self.assertEqual(ti['Type'], 'master', "unexpected master type: %s" % ti['Type'])
    # stop replication on the slave, see it trigger the slave going
    # slightly unhealthy
    tablet_62044.mquery('', 'stop slave')
    # Poll until the health map reports high replication lag.
    timeout = 10
    while True:
        ti = utils.run_vtctl_json(['GetTablet', tablet_62044.tablet_alias])
        if 'Health' in ti and ti['Health']:
            if 'replication_lag' in ti['Health']:
                if ti['Health']['replication_lag'] == 'high':
                    logging.info("Slave tablet replication_lag went to high, good")
                    break
        timeout = utils.wait_step('slave has high replication lag', timeout)
    # make sure the serving graph was updated
    ep = utils.run_vtctl_json(['GetEndPoints', 'test_nj', 'test_keyspace/0', 'replica'])
    if not ep['entries'][0]['health']:
        self.fail('Replication lag parameter not propagated to serving graph: %s' % str(ep))
    self.assertEqual(ep['entries'][0]['health']['replication_lag'], 'high', 'Replication lag parameter not propagated to serving graph: %s' % str(ep))
    tablet.kill_tablets([tablet_62344, tablet_62044])
示例11: test_scrap
def test_scrap(self):
    """Scrapping a replica must leave the SrvShard master cell intact."""
    # Start up a master mysql and vttablet
    utils.run_vtctl(["CreateKeyspace", "test_keyspace"])
    tablet_62344.init_tablet("master", "test_keyspace", "0")
    tablet_62044.init_tablet("replica", "test_keyspace", "0")
    utils.run_vtctl(["RebuildShardGraph", "test_keyspace/*"])
    utils.validate_topology()
    srv_shard = utils.run_vtctl_json(
        ["GetSrvShard", "test_nj", "test_keyspace/0"])
    self.assertEqual(srv_shard["MasterCell"], "test_nj")
    # Scrap the replica; the topology must stay valid afterwards.
    tablet_62044.scrap(force=True)
    utils.validate_topology()
    srv_shard = utils.run_vtctl_json(
        ["GetSrvShard", "test_nj", "test_keyspace/0"])
    self.assertEqual(srv_shard["MasterCell"], "test_nj")
示例12: test_scrap
def test_scrap(self):
    """Scrap a replica and verify the SrvShard record keeps its master cell."""
    # Start up a master mysql and vttablet.
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    tablet_62044.init_tablet('replica', 'test_keyspace', '0')
    utils.run_vtctl(['RebuildShardGraph', 'test_keyspace/*'])

    def _assert_master_cell():
        # Topology must be valid and the SrvShard master cell unchanged.
        utils.validate_topology()
        srv_shard = utils.run_vtctl_json(
            ['GetSrvShard', 'test_nj', 'test_keyspace/0'])
        self.assertEqual(srv_shard['MasterCell'], 'test_nj')

    _assert_master_cell()
    tablet_62044.scrap(force=True)
    _assert_master_cell()
示例13: test_health_check_uid_collision
def test_health_check_uid_collision(self):
    """Two tablets with the same UID: the older one's healthcheck must not
    modify the tablet record once a newer instance has claimed it.
    """
    # If two tablets are running with the same UID, we should prevent the
    # healthcheck on the older one from modifying the tablet record after the
    # record has been claimed by a newer instance.
    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    for t in tablet_62344, tablet_62044:
        t.create_db('vt_test_keyspace')
    # Before starting tablets, simulate another tablet
    # owning the replica's record.
    utils.run_vtctl(['InitTablet', '-allow_update', '-hostname', 'localhost',
                     '-keyspace', 'test_keyspace', '-shard', '0', '-port', '0',
                     '-parent', tablet_62044.tablet_alias, 'replica'])
    # Set up tablets.
    tablet_62344.start_vttablet(wait_for_state=None,
                                target_tablet_type='replica')
    tablet_62044.start_vttablet(wait_for_state=None,
                                target_tablet_type='replica',
                                init_keyspace='test_keyspace',
                                init_shard='0')
    tablet_62344.wait_for_vttablet_state('SERVING')
    tablet_62044.wait_for_vttablet_state('NOT_SERVING')
    utils.run_vtctl(['InitShardMaster', 'test_keyspace/0',
                     tablet_62344.tablet_alias])
    tablet_62044.wait_for_vttablet_state('SERVING')
    # Check that the tablet owns the record.
    # (assertEqual: assertEquals is a deprecated alias, removed in Py3.12.)
    tablet_record = utils.run_vtctl_json(['GetTablet',
                                          tablet_62044.tablet_alias])
    self.assertEqual(tablet_record['port_map']['vt'], tablet_62044.port,
                     "tablet didn't take over the record")
    # Take away ownership again.
    utils.run_vtctl(['InitTablet', '-allow_update', '-hostname', 'localhost',
                     '-keyspace', 'test_keyspace', '-shard', '0', '-port', '0',
                     '-parent', tablet_62044.tablet_alias, 'replica'])
    # Tell the tablets to shutdown gracefully,
    # which normally includes going SPARE.
    tablet.kill_tablets([tablet_62344, tablet_62044])
    # Make sure the tablet record hasn't been touched.
    tablet_record = utils.run_vtctl_json(['GetTablet',
                                          tablet_62044.tablet_alias])
    self.assertEqual(tablet_record['type'],
                     tablet_62044.tablet_type_value['REPLICA'],
                     'tablet changed record without owning it')
示例14: test_sigterm
def test_sigterm(self):
    """SIGTERM to a running vtaction must surface a clean remote error."""
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
    # create the database so vttablets start, as it is serving
    tablet_62344.create_db('vt_test_keyspace')
    tablet_62344.init_tablet('master', 'test_keyspace', '0', start=True)
    # start a 'vtctl Sleep' command, don't wait for it
    action_path, _ = utils.run_vtctl(
        ['-no-wait', 'Sleep', tablet_62344.tablet_alias, '60s'],
        trap_output=True)
    action_path = action_path.strip()
    # wait for the action to be 'Running', capture its pid
    timeout = 10
    while True:
        an = utils.run_vtctl_json(['ReadTabletAction', action_path])
        if an.get('State', None) == 'Running':
            pid = an['Pid']
            logging.info("Action is running with pid %u, good", pid)
            break
        timeout = utils.wait_step('sleep action to run', timeout)
    # let's kill the vtaction process with a regular SIGTERM
    os.kill(pid, signal.SIGTERM)
    # check the vtctl command got the right remote error back
    out, err = utils.run_vtctl(['WaitForAction', action_path],
                               trap_output=True, raise_on_error=False)
    if "vtaction interrupted by signal" not in err:
        self.fail("cannot find expected output in error: " + err)
    # FIX: use lazy %-style logging args instead of eager string
    # concatenation (standard logging idiom; same output).
    logging.debug("vtaction was interrupted correctly:\n%s", err)
    tablet_62344.kill_vttablet()
示例15: _verify_vtctl_set_shard_tablet_control
def _verify_vtctl_set_shard_tablet_control(self):
    """Test that manually editing the blacklisted tables works correctly.

    TODO(mberlin): This is more an integration test and should be moved to the
    Go codebase eventually.
    """
    # Check the 'vtctl SetShardTabletControl' command works as expected.
    # First, clear the rdonly entry:
    utils.run_vtctl(['SetShardTabletControl', '--remove', 'source_keyspace/0',
                     'rdonly'], auto_log=True)
    self._assert_tablet_controls([topodata_pb2.MASTER, topodata_pb2.REPLICA])
    # Re-add rdonly:
    utils.run_vtctl(['SetShardTabletControl', '--tables=moving.*,view1',
                     'source_keyspace/0', 'rdonly'], auto_log=True)
    self._assert_tablet_controls([topodata_pb2.MASTER, topodata_pb2.REPLICA,
                                  topodata_pb2.RDONLY])
    # Then clear all entries, one tablet type at a time:
    for tablet_type in ('rdonly', 'replica', 'master'):
        utils.run_vtctl(['SetShardTabletControl', '--remove',
                         'source_keyspace/0', tablet_type], auto_log=True)
    shard_json = utils.run_vtctl_json(['GetShard', 'source_keyspace/0'])
    self.assertNotIn('tablet_controls', shard_json)