This page collects typical usage examples of the Java method org.apache.solr.common.cloud.ClusterState.getSlice. If you are wondering what ClusterState.getSlice does, how to call it, or what real-world uses look like, the hand-picked code samples below may help. You can also look further into usage examples of the enclosing class, org.apache.solr.common.cloud.ClusterState.
Below are 14 code examples of the ClusterState.getSlice method, sorted by popularity by default.
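As a quick orientation before the examples, here is a minimal sketch of the usual call pattern: obtain the current ClusterState from a ZkStateReader, look up a Slice by collection name and shard name, and null-check the result, since getSlice returns null for an unknown collection or shard. The cloudClient and log variables and the "collection1"/"shard1" names are illustrative assumptions, not taken from the examples below.

import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkStateReader;

// Minimal sketch: assumes an already-initialized cloud client (cloudClient) and an SLF4J logger (log).
ZkStateReader zkStateReader = cloudClient.getZkStateReader();
ClusterState clusterState = zkStateReader.getClusterState();
Slice slice = clusterState.getSlice("collection1", "shard1");
if (slice == null) {
  // getSlice returns null when the collection or shard does not exist
  log.warn("No shard named shard1 found in collection1");
} else {
  log.info("shard1 state: " + slice.getState() + ", replicas: " + slice.getReplicas().size());
}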
Example 1: createReplica
import org.apache.solr.common.cloud.ClusterState; // import the package/class the method depends on
private ClusterState createReplica(ClusterState clusterState, ZkNodeProps message) {
  log.info("createReplica() {} ", message);
  String coll = message.getStr(ZkStateReader.COLLECTION_PROP);
  if (!checkCollectionKeyExistence(message)) return clusterState;
  String slice = message.getStr(ZkStateReader.SHARD_ID_PROP);
  Slice sl = clusterState.getSlice(coll, slice);
  if (sl == null) {
    log.error("Invalid Collection/Slice {}/{} ", coll, slice);
    return clusterState;
  }
  String coreNodeName = Assign.assignNode(coll, clusterState);
  Replica replica = new Replica(coreNodeName,
      makeMap(
          ZkStateReader.CORE_NAME_PROP, message.getStr(ZkStateReader.CORE_NAME_PROP),
          ZkStateReader.BASE_URL_PROP, message.getStr(ZkStateReader.BASE_URL_PROP),
          ZkStateReader.STATE_PROP, message.getStr(ZkStateReader.STATE_PROP)));
  sl.getReplicasMap().put(coreNodeName, replica);
  return clusterState;
}
Example 2: updateShardState
import org.apache.solr.common.cloud.ClusterState; // import the package/class the method depends on
private ClusterState updateShardState(ClusterState clusterState, ZkNodeProps message) {
  String collection = message.getStr(ZkStateReader.COLLECTION_PROP);
  if (!checkCollectionKeyExistence(message)) return clusterState;
  log.info("Update shard state invoked for collection: " + collection + " with message: " + message);
  for (String key : message.keySet()) {
    if (ZkStateReader.COLLECTION_PROP.equals(key)) continue;
    if (QUEUE_OPERATION.equals(key)) continue;
    Slice slice = clusterState.getSlice(collection, key);
    if (slice == null) {
      throw new RuntimeException("Overseer.updateShardState unknown collection: " + collection + " slice: " + key);
    }
    log.info("Update shard state " + key + " to " + message.getStr(key));
    Map<String, Object> props = slice.shallowCopy();
    if (Slice.RECOVERY.equals(props.get(Slice.STATE)) && Slice.ACTIVE.equals(message.getStr(key))) {
      props.remove(Slice.PARENT);
    }
    props.put(Slice.STATE, message.getStr(key));
    Slice newSlice = new Slice(slice.getName(), slice.getReplicasCopy(), props);
    clusterState = updateSlice(clusterState, collection, newSlice);
  }
  return clusterState;
}
Example 3: removeRoutingRule
import org.apache.solr.common.cloud.ClusterState; // import the package/class the method depends on
private ClusterState removeRoutingRule(ClusterState clusterState, ZkNodeProps message) {
  String collection = message.getStr(ZkStateReader.COLLECTION_PROP);
  if (!checkCollectionKeyExistence(message)) return clusterState;
  String shard = message.getStr(ZkStateReader.SHARD_ID_PROP);
  String routeKeyStr = message.getStr("routeKey");
  log.info("Overseer.removeRoutingRule invoked for collection: " + collection
      + " shard: " + shard + " routeKey: " + routeKeyStr);
  Slice slice = clusterState.getSlice(collection, shard);
  if (slice == null) {
    log.warn("Unknown collection: " + collection + " shard: " + shard);
    return clusterState;
  }
  Map<String, RoutingRule> routingRules = slice.getRoutingRules();
  if (routingRules != null) {
    routingRules.remove(routeKeyStr); // no rules left
    Map<String, Object> props = slice.shallowCopy();
    props.put("routingRules", routingRules);
    Slice newSlice = new Slice(slice.getName(), slice.getReplicasCopy(), props);
    clusterState = updateSlice(clusterState, collection, newSlice);
  }
  return clusterState;
}
Example 4: updateStateNew
import org.apache.solr.common.cloud.ClusterState; // import the package/class the method depends on
private ClusterState updateStateNew(ClusterState clusterState, ZkNodeProps message) {
  String collection = message.getStr(ZkStateReader.COLLECTION_PROP);
  if (!checkCollectionKeyExistence(message)) return clusterState;
  String sliceName = message.getStr(ZkStateReader.SHARD_ID_PROP);
  if (collection == null || sliceName == null) {
    log.error("Invalid collection and slice {}", message);
    return clusterState;
  }
  Slice slice = clusterState.getSlice(collection, sliceName);
  if (slice == null) {
    log.error("No such slice exists {}", message);
    return clusterState;
  }
  return updateState(clusterState, message);
}
Example 5: checkSubShardConsistency
import org.apache.solr.common.cloud.ClusterState; // import the package/class the method depends on
protected void checkSubShardConsistency(String shard) throws SolrServerException {
  SolrQuery query = new SolrQuery("*:*").setRows(1000).setFields("id", "_version_");
  query.set("distrib", false);
  ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
  Slice slice = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, shard);
  long[] numFound = new long[slice.getReplicasMap().size()];
  int c = 0;
  for (Replica replica : slice.getReplicas()) {
    String coreUrl = new ZkCoreNodeProps(replica).getCoreUrl();
    HttpSolrServer server = new HttpSolrServer(coreUrl);
    QueryResponse response;
    try {
      response = server.query(query);
    } finally {
      server.shutdown();
    }
    numFound[c++] = response.getResults().getNumFound();
    log.info("Shard: " + shard + " Replica: {} has {} docs", coreUrl, String.valueOf(response.getResults().getNumFound()));
    assertTrue("Shard: " + shard + " Replica: " + coreUrl + " has 0 docs", response.getResults().getNumFound() > 0);
  }
  for (int i = 0; i < slice.getReplicasMap().size(); i++) {
    assertEquals(shard + " is not consistent", numFound[0], numFound[i]);
  }
}
Example 6: confirmShardDeletion
import org.apache.solr.common.cloud.ClusterState; // import the package/class the method depends on
protected void confirmShardDeletion(String shard) throws SolrServerException, KeeperException,
    InterruptedException {
  ZkStateReader zkStateReader = cloudClient.getZkStateReader();
  ClusterState clusterState = zkStateReader.getClusterState();
  int counter = 10;
  while (counter-- > 0) {
    zkStateReader.updateClusterState(true);
    clusterState = zkStateReader.getClusterState();
    if (clusterState.getSlice("collection1", shard) == null) {
      break;
    }
    Thread.sleep(1000);
  }
  assertNull("Cluster still contains shard1 even after waiting for it to be deleted.",
      clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1));
}
Example 7: waitForRuleToExpire
import org.apache.solr.common.cloud.ClusterState; // import the package/class the method depends on
private boolean waitForRuleToExpire(String splitKey, long finishTime) throws KeeperException, InterruptedException, SolrServerException, IOException {
  ClusterState state;
  Slice slice;
  boolean ruleRemoved = false;
  while (System.currentTimeMillis() - finishTime < 60000) {
    getCommonCloudSolrServer().getZkStateReader().updateClusterState(true);
    state = getCommonCloudSolrServer().getZkStateReader().getClusterState();
    slice = state.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD2);
    Map<String, RoutingRule> routingRules = slice.getRoutingRules();
    if (routingRules == null || routingRules.isEmpty() || !routingRules.containsKey(splitKey)) {
      ruleRemoved = true;
      break;
    }
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", splitKey + System.currentTimeMillis());
    cloudClient.add(doc);
    Thread.sleep(1000);
  }
  return ruleRemoved;
}
Example 8: doTest
import org.apache.solr.common.cloud.ClusterState; // import the package/class the method depends on
@Override
public void doTest() throws Exception {
  ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
  Slice slice1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
  Slice slice2 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD2);
  assertNotNull("Shard1 not found", slice1);
  assertNotNull("Shard2 not found", slice2);
  assertEquals("Shard1 is not active", Slice.ACTIVE, slice1.getState());
  assertEquals("Shard2 is not active", Slice.ACTIVE, slice2.getState());
  try {
    deleteShard(SHARD1);
    fail("Deleting an active shard should not have succeeded");
  } catch (HttpSolrServer.RemoteSolrException e) {
    // expected
  }
  setSliceState(SHARD1, Slice.INACTIVE);
  clusterState = cloudClient.getZkStateReader().getClusterState();
  slice1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
  assertEquals("Shard1 is not inactive yet.", Slice.INACTIVE, slice1.getState());
  deleteShard(SHARD1);
  confirmShardDeletion(SHARD1);
  setSliceState(SHARD2, Slice.CONSTRUCTION);
  deleteShard(SHARD2);
  confirmShardDeletion(SHARD2);
}
Example 9: doDefensiveChecks
import org.apache.solr.common.cloud.ClusterState; // import the package/class the method depends on
private void doDefensiveChecks(DistribPhase phase) {
  boolean isReplayOrPeersync = (updateCommand.getFlags() & (UpdateCommand.REPLAY | UpdateCommand.PEER_SYNC)) != 0;
  if (isReplayOrPeersync) return;
  String from = req.getParams().get(DISTRIB_FROM);
  ClusterState clusterState = zkController.getClusterState();
  CloudDescriptor cloudDescriptor = req.getCore().getCoreDescriptor().getCloudDescriptor();
  Slice mySlice = clusterState.getSlice(collection, cloudDescriptor.getShardId());
  boolean localIsLeader = cloudDescriptor.isLeader();
  if (DistribPhase.FROMLEADER == phase && localIsLeader && from != null) { // from will be null on log replay
    String fromShard = req.getParams().get(DISTRIB_FROM_PARENT);
    if (fromShard != null) {
      if (Slice.ACTIVE.equals(mySlice.getState())) {
        throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
            "Request says it is coming from parent shard leader but we are in active state");
      }
      // shard splitting case -- check ranges to see if we are a sub-shard
      Slice fromSlice = zkController.getClusterState().getCollection(collection).getSlice(fromShard);
      DocRouter.Range parentRange = fromSlice.getRange();
      if (parentRange == null) parentRange = new DocRouter.Range(Integer.MIN_VALUE, Integer.MAX_VALUE);
      if (mySlice.getRange() != null && !mySlice.getRange().isSubsetOf(parentRange)) {
        throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
            "Request says it is coming from parent shard leader but parent hash range is not superset of my range");
      }
    } else {
      String fromCollection = req.getParams().get(DISTRIB_FROM_COLLECTION); // is it because of a routing rule?
      if (fromCollection == null) {
        log.error("Request says it is coming from leader, but we are the leader: " + req.getParamString());
        throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "Request says it is coming from leader, but we are the leader");
      }
    }
  }
  if ((isLeader && !localIsLeader) || (isSubShardLeader && !localIsLeader)) {
    log.error("ClusterState says we are the leader, but locally we don't think so");
    throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
        "ClusterState says we are the leader (" + zkController.getBaseUrl()
            + "/" + req.getCore().getName() + "), but locally we don't think so. Request came from " + from);
  }
}
Example 10: doDefensiveChecks
import org.apache.solr.common.cloud.ClusterState; // import the package/class the method depends on
private void doDefensiveChecks(DistribPhase phase) {
  boolean isReplayOrPeersync = (updateCommand.getFlags() & (UpdateCommand.REPLAY | UpdateCommand.PEER_SYNC)) != 0;
  if (isReplayOrPeersync) return;
  String from = req.getParams().get(DISTRIB_FROM);
  ClusterState clusterState = zkController.getClusterState();
  CloudDescriptor cloudDescriptor = req.getCore().getCoreDescriptor().getCloudDescriptor();
  Slice mySlice = clusterState.getSlice(collection, cloudDescriptor.getShardId());
  boolean localIsLeader = cloudDescriptor.isLeader();
  if (DistribPhase.FROMLEADER == phase && localIsLeader && from != null) { // from will be null on log replay
    String fromShard = req.getParams().get(DISTRIB_FROM_PARENT);
    if (fromShard != null) {
      if (Slice.ACTIVE.equals(mySlice.getState())) {
        throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
            "Request says it is coming from parent shard leader but we are in active state");
      }
      // shard splitting case -- check ranges to see if we are a sub-shard
      Slice fromSlice = zkController.getClusterState().getCollection(collection).getSlice(fromShard);
      DocRouter.Range parentRange = fromSlice.getRange();
      if (parentRange == null) parentRange = new DocRouter.Range(Integer.MIN_VALUE, Integer.MAX_VALUE);
      if (mySlice.getRange() != null && !mySlice.getRange().isSubsetOf(parentRange)) {
        throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
            "Request says it is coming from parent shard leader but parent hash range is not superset of my range");
      }
    } else {
      String fromCollection = req.getParams().get(DISTRIB_FROM_COLLECTION); // is it because of a routing rule?
      if (fromCollection == null) {
        log.error("Request says it is coming from leader, but we are the leader: " + req.getParamString());
        SolrException solrExc = new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "Request says it is coming from leader, but we are the leader");
        solrExc.setMetadata("cause", "LeaderChanged");
        throw solrExc;
      }
    }
  }
  if ((isLeader && !localIsLeader) || (isSubShardLeader && !localIsLeader)) {
    log.error("ClusterState says we are the leader, but locally we don't think so");
    throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
        "ClusterState says we are the leader (" + zkController.getBaseUrl()
            + "/" + req.getCore().getName() + "), but locally we don't think so. Request came from " + from);
  }
}
Example 11: waitToSeeReplicasActive
import org.apache.solr.common.cloud.ClusterState; // import the package/class the method depends on
protected void waitToSeeReplicasActive(String testCollectionName, String shardId, Set<String> replicasToCheck, int maxWaitSecs) throws Exception {
  long startMs = System.currentTimeMillis();
  ZkStateReader zkr = cloudClient.getZkStateReader();
  zkr.updateClusterState(true); // force the state to be fresh
  ClusterState cs = zkr.getClusterState();
  Collection<Slice> slices = cs.getActiveSlices(testCollectionName);
  boolean allReplicasUp = false;
  long waitMs = 0L;
  long maxWaitMs = maxWaitSecs * 1000L;
  while (waitMs < maxWaitMs && !allReplicasUp) {
    // refresh state every 2 secs
    if (waitMs % 2000 == 0)
      cloudClient.getZkStateReader().updateClusterState(true);
    cs = cloudClient.getZkStateReader().getClusterState();
    assertNotNull(cs);
    Slice shard = cs.getSlice(testCollectionName, shardId);
    assertNotNull("No Slice for " + shardId, shard);
    allReplicasUp = true; // assume true
    // wait to see all replicas are "active"
    for (Replica replica : shard.getReplicas()) {
      if (!replicasToCheck.contains(replica.getName()))
        continue;
      String replicaState = replica.getStr(ZkStateReader.STATE_PROP);
      if (!ZkStateReader.ACTIVE.equals(replicaState)) {
        log.info("Replica " + replica.getName() + " is currently " + replicaState);
        allReplicasUp = false;
      }
    }
    if (!allReplicasUp) {
      try {
        Thread.sleep(1000L);
      } catch (Exception ignoreMe) {}
      waitMs += 1000L;
    }
  } // end while
  if (!allReplicasUp)
    fail("Didn't see replicas " + replicasToCheck +
        " come up within " + maxWaitMs + " ms! ClusterState: " + printClusterStateInfo(testCollectionName));
  long diffMs = (System.currentTimeMillis() - startMs);
  log.info("Took " + diffMs + " ms to see replicas [" + replicasToCheck + "] become active.");
}
Example 12: splitByRouteFieldTest
import org.apache.solr.common.cloud.ClusterState; // import the package/class the method depends on
public void splitByRouteFieldTest() throws Exception {
  log.info("Starting testSplitWithRouteField");
  String collectionName = "routeFieldColl";
  int numShards = 4;
  int replicationFactor = 2;
  int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrServer()
      .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
  HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
  CloudSolrServer client = null;
  String shard_fld = "shard_s";
  try {
    client = createCloudClient(null);
    Map<String, Object> props = ZkNodeProps.makeMap(
        REPLICATION_FACTOR, replicationFactor,
        MAX_SHARDS_PER_NODE, maxShardsPerNode,
        NUM_SLICES, numShards,
        "router.field", shard_fld);
    createCollection(collectionInfos, collectionName, props, client);
  } finally {
    if (client != null) client.shutdown();
  }
  List<Integer> list = collectionInfos.get(collectionName);
  checkForCollection(collectionName, list, null);
  waitForRecoveriesToFinish(false);
  String url = CustomCollectionTest.getUrlFromZk(getCommonCloudSolrServer().getZkStateReader().getClusterState(), collectionName);
  HttpSolrServer collectionClient = new HttpSolrServer(url);
  ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
  final DocRouter router = clusterState.getCollection(collectionName).getRouter();
  Slice shard1 = clusterState.getSlice(collectionName, SHARD1);
  DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
  final List<DocRouter.Range> ranges = router.partitionRange(2, shard1Range);
  final int[] docCounts = new int[ranges.size()];
  for (int i = 100; i <= 200; i++) {
    String shardKey = "" + (char) ('a' + (i % 26)); // See comment in ShardRoutingTest for hash distribution
    collectionClient.add(getDoc(id, i, "n_ti", i, shard_fld, shardKey));
    int idx = getHashRangeIdx(router, ranges, shardKey);
    if (idx != -1) {
      docCounts[idx]++;
    }
  }
  for (int i = 0; i < docCounts.length; i++) {
    int docCount = docCounts[i];
    log.info("Shard {} docCount = {}", "shard1_" + i, docCount);
  }
  collectionClient.commit();
  for (int i = 0; i < 3; i++) {
    try {
      splitShard(collectionName, SHARD1, null, null);
      break;
    } catch (HttpSolrServer.RemoteSolrException e) {
      if (e.code() != 500) {
        throw e;
      }
      log.error("SPLITSHARD failed. " + (i < 2 ? " Retring split" : ""), e);
      if (i == 2) {
        fail("SPLITSHARD was not successful even after three tries");
      }
    }
  }
  waitForRecoveriesToFinish(collectionName, false);
  assertEquals(docCounts[0], collectionClient.query(new SolrQuery("*:*").setParam("shards", "shard1_0")).getResults().getNumFound());
  assertEquals(docCounts[1], collectionClient.query(new SolrQuery("*:*").setParam("shards", "shard1_1")).getResults().getNumFound());
  collectionClient.shutdown();
}
Example 13: checkDocCountsAndShardStates
import org.apache.solr.common.cloud.ClusterState; // import the package/class the method depends on
protected void checkDocCountsAndShardStates(int[] docCounts, int numReplicas) throws Exception {
  ClusterState clusterState = null;
  Slice slice1_0 = null, slice1_1 = null;
  int i = 0;
  for (i = 0; i < 10; i++) {
    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
    zkStateReader.updateClusterState(true);
    clusterState = zkStateReader.getClusterState();
    slice1_0 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, "shard1_0");
    slice1_1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, "shard1_1");
    if (Slice.ACTIVE.equals(slice1_0.getState()) && Slice.ACTIVE.equals(slice1_1.getState()))
      break;
    Thread.sleep(500);
  }
  log.info("ShardSplitTest waited for {} ms for shard state to be set to active", i * 500);
  assertNotNull("Cluster state does not contain shard1_0", slice1_0);
  assertNotNull("Cluster state does not contain shard1_1", slice1_1);
  assertEquals("shard1_0 is not active", Slice.ACTIVE, slice1_0.getState());
  assertEquals("shard1_1 is not active", Slice.ACTIVE, slice1_1.getState());
  assertEquals("Wrong number of replicas created for shard1_0", numReplicas, slice1_0.getReplicas().size());
  assertEquals("Wrong number of replicas created for shard1_1", numReplicas, slice1_1.getReplicas().size());
  commit();
  // can't use checkShardConsistency because it insists on jettys and clients for each shard
  checkSubShardConsistency(SHARD1_0);
  checkSubShardConsistency(SHARD1_1);
  SolrQuery query = new SolrQuery("*:*").setRows(1000).setFields("id", "_version_");
  query.set("distrib", false);
  ZkCoreNodeProps shard1_0 = getLeaderUrlFromZk(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_0);
  HttpSolrServer shard1_0Server = new HttpSolrServer(shard1_0.getCoreUrl());
  QueryResponse response;
  try {
    response = shard1_0Server.query(query);
  } finally {
    shard1_0Server.shutdown();
  }
  long shard10Count = response.getResults().getNumFound();
  ZkCoreNodeProps shard1_1 = getLeaderUrlFromZk(
      AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_1);
  HttpSolrServer shard1_1Server = new HttpSolrServer(shard1_1.getCoreUrl());
  QueryResponse response2;
  try {
    response2 = shard1_1Server.query(query);
  } finally {
    shard1_1Server.shutdown();
  }
  long shard11Count = response2.getResults().getNumFound();
  logDebugHelp(docCounts, response, shard10Count, response2, shard11Count);
  assertEquals("Wrong doc count on shard1_0. See SOLR-5309", docCounts[0], shard10Count);
  assertEquals("Wrong doc count on shard1_1. See SOLR-5309", docCounts[1], shard11Count);
}
Example 14: multipleShardMigrateTest
import org.apache.solr.common.cloud.ClusterState; // import the package/class the method depends on
protected void multipleShardMigrateTest() throws Exception {
  del("*:*");
  commit();
  assertTrue(cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound() == 0);
  final String splitKey = "a";
  final int BIT_SEP = 1;
  final int[] splitKeyCount = new int[1];
  for (int id = 0; id < 26 * 3; id++) {
    String shardKey = "" + (char) ('a' + (id % 26)); // See comment in ShardRoutingTest for hash distribution
    String key = shardKey;
    if (splitKey.equals(shardKey)) {
      key += "/" + BIT_SEP; // spread it over half the collection
    }
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", key + "!" + id);
    doc.addField("n_ti", id);
    cloudClient.add(doc);
    if (splitKey.equals(shardKey))
      splitKeyCount[0]++;
  }
  assertTrue(splitKeyCount[0] > 0);
  String targetCollection = "migrate_multipleshardtest_targetCollection";
  createCollection(targetCollection);
  Indexer indexer = new Indexer(cloudClient, splitKey, 1, 30);
  indexer.start();
  String url = CustomCollectionTest.getUrlFromZk(getCommonCloudSolrServer().getZkStateReader().getClusterState(), targetCollection);
  HttpSolrServer collectionClient = new HttpSolrServer(url);
  SolrQuery solrQuery = new SolrQuery("*:*");
  assertEquals("DocCount on target collection does not match", 0, collectionClient.query(solrQuery).getResults().getNumFound());
  invokeMigrateApi(AbstractDistribZkTestBase.DEFAULT_COLLECTION, splitKey + "/" + BIT_SEP + "!", targetCollection);
  long finishTime = System.currentTimeMillis();
  indexer.join();
  splitKeyCount[0] += indexer.getSplitKeyCount();
  try {
    cloudClient.deleteById("a/" + BIT_SEP + "!104");
    splitKeyCount[0]--;
  } catch (Exception e) {
    log.warn("Error deleting document a/" + BIT_SEP + "!104", e);
  }
  cloudClient.commit();
  collectionClient.commit();
  solrQuery = new SolrQuery("*:*").setRows(1000);
  QueryResponse response = collectionClient.query(solrQuery);
  log.info("Response from target collection: " + response);
  assertEquals("DocCount on target collection does not match", splitKeyCount[0], response.getResults().getNumFound());
  collectionClient.shutdown();
  collectionClient = null;
  getCommonCloudSolrServer().getZkStateReader().updateClusterState(true);
  ClusterState state = getCommonCloudSolrServer().getZkStateReader().getClusterState();
  Slice slice = state.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD2);
  assertNotNull("Routing rule map is null", slice.getRoutingRules());
  assertFalse("Routing rule map is empty", slice.getRoutingRules().isEmpty());
  assertNotNull("No routing rule exists for route key: " + splitKey, slice.getRoutingRules().get(splitKey + "!"));
  boolean ruleRemoved = waitForRuleToExpire(splitKey, finishTime);
  assertTrue("Routing rule was not expired", ruleRemoved);
}