本文整理汇总了Java中org.apache.solr.common.cloud.ZkCoreNodeProps.getCoreUrl方法的典型用法代码示例。如果您正苦于以下问题:Java ZkCoreNodeProps.getCoreUrl方法的具体用法?Java ZkCoreNodeProps.getCoreUrl怎么用?Java ZkCoreNodeProps.getCoreUrl使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.solr.common.cloud.ZkCoreNodeProps
的用法示例。
在下文中一共展示了ZkCoreNodeProps.getCoreUrl方法的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getUrlFromZk
import org.apache.solr.common.cloud.ZkCoreNodeProps; //导入方法依赖的package包/类
/**
 * Returns the core URL (base URL + collection) of the first replica of
 * {@code collection} found on a currently-live node.
 *
 * @param clusterState snapshot of the cluster state to search
 * @param collection   the collection whose replicas are examined
 * @return a core URL for a replica hosted on a live node
 * @throws SolrException   (BAD_REQUEST) if the collection has no slice map
 * @throws RuntimeException if no replica is hosted on a live node
 */
public static String getUrlFromZk(ClusterState clusterState, String collection) {
  Map<String,Slice> sliceMap = clusterState.getCollection(collection).getSlicesMap();
  if (sliceMap == null) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Could not find collection:" + collection);
  }
  for (Slice slice : sliceMap.values()) {
    for (Replica replica : slice.getReplicasMap().values()) {
      // only replicas whose hosting node is live are usable
      if (clusterState.liveNodesContain(replica.getStr(ZkStateReader.NODE_NAME_PROP))) {
        return ZkCoreNodeProps.getCoreUrl(replica.getStr(ZkStateReader.BASE_URL_PROP), collection);
      }
    }
  }
  throw new RuntimeException("Could not find a live node for collection:" + collection);
}
示例2: getLeaderUrl
import org.apache.solr.common.cloud.ZkCoreNodeProps; //导入方法依赖的package包/类
/**
 * Looks up the leader core URL for the given shard from ZooKeeper,
 * retrying every 500ms for up to 60 attempts while the leader znode
 * does not yet exist.
 *
 * @throws RuntimeException if no leader appears within the retry budget
 */
private String getLeaderUrl(final String collection, final String slice)
    throws KeeperException, InterruptedException {
  for (int attempt = 0; attempt < 60; attempt++) {
    try {
      byte[] data = zkClient.getData(
          ZkStateReader.getShardLeadersPath(collection, slice), null, null, true);
      return new ZkCoreNodeProps(ZkNodeProps.load(data)).getCoreUrl();
    } catch (NoNodeException e) {
      // leader not elected (or znode not written) yet; wait and retry
      Thread.sleep(500);
    }
  }
  // dump the ZK layout to help diagnose why no leader ever appeared
  zkClient.printLayoutToStdOut();
  throw new RuntimeException("Could not get leader props");
}
示例3: getUrlFromZk
import org.apache.solr.common.cloud.ZkCoreNodeProps; //导入方法依赖的package包/类
/**
 * Finds the URL of a replica of {@code collection} that is hosted on a
 * live node, reading the cluster state from the shared cloud server.
 *
 * @throws SolrException   (BAD_REQUEST) if the collection has no slice map
 * @throws RuntimeException if no replica is hosted on a live node
 */
private String getUrlFromZk(String collection) {
  ClusterState clusterState = getCommonCloudSolrServer().getZkStateReader().getClusterState();
  Map<String,Slice> sliceMap = clusterState.getSlicesMap(collection);
  if (sliceMap == null) {
    throw new SolrException(ErrorCode.BAD_REQUEST, "Could not find collection:" + collection);
  }
  for (Slice slice : sliceMap.values()) {
    for (Replica replica : slice.getReplicasMap().values()) {
      // only replicas whose hosting node is live are usable
      if (clusterState.liveNodesContain(replica.getStr(ZkStateReader.NODE_NAME_PROP))) {
        return ZkCoreNodeProps.getCoreUrl(replica.getStr(ZkStateReader.BASE_URL_PROP), collection);
      }
    }
  }
  throw new RuntimeException("Could not find a live node for collection:" + collection);
}
示例4: getUrlFromZk
import org.apache.solr.common.cloud.ZkCoreNodeProps; //导入方法依赖的package包/类
/**
 * Finds the URL of a replica of {@code collection} that is hosted on a
 * live node, navigating via the collection-states map.
 *
 * @throws SolrException   (BAD_REQUEST) if the collection has no slice map
 * @throws RuntimeException if no replica is hosted on a live node
 */
private String getUrlFromZk(String collection) {
  ClusterState clusterState = getCommonCloudSolrServer().getZkStateReader().getClusterState();
  Map<String,Slice> sliceMap = clusterState.getCollectionStates().get(collection).getSlicesMap();
  if (sliceMap == null) {
    throw new SolrException(ErrorCode.BAD_REQUEST, "Could not find collection:" + collection);
  }
  for (Slice slice : sliceMap.values()) {
    for (Replica replica : slice.getReplicasMap().values()) {
      // only replicas whose hosting node is live are usable
      if (clusterState.liveNodesContain(replica.getStr(ZkStateReader.NODE_NAME_PROP))) {
        return ZkCoreNodeProps.getCoreUrl(replica.getStr(ZkStateReader.BASE_URL_PROP), collection);
      }
    }
  }
  throw new RuntimeException("Could not find a live node for collection:" + collection);
}
示例5: buildUrlMap
import org.apache.solr.common.cloud.ZkCoreNodeProps; //导入方法依赖的package包/类
/**
 * Builds a map from active slice name to the list of replica core URLs for
 * that slice, with the leader's URL always placed first.
 *
 * @param col the collection whose active slices are mapped
 * @return slice-name to URL-list map, or {@code null} when any slice has no
 *         known leader yet (caller then takes the unoptimized general path)
 */
private Map<String,List<String>> buildUrlMap(DocCollection col) {
  Map<String,List<String>> urlMap = new HashMap<>();
  for (Slice slice : col.getActiveSlices()) {
    String sliceName = slice.getName();
    Replica leader = slice.getLeader();
    if (leader == null) {
      // take unoptimized general path - we cannot find a leader yet
      return null;
    }
    List<String> urls = new ArrayList<>();
    // leader URL goes first
    urls.add(new ZkCoreNodeProps(leader).getCoreUrl());
    for (Replica replica : slice.getReplicas()) {
      // skip the leader itself (matched by node name AND replica name)
      if (!replica.getNodeName().equals(leader.getNodeName()) &&
          !replica.getName().equals(leader.getName())) {
        urls.add(new ZkCoreNodeProps(replica).getCoreUrl());
      }
    }
    urlMap.put(sliceName, urls);
  }
  return urlMap;
}
示例6: getCoreUrl
import org.apache.solr.common.cloud.ZkCoreNodeProps; //导入方法依赖的package包/类
/**
 * Picks the URL of a remote core serving {@code collectionName}, skipping
 * the local node. When {@code activeReplicas} is true only replicas that are
 * both on a live node and in ACTIVE state qualify; when {@code byCoreName}
 * is true the replica's core name must equal {@code collectionName}.
 *
 * @return the chosen core URL (trailing slash stripped unless
 *         {@code origCorename} overrides the core path), or {@code null}
 *         when no suitable remote core exists
 */
private String getCoreUrl(CoreContainer cores, String collectionName,
    String origCorename, ClusterState clusterState, Collection<Slice> slices,
    boolean byCoreName, boolean activeReplicas) {
  Set<String> liveNodes = clusterState.getLiveNodes();
  for (Slice slice : slices) {
    for (ZkNodeProps nodeProps : slice.getReplicasMap().values()) {
      ZkCoreNodeProps coreNodeProps = new ZkCoreNodeProps(nodeProps);
      boolean usable = !activeReplicas
          || (liveNodes.contains(coreNodeProps.getNodeName())
              && coreNodeProps.getState().equals(ZkStateReader.ACTIVE));
      if (!usable) {
        continue;
      }
      if (byCoreName && !collectionName.equals(coreNodeProps.getCoreName())) {
        // if it's by core name, make sure they match
        continue;
      }
      if (coreNodeProps.getBaseUrl().equals(cores.getZkController().getBaseUrl())) {
        // don't count a local core
        continue;
      }
      if (origCorename != null) {
        return coreNodeProps.getBaseUrl() + "/" + origCorename;
      }
      String coreUrl = coreNodeProps.getCoreUrl();
      return coreUrl.endsWith("/")
          ? coreUrl.substring(0, coreUrl.length() - 1)
          : coreUrl;
    }
  }
  return null;
}
示例7: replicate
import org.apache.solr.common.cloud.ZkCoreNodeProps; //导入方法依赖的package包/类
/**
 * Pulls a full index copy from the shard leader as part of recovery:
 * commits on the leader first, then drives the local ReplicationHandler
 * synchronously to fetch the leader's index.
 *
 * @param nodeName    this node's name (not referenced in this variant)
 * @param core        the local core to replicate into
 * @param leaderprops ZK node properties of the shard leader
 * @throws SolrServerException on failure committing on the leader
 * @throws IOException         on communication failure
 * @throws SolrException       SERVICE_UNAVAILABLE if no replication handler
 *                             is registered; SERVER_ERROR if the fetch fails
 */
private void replicate(String nodeName, SolrCore core, ZkNodeProps leaderprops)
throws SolrServerException, IOException {
ZkCoreNodeProps leaderCNodeProps = new ZkCoreNodeProps(leaderprops);
String leaderUrl = leaderCNodeProps.getCoreUrl();
log.info("Attempting to replicate from " + leaderUrl + ". core=" + coreName);
// send commit
commitOnLeader(leaderUrl);
// use rep handler directly, so we can do this sync rather than async
SolrRequestHandler handler = core.getRequestHandler(REPLICATION_HANDLER);
if (handler instanceof LazyRequestHandlerWrapper) {
// unwrap so the cast below sees the real ReplicationHandler
handler = ((LazyRequestHandlerWrapper) handler).getWrappedHandler();
}
ReplicationHandler replicationHandler = (ReplicationHandler) handler;
if (replicationHandler == null) {
throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
"Skipping recovery, no " + REPLICATION_HANDLER + " handler found");
}
ModifiableSolrParams solrParams = new ModifiableSolrParams();
solrParams.set(ReplicationHandler.MASTER_URL, leaderUrl);
if (isClosed()) return; // we check closed on return
boolean success = replicationHandler.doFetch(solrParams, false);
if (!success) {
throw new SolrException(ErrorCode.SERVER_ERROR,
"Replication for recovery failed.");
}
// solrcloud_debug
if (log.isDebugEnabled()) {
// log the replicated doc count and index files for post-mortem debugging
try {
RefCounted<SolrIndexSearcher> searchHolder = core
.getNewestSearcher(false);
SolrIndexSearcher searcher = searchHolder.get();
Directory dir = core.getDirectoryFactory().get(core.getIndexDir(), DirContext.META_DATA, null);
try {
log.debug(core.getCoreDescriptor().getCoreContainer()
.getZkController().getNodeName()
+ " replicated "
+ searcher.search(new MatchAllDocsQuery(), 1).totalHits
+ " from "
+ leaderUrl
+ " gen:"
+ core.getDeletionPolicy().getLatestCommit().getGeneration()
+ " data:" + core.getDataDir()
+ " index:" + core.getIndexDir()
+ " newIndex:" + core.getNewIndexDir()
+ " files:" + Arrays.asList(dir.listAll()))
;
} finally {
// always release the directory and searcher references
core.getDirectoryFactory().release(dir);
searchHolder.decref();
}
} catch (Exception e) {
throw new SolrException(ErrorCode.SERVER_ERROR, null, e);
}
}
}
示例8: multiShardTest
import org.apache.solr.common.cloud.ZkCoreNodeProps; //导入方法依赖的package包/类
/**
 * Verifies that a commit sent to the shard2 leader still succeeds while the
 * shard1 leader is partitioned away, and that the shard1 leader reports
 * "active" again after the partition heals.
 */
private void multiShardTest() throws Exception {
  // create a collection that has 2 shards with 2 replicas each
  // (the old comment claimed 1 shard / 3 replicas, which did not match)
  String testCollectionName = "c8n_2x2_commits";
  createCollection(testCollectionName, 2, 2, 1);
  cloudClient.setDefaultCollection(testCollectionName);
  List<Replica> notLeaders =
      ensureAllReplicasAreActive(testCollectionName, "shard1", 2, 2, 30);
  assertTrue("Expected 1 replicas for collection " + testCollectionName
      + " but found " + notLeaders.size() + "; clusterState: "
      + printClusterStateInfo(),
      notLeaders.size() == 1);
  // put the shard1 leader in its own partition; no replica can contact it now
  Replica leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1");
  SocketProxy leaderProxy = getProxyForReplica(leader);
  leaderProxy.close();
  // find the leader of shard2 and ask it to commit
  Replica shard2Leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard2");
  HttpSolrServer server = new HttpSolrServer(
      ZkCoreNodeProps.getCoreUrl(shard2Leader.getStr("base_url"), shard2Leader.getStr("core")));
  try {
    server.commit();
  } finally {
    // was leaked before; shut down to release the underlying HttpClient
    server.shutdown();
  }
  Thread.sleep(sleepMsBeforeHealPartition);
  cloudClient.getZkStateReader().updateClusterState(true); // get the latest state
  leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1");
  assertEquals("Leader was not active", "active", leader.getStr("state"));
  leaderProxy.reopen();
  Thread.sleep(sleepMsBeforeHealPartition);
  // try to clean up
  try {
    CollectionAdminRequest req = new CollectionAdminRequest.Delete();
    req.setCollectionName(testCollectionName);
    req.process(cloudClient);
  } catch (Exception e) {
    // don't fail the test over cleanup problems
    log.warn("Could not delete collection {} after test completed", testCollectionName);
  }
}
示例9: oneShardTest
import org.apache.solr.common.cloud.ZkCoreNodeProps; //导入方法依赖的package包/类
/**
 * Verifies that a commit sent to a follower replica succeeds while the
 * shard leader is partitioned away, and that the leader reports "active"
 * again after the partition heals.
 */
private void oneShardTest() throws Exception {
  // create a collection that has 1 shard and 3 replicas
  String testCollectionName = "c8n_1x3_commits";
  createCollection(testCollectionName, 1, 3, 1);
  cloudClient.setDefaultCollection(testCollectionName);
  List<Replica> notLeaders =
      ensureAllReplicasAreActive(testCollectionName, "shard1", 1, 3, 30);
  assertTrue("Expected 2 replicas for collection " + testCollectionName
      + " but found " + notLeaders.size() + "; clusterState: "
      + printClusterStateInfo(),
      notLeaders.size() == 2);
  // put the leader in its own partition; no replica can contact it now
  Replica leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1");
  SocketProxy leaderProxy = getProxyForReplica(leader);
  leaderProxy.close();
  // commit against one of the followers while the leader is unreachable
  Replica replica = notLeaders.get(0);
  HttpSolrServer server = new HttpSolrServer(
      ZkCoreNodeProps.getCoreUrl(replica.getStr("base_url"), replica.getStr("core")));
  try {
    server.commit();
  } finally {
    // was leaked before; shut down to release the underlying HttpClient
    server.shutdown();
  }
  Thread.sleep(sleepMsBeforeHealPartition);
  cloudClient.getZkStateReader().updateClusterState(true); // get the latest state
  leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1");
  assertEquals("Leader was not active", "active", leader.getStr("state"));
  leaderProxy.reopen();
  Thread.sleep(sleepMsBeforeHealPartition);
  // try to clean up
  try {
    CollectionAdminRequest req = new CollectionAdminRequest.Delete();
    req.setCollectionName(testCollectionName);
    req.process(cloudClient);
  } catch (Exception e) {
    // don't fail the test over cleanup problems
    log.warn("Could not delete collection {} after test completed", testCollectionName);
  }
}
示例10: testLeaderInitiatedRecoveryCRUD
import org.apache.solr.common.cloud.ZkCoreNodeProps; //导入方法依赖的package包/类
/**
 * Tests CRUD handling of leader-initiated-recovery (LIR) state znodes:
 * marking a replica for recovery, reading its state back, removing it,
 * and parsing the legacy non-JSON ("down") znode payload format.
 */
protected void testLeaderInitiatedRecoveryCRUD() throws Exception {
String testCollectionName = "c8n_crud_1x2";
String shardId = "shard1";
createCollection(testCollectionName, 1, 2, 1);
cloudClient.setDefaultCollection(testCollectionName);
Replica leader =
cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, shardId);
// locate the ZkController of the Jetty instance hosting the leader
JettySolrRunner leaderJetty = getJettyOnPort(getReplicaPort(leader));
CoreContainer cores = ((SolrDispatchFilter)leaderJetty.getDispatchFilter().getFilter()).getCores();
ZkController zkController = cores.getZkController();
assertNotNull("ZkController is null", zkController);
Replica notLeader =
ensureAllReplicasAreActive(testCollectionName, shardId, 1, 2, maxWaitSecsToSeeAllActive).get(0);
ZkCoreNodeProps replicaCoreNodeProps = new ZkCoreNodeProps(notLeader);
String replicaUrl = replicaCoreNodeProps.getCoreUrl();
// replica starts out not tracked for recovery
assertTrue(!zkController.isReplicaInRecoveryHandling(replicaUrl));
// mark it for LIR; it must then be tracked and its znode state DOWN
assertTrue(zkController.ensureReplicaInLeaderInitiatedRecovery(testCollectionName, shardId, replicaUrl, replicaCoreNodeProps, false));
assertTrue(zkController.isReplicaInRecoveryHandling(replicaUrl));
Map<String,Object> lirStateMap = zkController.getLeaderInitiatedRecoveryStateObject(testCollectionName, shardId, notLeader.getName());
assertNotNull(lirStateMap);
assertEquals(ZkStateReader.DOWN, lirStateMap.get("state"));
// removing the replica from LIR handling clears the tracking flag
zkController.removeReplicaFromLeaderInitiatedRecoveryHandling(replicaUrl);
assertTrue(!zkController.isReplicaInRecoveryHandling(replicaUrl));
// test old non-json format handling
SolrZkClient zkClient = zkController.getZkClient();
String znodePath = zkController.getLeaderInitiatedRecoveryZnodePath(testCollectionName, shardId, notLeader.getName());
zkClient.setData(znodePath, "down".getBytes(StandardCharsets.UTF_8), true);
lirStateMap = zkController.getLeaderInitiatedRecoveryStateObject(testCollectionName, shardId, notLeader.getName());
assertNotNull(lirStateMap);
assertEquals(ZkStateReader.DOWN, lirStateMap.get("state"));
zkClient.delete(znodePath, -1, false);
// try to clean up
try {
CollectionAdminRequest req = new CollectionAdminRequest.Delete();
req.setCollectionName(testCollectionName);
req.process(cloudClient);
} catch (Exception e) {
// don't fail the test
log.warn("Could not delete collection {} after test completed", testCollectionName);
}
}
示例11: checkDocCountsAndShardStates
import org.apache.solr.common.cloud.ZkCoreNodeProps; //导入方法依赖的package包/类
/**
 * After a shard split, waits (up to ~5s) for both sub-shards to become
 * ACTIVE, checks replica counts and sub-shard consistency, then queries
 * each sub-shard leader directly (distrib=false) and asserts the document
 * counts match the expected values.
 *
 * @param docCounts   expected doc counts, index 0 for shard1_0, 1 for shard1_1
 * @param numReplicas expected number of replicas per sub-shard
 */
protected void checkDocCountsAndShardStates(int[] docCounts, int numReplicas) throws Exception {
  ClusterState clusterState = null;
  Slice slice1_0 = null, slice1_1 = null;
  int i = 0;
  for (i = 0; i < 10; i++) {
    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
    zkStateReader.updateClusterState(true);
    clusterState = zkStateReader.getClusterState();
    slice1_0 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, "shard1_0");
    slice1_1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, "shard1_1");
    if (Slice.ACTIVE.equals(slice1_0.getState()) && Slice.ACTIVE.equals(slice1_1.getState()))
      break;
    Thread.sleep(500);
  }
  log.info("ShardSplitTest waited for {} ms for shard state to be set to active", i * 500);
  assertNotNull("Cluster state does not contain shard1_0", slice1_0);
  // fixed message: this assertion checks shard1_1, not shard1_0
  assertNotNull("Cluster state does not contain shard1_1", slice1_1);
  assertEquals("shard1_0 is not active", Slice.ACTIVE, slice1_0.getState());
  assertEquals("shard1_1 is not active", Slice.ACTIVE, slice1_1.getState());
  assertEquals("Wrong number of replicas created for shard1_0", numReplicas, slice1_0.getReplicas().size());
  assertEquals("Wrong number of replicas created for shard1_1", numReplicas, slice1_1.getReplicas().size());
  commit();
  // can't use checkShardConsistency because it insists on jettys and clients for each shard
  checkSubShardConsistency(SHARD1_0);
  checkSubShardConsistency(SHARD1_1);
  SolrQuery query = new SolrQuery("*:*").setRows(1000).setFields("id", "_version_");
  query.set("distrib", false); // query each sub-shard leader directly
  ZkCoreNodeProps shard1_0 = getLeaderUrlFromZk(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_0);
  HttpSolrServer shard1_0Server = new HttpSolrServer(shard1_0.getCoreUrl());
  QueryResponse response;
  try {
    response = shard1_0Server.query(query);
  } finally {
    shard1_0Server.shutdown();
  }
  long shard10Count = response.getResults().getNumFound();
  ZkCoreNodeProps shard1_1 = getLeaderUrlFromZk(
      AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_1);
  HttpSolrServer shard1_1Server = new HttpSolrServer(shard1_1.getCoreUrl());
  QueryResponse response2;
  try {
    response2 = shard1_1Server.query(query);
  } finally {
    shard1_1Server.shutdown();
  }
  long shard11Count = response2.getResults().getNumFound();
  logDebugHelp(docCounts, response, shard10Count, response2, shard11Count);
  assertEquals("Wrong doc count on shard1_0. See SOLR-5309", docCounts[0], shard10Count);
  assertEquals("Wrong doc count on shard1_1. See SOLR-5309", docCounts[1], shard11Count);
}
示例12: getRemotCoreUrl
import org.apache.solr.common.cloud.ZkCoreNodeProps; //导入方法依赖的package包/类
/**
 * Finds the URL of a remote (non-local) core that can serve the request,
 * considering only replicas that are on a live node and ACTIVE. If no
 * collection named {@code collectionName} exists, falls back to matching
 * replicas by core name across every collection.
 *
 * @return a remote core URL (trailing slash stripped unless
 *         {@code origCorename} overrides the core path), or {@code null}
 *         when no suitable remote core exists
 */
private String getRemotCoreUrl(CoreContainer cores, String collectionName, String origCorename) {
  ClusterState clusterState = cores.getZkController().getClusterState();
  Collection<Slice> slices = clusterState.getSlices(collectionName);
  boolean byCoreName = false;
  if (slices == null) {
    // look by core name across every collection
    byCoreName = true;
    List<Slice> allSlices = new ArrayList<Slice>();
    for (String collection : clusterState.getCollections()) {
      // BUG FIX: the list used to be re-created inside this loop, so only
      // the LAST collection's slices were ever searched; accumulate instead
      allSlices.addAll(clusterState.getSlices(collection));
    }
    slices = allSlices;
  }
  if (slices == null || slices.size() == 0) {
    return null;
  }
  Set<String> liveNodes = clusterState.getLiveNodes();
  Iterator<Slice> it = slices.iterator();
  while (it.hasNext()) {
    Slice slice = it.next();
    Map<String,Replica> sliceShards = slice.getReplicasMap();
    for (ZkNodeProps nodeProps : sliceShards.values()) {
      ZkCoreNodeProps coreNodeProps = new ZkCoreNodeProps(nodeProps);
      if (liveNodes.contains(coreNodeProps.getNodeName())
          && coreNodeProps.getState().equals(ZkStateReader.ACTIVE)) {
        if (byCoreName && !collectionName.equals(coreNodeProps.getCoreName())) {
          // if it's by core name, make sure they match
          continue;
        }
        if (coreNodeProps.getBaseUrl().equals(cores.getZkController().getBaseUrl())) {
          // don't count a local core
          continue;
        }
        String coreUrl;
        if (origCorename != null) {
          coreUrl = coreNodeProps.getBaseUrl() + "/" + origCorename;
        } else {
          coreUrl = coreNodeProps.getCoreUrl();
          if (coreUrl.endsWith("/")) {
            coreUrl = coreUrl.substring(0, coreUrl.length() - 1);
          }
        }
        return coreUrl;
      }
    }
  }
  return null;
}
示例13: replicate
import org.apache.solr.common.cloud.ZkCoreNodeProps; //导入方法依赖的package包/类
/**
 * Pulls a full index copy from the shard leader as part of recovery,
 * unless this node IS the leader (same base URL), in which case the
 * replication step is skipped entirely.
 *
 * @param nodeName    this node's name (not referenced in this variant)
 * @param core        the local core to replicate into
 * @param leaderprops ZK node properties of the shard leader
 * @param baseUrl     this node's base URL, compared against the leader's
 * @throws SolrServerException on failure committing on the leader
 * @throws IOException         on communication failure
 * @throws SolrException       SERVICE_UNAVAILABLE if no replication handler
 *                             is registered; SERVER_ERROR if the fetch fails
 */
private void replicate(String nodeName, SolrCore core, ZkNodeProps leaderprops, String baseUrl)
throws SolrServerException, IOException {
String leaderBaseUrl = leaderprops.getStr(ZkStateReader.BASE_URL_PROP);
ZkCoreNodeProps leaderCNodeProps = new ZkCoreNodeProps(leaderprops);
String leaderUrl = leaderCNodeProps.getCoreUrl();
log.info("Attempting to replicate from " + leaderUrl + ". core=" + coreName);
// if we are the leader, either we are trying to recover faster
// then our ephemeral timed out or we are the only node
if (!leaderBaseUrl.equals(baseUrl)) {
// send commit
commitOnLeader(leaderUrl);
// use rep handler directly, so we can do this sync rather than async
SolrRequestHandler handler = core.getRequestHandler(REPLICATION_HANDLER);
if (handler instanceof LazyRequestHandlerWrapper) {
// unwrap so the cast below sees the real ReplicationHandler
handler = ((LazyRequestHandlerWrapper)handler).getWrappedHandler();
}
ReplicationHandler replicationHandler = (ReplicationHandler) handler;
if (replicationHandler == null) {
throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
"Skipping recovery, no " + REPLICATION_HANDLER + " handler found");
}
ModifiableSolrParams solrParams = new ModifiableSolrParams();
solrParams.set(ReplicationHandler.MASTER_URL, leaderUrl);
// NOTE(review): marks the recovery as interrupted but still falls through
// to doFetch below -- confirm intentional (other variants return here)
if (isClosed()) retries = INTERRUPTED;
boolean success = replicationHandler.doFetch(solrParams, false);
if (!success) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Replication for recovery failed.");
}
// solrcloud_debug
// try {
// RefCounted<SolrIndexSearcher> searchHolder = core.getNewestSearcher(false);
// SolrIndexSearcher searcher = searchHolder.get();
// try {
// System.out.println(core.getCoreDescriptor().getCoreContainer().getZkController().getNodeName() + " replicated "
// + searcher.search(new MatchAllDocsQuery(), 1).totalHits + " from " + leaderUrl + " gen:" + core.getDeletionPolicy().getLatestCommit().getGeneration() + " data:" + core.getDataDir());
// } finally {
// searchHolder.decref();
// }
// } catch (Exception e) {
//
// }
}
}
示例14: replicate
import org.apache.solr.common.cloud.ZkCoreNodeProps; //导入方法依赖的package包/类
/**
 * Pulls a full index copy from the shard leader as part of recovery:
 * commits on the leader first, then drives the local ReplicationHandler
 * synchronously to fetch the leader's index.
 *
 * @param nodeName    this node's name (not referenced in this variant)
 * @param core        the local core to replicate into
 * @param leaderprops ZK node properties of the shard leader
 * @throws SolrServerException on failure committing on the leader
 * @throws IOException         on communication failure
 * @throws SolrException       SERVICE_UNAVAILABLE if no replication handler
 *                             is registered; SERVER_ERROR if the fetch fails
 */
private void replicate(String nodeName, SolrCore core, ZkNodeProps leaderprops)
throws SolrServerException, IOException {
ZkCoreNodeProps leaderCNodeProps = new ZkCoreNodeProps(leaderprops);
String leaderUrl = leaderCNodeProps.getCoreUrl();
log.info("Attempting to replicate from " + leaderUrl + ". core=" + coreName);
// send commit
commitOnLeader(leaderUrl);
// use rep handler directly, so we can do this sync rather than async
SolrRequestHandler handler = core.getRequestHandler(REPLICATION_HANDLER);
if (handler instanceof LazyRequestHandlerWrapper) {
// unwrap so the cast below sees the real ReplicationHandler
handler = ((LazyRequestHandlerWrapper) handler).getWrappedHandler();
}
ReplicationHandler replicationHandler = (ReplicationHandler) handler;
if (replicationHandler == null) {
throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
"Skipping recovery, no " + REPLICATION_HANDLER + " handler found");
}
ModifiableSolrParams solrParams = new ModifiableSolrParams();
solrParams.set(ReplicationHandler.MASTER_URL, leaderUrl);
// NOTE(review): marks the recovery as interrupted but still falls through
// to doFetch below -- confirm intentional (the 3-arg variant returns here)
if (isClosed()) retries = INTERRUPTED;
boolean success = replicationHandler.doFetch(solrParams, false);
if (!success) {
throw new SolrException(ErrorCode.SERVER_ERROR,
"Replication for recovery failed.");
}
// solrcloud_debug
if (log.isDebugEnabled()) {
// log the replicated doc count and index files for post-mortem debugging
try {
RefCounted<SolrIndexSearcher> searchHolder = core
.getNewestSearcher(false);
SolrIndexSearcher searcher = searchHolder.get();
Directory dir = core.getDirectoryFactory().get(core.getIndexDir(), DirContext.META_DATA, null);
try {
log.debug(core.getCoreDescriptor().getCoreContainer()
.getZkController().getNodeName()
+ " replicated "
+ searcher.search(new MatchAllDocsQuery(), 1).totalHits
+ " from "
+ leaderUrl
+ " gen:"
+ core.getDeletionPolicy().getLatestCommit().getGeneration()
+ " data:" + core.getDataDir()
+ " index:" + core.getIndexDir()
+ " newIndex:" + core.getNewIndexDir()
+ " files:" + Arrays.asList(dir.listAll()));
} finally {
// always release the directory and searcher references
core.getDirectoryFactory().release(dir);
searchHolder.decref();
}
} catch (Exception e) {
throw new SolrException(ErrorCode.SERVER_ERROR, null, e);
}
}
}