This article collects typical usage examples of the Java method org.apache.solr.common.cloud.Replica.getStr. If you are wondering what Replica.getStr does, how to call it, or what real-world uses look like, the curated method examples here may help. You can also explore further usage examples of the enclosing class, org.apache.solr.common.cloud.Replica.
The code examples of the Replica.getStr method shown below are sorted by popularity by default.
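Replica extends ZkNodeProps, so getStr(key) simply returns the named property of the replica's published cluster state as a String (or null if the property is absent). The keys used most often in the examples below are the ZkStateReader constants BASE_URL_PROP, CORE_NAME_PROP, NODE_NAME_PROP, and STATE_PROP. The following orientation sketch is not one of the collected examples; the class name, method name, and the way the ClusterState is obtained are placeholder assumptions, but every call it makes also appears in the examples below.

import java.util.Collection;

import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkStateReader;

public class ReplicaPropsDump {
  /** Print a few common string properties of every replica in a collection. */
  public static void printReplicaProps(ClusterState clusterState, String collectionName) {
    Collection<Slice> slices = clusterState.getSlices(collectionName);
    if (slices == null) {
      return; // the collection does not exist in this cluster state
    }
    for (Slice slice : slices) {
      for (Replica replica : slice.getReplicas()) {
        // getStr reads a replica property by key and returns it as a String
        String core = replica.getStr(ZkStateReader.CORE_NAME_PROP);
        String nodeName = replica.getStr(ZkStateReader.NODE_NAME_PROP);
        String baseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
        String state = replica.getStr(ZkStateReader.STATE_PROP);
        System.out.println(slice.getName() + "/" + replica.getName()
            + " core=" + core + " node=" + nodeName
            + " baseUrl=" + baseUrl + " state=" + state);
      }
    }
  }
}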
Example 1: getAssignedCoreNodeName
import org.apache.solr.common.cloud.Replica; // import the package/class the method depends on

private String getAssignedCoreNodeName(ClusterState state, ZkNodeProps message) {
  Collection<Slice> slices = state.getSlices(message.getStr(ZkStateReader.COLLECTION_PROP));
  if (slices != null) {
    for (Slice slice : slices) {
      for (Replica replica : slice.getReplicas()) {
        String nodeName = replica.getStr(ZkStateReader.NODE_NAME_PROP);
        String core = replica.getStr(ZkStateReader.CORE_NAME_PROP);
        String msgNodeName = message.getStr(ZkStateReader.NODE_NAME_PROP);
        String msgCore = message.getStr(ZkStateReader.CORE_NAME_PROP);
        if (nodeName.equals(msgNodeName) && core.equals(msgCore)) {
          return replica.getName();
        }
      }
    }
  }
  return null;
}
Example 2: getActiveOrRecoveringReplicas
import org.apache.solr.common.cloud.Replica; // import the package/class the method depends on

protected List<Replica> getActiveOrRecoveringReplicas(String testCollectionName, String shardId) throws Exception {
  Map<String,Replica> activeReplicas = new HashMap<String,Replica>();
  ZkStateReader zkr = cloudClient.getZkStateReader();
  ClusterState cs = zkr.getClusterState();
  assertNotNull(cs);
  for (Slice shard : cs.getActiveSlices(testCollectionName)) {
    if (shard.getName().equals(shardId)) {
      for (Replica replica : shard.getReplicas()) {
        String replicaState = replica.getStr(ZkStateReader.STATE_PROP);
        if (ZkStateReader.ACTIVE.equals(replicaState) || ZkStateReader.RECOVERING.equals(replicaState)) {
          activeReplicas.put(replica.getName(), replica);
        }
      }
    }
  }
  List<Replica> replicas = new ArrayList<Replica>();
  replicas.addAll(activeReplicas.values());
  return replicas;
}
Example 3: getProxyForReplica
import org.apache.solr.common.cloud.Replica; // import the package/class the method depends on

protected SocketProxy getProxyForReplica(Replica replica) throws Exception {
  String replicaBaseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
  assertNotNull(replicaBaseUrl);
  URL baseUrl = new URL(replicaBaseUrl);
  SocketProxy proxy = proxies.get(baseUrl.toURI());
  if (proxy == null && !baseUrl.toExternalForm().endsWith("/")) {
    baseUrl = new URL(baseUrl.toExternalForm() + "/");
    proxy = proxies.get(baseUrl.toURI());
  }
  assertNotNull("No proxy found for " + baseUrl + "!", proxy);
  return proxy;
}
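In the Solr test framework a helper like this is typically used for fault injection: the test looks up the SocketProxy that fronts a replica's base URL, closes it to simulate a network partition, and later reopens it so the replica can recover. A hedged usage sketch follows; the withReplicaPartitioned method is hypothetical and would sit in the same test class so that getProxyForReplica and the proxies map above are in scope, and SocketProxy.close()/reopen() are assumed to behave as they do in Solr's partition tests.

// Hypothetical helper in the same test class: isolate a replica, run some work
// against the rest of the cluster, then heal the partition so it can catch up.
protected void withReplicaPartitioned(Replica replica, Runnable workWhilePartitioned) throws Exception {
  SocketProxy proxy = getProxyForReplica(replica);
  proxy.close();      // simulate a network partition for this replica
  try {
    workWhilePartitioned.run();
  } finally {
    proxy.reopen();   // heal the partition; the replica should then recover
  }
}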
Example 4: waitForCoreNodeName
import org.apache.solr.common.cloud.Replica; // import the package/class the method depends on

private void waitForCoreNodeName(CoreDescriptor descriptor) {
  int retryCount = 320;
  log.info("look for our core node name");
  while (retryCount-- > 0) {
    Map<String,Slice> slicesMap = zkStateReader.getClusterState()
        .getSlicesMap(descriptor.getCloudDescriptor().getCollectionName());
    if (slicesMap != null) {
      for (Slice slice : slicesMap.values()) {
        for (Replica replica : slice.getReplicas()) {
          // TODO: for really large clusters, we could 'index' on this
          String nodeName = replica.getStr(ZkStateReader.NODE_NAME_PROP);
          String core = replica.getStr(ZkStateReader.CORE_NAME_PROP);
          String msgNodeName = getNodeName();
          String msgCore = descriptor.getName();
          if (msgNodeName.equals(nodeName) && core.equals(msgCore)) {
            descriptor.getCloudDescriptor()
                .setCoreNodeName(replica.getName());
            return;
          }
        }
      }
    }
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }
}
Example 5: findDownReplicasInSlice
import org.apache.solr.common.cloud.Replica; // import the package/class the method depends on

private static int findDownReplicasInSlice(ClusterState clusterState, DocCollection collection, Slice slice, final Collection<DownReplica> badReplicas) {
  int goodReplicas = 0;
  Collection<Replica> replicas = slice.getReplicas();
  if (replicas != null) {
    for (Replica replica : replicas) {
      // on a live node?
      boolean live = clusterState.liveNodesContain(replica.getNodeName());
      String state = replica.getStr(ZkStateReader.STATE_PROP);
      boolean okayState = (state.equals(ZkStateReader.DOWN)
          || state.equals(ZkStateReader.RECOVERING)
          || state.equals(ZkStateReader.ACTIVE));
      log.debug("Process replica name={} live={} state={}", replica.getName(), live, state);
      if (live && okayState) {
        goodReplicas++;
      } else {
        DownReplica badReplica = new DownReplica();
        badReplica.replica = replica;
        badReplica.slice = slice;
        badReplica.collection = collection;
        badReplicas.add(badReplica);
      }
    }
  }
  log.debug("bad replicas for slice {}", badReplicas);
  return goodReplicas;
}
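A note on the string constants compared above: ZkStateReader.DOWN, ZkStateReader.RECOVERING, and ZkStateReader.ACTIVE are the Solr 4.x-era values of the replica "state" property read via getStr. Purely as a hedged aside, on later Solr releases that expose Replica.getState() and the Replica.State enum (not used in this snippet), the same membership check could avoid raw string comparison:

// Sketch only: assumes Replica.getState()/Replica.State are available (newer Solr);
// on 4.x, read replica.getStr(ZkStateReader.STATE_PROP) as in the example above.
static boolean isUsableState(Replica replica) {
  Replica.State s = replica.getState();
  return s == Replica.State.DOWN
      || s == Replica.State.RECOVERING
      || s == Replica.State.ACTIVE;
}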
Example 6: deleteCoreNode
import org.apache.solr.common.cloud.Replica; // import the package/class the method depends on

private void deleteCoreNode(String collectionName, String replicaName, Replica replica, String core) throws KeeperException, InterruptedException {
  ZkNodeProps m = new ZkNodeProps(
      Overseer.QUEUE_OPERATION, Overseer.DELETECORE,
      ZkStateReader.CORE_NAME_PROP, core,
      ZkStateReader.NODE_NAME_PROP, replica.getStr(ZkStateReader.NODE_NAME_PROP),
      ZkStateReader.COLLECTION_PROP, collectionName,
      ZkStateReader.CORE_NODE_NAME_PROP, replicaName);
  Overseer.getInQueue(zkStateReader.getZkClient()).offer(ZkStateReader.toJSON(m));
}
Example 7: waitForCoreNodeName
import org.apache.solr.common.cloud.Replica; // import the package/class the method depends on

private String waitForCoreNodeName(String collectionName, String msgNodeName, String msgCore) {
  int retryCount = 320;
  while (retryCount-- > 0) {
    Map<String,Slice> slicesMap = zkStateReader.getClusterState()
        .getSlicesMap(collectionName);
    if (slicesMap != null) {
      for (Slice slice : slicesMap.values()) {
        for (Replica replica : slice.getReplicas()) {
          // TODO: for really large clusters, we could 'index' on this
          String nodeName = replica.getStr(ZkStateReader.NODE_NAME_PROP);
          String core = replica.getStr(ZkStateReader.CORE_NAME_PROP);
          if (nodeName.equals(msgNodeName) && core.equals(msgCore)) {
            return replica.getName();
          }
        }
      }
    }
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }
  throw new SolrException(ErrorCode.SERVER_ERROR, "Could not find coreNodeName");
}
Example 8: deleteLiveReplicaTest
import org.apache.solr.common.cloud.Replica; // import the package/class the method depends on

private void deleteLiveReplicaTest() throws Exception {
  String collectionName = "delLiveColl";
  CloudSolrServer client = createCloudClient(null);
  try {
    createCollection(collectionName, client);
    waitForRecoveriesToFinish(collectionName, false);
    DocCollection testcoll = getCommonCloudSolrServer().getZkStateReader()
        .getClusterState().getCollection(collectionName);
    Slice shard1 = null;
    Replica replica1 = null;
    for (Slice slice : testcoll.getSlices()) {
      if ("active".equals(slice.getStr("state"))) {
        shard1 = slice;
        for (Replica replica : shard1.getReplicas()) {
          if ("active".equals(replica.getStr("state"))) replica1 = replica;
        }
      }
    }
    if (replica1 == null) fail("no active replicas found");
    HttpSolrServer replica1Server = new HttpSolrServer(replica1.getStr("base_url"));
    String dataDir = null;
    try {
      CoreAdminResponse status = CoreAdminRequest.getStatus(replica1.getStr("core"), replica1Server);
      NamedList<Object> coreStatus = status.getCoreStatus(replica1.getStr("core"));
      dataDir = (String) coreStatus.get("dataDir");
    } finally {
      replica1Server.shutdown();
    }
    removeAndWaitForReplicaGone(collectionName, client, replica1,
        shard1.getName());
    assertFalse("dataDir for " + replica1.getName() + " should have been deleted by deleteReplica API", new File(dataDir).exists());
  } finally {
    client.shutdown();
  }
}
Example 9: checkCollectionExpectations
import org.apache.solr.common.cloud.Replica; // import the package/class the method depends on

private String checkCollectionExpectations(String collectionName, List<Integer> numShardsNumReplicaList, List<String> nodesAllowedToRunShards) {
  ClusterState clusterState = getCommonCloudSolrServer().getZkStateReader().getClusterState();
  int expectedSlices = numShardsNumReplicaList.get(0);
  // Note: the replication factor may effectively be reduced if there are not enough
  // live nodes to spread all shards of a collection over different nodes.
  int expectedShardsPerSlice = numShardsNumReplicaList.get(1);
  int expectedTotalShards = expectedSlices * expectedShardsPerSlice;
  Map<String,DocCollection> collections = clusterState
      .getCollectionStates();
  if (collections.containsKey(collectionName)) {
    Map<String,Slice> slices = collections.get(collectionName).getSlicesMap();
    // did we find expectedSlices slices/shards?
    if (slices.size() != expectedSlices) {
      return "Found new collection " + collectionName + ", but mismatch on number of slices. Expected: " + expectedSlices + ", actual: " + slices.size();
    }
    int totalShards = 0;
    for (String sliceName : slices.keySet()) {
      for (Replica replica : slices.get(sliceName).getReplicas()) {
        if (nodesAllowedToRunShards != null && !nodesAllowedToRunShards.contains(replica.getStr(ZkStateReader.NODE_NAME_PROP))) {
          return "Shard " + replica.getName() + " created on node " + replica.getStr(ZkStateReader.NODE_NAME_PROP) + " not allowed to run shards for the created collection " + collectionName;
        }
      }
      totalShards += slices.get(sliceName).getReplicas().size();
    }
    if (totalShards != expectedTotalShards) {
      return "Found new collection " + collectionName + " with correct number of slices, but mismatch on number of shards. Expected: " + expectedTotalShards + ", actual: " + totalShards;
    }
    return null;
  } else {
    return "Could not find new collection " + collectionName;
  }
}
Example 10: waitForCoreNodeName
import org.apache.solr.common.cloud.Replica; // import the package/class the method depends on

private String waitForCoreNodeName(DocCollection collection, String msgNodeName, String msgCore) {
  int retryCount = 320;
  while (retryCount-- > 0) {
    Map<String,Slice> slicesMap = zkStateReader.getClusterState()
        .getSlicesMap(collection.getName());
    if (slicesMap != null) {
      for (Slice slice : slicesMap.values()) {
        for (Replica replica : slice.getReplicas()) {
          // TODO: for really large clusters, we could 'index' on this
          String nodeName = replica.getStr(ZkStateReader.NODE_NAME_PROP);
          String core = replica.getStr(ZkStateReader.CORE_NAME_PROP);
          if (nodeName.equals(msgNodeName) && core.equals(msgCore)) {
            return replica.getName();
          }
        }
      }
    }
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }
  throw new SolrException(ErrorCode.SERVER_ERROR, "Could not find coreNodeName");
}
Example 11: create
import org.apache.solr.common.cloud.Replica; // import the package/class the method depends on

static ReplicaInfo create(Replica replica) {
  return new AutoValue_SolrIO_ReplicaInfo(
      replica.getStr(ZkStateReader.CORE_NAME_PROP),
      replica.getCoreUrl(),
      replica.getStr(ZkStateReader.BASE_URL_PROP));
}
Example 12: ensureAllReplicasAreActive
import org.apache.solr.common.cloud.Replica; // import the package/class the method depends on

protected static void ensureAllReplicasAreActive(String testCollectionName, int shards, int rf, int maxWaitSecs)
    throws Exception {
  long startMs = System.currentTimeMillis();
  ZkStateReader zkr = cloudSolrClient.getZkStateReader();
  zkr.updateClusterState(); // force the state to be fresh
  ClusterState cs = zkr.getClusterState();
  Collection<Slice> slices = cs.getActiveSlices(testCollectionName);
  assertTrue(slices.size() == shards);
  boolean allReplicasUp = false;
  long waitMs = 0L;
  long maxWaitMs = maxWaitSecs * 1000L;
  Replica leader = null;
  while (waitMs < maxWaitMs && !allReplicasUp) {
    // refresh state every 2 secs
    if (waitMs % 2000 == 0) {
      log.info("Updating ClusterState");
      cloudSolrClient.getZkStateReader().updateClusterState();
    }
    cs = cloudSolrClient.getZkStateReader().getClusterState();
    assertNotNull(cs);
    allReplicasUp = true; // assume true
    for (Slice shard : cs.getActiveSlices(testCollectionName)) {
      String shardId = shard.getName();
      assertNotNull("No Slice for " + shardId, shard);
      Collection<Replica> replicas = shard.getReplicas();
      assertTrue(replicas.size() == rf);
      leader = shard.getLeader();
      assertNotNull(leader);
      log.info("Found " + replicas.size() + " replicas and leader on " +
          leader.getNodeName() + " for " + shardId + " in " + testCollectionName);
      // ensure all replicas are "active"
      for (Replica replica : replicas) {
        String replicaState = replica.getStr(ZkStateReader.STATE_PROP);
        if (!"active".equals(replicaState)) {
          log.info("Replica " + replica.getName() + " for shard " + shardId + " is currently " + replicaState);
          allReplicasUp = false;
        }
      }
    }
    if (!allReplicasUp) {
      try {
        Thread.sleep(500L);
      } catch (Exception ignoreMe) {}
      waitMs += 500L;
    }
  } // end while
  if (!allReplicasUp)
    fail("Didn't see all replicas for " + testCollectionName +
        " come up within " + maxWaitMs + " ms! ClusterState: " + printClusterStateInfo(testCollectionName));
  long diffMs = (System.currentTimeMillis() - startMs);
  log.info("Took " + diffMs + " ms to see all replicas become active for " + testCollectionName);
}
Example 13: checkSharedFSFailoverReplaced
import org.apache.solr.common.cloud.Replica; // import the package/class the method depends on

/**
 * See if coreNodeName has been taken over by another baseUrl and unload core
 * + throw exception if it has been.
 */
public static void checkSharedFSFailoverReplaced(CoreContainer cc, CoreDescriptor desc) {
  ZkController zkController = cc.getZkController();
  String thisCnn = zkController.getCoreNodeName(desc);
  String thisBaseUrl = zkController.getBaseUrl();
  log.debug("checkSharedFSFailoverReplaced running for coreNodeName={} baseUrl={}", thisCnn, thisBaseUrl);
  // if we see our core node name on a different base url, unload
  Map<String,Slice> slicesMap = zkController.getClusterState().getSlicesMap(desc.getCloudDescriptor().getCollectionName());
  if (slicesMap != null) {
    for (Slice slice : slicesMap.values()) {
      for (Replica replica : slice.getReplicas()) {
        String cnn = replica.getName();
        String baseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
        log.debug("compare against coreNodeName={} baseUrl={}", cnn, baseUrl);
        if (thisCnn != null && thisCnn.equals(cnn)
            && !thisBaseUrl.equals(baseUrl)) {
          if (cc.getCoreNames().contains(desc.getName())) {
            cc.unload(desc.getName());
          }
          File instanceDir = new File(desc.getInstanceDir());
          try {
            FileUtils.deleteDirectory(instanceDir);
          } catch (IOException e) {
            SolrException.log(log, "Failed to delete instance dir for core:"
                + desc.getName() + " dir:" + instanceDir.getAbsolutePath());
          }
          log.error("", new SolrException(ErrorCode.SERVER_ERROR,
              "Will not load SolrCore " + desc.getName()
              + " because it has been replaced due to failover."));
          throw new SolrException(ErrorCode.SERVER_ERROR,
              "Will not load SolrCore " + desc.getName()
              + " because it has been replaced due to failover.");
        }
      }
    }
  }
}