This article compiles typical usage examples of the Java enum constant org.elasticsearch.cluster.health.ClusterHealthStatus.RED: what it means, how to test for it, and how real projects use it. If you are working with this constant, the curated examples below may help; see also the enclosing class, org.elasticsearch.cluster.health.ClusterHealthStatus.
Six code examples of ClusterHealthStatus.RED are shown below, sorted by popularity.
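All six examples reduce to the same pattern: obtain a ClusterHealthResponse from the cluster admin API and compare its status against ClusterHealthStatus.RED, which indicates that at least one primary shard is not active. A minimal sketch of that pattern (the failIfRed method name is illustrative, and `client` is assumed to be an already-connected org.elasticsearch.client.Client):

import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.health.ClusterHealthStatus;

public void failIfRed(Client client) {
    ClusterHealthResponse health = client.admin().cluster().prepareHealth().get();
    if (health.getStatus() == ClusterHealthStatus.RED) {
        // RED: at least one primary shard is not active, so some reads/writes will fail
        throw new IllegalStateException("cluster health is RED");
    }
}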
Example 1: update
public void update(ShardRouting shardRouting) {
    if (shardRouting.active()) {
        active++;
        if (shardRouting.primary()) {
            primaryActive++;
        }
        if (shardRouting.relocating()) {
            relocating++;
        }
        return;
    }
    // the copy is not active: an inactive primary may push the health to RED
    if (shardRouting.primary()) {
        primaryInactive++;
        if (inactivePrimaryCausesRed == false) {
            inactivePrimaryCausesRed = getInactivePrimaryHealth(shardRouting) == ClusterHealthStatus.RED;
        }
    }
    if (shardRouting.initializing()) {
        initializing++;
    } else {
        unassigned++;
    }
}
Example 2: status
public ClusterHealthStatus status() {
    if (primaryInactive > 0) {
        // RED only when an inactive primary's unassigned cause warrants it; otherwise YELLOW
        if (inactivePrimaryCausesRed) {
            return ClusterHealthStatus.RED;
        } else {
            return ClusterHealthStatus.YELLOW;
        }
    }
    if (unassigned > 0 || initializing > 0) {
        return ClusterHealthStatus.YELLOW;
    }
    return ClusterHealthStatus.GREEN;
}
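Examples 1 and 2 work as a pair: update(ShardRouting) folds each shard copy into a set of counters (active, primaryActive, relocating, primaryInactive, initializing, unassigned), and status() reads the health color back off those counters, with RED taking precedence over YELLOW and YELLOW over GREEN. A minimal sketch of a caller, assuming the two methods and their counters live on a hypothetical ShardHealthTally class:

import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.routing.ShardRouting;

public ClusterHealthStatus computeHealth(Iterable<ShardRouting> shardCopies) {
    ShardHealthTally tally = new ShardHealthTally();  // hypothetical holder of the counters above
    for (ShardRouting copy : shardCopies) {
        tally.update(copy);    // Example 1: classify each copy
    }
    return tally.status();     // Example 2: derive RED / YELLOW / GREEN
}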
Example 3: clusterReady
public boolean clusterReady() {
    if (clusterReadyCache) return true;
    ClusterHealthResponse chr = elasticsearchClient.admin().cluster().prepareHealth().get();
    // "ready" means any status other than RED; the cache only ever flips from false to true
    clusterReadyCache = chr.getStatus() != ClusterHealthStatus.RED;
    return clusterReadyCache;
}
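Note that the cache in Example 3 is one-way: once the cluster has been observed non-RED, clusterReady() returns true forever, even if the cluster later turns RED again. A hypothetical usage sketch as a write guard (bulkIndex, documents, and LOGGER are illustrative, not from the original):

// Hypothetical guard: defer writes while the cluster is still RED.
if (!clusterReady()) {
    LOGGER.warn("Elasticsearch cluster is RED, deferring writes");
    return;
}
bulkIndex(documents);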
Example 4: createInProcessNode
private Client createInProcessNode(ElasticsearchSearchIndexConfiguration config) {
    Settings settings = tryReadSettingsFromFile(config);
    if (settings == null) {
        String homePath = config.getInProcessNodeHomePath();
        checkNotNull(homePath, ElasticsearchSearchIndexConfiguration.IN_PROCESS_NODE_HOME_PATH + " is required for in process Elasticsearch node");
        Map<String, String> mapSettings = new HashMap<>();
        mapSettings.put("transport.type", "local");
        mapSettings.put("path.home", homePath);
        mapSettings.put("http.enabled", "false");
        mapSettings.put("discovery.zen.ping.unicast.hosts", "localhost");
        if (config.getClusterName() != null) {
            mapSettings.put("cluster.name", config.getClusterName());
        }
        mapSettings.putAll(config.getInProcessNodeAdditionalSettings());
        settings = Settings.builder()
                .put(mapSettings)
                .build();
    }
    this.inProcessNode = new Node(settings);
    try {
        inProcessNode.start();
    } catch (NodeValidationException ex) {
        throw new MemgraphException("Could not start in process node", ex);
    }
    Client client = inProcessNode.client();
    // poll cluster health until the node leaves RED, or give up after the wait budget
    long startTime = System.currentTimeMillis();
    while (true) {
        if (System.currentTimeMillis() > startTime + IN_PROCESS_NODE_WAIT_TIME_MS) {
            throw new MemgraphException("Status failed to exit red status after waiting " + IN_PROCESS_NODE_WAIT_TIME_MS + "ms. Giving up.");
        }
        ClusterHealthResponse health = client.admin().cluster().prepareHealth().get();
        if (health.getStatus() != ClusterHealthStatus.RED) {
            break;
        }
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            throw new MemgraphException("Could not sleep", e);
        }
        LOGGER.info("Status is %s, waiting...", health.getStatus());
    }
    return client;
}
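The sleep loop in Example 4 can usually be replaced by a server-side wait: the health request itself can block until the cluster reaches a given status. A sketch under the assumption that YELLOW is good enough, matching the loop's "anything but RED" condition (TimeValue is org.elasticsearch.common.unit.TimeValue):

// Server-side wait: returns once the status is at least YELLOW, or after the timeout.
ClusterHealthResponse health = client.admin().cluster().prepareHealth()
        .setWaitForYellowStatus()
        .setTimeout(TimeValue.timeValueMillis(IN_PROCESS_NODE_WAIT_TIME_MS))
        .get();
if (health.isTimedOut() || health.getStatus() == ClusterHealthStatus.RED) {
    throw new MemgraphException("Status failed to exit red status after waiting "
            + IN_PROCESS_NODE_WAIT_TIME_MS + "ms. Giving up.");
}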
Example 5: testShardActiveElsewhereDoesNotDeleteAnother
public void testShardActiveElsewhereDoesNotDeleteAnother() throws Exception {
    final String masterNode = internalCluster().startMasterOnlyNode();
    final List<String> nodes = internalCluster().startDataOnlyNodes(4);
    final String node1 = nodes.get(0);
    final String node2 = nodes.get(1);
    final String node3 = nodes.get(2);
    // we will use this later on; handy to start now to make sure it has a different data folder than nodes 1, 2 & 3
    final String node4 = nodes.get(3);
    assertAcked(prepareCreate("test").setSettings(Settings.builder()
            .put(indexSettings())
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 3)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
            .put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "_name", node4)
    ));
    assertFalse(client().admin().cluster().prepareHealth().setWaitForNoRelocatingShards(true).setWaitForGreenStatus().setWaitForNodes("5").get().isTimedOut());
    // disable allocation to control the situation more easily
    assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
            .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")));
    logger.debug("--> shutting down two random nodes");
    internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1, node2, node3));
    internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1, node2, node3));
    logger.debug("--> verifying index is red");
    ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("3").get();
    if (health.getStatus() != ClusterHealthStatus.RED) {
        logClusterState();
        fail("cluster didn't become red despite shutting down 2 of 3 data nodes");
    }
    logger.debug("--> allowing index to be assigned to node [{}]", node4);
    assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(
            Settings.builder()
                    .put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "_name", "NONE")));
    assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
            .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "all")));
    logger.debug("--> waiting for shards to recover on [{}]", node4);
    // we have to do this in two steps as we now do async shard fetching before assigning, so the change to the
    // allocation filtering may not have an immediate effect
    // TODO: we should add an easier way to do this. It's too much of a song and dance..
    Index index = resolveIndex("test");
    assertBusy(() -> assertTrue(internalCluster().getInstance(IndicesService.class, node4).hasIndex(index)));
    // wait for 4 active shards - we should have lost one shard
    assertFalse(client().admin().cluster().prepareHealth().setWaitForActiveShards(4).get().isTimedOut());
    // disable allocation again to control concurrency a bit and allow shard-active to kick in before allocation
    assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
            .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")));
    logger.debug("--> starting the two old nodes back");
    internalCluster().startDataOnlyNodes(2);
    assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("5").get().isTimedOut());
    assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
            .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "all")));
    logger.debug("--> waiting for the lost shard to be recovered");
    ensureGreen("test");
}
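The "verify red" step in Example 5 (take a health snapshot, log the cluster state, then fail) is a pattern worth extracting when a test needs it more than once. A hypothetical helper, built only from calls already used in the example:

// Hypothetical helper: assert the cluster is RED, dumping state on mismatch.
private void assertClusterRed() {
    ClusterHealthResponse health = client().admin().cluster().prepareHealth().get();
    if (health.getStatus() != ClusterHealthStatus.RED) {
        logClusterState();
        fail("expected RED cluster health but was " + health.getStatus());
    }
}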
Example 6: testCorruptPrimaryNoReplica
/**
 * Tests corruption that happens on a single shard when no replicas are present. We make sure that the corrupted
 * primary stays unassigned while the copies of the healthy shards remain active (started or relocating).
 */
public void testCorruptPrimaryNoReplica() throws ExecutionException, InterruptedException, IOException {
    int numDocs = scaledRandomIntBetween(100, 1000);
    internalCluster().ensureAtLeastNumDataNodes(2);
    assertAcked(prepareCreate("test").setSettings(Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
            .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
            // no checkindex - we corrupt shards on purpose
            .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
            // no translog-based flush - it might change the .liv / segments.N files
            .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))
    ));
    ensureGreen();
    IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
    for (int i = 0; i < builders.length; i++) {
        builders[i] = client().prepareIndex("test", "type").setSource("field", "value");
    }
    indexRandom(true, builders);
    ensureGreen();
    // we have to flush at least once here since we don't corrupt the translog
    assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).execute().actionGet());
    SearchResponse countResponse = client().prepareSearch().setSize(0).get();
    assertHitCount(countResponse, numDocs);
    ShardRouting shardRouting = corruptRandomPrimaryFile();
    /*
     * we corrupted the primary shard - now let's make sure we never recover from it successfully
     */
    Settings build = Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1").build();
    client().admin().indices().prepareUpdateSettings("test").setSettings(build).get();
    client().admin().cluster().prepareReroute().get();
    boolean didClusterTurnRed = awaitBusy(() -> {
        ClusterHealthStatus test = client().admin().cluster()
                .health(Requests.clusterHealthRequest("test")).actionGet().getStatus();
        return test == ClusterHealthStatus.RED;
    }, 5, TimeUnit.MINUTES); // sometimes on slow nodes the replication / recovery is just dead slow
    final ClusterHealthResponse response = client().admin().cluster()
            .health(Requests.clusterHealthRequest("test")).get();
    if (response.getStatus() != ClusterHealthStatus.RED) {
        logger.info("Cluster turned red in busy loop: {}", didClusterTurnRed);
        logger.info("cluster state:\n{}\n{}",
                client().admin().cluster().prepareState().get().getState(),
                client().admin().cluster().preparePendingClusterTasks().get());
    }
    assertThat(response.getStatus(), is(ClusterHealthStatus.RED));
    ClusterState state = client().admin().cluster().prepareState().get().getState();
    GroupShardsIterator shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(new String[]{"test"}, false);
    for (ShardIterator iterator : shardIterators) {
        ShardRouting routing;
        while ((routing = iterator.nextOrNull()) != null) {
            if (routing.getId() == shardRouting.getId()) {
                assertThat(routing.state(), equalTo(ShardRoutingState.UNASSIGNED));
            } else {
                assertThat(routing.state(), anyOf(equalTo(ShardRoutingState.RELOCATING), equalTo(ShardRoutingState.STARTED)));
            }
        }
    }
    final List<Path> files = listShardFiles(shardRouting);
    Path corruptedFile = null;
    for (Path file : files) {
        if (file.getFileName().toString().startsWith("corrupted_")) {
            corruptedFile = file;
            break;
        }
    }
    assertThat(corruptedFile, notNullValue());
}