当前位置: 首页>>代码示例>>Java>>正文


Java ZenDiscovery类代码示例

本文整理汇总了Java中org.elasticsearch.discovery.zen.ZenDiscovery的典型用法代码示例。如果您正苦于以下问题:Java ZenDiscovery类的具体用法?Java ZenDiscovery怎么用?Java ZenDiscovery使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


ZenDiscovery类属于org.elasticsearch.discovery.zen包,在下文中一共展示了ZenDiscovery类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: assertDiscoveryCompleted

import org.elasticsearch.discovery.zen.ZenDiscovery; //导入依赖的package包/类
/**
 * Asserts that every node in {@code nodes} has finished joining the master,
 * polling each one for up to 30 seconds before failing the test.
 */
private void assertDiscoveryCompleted(List<String> nodes) throws InterruptedException {
    for (final String nodeName : nodes) {
        // Poll the node's ZenDiscovery instance until it reports it is no longer joining.
        final boolean finishedJoining = awaitBusy(
            () -> ((ZenDiscovery) internalCluster().getInstance(Discovery.class, nodeName)).joiningCluster() == false,
            30,
            TimeUnit.SECONDS);
        assertTrue("node [" + nodeName + "] is still joining master", finishedJoining);
    }
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:13,代码来源:DiscoveryWithServiceDisruptionsIT.java

示例2: testDynamicUpdateMinimumMasterNodes

import org.elasticsearch.discovery.zen.ZenDiscovery; //导入依赖的package包/类
/**
 * Verifies that {@code discovery.zen.minimum_master_nodes} can be raised dynamically
 * on a running cluster, that the change is processed on every node, and that dropping
 * below the new quorum puts the remaining node under the no-master block until a
 * second node rejoins.
 */
public void testDynamicUpdateMinimumMasterNodes() throws Exception {
    // Short ping timeout keeps master election fast; start with min_master_nodes=1.
    Settings settings = Settings.builder()
            .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "400ms")
            .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), "1")
            .build();

    logger.info("--> start first node and wait for it to be a master");
    internalCluster().startNode(settings);
    ensureClusterSizeConsistency();

    // wait until second node join the cluster
    logger.info("--> start second node and wait for it to join");
    internalCluster().startNode(settings);
    ensureClusterSizeConsistency();

    logger.info("--> setting minimum master node to 2");
    setMinimumMasterNodes(2);

    // make sure it has been processed on all nodes (master node spawns a secondary cluster state update task)
    for (Client client : internalCluster().getClients()) {
        assertThat(client.admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setLocal(true).get().isTimedOut(),
                equalTo(false));
    }

    logger.info("--> stopping a node");
    internalCluster().stopRandomDataNode();
    logger.info("--> verifying min master node has effect");
    // One node left with min_master_nodes=2: the cluster must publish the no-master block.
    assertNoMasterBlockOnAllNodes();

    logger.info("--> bringing another node up");
    // The new node is started with min_master_nodes=2 so the pair can form a quorum again.
    internalCluster().startNode(Settings.builder().put(settings).put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2).build());
    ensureClusterSizeConsistency();
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:34,代码来源:MinimumMasterNodesIT.java

示例3: testCanNotBringClusterDown

import org.elasticsearch.discovery.zen.ZenDiscovery; //导入依赖的package包/类
/**
 * Verifies that raising {@code discovery.zen.minimum_master_nodes} above the number of
 * live master-eligible nodes is rejected with an {@link IllegalArgumentException} and,
 * in either case, never brings the cluster down.
 */
public void testCanNotBringClusterDown() throws ExecutionException, InterruptedException {
    int nodeCount = scaledRandomIntBetween(1, 5);
    Settings.Builder settings = Settings.builder()
            .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "200ms")
            .put("discovery.initial_state_timeout", "500ms");

    // set an initial value which is at least quorum to avoid split brains during initial startup
    int initialMinMasterNodes = randomIntBetween(nodeCount / 2 + 1, nodeCount);
    settings.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), initialMinMasterNodes);


    logger.info("--> starting [{}] nodes. min_master_nodes set to [{}]", nodeCount, initialMinMasterNodes);
    internalCluster().startNodes(nodeCount, settings.build());

    logger.info("--> waiting for nodes to join");
    assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(nodeCount)).get().isTimedOut());

    // A value within the current node count must be accepted.
    int updateCount = randomIntBetween(1, nodeCount);

    logger.info("--> updating [{}] to [{}]", ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount);
    assertAcked(client().admin().cluster().prepareUpdateSettings()
            .setPersistentSettings(Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount)));

    logger.info("--> verifying no node left and master is up");
    assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(nodeCount)).get().isTimedOut());

    // A value above the current node count must be rejected.
    updateCount = nodeCount + randomIntBetween(1, 2000);
    logger.info("--> trying to updating [{}] to [{}]", ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount);
    try {
        // BUG FIX: the request builder was previously constructed but never executed
        // (missing .get()), so the expected exception could never be thrown and the
        // catch block was dead code. Execute the request and fail if it is accepted.
        client().admin().cluster().prepareUpdateSettings()
                .setPersistentSettings(Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount))
                .get();
        fail("should not be able to set " + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey()
                + " to [" + updateCount + "] with only [" + nodeCount + "] node(s)");
    } catch (IllegalArgumentException ex) {
        assertEquals(ex.getMessage(), "cannot set discovery.zen.minimum_master_nodes to more than the current master nodes count [" +updateCount+ "]");
    }

    logger.info("--> verifying no node left and master is up");
    assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(nodeCount)).get().isTimedOut());
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:39,代码来源:MinimumMasterNodesIT.java

示例4: getDiscoveryTypes

import org.elasticsearch.discovery.zen.ZenDiscovery; //导入依赖的package包/类
@Override
public Map<String, Supplier<Discovery>> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService,
                                                          NamedWriteableRegistry namedWriteableRegistry,
                                                          ClusterService clusterService, UnicastHostsProvider hostsProvider) {
    // Backcompat with pre-5.1 clusters where discovery.type selected the ec2 hosts
    // provider: map that legacy type to a plain ZenDiscovery instance.
    final Supplier<Discovery> zenDiscoverySupplier =
        () -> new ZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, clusterService, hostsProvider);
    return Collections.singletonMap(EC2, zenDiscoverySupplier);
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:9,代码来源:Ec2DiscoveryPlugin.java

示例5: getDiscoveryTypes

import org.elasticsearch.discovery.zen.ZenDiscovery; //导入依赖的package包/类
@Override
public Map<String, Supplier<Discovery>> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService,
                                                          NamedWriteableRegistry namedWriteableRegistry,
                                                          ClusterService clusterService, UnicastHostsProvider hostsProvider) {
    // Backcompat with pre 5.1, where users would set discovery.type to select the azure
    // hosts provider: the legacy type maps to plain ZenDiscovery.
    // (Previous comment said "ec2" — a copy/paste slip; this is the Azure plugin.)
    return Collections.singletonMap(AZURE, () ->
        new ZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, clusterService, hostsProvider));
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:9,代码来源:AzureDiscoveryPlugin.java

示例6: nodeSettings

import org.elasticsearch.discovery.zen.ZenDiscovery; //导入依赖的package包/类
@Override
protected Settings nodeSettings(int nodeOrdinal) {
    // Base settings from the superclass plus aggressive zen timeouts so the test runs fast.
    return Settings.builder()
            .put(super.nodeSettings(nodeOrdinal))
            .put("discovery.zen.minimum_master_nodes", 2)
            .put(ZenDiscovery.JOIN_TIMEOUT_SETTING.getKey(), "50ms")
            .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "10ms")
            .put("discovery.initial_state_timeout", "100ms")
            .build();
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:12,代码来源:AzureMinimumMasterNodesTests.java

示例7: getDiscoveryTypes

import org.elasticsearch.discovery.zen.ZenDiscovery; //导入依赖的package包/类
@Override
public Map<String, Supplier<Discovery>> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService,
                                                          NamedWriteableRegistry namedWriteableRegistry,
                                                          ClusterService clusterService, UnicastHostsProvider hostsProvider) {
    // Backcompat with pre 5.1, where users would set discovery.type to select the gce
    // hosts provider: the legacy type maps to plain ZenDiscovery.
    // (Previous comment said "ec2" — a copy/paste slip; this is the GCE plugin.)
    return Collections.singletonMap(GCE, () ->
        new ZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, clusterService, hostsProvider));
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:9,代码来源:GceDiscoveryPlugin.java

示例8: DiscoveryModule

import org.elasticsearch.discovery.zen.ZenDiscovery; //导入依赖的package包/类
/**
 * Registers the built-in discovery implementations ("local", "zen", "dl"),
 * the zen master-election service, and the unicast ping, driven by {@code settings}.
 */
public DiscoveryModule(Settings settings) {
    this.settings = settings;
    addDiscoveryType("local", LocalDiscovery.class);
    addDiscoveryType("zen", ZenDiscovery.class);
    addDiscoveryType("dl", DLDiscovery.class);
    addElectMasterService("zen", ElectMasterService.class);
    // always add the unicast hosts, or things get angry!
    addZenPing(UnicastZenPing.class);
}
 
开发者ID:baidu,项目名称:Elasticsearch,代码行数:10,代码来源:DiscoveryModule.java

示例9: getDiscoveryTypes

import org.elasticsearch.discovery.zen.ZenDiscovery; //导入依赖的package包/类
@Override
public Map<String, Supplier<Discovery>> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService,
                                                          NamedWriteableRegistry namedWriteableRegistry, ClusterService clusterService, UnicastHostsProvider hostsProvider) {
  // Backcompat with pre 5.1, where users would set discovery.type to select the
  // kubernetes hosts provider: the legacy type maps to plain ZenDiscovery.
  // (Previous comment said "ec2" — a copy/paste slip; this is the Kubernetes plugin.)
  return Collections.singletonMap(KUBERNETES, () ->
    new ZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, clusterService, hostsProvider));
}
 
开发者ID:fabric8io,项目名称:elasticsearch-cloud-kubernetes,代码行数:8,代码来源:KubernetesDiscoveryPlugin.java

示例10: afterInternal

import org.elasticsearch.discovery.zen.ZenDiscovery; //导入依赖的package包/类
/**
 * Post-test cleanup: asserts that the test left no persistent/transient cluster
 * settings or pending cluster states behind, wipes test data, and closes the
 * cluster when the scope requires it.
 *
 * @param afterClass true when invoked from the after-class hook (forces cluster close)
 */
protected final void afterInternal(boolean afterClass) throws Exception {
    boolean success = false;
    try {
        final Scope currentClusterScope = getCurrentClusterScope();
        clearDisruptionScheme();
        try {
            if (cluster() != null) {
                if (currentClusterScope != Scope.TEST) {
                    // Non-TEST scopes share the cluster across tests, so any leftover
                    // settings would leak into the next test — assert there are none.
                    MetaData metaData = client().admin().cluster().prepareState().execute().actionGet().getState().getMetaData();
                    final Map<String, String> persistent = metaData.persistentSettings().getAsMap();
                    assertThat("test leaves persistent cluster metadata behind: " + persistent, persistent.size(), equalTo(0));
                    final Map<String, String> transientSettings =  new HashMap<>(metaData.transientSettings().getAsMap());
                    if (isInternalCluster() && internalCluster().getAutoManageMinMasterNode()) {
                        // this is set by the test infra
                        transientSettings.remove(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey());
                    }
                    assertThat("test leaves transient cluster metadata behind: " + transientSettings,
                        transientSettings.keySet(), empty());
                }
                ensureClusterSizeConsistency();
                ensureClusterStateConsistency();
                if (isInternalCluster()) {
                    // check no pending cluster states are leaked
                    for (Discovery discovery : internalCluster().getInstances(Discovery.class)) {
                        if (discovery instanceof ZenDiscovery) {
                            final ZenDiscovery zenDiscovery = (ZenDiscovery) discovery;
                            // assertBusy: pending states may still be draining right after the test.
                            assertBusy(() -> {
                                final ClusterState[] states = zenDiscovery.pendingClusterStates();
                                assertThat(zenDiscovery.localNode().getName() + " still having pending states:\n" +
                                        Stream.of(states).map(ClusterState::toString).collect(Collectors.joining("\n")),
                                    states, emptyArray());
                            });
                        }
                    }
                }
                beforeIndexDeletion();
                cluster().wipe(excludeTemplates()); // wipe after to make sure we fail in the test that didn't ack the delete
                if (afterClass || currentClusterScope == Scope.TEST) {
                    cluster().close();
                }
                cluster().assertAfterTest();
            }
        } finally {
            if (currentClusterScope == Scope.TEST) {
                clearClusters(); // it is ok to leave persistent / transient cluster state behind if scope is TEST
            }
        }
        success = true;
    } finally {
        if (!success) {
            // if we failed here that means that something broke horribly so we should clear all clusters
            // TODO: just let the exception propagate instead of tracking success manually
            // afterTestRule.forceFailure();
        }
    }
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:57,代码来源:ESIntegTestCase.java

示例11: testDefaults

import org.elasticsearch.discovery.zen.ZenDiscovery; //导入依赖的package包/类
/** With empty settings and no plugins, zen must be the default discovery implementation. */
public void testDefaults() {
    final DiscoveryModule defaultModule = newModule(Settings.EMPTY, Collections.emptyList());
    final Object discovery = defaultModule.getDiscovery();
    assertTrue(discovery instanceof ZenDiscovery);
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:5,代码来源:DiscoveryModuleTests.java

示例12: testMultipleNodesShutdownNonMasterNodes

import org.elasticsearch.discovery.zen.ZenDiscovery; //导入依赖的package包/类
/**
 * With min_master_nodes=3: starts 2 nodes (must stay blocked, no master), adds 2 more
 * (quorum reached, cluster forms), indexes data, stops 2 non-master nodes (no-master
 * block returns), restarts 2 nodes, and verifies the data survived and that one of the
 * originally-running nodes remains master.
 */
public void testMultipleNodesShutdownNonMasterNodes() throws Exception {
    Settings settings = Settings.builder()
            .put("discovery.zen.minimum_master_nodes", 3)
            .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "1s")
            .put("discovery.initial_state_timeout", "500ms")
            .build();

    logger.info("--> start first 2 nodes");
    internalCluster().startNodes(2, settings);

    ClusterState state;

    // Two nodes is below the quorum of 3, so every node must carry the no-master block.
    assertBusy(() -> {
        for (Client client : clients()) {
            ClusterState state1 = client.admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
            assertThat(state1.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID), equalTo(true));
        }
    });

    logger.info("--> start two more nodes");
    internalCluster().startNodes(2, settings);

    ensureGreen();
    ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("4").execute().actionGet();
    assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));

    state = client().admin().cluster().prepareState().execute().actionGet().getState();
    assertThat(state.nodes().getSize(), equalTo(4));

    createIndex("test");
    NumShards numShards = getNumShards("test");
    logger.info("--> indexing some data");
    for (int i = 0; i < 100; i++) {
        client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field", "value").execute().actionGet();
    }
    ensureGreen();
    // make sure that all shards recovered before trying to flush
    assertThat(client().admin().cluster().prepareHealth("test").setWaitForActiveShards(numShards.totalNumShards).execute().actionGet().isTimedOut(), equalTo(false));
    // flush for simpler debugging
    client().admin().indices().prepareFlush().execute().actionGet();

    refresh();
    logger.info("--> verify we the data back");
    for (int i = 0; i < 10; i++) {
        assertHitCount(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(), 100);
    }

    // Dropping to 2 nodes goes below quorum again.
    internalCluster().stopRandomNonMasterNode();
    internalCluster().stopRandomNonMasterNode();

    logger.info("--> verify that there is no master anymore on remaining nodes");
    // spin here to wait till the state is set
    assertNoMasterBlockOnAllNodes();

    logger.info("--> start back the 2 nodes ");
    String[] newNodes = internalCluster().startNodes(2, settings).stream().toArray(String[]::new);

    ensureGreen();
    clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForNodes("4").execute().actionGet();
    assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));

    state = client().admin().cluster().prepareState().execute().actionGet().getState();
    assertThat(state.nodes().getSize(), equalTo(4));
    // we prefer to elect up and running nodes
    assertThat(state.nodes().getMasterNodeId(), not(isOneOf(newNodes)));

    logger.info("--> verify we the data back");
    for (int i = 0; i < 10; i++) {
        assertHitCount(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(), 100);
    }
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:72,代码来源:MinimumMasterNodesIT.java

示例13: testAwarenessZones

import org.elasticsearch.discovery.zen.ZenDiscovery; //导入依赖的package包/类
/**
 * Starts 4 nodes split across two awareness zones ("a" and "b") with forced zone
 * awareness, creates a 5-shard / 1-replica index, and verifies the shard copies are
 * balanced across the nodes (each node holding 2 or 3 of the 10 shard copies).
 */
public void testAwarenessZones() throws Exception {
    Settings commonSettings = Settings.builder()
            .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone.values", "a,b")
            .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone")
            .put(ZenDiscovery.JOIN_TIMEOUT_SETTING.getKey(), "10s")
            .build();

    logger.info("--> starting 4 nodes on different zones");
    List<String> nodes = internalCluster().startNodes(
            Settings.builder().put(commonSettings).put("node.attr.zone", "a").build(),
            Settings.builder().put(commonSettings).put("node.attr.zone", "b").build(),
            Settings.builder().put(commonSettings).put("node.attr.zone", "b").build(),
            Settings.builder().put(commonSettings).put("node.attr.zone", "a").build()
    );
    String A_0 = nodes.get(0);
    String B_0 = nodes.get(1);
    String B_1 = nodes.get(2);
    String A_1 = nodes.get(3);

    logger.info("--> waiting for nodes to form a cluster");
    ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("4").execute().actionGet();
    assertThat(health.isTimedOut(), equalTo(false));

    client().admin().indices().prepareCreate("test")
    .setSettings(Settings.builder().put("index.number_of_shards", 5)
            .put("index.number_of_replicas", 1)).execute().actionGet();

    logger.info("--> waiting for shards to be allocated");
    health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).execute().actionGet();
    assertThat(health.isTimedOut(), equalTo(false));

    // Count how many shard copies ended up on each node by name.
    ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
    ObjectIntHashMap<String> counts = new ObjectIntHashMap<>();

    for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
        for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
            for (ShardRouting shardRouting : indexShardRoutingTable) {
                counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1);
            }
        }
    }
    // 10 copies over 4 nodes: balance means 2 or 3 per node.
    assertThat(counts.get(A_1), anyOf(equalTo(2),equalTo(3)));
    assertThat(counts.get(B_1), anyOf(equalTo(2),equalTo(3)));
    assertThat(counts.get(A_0), anyOf(equalTo(2),equalTo(3)));
    assertThat(counts.get(B_0), anyOf(equalTo(2),equalTo(3)));
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:47,代码来源:AwarenessAllocationIT.java

示例14: testRestorePersistentSettings

import org.elasticsearch.discovery.zen.ZenDiscovery; //导入依赖的package包/类
/**
 * Verifies that restoring a snapshot's global state does not reapply a persistent
 * {@code minimum_master_nodes} value that is invalid for the current cluster size:
 * the restore must fail with {@link IllegalArgumentException} and leave the live
 * setting untouched.
 */
public void testRestorePersistentSettings() throws Exception {
    logger.info("--> start 2 nodes");
    Settings nodeSettings = Settings.builder()
            .put("discovery.type", "zen")
            .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "200ms")
            .put("discovery.initial_state_timeout", "500ms")
            .build();
    internalCluster().startNode(nodeSettings);
    Client client = client();
    String secondNode = internalCluster().startNode(nodeSettings);
    logger.info("--> wait for the second node to join the cluster");
    assertThat(client.admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut(), equalTo(false));

    int random = randomIntBetween(10, 42);

    logger.info("--> set test persistent setting");
    // Persist min_master_nodes=2 so the upcoming snapshot captures it in global state.
    client.admin().cluster().prepareUpdateSettings().setPersistentSettings(
            Settings.builder()
                    .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2))
            .execute().actionGet();

    assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState()
            .getMetaData().persistentSettings().getAsInt(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), -1), equalTo(2));

    logger.info("--> create repository");
    PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
            .setType("fs").setSettings(Settings.builder().put("location", randomRepoPath())).execute().actionGet();
    assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));

    logger.info("--> start snapshot");
    CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
    assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
    assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").execute().actionGet().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));

    logger.info("--> clean the test persistent setting");
    // Drop back to 1 so the cluster stays usable after the second node is stopped below.
    client.admin().cluster().prepareUpdateSettings().setPersistentSettings(
            Settings.builder()
                    .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 1))
            .execute().actionGet();
    assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState()
            .getMetaData().persistentSettings().getAsInt(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), -1), equalTo(1));

    stopNode(secondNode);
    assertThat(client.admin().cluster().prepareHealth().setWaitForNodes("1").get().isTimedOut(), equalTo(false));

    logger.info("--> restore snapshot");
    // Restoring global state would reapply min_master_nodes=2, which exceeds the
    // single remaining node — the restore must be rejected.
    try {
        client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true).execute().actionGet();
        fail("can't restore minimum master nodes");
    } catch (IllegalArgumentException ex) {
        assertEquals("illegal value can't update [discovery.zen.minimum_master_nodes] from [1] to [2]", ex.getMessage());
        assertEquals("cannot set discovery.zen.minimum_master_nodes to more than the current master nodes count [1]", ex.getCause().getMessage());
    }
    logger.info("--> ensure that zen discovery minimum master nodes wasn't restored");
    assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState()
            .getMetaData().persistentSettings().getAsInt(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), -1), not(equalTo(2)));
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:59,代码来源:DedicatedClusterSnapshotRestoreIT.java

示例15: testFullRollingRestart

import org.elasticsearch.discovery.zen.ZenDiscovery; //导入依赖的package包/类
/**
 * Full rolling restart: indexes 2000 docs on a single node, scales the cluster up to
 * 5 nodes, then shuts nodes down one at a time back to a single node, verifying after
 * every step that the cluster recovers and all 2000 documents remain searchable.
 */
public void testFullRollingRestart() throws Exception {
    // Generous join timeout so nodes joining under load are not dropped.
    Settings settings = Settings.builder().put(ZenDiscovery.JOIN_TIMEOUT_SETTING.getKey(), "30s").build();
    internalCluster().startNode(settings);
    createIndex("test");

    final String healthTimeout = "1m";

    for (int i = 0; i < 1000; i++) {
        client().prepareIndex("test", "type1", Long.toString(i))
                .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + i).map()).execute().actionGet();
    }
    flush();
    // Second batch stays unflushed so recovery covers both flushed and transaction-log docs.
    for (int i = 1000; i < 2000; i++) {
        client().prepareIndex("test", "type1", Long.toString(i))
                .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + i).map()).execute().actionGet();
    }

    logger.info("--> now start adding nodes");
    internalCluster().startNode(settings);
    internalCluster().startNode(settings);

    // make sure the cluster state is green, and all has been recovered
    assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("3"));

    logger.info("--> add two more nodes");
    internalCluster().startNode(settings);
    internalCluster().startNode(settings);

    // make sure the cluster state is green, and all has been recovered
    assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("5"));

    logger.info("--> refreshing and checking data");
    refresh();
    for (int i = 0; i < 10; i++) {
        assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2000L);
    }

    // now start shutting nodes down
    internalCluster().stopRandomDataNode();
    // make sure the cluster state is green, and all has been recovered
    assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("4"));

    internalCluster().stopRandomDataNode();
    // make sure the cluster state is green, and all has been recovered
    assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("3"));

    logger.info("--> stopped two nodes, verifying data");
    refresh();
    for (int i = 0; i < 10; i++) {
        assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2000L);
    }

    // closing the 3rd node
    internalCluster().stopRandomDataNode();
    // make sure the cluster state is green, and all has been recovered
    assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("2"));

    internalCluster().stopRandomDataNode();

    // make sure the cluster state is yellow, and all has been recovered
    // (yellow, not green: with one node left, replicas cannot be assigned)
    assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForYellowStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("1"));

    logger.info("--> one node left, verifying data");
    refresh();
    for (int i = 0; i < 10; i++) {
        assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2000L);
    }
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:69,代码来源:FullRollingRestartIT.java


注:本文中的org.elasticsearch.discovery.zen.ZenDiscovery类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。