This article collects typical usage examples of the Java class org.elasticsearch.cluster.node.DiscoveryNodes. If you are wondering what DiscoveryNodes is for, how to use it, or where it shows up in real code, the curated class examples below may help.
The DiscoveryNodes class belongs to the org.elasticsearch.cluster.node package. Fifteen code examples of the class are shown below, sorted by popularity by default.
Example 1: createClusterService
import org.elasticsearch.cluster.node.DiscoveryNodes; // import the required package/class
public static ClusterService createClusterService(Settings settings, ThreadPool threadPool, DiscoveryNode localNode) {
    ClusterService clusterService = new ClusterService(
            Settings.builder().put("cluster.name", "ClusterServiceTests").put(settings).build(),
            new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
            threadPool, () -> localNode);
    clusterService.setNodeConnectionsService(new NodeConnectionsService(Settings.EMPTY, null, null) {
        @Override
        public void connectToNodes(DiscoveryNodes discoveryNodes) {
            // skip
        }
        @Override
        public void disconnectFromNodesExcept(DiscoveryNodes nodesToKeep) {
            // skip
        }
    });
    clusterService.setClusterStatePublisher((event, ackListener) -> {
    });
    clusterService.setDiscoverySettings(new DiscoverySettings(Settings.EMPTY,
            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)));
    clusterService.start();
    final DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterService.state().nodes());
    nodes.masterNodeId(clusterService.localNode().getId());
    setState(clusterService, ClusterState.builder(clusterService.state()).nodes(nodes));
    return clusterService;
}
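Two details are worth noting: the collaborating NodeConnectionsService is stubbed by an anonymous subclass whose overrides are deliberate no-ops, and the local node is promoted to master by rebuilding the DiscoveryNodes from the current state. The stubbing idiom itself is plain Java; here is a minimal, self-contained sketch with a hypothetical ConnectionService class, not the Elasticsearch one:
import java.util.List;

public class NoOpStubExample {
    /** Production collaborator whose side effects we do not want in a test. */
    static class ConnectionService {
        void connectToNodes(List<String> nodeIds) {
            throw new UnsupportedOperationException("would open real network connections");
        }
        void disconnectFromNodesExcept(List<String> nodeIds) {
            throw new UnsupportedOperationException("would close real network connections");
        }
    }

    public static void main(String[] args) {
        // anonymous subclass turning both operations into no-ops, as in the test helper above
        ConnectionService stub = new ConnectionService() {
            @Override
            void connectToNodes(List<String> nodeIds) {
                // skip
            }
            @Override
            void disconnectFromNodesExcept(List<String> nodeIds) {
                // skip
            }
        };
        stub.connectToNodes(List.of("node-1")); // does nothing instead of connecting
        System.out.println("stubbed service ran without side effects");
    }
}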
Example 2: fillShardCacheWithDataNodes
import org.elasticsearch.cluster.node.DiscoveryNodes; // import the required package/class
/**
 * Fills the shard fetched data with new (data) nodes and a fresh NodeEntry, and removes from
 * it nodes that are no longer part of the state.
 */
private void fillShardCacheWithDataNodes(Map<String, NodeEntry<T>> shardCache, DiscoveryNodes nodes) {
    // verify that all current data nodes are there
    for (ObjectObjectCursor<String, DiscoveryNode> cursor : nodes.dataNodes()) {
        DiscoveryNode node = cursor.value;
        if (shardCache.containsKey(node.getId()) == false) {
            shardCache.put(node.getId(), new NodeEntry<T>(node.getId()));
        }
    }
    // remove nodes that are no longer part of the data nodes set
    for (Iterator<String> it = shardCache.keySet().iterator(); it.hasNext(); ) {
        String nodeId = it.next();
        if (nodes.nodeExists(nodeId) == false) {
            it.remove();
        }
    }
}
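The reconciliation pattern above (insert an entry for every data node missing from the cache, then evict entries whose node has left the cluster) is easy to isolate. Below is a minimal plain-Java sketch of the same two passes, assuming only java.util types; the CacheEntry class and the node-id strings are hypothetical stand-ins for NodeEntry<T> and DiscoveryNode, not Elasticsearch types.
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;

public class NodeCacheSync {
    static final class CacheEntry {
        final String nodeId;
        CacheEntry(String nodeId) { this.nodeId = nodeId; }
    }

    /** Keeps the cache, keyed by node id, in step with the current set of node ids. */
    static void sync(Map<String, CacheEntry> cache, Set<String> currentNodeIds) {
        // pass 1: make sure every current node has an entry
        for (String nodeId : currentNodeIds) {
            cache.putIfAbsent(nodeId, new CacheEntry(nodeId));
        }
        // pass 2: drop entries for nodes that are gone
        for (Iterator<String> it = cache.keySet().iterator(); it.hasNext(); ) {
            if (currentNodeIds.contains(it.next()) == false) {
                it.remove();
            }
        }
    }

    public static void main(String[] args) {
        Map<String, CacheEntry> cache = new HashMap<>();
        sync(cache, Set.of("nodeA", "nodeB"));
        sync(cache, Set.of("nodeB", "nodeC"));
        System.out.println(cache.keySet()); // nodeB and nodeC, nodeA was evicted
    }
}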
Example 3: listTasksResponseListener
import org.elasticsearch.cluster.node.DiscoveryNodes; // import the required package/class
/**
 * Standard listener for extensions of {@link ListTasksResponse} that supports {@code group_by=nodes}.
 */
public static <T extends ListTasksResponse> ActionListener<T> listTasksResponseListener(
        Supplier<DiscoveryNodes> nodesInCluster,
        String groupBy,
        final RestChannel channel) {
    if ("nodes".equals(groupBy)) {
        return new RestBuilderListener<T>(channel) {
            @Override
            public RestResponse buildResponse(T response, XContentBuilder builder) throws Exception {
                builder.startObject();
                response.toXContentGroupedByNode(builder, channel.request(), nodesInCluster.get());
                builder.endObject();
                return new BytesRestResponse(RestStatus.OK, builder);
            }
        };
    } else if ("parents".equals(groupBy)) {
        return new RestToXContentListener<>(channel);
    } else {
        throw new IllegalArgumentException("[group_by] must be one of [nodes] or [parents] but was [" + groupBy + "]");
    }
}
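The method is essentially parameter validation plus dispatch: group_by=nodes gets a node-grouped rendering, group_by=parents falls back to the default pass-through listener, and anything else is rejected. A rough plain-Java sketch of that dispatch, with a hypothetical Renderer interface standing in for the ActionListener and RestChannel machinery:
import java.util.Locale;

public class GroupByDispatch {
    interface Renderer { String render(String response); }

    /** Picks a renderer for the supported group_by values and rejects unknown ones. */
    static Renderer rendererFor(String groupBy) {
        if ("nodes".equals(groupBy)) {
            return response -> "{\"nodes\":" + response + "}"; // grouped form
        } else if ("parents".equals(groupBy)) {
            return response -> response;                       // pass-through form
        } else {
            throw new IllegalArgumentException(String.format(Locale.ROOT,
                    "[group_by] must be one of [nodes] or [parents] but was [%s]", groupBy));
        }
    }

    public static void main(String[] args) {
        System.out.println(rendererFor("nodes").render("[]"));   // {"nodes":[]}
        System.out.println(rendererFor("parents").render("[]")); // []
    }
}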
Example 4: fillShardCacheWithDataNodes
import org.elasticsearch.cluster.node.DiscoveryNodes; // import the required package/class
/**
 * Fills the shard fetched data with new (data) nodes and a fresh NodeEntry, and removes from
 * it nodes that are no longer part of the state.
 */
private void fillShardCacheWithDataNodes(Map<String, NodeEntry<T>> shardCache, DiscoveryNodes nodes) {
    // verify that all current data nodes are there
    for (ObjectObjectCursor<String, DiscoveryNode> cursor : nodes.getDataNodes()) {
        DiscoveryNode node = cursor.value;
        if (shardCache.containsKey(node.getId()) == false) {
            shardCache.put(node.getId(), new NodeEntry<T>(node.getId()));
        }
    }
    // remove nodes that are no longer part of the data nodes set
    for (Iterator<String> it = shardCache.keySet().iterator(); it.hasNext(); ) {
        String nodeId = it.next();
        if (nodes.nodeExists(nodeId) == false) {
            it.remove();
        }
    }
}
Example 5: readFrom
import org.elasticsearch.cluster.node.DiscoveryNodes; // import the required package/class
public static ClusterState readFrom(StreamInput in, DiscoveryNode localNode) throws IOException {
    ClusterName clusterName = new ClusterName(in);
    Builder builder = new Builder(clusterName);
    builder.version = in.readLong();
    builder.uuid = in.readString();
    builder.metaData = MetaData.readFrom(in);
    builder.routingTable = RoutingTable.readFrom(in);
    builder.nodes = DiscoveryNodes.readFrom(in, localNode);
    builder.blocks = new ClusterBlocks(in);
    int customSize = in.readVInt();
    for (int i = 0; i < customSize; i++) {
        Custom customIndexMetaData = in.readNamedWriteable(Custom.class);
        builder.putCustom(customIndexMetaData.getWriteableName(), customIndexMetaData);
    }
    return builder.build();
}
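Beyond the fixed fields, the interesting part is the trailing customs section: a count followed by that many named entries. The same length-prefixed framing can be sketched with plain java.io streams; the section names below are made up, and Elasticsearch's StreamInput/NamedWriteable add vint encoding and a writeable registry on top of this basic idea.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.LinkedHashMap;
import java.util.Map;

public class LengthPrefixedSections {
    /** Writes a count followed by exactly count (name, payload) pairs. */
    static byte[] write(Map<String, String> sections) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeInt(sections.size());
        for (Map.Entry<String, String> e : sections.entrySet()) {
            out.writeUTF(e.getKey());
            out.writeUTF(e.getValue());
        }
        return bytes.toByteArray();
    }

    /** Reads the count, then exactly that many sections back. */
    static Map<String, String> read(byte[] data) throws IOException {
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(data));
        int count = in.readInt();
        Map<String, String> sections = new LinkedHashMap<>();
        for (int i = 0; i < count; i++) {
            sections.put(in.readUTF(), in.readUTF());
        }
        return sections;
    }

    public static void main(String[] args) throws IOException {
        Map<String, String> sections = new LinkedHashMap<>();
        sections.put("snapshots", "{}");
        sections.put("restore", "{}");
        System.out.println(read(write(sections))); // {snapshots={}, restore={}}
    }
}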
Example 6: testDeadNodesBeforeReplicaFailed
import org.elasticsearch.cluster.node.DiscoveryNodes; // import the required package/class
/**
 * Assume the following scenario: an indexing request is written to the primary, but fails to be replicated to an active replica.
 * The primary instructs the master to fail the replica before acknowledging the write to the client. In the meanwhile, the node of
 * the replica was removed from the cluster (deassociateDeadNodes). This means that the ShardRouting of the replica was failed, but
 * its allocation id is still part of the in-sync set. We have to make sure that the failShard request from the primary removes the
 * allocation id from the in-sync set.
 */
public void testDeadNodesBeforeReplicaFailed() throws Exception {
    ClusterState clusterState = createOnePrimaryOneReplicaClusterState(allocation);
    logger.info("remove replica node");
    IndexShardRoutingTable shardRoutingTable = clusterState.routingTable().index("test").shard(0);
    ShardRouting replicaShard = shardRoutingTable.replicaShards().get(0);
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
            .remove(replicaShard.currentNodeId()))
            .build();
    clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute");
    assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0).size(), equalTo(2));
    logger.info("fail replica (for which there is no shard routing in the CS anymore)");
    assertNull(clusterState.getRoutingNodes().getByAllocationId(replicaShard.shardId(), replicaShard.allocationId().getId()));
    ShardStateAction.ShardFailedClusterStateTaskExecutor failedClusterStateTaskExecutor =
            new ShardStateAction.ShardFailedClusterStateTaskExecutor(allocation, null, logger);
    long primaryTerm = clusterState.metaData().index("test").primaryTerm(0);
    clusterState = failedClusterStateTaskExecutor.execute(clusterState, Arrays.asList(
            new ShardEntry(shardRoutingTable.shardId(), replicaShard.allocationId().getId(), primaryTerm, "dummy", null))
    ).resultingState;
    assertThat(clusterState.metaData().index("test").inSyncAllocationIds(0).size(), equalTo(1));
}
Example 7: collectAttributeShards
import org.elasticsearch.cluster.node.DiscoveryNodes; // import the required package/class
private static List<ShardRouting> collectAttributeShards(AttributesKey key, DiscoveryNodes nodes, ArrayList<ShardRouting> from) {
    final ArrayList<ShardRouting> to = new ArrayList<>();
    for (final String attribute : key.attributes) {
        final String localAttributeValue = nodes.getLocalNode().getAttributes().get(attribute);
        if (localAttributeValue != null) {
            for (Iterator<ShardRouting> iterator = from.iterator(); iterator.hasNext(); ) {
                ShardRouting fromShard = iterator.next();
                final DiscoveryNode discoveryNode = nodes.get(fromShard.currentNodeId());
                if (discoveryNode == null) {
                    iterator.remove(); // node is not present anymore - ignore shard
                } else if (localAttributeValue.equals(discoveryNode.getAttributes().get(attribute))) {
                    iterator.remove();
                    to.add(fromShard);
                }
            }
        }
    }
    return Collections.unmodifiableList(to);
}
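The loop above prefers shards whose node shares an attribute value (for example an awareness zone) with the local node, and silently drops shards whose node is no longer known. A stripped-down sketch of that selection, with hypothetical NodeInfo and ShardRef records in place of DiscoveryNode and ShardRouting:
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

public class AttributeShardSelection {
    record NodeInfo(String id, String zone) {}
    record ShardRef(String id, String nodeId) {}

    /** Moves shards whose node shares the local node's zone from 'from' into the returned list. */
    static List<ShardRef> collectSameZone(String localZone, Map<String, NodeInfo> nodes, List<ShardRef> from) {
        List<ShardRef> to = new ArrayList<>();
        for (Iterator<ShardRef> it = from.iterator(); it.hasNext(); ) {
            ShardRef shard = it.next();
            NodeInfo node = nodes.get(shard.nodeId());
            if (node == null) {
                it.remove();                  // node is not present anymore - ignore shard
            } else if (localZone.equals(node.zone())) {
                it.remove();
                to.add(shard);                // prefer shards co-located with the local node
            }
        }
        return to;
    }

    public static void main(String[] args) {
        Map<String, NodeInfo> nodes = Map.of(
                "n1", new NodeInfo("n1", "zone-a"),
                "n2", new NodeInfo("n2", "zone-b"));
        List<ShardRef> from = new ArrayList<>(List.of(
                new ShardRef("s0", "n1"), new ShardRef("s1", "n2"), new ShardRef("s2", "gone")));
        System.out.println(collectSameZone("zone-a", nodes, from)); // [ShardRef[id=s0, nodeId=n1]]
        System.out.println(from);                                   // [ShardRef[id=s1, nodeId=n2]]
    }
}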
Example 8: disconnectFromNodesExcept
import org.elasticsearch.cluster.node.DiscoveryNodes; // import the required package/class
/**
 * Disconnects from all nodes except the ones provided as parameter
 */
public void disconnectFromNodesExcept(DiscoveryNodes nodesToKeep) {
    Set<DiscoveryNode> currentNodes = new HashSet<>(nodes.keySet());
    for (DiscoveryNode node : nodesToKeep) {
        currentNodes.remove(node);
    }
    for (final DiscoveryNode node : currentNodes) {
        try (Releasable ignored = nodeLocks.acquire(node)) {
            Integer current = nodes.remove(node);
            assert current != null : "node " + node + " was removed in event but not in internal nodes";
            try {
                transportService.disconnectFromNode(node);
            } catch (Exception e) {
                logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to disconnect to node [{}]", node), e);
            }
        }
    }
}
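At its core the method is a set difference followed by per-node cleanup: copy the currently tracked nodes, subtract the ones to keep, and tear down whatever remains. A minimal sketch of that shape, with a hypothetical AutoCloseable connection standing in for transportService.disconnectFromNode and without the per-node locking:
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class DisconnectExcept {
    private final Map<String, AutoCloseable> connections = new ConcurrentHashMap<>();

    void connect(String nodeId, AutoCloseable connection) {
        connections.put(nodeId, connection);
    }

    /** Closes every tracked connection whose node id is not in nodesToKeep. */
    void disconnectFromNodesExcept(Set<String> nodesToKeep) {
        Set<String> toDrop = new HashSet<>(connections.keySet());
        toDrop.removeAll(nodesToKeep);
        for (String nodeId : toDrop) {
            AutoCloseable connection = connections.remove(nodeId);
            if (connection == null) {
                continue; // already removed concurrently
            }
            try {
                connection.close();
            } catch (Exception e) {
                System.err.println("failed to disconnect from node [" + nodeId + "]: " + e);
            }
        }
    }

    public static void main(String[] args) {
        DisconnectExcept service = new DisconnectExcept();
        service.connect("n1", () -> System.out.println("closed n1"));
        service.connect("n2", () -> System.out.println("closed n2"));
        service.disconnectFromNodesExcept(Set.of("n2")); // prints "closed n1"
    }
}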
Example 9: execute
import org.elasticsearch.cluster.node.DiscoveryNodes; // import the required package/class
@Override
public ClusterState execute(ClusterState currentState) {
    DiscoveryNodes.Builder nodesBuilder;
    nodesBuilder = DiscoveryNodes.builder(currentState.nodes());
    if (currentState.nodes().nodeExists(node.id())) {
        logger.debug("received a join request for an existing node [{}]", node);
        return currentState;
    }
    // if this node is not in the dead node list either, reject the join request
    ImmutableOpenMap<String, DiscoveryNode> deadNodes = clusterService.state().nodes().deadNodes();
    if (deadNodes.get(node.getIpPortAddress()) == null) {
        logger.warn("failed to find node [{}] in node list, ignore the join request", node);
        throw new IllegalStateException("could not find this node " + node + " from active node list and dead node list");
    }
    nodesBuilder.put(node);
    nodesBuilder.removeDeadNodeByIpPort(node);
    final ClusterState.Builder newStateBuilder = ClusterState.builder(currentState);
    newStateBuilder.nodes(nodesBuilder);
    ClusterState newState = newStateBuilder.build();
    return newState;
}
Example 10: testClusterStateSerialization
import org.elasticsearch.cluster.node.DiscoveryNodes; // import the required package/class
public void testClusterStateSerialization() throws Exception {
    MetaData metaData = MetaData.builder()
            .put(IndexMetaData.builder("test_idx").settings(settings(Version.CURRENT)).numberOfShards(10).numberOfReplicas(1))
            .put(IndexTemplateMetaData.builder("test_template").build())
            .build();
    RoutingTable routingTable = RoutingTable.builder()
            .addAsNew(metaData.index("test_idx"))
            .build();
    DiscoveryNodes nodes = DiscoveryNodes.builder().add(new DiscoveryNode("node_foo", buildNewFakeTransportAddress(),
            emptyMap(), emptySet(), Version.CURRENT)).localNodeId("node_foo").masterNodeId("node_foo").build();
    ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(nodes)
            .metaData(metaData).routingTable(routingTable).build();
    AllocationService strategy = createAllocationService();
    clusterState = ClusterState.builder(clusterState).routingTable(strategy.reroute(clusterState, "reroute").routingTable()).build();
    String clusterStateString = Strings.toString(clusterState);
    assertNotNull(clusterStateString);
    assertThat(clusterStateString, containsString("test_idx"));
    assertThat(clusterStateString, containsString("test_template"));
    assertThat(clusterStateString, containsString("node_foo"));
}
Example 11: AckCountDownListener
import org.elasticsearch.cluster.node.DiscoveryNodes; // import the required package/class
AckCountDownListener(AckedClusterStateTaskListener ackedTaskListener, long clusterStateVersion, DiscoveryNodes nodes, ThreadPool threadPool) {
    this.ackedTaskListener = ackedTaskListener;
    this.clusterStateVersion = clusterStateVersion;
    this.nodes = nodes;
    int countDown = 0;
    for (DiscoveryNode node : nodes) {
        if (ackedTaskListener.mustAck(node)) {
            countDown++;
        }
    }
    // we always wait for at least 1 node (the master)
    countDown = Math.max(1, countDown);
    logger.trace("expecting {} acknowledgements for cluster_state update (version: {})", countDown, clusterStateVersion);
    this.countDown = new CountDown(countDown);
    this.ackTimeoutCallback = threadPool.schedule(ackedTaskListener.ackTimeout(), ThreadPool.Names.GENERIC, new Runnable() {
        @Override
        public void run() {
            onTimeout();
        }
    });
}
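The constructor mostly counts how many nodes must acknowledge the new cluster state, never fewer than one (the master), and arms a timeout. The counting part can be sketched with a plain CountDownLatch; the mustAck predicate and node ids below are hypothetical stand-ins for AckedClusterStateTaskListener.mustAck and DiscoveryNode.
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.function.Predicate;

public class AckCounting {
    /** Builds a latch sized to the nodes that must acknowledge, but always at least 1 (the master). */
    static CountDownLatch ackLatch(List<String> nodeIds, Predicate<String> mustAck) {
        int countDown = 0;
        for (String nodeId : nodeIds) {
            if (mustAck.test(nodeId)) {
                countDown++;
            }
        }
        countDown = Math.max(1, countDown); // we always wait for at least 1 node (the master)
        return new CountDownLatch(countDown);
    }

    public static void main(String[] args) {
        CountDownLatch latch = ackLatch(List.of("master", "data-1", "data-2"), id -> id.startsWith("data"));
        System.out.println(latch.getCount()); // 2
        latch.countDown();                    // an ack arrives
        System.out.println(latch.getCount()); // 1
    }
}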
Example 12: testUnexpectedDiffPublishing
import org.elasticsearch.cluster.node.DiscoveryNodes; // import the required package/class
public void testUnexpectedDiffPublishing() throws Exception {
    MockNode nodeA = createMockNode("nodeA", Settings.EMPTY, event -> {
        fail("Shouldn't send cluster state to myself");
    }).setAsMaster();
    MockNode nodeB = createMockNode("nodeB");
    // Initial cluster state with both states - the second node still shouldn't
    // get a diff even though it's present in the previous cluster state
    DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(nodeA.nodes()).add(nodeB.discoveryNode).build();
    ClusterState previousClusterState = ClusterState.builder(CLUSTER_NAME).nodes(discoveryNodes).build();
    ClusterState clusterState = ClusterState.builder(previousClusterState).incrementVersion().build();
    publishStateAndWait(nodeA.action, clusterState, previousClusterState);
    assertSameStateFromFull(nodeB.clusterState, clusterState);
    // cluster state update - add a block
    previousClusterState = clusterState;
    clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder()
            .addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build();
    publishStateAndWait(nodeA.action, clusterState, previousClusterState);
    assertSameStateFromDiff(nodeB.clusterState, clusterState);
}
Example 13: executeKillOnAllNodes
import org.elasticsearch.cluster.node.DiscoveryNodes; // import the required package/class
public void executeKillOnAllNodes(KillJobsRequest request, ActionListener<KillResponse> listener) {
    DiscoveryNodes nodes = clusterService.state().nodes();
    listener = new MultiActionListener<>(nodes.size(), KillResponse.MERGE_FUNCTION, listener);
    DefaultTransportResponseHandler<KillResponse> transportResponseHandler =
            new DefaultTransportResponseHandler<KillResponse>(listener) {
                @Override
                public KillResponse newInstance() {
                    return new KillResponse(0);
                }
            };
    logger.trace("Sending {} to {}", request, nodes);
    for (DiscoveryNode node : nodes) {
        transportService.sendRequest(node, TRANSPORT_ACTION, request, transportResponseHandler);
    }
}
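The MultiActionListener used here wraps the caller's listener so that it fires only once a response from every node has arrived, merging the results along the way. A plain-Java sketch of that aggregation using AtomicInteger and AtomicLong; the merge (summing longs) and the callback shape are simplified assumptions rather than the original MultiActionListener API:
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;

public class MultiResponseAggregation {
    /** Returns a per-response callback that invokes 'done' once 'expected' responses have been summed up. */
    static Consumer<Long> aggregating(int expected, Consumer<Long> done) {
        AtomicLong sum = new AtomicLong();
        AtomicInteger remaining = new AtomicInteger(expected);
        return response -> {
            sum.addAndGet(response);
            if (remaining.decrementAndGet() == 0) {
                done.accept(sum.get()); // all nodes answered - deliver the merged result
            }
        };
    }

    public static void main(String[] args) {
        Consumer<Long> perNode = aggregating(3, total -> System.out.println("killed " + total + " jobs"));
        perNode.accept(2L);
        perNode.accept(0L);
        perNode.accept(5L); // prints "killed 7 jobs"
    }
}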
Example 14: getRecoverOnAnyNodeRoutingAllocation
import org.elasticsearch.cluster.node.DiscoveryNodes; // import the required package/class
private RoutingAllocation getRecoverOnAnyNodeRoutingAllocation(AllocationDeciders allocationDeciders, String... allocIds) {
    MetaData metaData = MetaData.builder()
            .put(IndexMetaData.builder(shardId.getIndexName()).settings(settings(Version.CURRENT)
                    .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
                    .put(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, true))
                    .numberOfShards(1).numberOfReplicas(0).putInSyncAllocationIds(0, Sets.newHashSet(allocIds)))
            .build();
    RoutingTable routingTable = RoutingTable.builder()
            .addAsRestore(metaData.index(shardId.getIndex()),
                    new SnapshotRecoverySource(new Snapshot("test", new SnapshotId("test", UUIDs.randomBase64UUID())),
                            Version.CURRENT, shardId.getIndexName()))
            .build();
    ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
            .metaData(metaData)
            .routingTable(routingTable)
            .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)).build();
    return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state, null, System.nanoTime(), false);
}
Example 15: onePrimaryOnNode1And1ReplicaRecovering
import org.elasticsearch.cluster.node.DiscoveryNodes; // import the required package/class
private RoutingAllocation onePrimaryOnNode1And1ReplicaRecovering(AllocationDeciders deciders) {
    ShardRouting primaryShard = TestShardRouting.newShardRouting(shardId, node1.getId(), true, ShardRoutingState.STARTED);
    MetaData metaData = MetaData.builder()
            .put(IndexMetaData.builder(shardId.getIndexName()).settings(settings(Version.CURRENT))
                    .numberOfShards(1).numberOfReplicas(1)
                    .putInSyncAllocationIds(0, Sets.newHashSet(primaryShard.allocationId().getId())))
            .build();
    RoutingTable routingTable = RoutingTable.builder()
            .add(IndexRoutingTable.builder(shardId.getIndex())
                    .addIndexShard(new IndexShardRoutingTable.Builder(shardId)
                            .addShard(primaryShard)
                            .addShard(TestShardRouting.newShardRouting(shardId, node2.getId(), null, false,
                                    ShardRoutingState.INITIALIZING,
                                    new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null)))
                            .build())
            )
            .build();
    ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
            .metaData(metaData)
            .routingTable(routingTable)
            .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)).build();
    return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, ClusterInfo.EMPTY, System.nanoTime(), false);
}