This article collects typical usage examples of the Java class org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode. If you are unsure what the RMNode class is for, how to use it, or where to find working examples, the curated code examples below should help.
The RMNode class belongs to the org.apache.hadoop.yarn.server.resourcemanager.rmnode package. Fifteen RMNode code examples are shown below, sorted by popularity by default.
Example 1: testResourceUpdateOnRebootedNode
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; // import the dependent package/class
@Test
public void testResourceUpdateOnRebootedNode() {
  RMNodeImpl node = getRebootedNode();
  Resource oldCapacity = node.getTotalCapability();
  assertEquals("Memory resource is not match.", oldCapacity.getMemory(), 4096);
  assertEquals("CPU resource is not match.", oldCapacity.getVirtualCores(), 4);
  node.handle(new RMNodeResourceUpdateEvent(node.getNodeID(),
      ResourceOption.newInstance(Resource.newInstance(2048, 2, 2),
          RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT)));
  Resource newCapacity = node.getTotalCapability();
  assertEquals("Memory resource is not match.", newCapacity.getMemory(), 2048);
  assertEquals("CPU resource is not match.", newCapacity.getVirtualCores(), 2);
  assertEquals("GPU resource is not match.", newCapacity.getGpuCores(), 2);
  Assert.assertEquals(NodeState.REBOOTED, node.getState());
}
Example 2: getClusterNodes
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; // import the dependent package/class
@Override
public GetClusterNodesResponse getClusterNodes(GetClusterNodesRequest request)
    throws YarnException {
  GetClusterNodesResponse response =
      recordFactory.newRecordInstance(GetClusterNodesResponse.class);
  EnumSet<NodeState> nodeStates = request.getNodeStates();
  if (nodeStates == null || nodeStates.isEmpty()) {
    nodeStates = EnumSet.allOf(NodeState.class);
  }
  Collection<RMNode> nodes = RMServerUtils.queryRMNodes(rmContext,
      nodeStates);
  List<NodeReport> nodeReports = new ArrayList<NodeReport>(nodes.size());
  for (RMNode nodeInfo : nodes) {
    nodeReports.add(createNodeReports(nodeInfo));
  }
  response.setNodeReports(nodeReports);
  return response;
}
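The getClusterNodes handler above is the ResourceManager-side half of the node-listing RPC. As a minimal client-side sketch (not part of the listed code; the RM address is assumed to come from whatever yarn-site.xml is on the classpath), the same information can be pulled through the public YarnClient API:

import java.util.List;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ListClusterNodes {
  public static void main(String[] args) throws Exception {
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new YarnConfiguration()); // reads the RM address from yarn-site.xml
    yarnClient.start();
    try {
      // Ask only for RUNNING nodes; this corresponds to the nodeStates filter above.
      List<NodeReport> reports = yarnClient.getNodeReports(NodeState.RUNNING);
      for (NodeReport report : reports) {
        System.out.println(report.getNodeId() + " state=" + report.getNodeState()
            + " capability=" + report.getCapability());
      }
    } finally {
      yarnClient.stop();
    }
  }
}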
Example 3: createNodeReports
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; // import the dependent package/class
private NodeReport createNodeReports(RMNode rmNode) {
  SchedulerNodeReport schedulerNodeReport =
      scheduler.getNodeReport(rmNode.getNodeID());
  Resource used = BuilderUtils.newResource(0, 0, 0);
  int numContainers = 0;
  if (schedulerNodeReport != null) {
    used = schedulerNodeReport.getUsedResource();
    numContainers = schedulerNodeReport.getNumContainers();
  }
  NodeReport report =
      BuilderUtils.newNodeReport(rmNode.getNodeID(), rmNode.getState(),
          rmNode.getHttpAddress(), rmNode.getRackName(), used,
          rmNode.getTotalCapability(), numContainers,
          rmNode.getHealthReport(), rmNode.getLastHealthReportTime(),
          rmNode.getNodeLabels());
  return report;
}
Example 4: getMockNode
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; // import the dependent package/class
public static FiCaSchedulerNode getMockNode(
    String host, String rack, int port, int capability) {
  NodeId nodeId = mock(NodeId.class);
  when(nodeId.getHost()).thenReturn(host);
  when(nodeId.getPort()).thenReturn(port);
  RMNode rmNode = mock(RMNode.class);
  when(rmNode.getNodeID()).thenReturn(nodeId);
  when(rmNode.getTotalCapability()).thenReturn(
      Resources.createResource(capability, 1));
  when(rmNode.getNodeAddress()).thenReturn(host + ":" + port);
  when(rmNode.getHostName()).thenReturn(host);
  when(rmNode.getRackName()).thenReturn(rack);
  FiCaSchedulerNode node = spy(new FiCaSchedulerNode(rmNode, false));
  LOG.info("node = " + host + " avail=" + node.getAvailableResource());
  return node;
}
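As a rough usage sketch (hypothetical, not taken from the listed test code): the helper mocks just enough of RMNode for the scheduler, with the capability argument interpreted as memory and the vcore count fixed at 1 by Resources.createResource(capability, 1).

// Hypothetical call site inside a scheduler unit test.
FiCaSchedulerNode node = getMockNode("host0", "/rack0", 1234, 8 * 1024);
// A freshly wrapped node should still have its full capability available.
assertEquals(8 * 1024, node.getAvailableResource().getMemory());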
Example 5: waitForNodesRunning
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; // import the dependent package/class
private void waitForNodesRunning() throws InterruptedException {
  long startTimeMS = System.currentTimeMillis();
  while (true) {
    int numRunningNodes = 0;
    for (RMNode node : rm.getRMContext().getRMNodes().values()) {
      if (node.getState() == NodeState.RUNNING) {
        numRunningNodes++;
      }
    }
    if (numRunningNodes == numNMs) {
      break;
    }
    LOG.info(MessageFormat.format("SLSRunner is waiting for all " +
        "nodes RUNNING. {0} of {1} NMs initialized.",
        numRunningNodes, numNMs));
    Thread.sleep(1000);
  }
  LOG.info(MessageFormat.format("SLSRunner takes {0} ms to launch all nodes.",
      (System.currentTimeMillis() - startTimeMS)));
}
Example 6: testResourceUpdateOnNewNode
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; // import the dependent package/class
@Test
public void testResourceUpdateOnNewNode() {
  RMNodeImpl node = getNewNode(Resource.newInstance(4096, 4, 4));
  Resource oldCapacity = node.getTotalCapability();
  assertEquals("Memory resource is not match.", oldCapacity.getMemory(), 4096);
  assertEquals("CPU resource is not match.", oldCapacity.getVirtualCores(), 4);
  node.handle(new RMNodeResourceUpdateEvent(node.getNodeID(),
      ResourceOption.newInstance(Resource.newInstance(2048, 2, 2),
          RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT)));
  Resource newCapacity = node.getTotalCapability();
  assertEquals("Memory resource is not match.", newCapacity.getMemory(), 2048);
  assertEquals("CPU resource is not match.", newCapacity.getVirtualCores(), 2);
  assertEquals("GPU resource is not match.", newCapacity.getGpuCores(), 2);
  Assert.assertEquals(NodeState.NEW, node.getState());
}
Example 7: addNode
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; // import the dependent package/class
private synchronized void addNode(RMNode nodeManager) {
  FiCaSchedulerNode schedulerNode = new FiCaSchedulerNode(nodeManager,
      usePortForNodeName, nodeManager.getNodeLabels());
  this.nodes.put(nodeManager.getNodeID(), schedulerNode);
  Resources.addTo(clusterResource, nodeManager.getTotalCapability());
  // update this node to node label manager
  if (labelManager != null) {
    labelManager.activateNode(nodeManager.getNodeID(),
        nodeManager.getTotalCapability());
  }
  root.updateClusterResource(clusterResource, new ResourceLimits(
      clusterResource));
  int numNodes = numNodeManagers.incrementAndGet();
  updateMaximumAllocation(schedulerNode, true);
  LOG.info("Added node " + nodeManager.getNodeAddress() +
      " clusterResource: " + clusterResource);
  if (scheduleAsynchronously && numNodes == 1) {
    asyncSchedulerThread.beginSchedule();
  }
}
Example 8: testNoMoreCpuOnNode
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; // import the dependent package/class
@Test
public void testNoMoreCpuOnNode() throws IOException {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());
  RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(2048, 1, 1),
      1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);
  ApplicationAttemptId attId = createSchedulingRequest(1024, 1, 1, "default",
      "user1", 2);
  FSAppAttempt app = scheduler.getSchedulerApp(attId);
  scheduler.update();
  NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node1);
  scheduler.handle(updateEvent);
  assertEquals(1, app.getLiveContainers().size());
  scheduler.handle(updateEvent);
  assertEquals(1, app.getLiveContainers().size());
}
Example 9: setupFairScheduler
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; // import the dependent package/class
public static FairScheduler setupFairScheduler(
    ReservationSystemTestUtil testUtil,
    RMContext rmContext, Configuration conf, int numContainers)
    throws IOException {
  FairScheduler scheduler = new FairScheduler();
  scheduler.setRMContext(rmContext);
  when(rmContext.getScheduler()).thenReturn(scheduler);
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, rmContext);
  Resource resource = testUtil.calculateClusterResource(numContainers);
  RMNode node1 = MockNodes.newNodeInfo(1, resource, 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);
  return scheduler;
}
Example 10: newNodes
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; // import the dependent package/class
public static List<RMNode> newNodes(int racks, int nodesPerRack,
    Resource perNode) {
  List<RMNode> list = Lists.newArrayList();
  for (int i = 0; i < racks; ++i) {
    for (int j = 0; j < nodesPerRack; ++j) {
      if (j == (nodesPerRack - 1)) {
        // One unhealthy node per rack.
        list.add(nodeInfo(i, perNode, NodeState.UNHEALTHY));
      }
      if (j == 0) {
        // One node with label
        list.add(nodeInfo(i, perNode, NodeState.RUNNING, ImmutableSet.of("x")));
      } else {
        list.add(newNodeInfo(i, perNode));
      }
    }
  }
  return list;
}
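A hedged usage sketch follows (the rack/node counts and resource size are arbitrary, and the helper is assumed to live in the MockNodes test utility that the other examples reference). Note that the UNHEALTHY branch has no else, so the last slot of a multi-node rack adds an UNHEALTHY entry and then falls through to the regular add as well.

// Hypothetical: two racks of three mock nodes, 4 GB / 4 vcores per node.
List<RMNode> nodes = MockNodes.newNodes(2, 3, Resources.createResource(4096, 4));
for (RMNode node : nodes) {
  System.out.println(node.getNodeAddress() + " rack=" + node.getRackName()
      + " state=" + node.getState());
}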
Example 11: buildRMNode
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; // import the dependent package/class
private static RMNode buildRMNode(int rack, final Resource perNode,
    NodeState state, String httpAddr, int hostnum, String hostName, int port,
    Set<String> labels) {
  final String rackName = "rack" + rack;
  final int nid = hostnum;
  final String nodeAddr = hostName + ":" + nid;
  if (hostName == null) {
    hostName = "host" + nid;
  }
  final NodeId nodeID = NodeId.newInstance(hostName, port);
  final String httpAddress = httpAddr;
  String healthReport = (state == NodeState.UNHEALTHY) ? null : "HealthyMe";
  return new MockRMNodeImpl(nodeID, nodeAddr, httpAddress, perNode,
      rackName, healthReport, 0, nid, hostName, state, labels);
}
Example 12: testAggregateCapacityTracking
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; // import the dependent package/class
@Test
public void testAggregateCapacityTracking() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());
  // Add a node
  RMNode node1 =
      MockNodes
          .newNodeInfo(1, Resources.createResource(1024), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);
  assertEquals(1024, scheduler.getClusterResource().getMemory());
  // Add another node
  RMNode node2 =
      MockNodes.newNodeInfo(1, Resources.createResource(512), 2, "127.0.0.2");
  NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2);
  scheduler.handle(nodeEvent2);
  assertEquals(1536, scheduler.getClusterResource().getMemory());
  // Remove the first node
  NodeRemovedSchedulerEvent nodeEvent3 = new NodeRemovedSchedulerEvent(node1);
  scheduler.handle(nodeEvent3);
  assertEquals(512, scheduler.getClusterResource().getMemory());
}
Example 13: testResourceUpdateOnRunningNode
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; // import the dependent package/class
@Test
public void testResourceUpdateOnRunningNode() {
  RMNodeImpl node = getRunningNode();
  Resource oldCapacity = node.getTotalCapability();
  assertEquals("Memory resource is not match.", oldCapacity.getMemory(), 4096);
  assertEquals("CPU resource is not match.", oldCapacity.getVirtualCores(), 4);
  node.handle(new RMNodeResourceUpdateEvent(node.getNodeID(),
      ResourceOption.newInstance(Resource.newInstance(2048, 2, 2),
          RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT)));
  Resource newCapacity = node.getTotalCapability();
  assertEquals("Memory resource is not match.", newCapacity.getMemory(), 2048);
  assertEquals("CPU resource is not match.", newCapacity.getVirtualCores(), 2);
  assertEquals("GPU resource is not match.", newCapacity.getGpuCores(), 2);
  Assert.assertEquals(NodeState.RUNNING, node.getState());
  Assert.assertNotNull(nodesListManagerEvent);
  Assert.assertEquals(NodesListManagerEventType.NODE_USABLE,
      nodesListManagerEvent.getType());
}
Example 14: getNodes
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; // import the dependent package/class
/**
 * Returns all nodes in the cluster. If the states param is given, returns
 * all nodes that are in the comma-separated list of states.
 */
@GET
@Path("/nodes")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public NodesInfo getNodes(@QueryParam("states") String states) {
  init();
  ResourceScheduler sched = this.rm.getResourceScheduler();
  if (sched == null) {
    throw new NotFoundException("Null ResourceScheduler instance");
  }
  EnumSet<NodeState> acceptedStates;
  if (states == null) {
    acceptedStates = EnumSet.allOf(NodeState.class);
  } else {
    acceptedStates = EnumSet.noneOf(NodeState.class);
    for (String stateStr : states.split(",")) {
      acceptedStates.add(
          NodeState.valueOf(StringUtils.toUpperCase(stateStr)));
    }
  }
  Collection<RMNode> rmNodes = RMServerUtils.queryRMNodes(this.rm.getRMContext(),
      acceptedStates);
  NodesInfo nodesInfo = new NodesInfo();
  for (RMNode rmNode : rmNodes) {
    NodeInfo nodeInfo = new NodeInfo(rmNode, sched);
    if (EnumSet.of(NodeState.LOST, NodeState.DECOMMISSIONED, NodeState.REBOOTED)
        .contains(rmNode.getState())) {
      nodeInfo.setNodeHTTPAddress(EMPTY);
    }
    nodesInfo.add(nodeInfo);
  }
  return nodesInfo;
}
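The getNodes() handler above backs the ResourceManager's node-list REST endpoint. A minimal client sketch, assuming the default RM web address http://localhost:8088 (adjust host and port for a real cluster), fetches the same node list over HTTP as JSON:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class FetchClusterNodes {
  public static void main(String[] args) throws Exception {
    // /ws/v1/cluster/nodes is served by getNodes(); the optional "states"
    // query parameter maps to its @QueryParam argument.
    URL url = new URL("http://localhost:8088/ws/v1/cluster/nodes?states=RUNNING");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");
    try (BufferedReader reader = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = reader.readLine()) != null) {
        System.out.println(line);
      }
    } finally {
      conn.disconnect();
    }
  }
}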
Example 15: getNode
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; // import the dependent package/class
@GET
@Path("/nodes/{nodeId}")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public NodeInfo getNode(@PathParam("nodeId") String nodeId) {
  init();
  if (nodeId == null || nodeId.isEmpty()) {
    throw new NotFoundException("nodeId, " + nodeId + ", is empty or null");
  }
  ResourceScheduler sched = this.rm.getResourceScheduler();
  if (sched == null) {
    throw new NotFoundException("Null ResourceScheduler instance");
  }
  NodeId nid = ConverterUtils.toNodeId(nodeId);
  RMNode ni = this.rm.getRMContext().getRMNodes().get(nid);
  boolean isInactive = false;
  if (ni == null) {
    ni = this.rm.getRMContext().getInactiveRMNodes().get(nid.getHost());
    if (ni == null) {
      throw new NotFoundException("nodeId, " + nodeId + ", is not found");
    }
    isInactive = true;
  }
  NodeInfo nodeInfo = new NodeInfo(ni, sched);
  if (isInactive) {
    nodeInfo.setNodeHTTPAddress(EMPTY);
  }
  return nodeInfo;
}
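Usage note: this single-node variant is typically reachable at /ws/v1/cluster/nodes/{nodeId}, where nodeId is the host:port string parsed by ConverterUtils.toNodeId. Active nodes are looked up by full NodeId in getRMNodes(), while inactive ones fall back to a host-keyed lookup in getInactiveRMNodes() and have their HTTP address blanked, mirroring the LOST/DECOMMISSIONED/REBOOTED handling in getNodes() above.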