This article collects typical usage examples of the Java class org.apache.hadoop.yarn.api.records.Resource. If you are wondering what the Resource class is for and how to use it, the curated code examples below may help.
The Resource class belongs to the org.apache.hadoop.yarn.api.records package. Fifteen code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
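Before diving into the examples, here is a minimal standalone sketch (the class name ResourceBasics is just for illustration) of how a Resource is usually created and inspected. It uses the stock two-argument factory; the three-argument Resource.newInstance(memory, vCores, gCores) that appears in several examples below seems to come from a GPU-enhanced YARN build rather than stock Apache Hadoop.
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

public class ResourceBasics {
  public static void main(String[] args) {
    // 1024 MB of memory and 1 virtual core.
    Resource capability = Resource.newInstance(1024, 1);

    System.out.println(capability.getMemory());        // 1024
    System.out.println(capability.getVirtualCores());  // 1

    // The Resources utility class provides arithmetic and comparison helpers
    // that are used throughout the examples below.
    Resource doubled = Resources.add(capability, capability); // 2048 MB, 2 vcores
    System.out.println(doubled);
    System.out.println(Resources.none()); // the shared zero-valued Resource
  }
}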
Example 1: generateAllocation
import org.apache.hadoop.yarn.api.records.Resource; //import the required package/class
private Map<ReservationInterval, ReservationRequest> generateAllocation(
    int startTime, int[] alloc, boolean isStep) {
  Map<ReservationInterval, ReservationRequest> req =
      new HashMap<ReservationInterval, ReservationRequest>();
  int numContainers = 0;
  for (int i = 0; i < alloc.length; i++) {
    if (isStep) {
      numContainers = alloc[i] + i;
    } else {
      numContainers = alloc[i];
    }
    ReservationRequest rr =
        ReservationRequest.newInstance(Resource.newInstance(1024, 1, 1),
            (numContainers));
    req.put(new ReservationInterval(startTime + i, startTime + i + 1), rr);
  }
  return req;
}
Example 2: initNodeIds
import org.apache.hadoop.yarn.api.records.Resource; //import the required package/class
private void initNodeIds(int nodeNums, int containerNums, Resource resource) {
  Random portRandom = new Random();
  Random ipRandom = new Random();
  for (int i = 0; i < nodeNums; i++) {
    NodeReport nodeReport = Records.newRecord(NodeReport.class);
    nodeReport.setNumContainers(containerNums);
    nodeReport.setNodeLabels(new HashSet<>());
    nodeReport.setNodeState(NodeState.RUNNING);
    nodeReport.setCapability(resource);
    nodeReport.setUsed(Resource.newInstance(0, 0));
    int port = 1024 + portRandom.nextInt(65535 - 1024 + 1);
    StringBuilder hostStr = new StringBuilder();
    for (int j = 0; j < 4; j++) {
      hostStr.append(".").append(ipRandom.nextInt(256));
    }
    NodeId nodeId = NodeId.newInstance(hostStr.substring(1), port);
    nodeReport.setNodeId(nodeId);
    nodeReport.setHttpAddress(nodeId.getHost());
    nodeReportList.add(nodeReport);
  }
}
Example 3: testNodeRegistrationSuccess
import org.apache.hadoop.yarn.api.records.Resource; //import the required package/class
@Test
public void testNodeRegistrationSuccess() throws Exception {
  writeToHostsFile("host2");
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH, hostFile
      .getAbsolutePath());
  rm = new MockRM(conf);
  rm.start();
  ResourceTrackerService resourceTrackerService = rm.getResourceTrackerService();
  RegisterNodeManagerRequest req = Records.newRecord(
      RegisterNodeManagerRequest.class);
  NodeId nodeId = NodeId.newInstance("host2", 1234);
  Resource capability = BuilderUtils.newResource(1024, 1, 1);
  req.setResource(capability);
  req.setNodeId(nodeId);
  req.setHttpPort(1234);
  req.setNMVersion(YarnVersionInfo.getVersion());
  // Register a valid node (host2 is listed in the include file).
  RegisterNodeManagerResponse response = resourceTrackerService.registerNodeManager(req);
  Assert.assertEquals(NodeAction.NORMAL, response.getNodeAction());
}
Example 4: assignReservedContainer
import org.apache.hadoop.yarn.api.records.Resource; //import the required package/class
/**
 * Called when this application already has an existing reservation on the
 * given node. Sees whether we can turn the reservation into an allocation.
 * Also checks whether the application needs the reservation anymore, and
 * releases it if not.
 *
 * @param node
 *     Node that the application has an existing reservation on
 */
public Resource assignReservedContainer(FSSchedulerNode node) {
  RMContainer rmContainer = node.getReservedContainer();
  Priority priority = rmContainer.getReservedPriority();
  // Make sure the application still needs requests at this priority
  if (getTotalRequiredResources(priority) == 0) {
    unreserve(priority, node);
    return Resources.none();
  }
  // Fail early if the reserved container won't fit.
  // Note that we have an assumption here that there's only one container size
  // per priority.
  if (!Resources.fitsIn(node.getReservedContainer().getReservedResource(),
      node.getAvailableResource())) {
    return Resources.none();
  }
  return assignContainer(node, true);
}
Example 5: updateCurrentResourceLimits
import org.apache.hadoop.yarn.api.records.Resource; //import the required package/class
private void updateCurrentResourceLimits(
    ResourceLimits currentResourceLimits, Resource clusterResource) {
  // TODO: revisit this for non-empty node labels once resource limits support
  // node labels.
  // Even though ParentQueue sets limits that respect the child's max queue
  // capacity, CapacityScheduler does not do this when allocating a reserved
  // container, so the limits need to be capped by the queue's max capacity here.
  this.cachedResourceLimitsForHeadroom =
      new ResourceLimits(currentResourceLimits.getLimit());
  Resource queueMaxResource =
      Resources.multiplyAndNormalizeDown(resourceCalculator, labelManager
          .getResourceByLabel(RMNodeLabelsManager.NO_LABEL, clusterResource),
          queueCapacities
              .getAbsoluteMaximumCapacity(RMNodeLabelsManager.NO_LABEL),
          minimumAllocation);
  this.cachedResourceLimitsForHeadroom.setLimit(Resources.min(resourceCalculator,
      clusterResource, queueMaxResource, currentResourceLimits.getLimit()));
}
Example 6: MockRMNodeImpl
import org.apache.hadoop.yarn.api.records.Resource; //import the required package/class
public MockRMNodeImpl(NodeId nodeId, String nodeAddr, String httpAddress,
    Resource perNode, String rackName, String healthReport,
    long lastHealthReportTime, int cmdPort, String hostName, NodeState state,
    Set<String> labels) {
  this.nodeId = nodeId;
  this.nodeAddr = nodeAddr;
  this.httpAddress = httpAddress;
  this.perNode = perNode;
  this.rackName = rackName;
  this.healthReport = healthReport;
  this.lastHealthReportTime = lastHealthReportTime;
  this.cmdPort = cmdPort;
  this.hostName = hostName;
  this.state = state;
  this.labels = labels;
}
Example 7: attachContainer
import org.apache.hadoop.yarn.api.records.Resource; //import the required package/class
@Override
public void attachContainer(Resource clusterResource,
    FiCaSchedulerApp application, RMContainer rmContainer) {
  if (application != null) {
    FiCaSchedulerNode node =
        scheduler.getNode(rmContainer.getContainer().getNodeId());
    super.allocateResource(clusterResource, rmContainer.getContainer()
        .getResource(), node.getLabels());
    LOG.info("movedContainer" + " queueMoveIn=" + getQueueName()
        + " usedCapacity=" + getUsedCapacity() + " absoluteUsedCapacity="
        + getAbsoluteUsedCapacity() + " used=" + queueUsage.getUsed() + " cluster="
        + clusterResource);
    // Inform the parent
    if (parent != null) {
      parent.attachContainer(clusterResource, application, rmContainer);
    }
  }
}
Example 8: createSimpleReservationSubmissionRequest
import org.apache.hadoop.yarn.api.records.Resource; //import the required package/class
private ReservationSubmissionRequest createSimpleReservationSubmissionRequest(
    int numRequests, int numContainers, long arrival, long deadline,
    long duration) {
  // create a request with a single atomic ask
  ReservationSubmissionRequest request =
      new ReservationSubmissionRequestPBImpl();
  ReservationDefinition rDef = new ReservationDefinitionPBImpl();
  rDef.setArrival(arrival);
  rDef.setDeadline(deadline);
  if (numRequests > 0) {
    ReservationRequests reqs = new ReservationRequestsPBImpl();
    rDef.setReservationRequests(reqs);
    if (numContainers > 0) {
      ReservationRequest r =
          ReservationRequest.newInstance(Resource.newInstance(1024, 1, 1),
              numContainers, 1, duration);
      reqs.setReservationResources(Collections.singletonList(r));
      reqs.setInterpreter(ReservationRequestInterpreter.R_ALL);
    }
  }
  request.setQueue(PLAN_NAME);
  request.setReservationDefinition(rDef);
  return request;
}
Example 9: getQueueResource
import org.apache.hadoop.yarn.api.records.Resource; //import the required package/class
public Resource getQueueResource(String queueName, Set<String> queueLabels,
    Resource clusterResource) {
  try {
    readLock.lock();
    if (queueLabels.contains(ANY)) {
      return clusterResource;
    }
    Queue q = queueCollections.get(queueName);
    if (null == q) {
      return Resources.none();
    }
    return q.resource;
  } finally {
    readLock.unlock();
  }
}
Example 10: refreshMaximumAllocation
import org.apache.hadoop.yarn.api.records.Resource; //import the required package/class
protected void refreshMaximumAllocation(Resource newMaxAlloc) {
  maxAllocWriteLock.lock();
  try {
    configuredMaximumAllocation = Resources.clone(newMaxAlloc);
    int maxMemory = newMaxAlloc.getMemory();
    if (maxNodeMemory != -1) {
      maxMemory = Math.min(maxMemory, maxNodeMemory);
    }
    int maxVcores = newMaxAlloc.getVirtualCores();
    if (maxNodeVCores != -1) {
      maxVcores = Math.min(maxVcores, maxNodeVCores);
    }
    int maxGcores = newMaxAlloc.getGpuCores();
    if (maxNodeGCores != -1) {
      maxGcores = Math.min(maxGcores, maxNodeGCores);
    }
    maximumAllocation = Resources.createResource(maxMemory, maxVcores, maxGcores);
  } finally {
    maxAllocWriteLock.unlock();
  }
}
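To see what the capping does, here is a hedged walk-through with assumed numbers (not taken from the article): if the configured maximum allocation is 8192 MB / 8 vcores but the largest node registered so far only offers 4096 MB / 4 vcores, the effective maximum drops to the node size.
import org.apache.hadoop.yarn.api.records.Resource;

// Assumed values: configured max 8192 MB / 8 vcores, largest node 4096 MB / 4 vcores.
int maxMemory = Math.min(8192, 4096); // 4096
int maxVcores = Math.min(8, 4);       // 4
Resource effectiveMax = Resource.newInstance(maxMemory, maxVcores);
// effectiveMax is what ends up as maximumAllocation; GPU cores are capped the same way
// in the GPU-enhanced build shown above.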
Example 11: getUserAMResourceLimitPerPartition
import org.apache.hadoop.yarn.api.records.Resource; //import the required package/class
public synchronized Resource getUserAMResourceLimitPerPartition(
    String nodePartition) {
  /*
   * The user AM resource limit is based on the same approach as the user
   * limit (as it should represent a subset of that). This means that it uses
   * the absolute queue capacity (per partition) instead of the max, and is
   * modified by the user limit and the user limit factor, just like the user
   * limit.
   */
  float effectiveUserLimit = Math.max(userLimit / 100.0f,
      1.0f / Math.max(getActiveUsersManager().getNumActiveUsers(), 1));
  Resource queuePartitionResource = Resources.multiplyAndNormalizeUp(
      resourceCalculator,
      labelManager.getResourceByLabel(nodePartition, lastClusterResource),
      queueCapacities.getAbsoluteCapacity(nodePartition), minimumAllocation);
  return Resources.multiplyAndNormalizeUp(resourceCalculator,
      queuePartitionResource,
      queueCapacities.getMaxAMResourcePercentage(nodePartition)
          * effectiveUserLimit * userLimitFactor, minimumAllocation);
}
Example 12: testBlocks
import org.apache.hadoop.yarn.api.records.Resource; //import the required package/class
@Test
public void testBlocks() {
  ReservationId reservationID =
      ReservationId.newInstance(rand.nextLong(), rand.nextLong());
  int[] alloc = { 10, 10, 10, 10, 10, 10 };
  int start = 100;
  ReservationDefinition rDef =
      createSimpleReservationDefinition(start, start + alloc.length + 1,
          alloc.length);
  Map<ReservationInterval, ReservationRequest> allocations =
      generateAllocation(start, alloc, false, false);
  ReservationAllocation rAllocation =
      new InMemoryReservationAllocation(reservationID, rDef, user, planName,
          start, start + alloc.length + 1, allocations, resCalc, minAlloc);
  doAssertions(rAllocation, reservationID, rDef, allocations, start, alloc);
  Assert.assertFalse(rAllocation.containsGangs());
  for (int i = 0; i < alloc.length; i++) {
    Assert.assertEquals(
        Resource.newInstance(1024 * (alloc[i]), (alloc[i]), (alloc[i])),
        rAllocation.getResourcesAtTime(start + i));
  }
}
Example 13: newInstance
import org.apache.hadoop.yarn.api.records.Resource; //import the required package/class
@Public
@Unstable
public static ContainerHistoryData newInstance(ContainerId containerId,
    Resource allocatedResource, NodeId assignedNode, Priority priority,
    long startTime, long finishTime, String diagnosticsInfo,
    int containerExitCode, ContainerState containerState) {
  ContainerHistoryData containerHD = new ContainerHistoryData();
  containerHD.setContainerId(containerId);
  containerHD.setAllocatedResource(allocatedResource);
  containerHD.setAssignedNode(assignedNode);
  containerHD.setPriority(priority);
  containerHD.setStartTime(startTime);
  containerHD.setFinishTime(finishTime);
  containerHD.setDiagnosticsInfo(diagnosticsInfo);
  containerHD.setContainerExitStatus(containerExitCode);
  containerHD.setContainerState(containerState);
  return containerHD;
}
Example 14: newNodes
import org.apache.hadoop.yarn.api.records.Resource; //import the required package/class
public static List<RMNode> newNodes(int racks, int nodesPerRack,
    Resource perNode) {
  List<RMNode> list = Lists.newArrayList();
  for (int i = 0; i < racks; ++i) {
    for (int j = 0; j < nodesPerRack; ++j) {
      if (j == (nodesPerRack - 1)) {
        // One unhealthy node per rack.
        list.add(nodeInfo(i, perNode, NodeState.UNHEALTHY));
      }
      if (j == 0) {
        // One node with a label
        list.add(nodeInfo(i, perNode, NodeState.RUNNING, ImmutableSet.of("x")));
      } else {
        list.add(newNodeInfo(i, perNode));
      }
    }
  }
  return list;
}
Example 15: getApplicationReport
import org.apache.hadoop.yarn.api.records.Resource; //import the required package/class
private ApplicationReport getApplicationReport(
    YarnApplicationState yarnApplicationState,
    FinalApplicationStatus finalApplicationStatus) {
  ApplicationReport appReport = Mockito.mock(ApplicationReport.class);
  ApplicationResourceUsageReport appResources = Mockito
      .mock(ApplicationResourceUsageReport.class);
  Mockito.when(appReport.getApplicationId()).thenReturn(
      ApplicationId.newInstance(0, 0));
  Mockito.when(appResources.getNeededResources()).thenReturn(
      Records.newRecord(Resource.class));
  Mockito.when(appResources.getReservedResources()).thenReturn(
      Records.newRecord(Resource.class));
  Mockito.when(appResources.getUsedResources()).thenReturn(
      Records.newRecord(Resource.class));
  Mockito.when(appReport.getApplicationResourceUsageReport()).thenReturn(
      appResources);
  Mockito.when(appReport.getYarnApplicationState()).thenReturn(
      yarnApplicationState);
  Mockito.when(appReport.getFinalApplicationStatus()).thenReturn(
      finalApplicationStatus);
  return appReport;
}