This article collects typical usage examples of the Java class org.apache.hadoop.yarn.api.records.Container. If you are wondering what the Container class does and how it is used in practice, the curated examples below should help. The Container class belongs to the org.apache.hadoop.yarn.api.records package; 15 code examples are shown below, sorted by popularity by default.
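Before the examples: most of the snippets below read an existing Container through its accessors rather than build one, so here is a minimal, hypothetical sketch (not taken from any of the projects referenced below) that prints the fields the examples rely on. The class name ContainerInfoPrinter is a placeholder.

import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.Resource;

public class ContainerInfoPrinter {
  // Prints the key fields of an allocated container; "container" would normally
  // come from an AllocateResponse or an AMRMClientAsync callback (see Examples 7-9).
  public static void describe(Container container) {
    Resource resource = container.getResource();
    System.out.println("container id   : " + container.getId());
    System.out.println("node id        : " + container.getNodeId());
    System.out.println("node http addr : " + container.getNodeHttpAddress());
    // getMemory() is the Hadoop 2.x accessor; newer releases prefer getMemorySize()
    System.out.println("memory (MB)    : " + resource.getMemory());
    System.out.println("virtual cores  : " + resource.getVirtualCores());
  }
}
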
Example 1: newContainerInstance
import org.apache.hadoop.yarn.api.records.Container; // import the required package/class
// Manually builds a Container record; the container token's identifier is simply the container id bytes.
private Container newContainerInstance(int id, Priority priority,
    Resource capability, String hostName) throws IOException {
  NodeId nodeId = NodeId.newInstance(hostName, 0);
  Container container = Records.newRecord(Container.class);
  container.setNodeId(nodeId);
  container.setPriority(priority);
  container.setResource(capability);
  container.setId(ContainerId.newContainerId(appAttemptId, ++containerId));
  Token token = Token.newInstance(nodeId.toString().getBytes(),
      nodeId.toString(), nodeId.toString().getBytes(), nodeId.toString());
  byte[] bytes = container.getId().toString().getBytes();
  ByteBuffer buffer = ByteBuffer.allocate(bytes.length);
  buffer.put(bytes);
  token.setIdentifier(buffer);
  container.setContainerToken(token);
  container.setNodeHttpAddress(hostName + ":0");
  return container;
}

Example 2: containerAssigned
import org.apache.hadoop.yarn.api.records.Container; // import the required package/class
@SuppressWarnings("unchecked")
private void containerAssigned(Container allocated,
    ContainerRequest assigned) {
  // Update resource requests
  decContainerReq(assigned);
  // send the container-assigned event to task attempt
  eventHandler.handle(new TaskAttemptContainerAssignedEvent(
      assigned.attemptID, allocated, applicationACLs));
  assignedRequests.add(allocated, assigned.attemptID);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Assigned container (" + allocated + ")"
        + " to task " + assigned.attemptID + " on node "
        + allocated.getNodeId().toString());
  }
}

Example 3: testSuccessfulFinishingToFinished
import org.apache.hadoop.yarn.api.records.Container; // import the required package/class
@Test
public void testSuccessfulFinishingToFinished() {
  Container amContainer = allocateApplicationAttempt();
  launchApplicationAttempt(amContainer);
  runApplicationAttempt(amContainer, "host", 8042, "oldtrackingurl", false);
  FinalApplicationStatus finalStatus = FinalApplicationStatus.SUCCEEDED;
  String trackingUrl = "mytrackingurl";
  String diagnostics = "Successful";
  unregisterApplicationAttempt(amContainer, finalStatus, trackingUrl,
      diagnostics);
  NodeId anyNodeId = NodeId.newInstance("host", 1234);
  // report the AM container as completed so the attempt moves FINISHING -> FINISHED
  applicationAttempt.handle(
      new RMAppAttemptContainerFinishedEvent(
          applicationAttempt.getAppAttemptId(),
          BuilderUtils.newContainerStatus(amContainer.getId(),
              ContainerState.COMPLETE, "", 0), anyNodeId));
  testAppAttemptFinishedState(amContainer, finalStatus, trackingUrl,
      diagnostics, 0, false);
}

Example 4: associateTaskWithContainer
import org.apache.hadoop.yarn.api.records.Container; // import the required package/class
// Copies the container's identity, network and GPU information into the task status record.
private void associateTaskWithContainer(TaskStatusLocator locator, Container container) throws Exception {
  TaskStatus taskStatus = getTaskStatus(locator);
  String containerId = container.getId().toString();
  taskStatus.setContainerId(containerId);
  taskStatus.setContainerHost(container.getNodeId().getHost());
  taskStatus.setContainerIp(
      DnsClient.resolveExternalIPv4Address(taskStatus.getContainerHost()));
  taskStatus.setContainerLogHttpAddress(
      HadoopUtils.getContainerLogHttpAddress(container.getNodeHttpAddress(), containerId, conf.getAmUser()));
  taskStatus.setContainerConnectionLostCount(0);
  taskStatus.setContainerGpus(
      ResourceDescriptor.fromResource(container.getResource()).getGpuAttribute());
  taskStatusesesChanged.put(locator.getTaskRoleName(), true);
}

Example 5: addContainersFromPreviousAttemptToProto
import org.apache.hadoop.yarn.api.records.Container; // import the required package/class
private void addContainersFromPreviousAttemptToProto() {
  maybeInitBuilder();
  builder.clearContainersFromPreviousAttempts();
  List<ContainerProto> list = new ArrayList<ContainerProto>();
  for (Container c : containersFromPreviousAttempts) {
    list.add(convertToProtoFormat(c));
  }
  builder.addAllContainersFromPreviousAttempts(list);
}

Example 6: testAMCrashAtAllocated
import org.apache.hadoop.yarn.api.records.Container; // import the required package/class
@Test
public void testAMCrashAtAllocated() {
  Container amContainer = allocateApplicationAttempt();
  String containerDiagMsg = "some error";
  int exitCode = 123;
  ContainerStatus cs =
      BuilderUtils.newContainerStatus(amContainer.getId(),
          ContainerState.COMPLETE, containerDiagMsg, exitCode);
  NodeId anyNodeId = NodeId.newInstance("host", 1234);
  applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(
      applicationAttempt.getAppAttemptId(), cs, anyNodeId));
  assertEquals(YarnApplicationAttemptState.ALLOCATED,
      applicationAttempt.createApplicationAttemptState());
  sendAttemptUpdateSavedEvent(applicationAttempt);
  assertEquals(RMAppAttemptState.FAILED,
      applicationAttempt.getAppAttemptState());
  verifyTokenCount(applicationAttempt.getAppAttemptId(), 1);
  verifyApplicationAttemptFinished(RMAppAttemptState.FAILED);
  boolean shouldCheckURL = (applicationAttempt.getTrackingUrl() != null);
  verifyAMCrashAtAllocatedDiagnosticInfo(applicationAttempt.getDiagnostics(),
      exitCode, shouldCheckURL);
}

Example 7: newAllocateResponse
import org.apache.hadoop.yarn.api.records.Container; // import the required package/class
public static AllocateResponse newAllocateResponse(int responseId,
    List<ContainerStatus> completedContainers,
    List<Container> allocatedContainers, List<NodeReport> updatedNodes,
    Resource availResources, AMCommand command, int numClusterNodes,
    PreemptionMessage preempt) {
  AllocateResponse response = recordFactory
      .newRecordInstance(AllocateResponse.class);
  response.setNumClusterNodes(numClusterNodes);
  response.setResponseId(responseId);
  response.setCompletedContainersStatuses(completedContainers);
  response.setAllocatedContainers(allocatedContainers);
  response.setUpdatedNodes(updatedNodes);
  response.setAvailableResources(availResources);
  response.setAMCommand(command);
  response.setPreemptionMessage(preempt);
  return response;
}

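For context, here is a hedged sketch of how an application master typically consumes the response built above. The getters mirror the setters in Example 7; the helper class name AllocateResponseConsumer is a placeholder and not part of the original code.

import java.util.List;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerStatus;

class AllocateResponseConsumer {
  // Reads back the fields that newAllocateResponse() populated above.
  static void handle(AllocateResponse response) {
    List<Container> granted = response.getAllocatedContainers();
    for (Container c : granted) {
      // launch work on the newly granted container (project-specific)
    }
    List<ContainerStatus> finished = response.getCompletedContainersStatuses();
    for (ContainerStatus status : finished) {
      // account for completed containers via status.getExitStatus() / getDiagnostics()
    }
    if (response.getAMCommand() != null) {
      // the RM may be asking the AM to resync or shut down
    }
  }
}
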
Example 8: onContainersAllocated
import org.apache.hadoop.yarn.api.records.Container; // import the required package/class
/**
 * handle method for AMRMClientAsync.CallbackHandler container allocation
 *
 * @param containers newly allocated containers
 */
private synchronized void onContainersAllocated(List<Container> containers) {
  if (this.startAbort) {
    this.freeUnusedContainers(containers);
    return;
  }
  Collection<Container> freelist = new java.util.LinkedList<Container>();
  for (Container c : containers) {
    if (blackList.contains(c.getNodeHttpAddress())) {
      // skip containers allocated on blacklisted nodes
      //launchDummyTask(c);
      continue;
    }
    TaskRecord task;
    task = pendingTasks.poll();
    if (task == null) {
      // no pending task left for this container; release it afterwards
      freelist.add(c);
      continue;
    }
    this.launchTask(c, task);
  }
  this.freeUnusedContainers(freelist);
}

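The method above is a private helper; the public entry point in the YARN client API is AMRMClientAsync.CallbackHandler#onContainersAllocated. Below is a minimal, hypothetical wiring sketch, assuming "handler" delegates to logic like Example 8; the class name AmClientBootstrap and the placeholder host/port/tracking-URL values are not part of the original project.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

class AmClientBootstrap {
  // "handler" is assumed to implement AMRMClientAsync.CallbackHandler and to
  // forward onContainersAllocated(...) to code like the example above.
  static AMRMClientAsync<ContainerRequest> start(AMRMClientAsync.CallbackHandler handler)
      throws Exception {
    Configuration conf = new YarnConfiguration();
    AMRMClientAsync<ContainerRequest> client =
        AMRMClientAsync.createAMRMClientAsync(1000, handler); // heartbeat every 1000 ms
    client.init(conf);
    client.start();
    // placeholder host/port/tracking URL; a real AM registers its RPC endpoint here
    client.registerApplicationMaster("", 0, "");
    return client;
  }
}
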
Example 9: onContainerStarted
import org.apache.hadoop.yarn.api.records.Container; // import the required package/class
@Override
public void onContainerStarted(ContainerId containerId,
    Map<String, ByteBuffer> allServiceResponse) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Succeeded to start Container " + containerId);
  }
  Container container = containers.get(containerId);
  if (container != null) {
    applicationMaster.nmClientAsync.getContainerStatusAsync(containerId, container.getNodeId());
  }
  // also guard against a null container to avoid an NPE when publishing the timeline event
  if (container != null && applicationMaster.timelineClient != null) {
    ApplicationMaster.publishContainerStartEvent(
        applicationMaster.timelineClient, container,
        applicationMaster.domainId, applicationMaster.appSubmitterUgi);
  }
}

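A similar callback pattern applies on the node-manager side: the handler implementing onContainerStarted above must be registered with an NMClientAsync instance. A minimal sketch, assuming Hadoop 2.x (where createNMClientAsync accepts a plain CallbackHandler; later releases prefer AbstractCallbackHandler); the class name NmClientBootstrap is a placeholder.

import org.apache.hadoop.yarn.client.api.async.NMClientAsync;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

class NmClientBootstrap {
  // "handler" is assumed to implement NMClientAsync.CallbackHandler
  // (onContainerStarted, onContainerStatusReceived, onContainerStopped, ...).
  static NMClientAsync start(NMClientAsync.CallbackHandler handler) {
    NMClientAsync nmClientAsync = NMClientAsync.createNMClientAsync(handler);
    nmClientAsync.init(new YarnConfiguration());
    nmClientAsync.start();
    return nmClientAsync;
  }
}
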
Example 10: launchDummyTask
import org.apache.hadoop.yarn.api.records.Container; // import the required package/class
private synchronized void launchDummyTask(Container container) {
  ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
  String new_command = "./launcher.py";
  String cmd = new_command + " 1>"
      + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
      + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR
      + "/stderr";
  ctx.setCommands(Collections.singletonList(cmd));
  ctx.setTokens(setupTokens());
  ctx.setLocalResources(this.workerResources);
  // the method itself is synchronized, so no extra lock is needed around this call
  this.nmClient.startContainerAsync(container, ctx);
}

Example 11: testAppAttemptRunningState
import org.apache.hadoop.yarn.api.records.Container; // import the required package/class
/**
 * {@link RMAppAttemptState#RUNNING}
 */
private void testAppAttemptRunningState(Container container,
    String host, int rpcPort, String trackingUrl, boolean unmanagedAM) {
  assertEquals(RMAppAttemptState.RUNNING,
      applicationAttempt.getAppAttemptState());
  assertEquals(container, applicationAttempt.getMasterContainer());
  assertEquals(host, applicationAttempt.getHost());
  assertEquals(rpcPort, applicationAttempt.getRpcPort());
  verifyUrl(trackingUrl, applicationAttempt.getOriginalTrackingUrl());
  if (unmanagedAM) {
    verifyUrl(trackingUrl, applicationAttempt.getTrackingUrl());
  } else {
    assertEquals(getProxyUrl(applicationAttempt),
        applicationAttempt.getTrackingUrl());
  }
  // TODO - need to add more checks relevant to this state
}

Example 12: testAllocatedToFailed
import org.apache.hadoop.yarn.api.records.Container; // import the required package/class
@Test
public void testAllocatedToFailed() {
  Container amContainer = allocateApplicationAttempt();
  String diagnostics = "Launch Failed";
  applicationAttempt.handle(
      new RMAppAttemptLaunchFailedEvent(
          applicationAttempt.getAppAttemptId(),
          diagnostics));
  assertEquals(YarnApplicationAttemptState.ALLOCATED,
      applicationAttempt.createApplicationAttemptState());
  testAppAttemptFailedState(amContainer, diagnostics);
}

Example 13: allocateNodeLocal
import org.apache.hadoop.yarn.api.records.Container; // import the required package/class
/**
 * The {@link ResourceScheduler} is allocating data-local resources to the
 * application.
 *
 * @param container
 *          the container allocated to the application
 */
private synchronized void allocateNodeLocal(SchedulerNode node,
    Priority priority, ResourceRequest nodeLocalRequest, Container container,
    List<ResourceRequest> resourceRequests) {
  // Update future requirements
  decResourceRequest(node.getNodeName(), priority, nodeLocalRequest);
  ResourceRequest rackLocalRequest = requests.get(priority).get(
      node.getRackName());
  decResourceRequest(node.getRackName(), priority, rackLocalRequest);
  ResourceRequest offRackRequest = requests.get(priority).get(
      ResourceRequest.ANY);
  decrementOutstanding(offRackRequest);
  // Update cloned NodeLocal, RackLocal and OffRack requests for recovery
  resourceRequests.add(cloneResourceRequest(nodeLocalRequest));
  resourceRequests.add(cloneResourceRequest(rackLocalRequest));
  resourceRequests.add(cloneResourceRequest(offRackRequest));
}

Example 14: newInstance
import org.apache.hadoop.yarn.api.records.Container; // import the required package/class
@Public
@Stable
public static AllocateResponse newInstance(int responseId,
    List<ContainerStatus> completedContainers,
    List<Container> allocatedContainers, List<NodeReport> updatedNodes,
    Resource availResources, AMCommand command, int numClusterNodes,
    PreemptionMessage preempt, List<NMToken> nmTokens) {
  AllocateResponse response = Records.newRecord(AllocateResponse.class);
  response.setNumClusterNodes(numClusterNodes);
  response.setResponseId(responseId);
  response.setCompletedContainersStatuses(completedContainers);
  response.setAllocatedContainers(allocatedContainers);
  response.setUpdatedNodes(updatedNodes);
  response.setAvailableResources(availResources);
  response.setAMCommand(command);
  response.setPreemptionMessage(preempt);
  response.setNMTokens(nmTokens);
  return response;
}

Example 15: assignToFailedMap
import org.apache.hadoop.yarn.api.records.Container; // import the required package/class
@SuppressWarnings("unchecked")
private ContainerRequest assignToFailedMap(Container allocated) {
  // try to assign to earlierFailedMaps if present
  ContainerRequest assigned = null;
  while (assigned == null && earlierFailedMaps.size() > 0
      && canAssignMaps()) {
    TaskAttemptId tId = earlierFailedMaps.removeFirst();
    if (maps.containsKey(tId)) {
      assigned = maps.remove(tId);
      JobCounterUpdateEvent jce =
          new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
      jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
      eventHandler.handle(jce);
      LOG.info("Assigned from earlierFailedMaps");
      break;
    }
  }
  return assigned;
}