本文整理汇总了Java中org.apache.hadoop.yarn.util.Records类的典型用法代码示例。如果您正苦于以下问题:Java Records类的具体用法?Java Records怎么用?Java Records使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
Records类属于org.apache.hadoop.yarn.util包,在下文中一共展示了Records类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: newResourceLocalizationSpec
import org.apache.hadoop.yarn.util.Records; //导入依赖的package包/类
/**
 * Builds a {@link ResourceLocalizationSpec} that tells the node manager to
 * localize {@code rsrc} into the directory identified by {@code path}.
 *
 * @param rsrc the resource to localize
 * @param path destination directory, converted to a YARN {@link URL}
 * @return a freshly created localization spec record
 */
public static ResourceLocalizationSpec newResourceLocalizationSpec(
    LocalResource rsrc, Path path) {
  URL destination = ConverterUtils.getYarnUrlFromPath(path);
  ResourceLocalizationSpec spec = Records.newRecord(ResourceLocalizationSpec.class);
  spec.setResource(rsrc);
  spec.setDestinationDirectory(destination);
  return spec;
}
示例2: allocate
import org.apache.hadoop.yarn.util.Records; //导入依赖的package包/类
/**
 * Bridges a YARN {@link AllocateRequest} to the HPC scheduler: copies the
 * request fields into an {@link HPCAllocateRequest}, delegates to the
 * application master, and maps the HPC reply back into a YARN
 * {@link AllocateResponse}.
 */
@Override
public AllocateResponse allocate(AllocateRequest request)
    throws YarnException, IOException {
  // Translate the incoming YARN request into the HPC connector's type.
  HPCAllocateRequest hpcRequest = new HPCAllocateRequest();
  hpcRequest.setAppProgress(request.getProgress());
  hpcRequest.setContainersToBeReleased(request.getReleaseList());
  hpcRequest.setResourceAsk(request.getAskList());
  hpcRequest.setResourceBlacklistRequest(request.getResourceBlacklistRequest());
  hpcRequest.setResponseID(request.getResponseId());

  // Let the HPC application master perform the actual allocation.
  HPCAllocateResponse hpcResponse = applicationMaster.allocate(hpcRequest);

  // Map the HPC reply back onto the YARN response record.
  AllocateResponse yarnResponse = Records.newRecord(AllocateResponse.class);
  yarnResponse.setAllocatedContainers(hpcResponse.getAllocatedContainers());
  yarnResponse.setNMTokens(hpcResponse.getNmTokens());
  yarnResponse.setCompletedContainersStatuses(hpcResponse.getCompletedContainers());
  return yarnResponse;
}
开发者ID:intel-hpdd,项目名称:scheduling-connector-for-hadoop,代码行数:21,代码来源:HPCApplicationMasterProtocolImpl.java
示例3: getCountersResponseFromHistoryServer
import org.apache.hadoop.yarn.util.Records; //导入依赖的package包/类
/**
 * Builds a canned {@link GetCountersResponse} containing one dummy counter
 * ("dummyCounter" = 1001) inside one dummy group ("dummyCounters"), as a
 * stand-in for a real history-server reply.
 */
private GetCountersResponse getCountersResponseFromHistoryServer() {
  Counter dummyCounter = Records.newRecord(Counter.class);
  dummyCounter.setName("dummyCounter");
  dummyCounter.setDisplayName("dummyCounter");
  dummyCounter.setValue(1001);

  CounterGroup group = Records.newRecord(CounterGroup.class);
  group.setName("dummyCounters");
  group.setDisplayName("dummyCounters");
  group.setCounter("dummyCounter", dummyCounter);

  Counters counters = Records.newRecord(Counters.class);
  counters.setCounterGroup("dummyCounters", group);

  GetCountersResponse response = Records.newRecord(GetCountersResponse.class);
  response.setCounters(counters);
  return response;
}
示例4: initNodeIds
import org.apache.hadoop.yarn.util.Records; //导入依赖的package包/类
/**
 * Fabricates {@code nodeNums} RUNNING node reports with random IPv4-style
 * hostnames and random ports, each reporting {@code containerNums} containers
 * and the given capability, and appends them to {@code nodeReportList}.
 *
 * @param nodeNums      number of fake nodes to create
 * @param containerNums container count reported by every node
 * @param resource      capability assigned to every node
 */
private void initNodeIds(int nodeNums, int containerNums, Resource resource) {
  Random portRandom = new Random();
  Random ipRandom = new Random();
  for (int node = 0; node < nodeNums; node++) {
    NodeReport report = Records.newRecord(NodeReport.class);
    report.setNumContainers(containerNums);
    report.setNodeLabels(new HashSet<>());
    report.setNodeState(NodeState.RUNNING);
    report.setCapability(resource);
    report.setUsed(Resource.newInstance(0, 0));

    // Random non-privileged port in [1024, 65535].
    int port = 1024 + portRandom.nextInt(65535 - 1024 + 1);

    // Random dotted-quad host: four octets in [0, 255]; the leading dot
    // added by the loop is trimmed off by substring(1).
    StringBuilder host = new StringBuilder();
    for (int octet = 0; octet < 4; octet++) {
      host.append(".").append(ipRandom.nextInt(256));
    }
    NodeId nodeId = NodeId.newInstance(host.substring(1), port);
    report.setNodeId(nodeId);
    report.setHttpAddress(nodeId.getHost());
    nodeReportList.add(report);
  }
}
示例5: sendErrorMessage
import org.apache.hadoop.yarn.util.Records; //导入依赖的package包/类
/**
 * Waits until at least {@code taskNum} tasks are in a container-active state
 * (polling every 2 seconds), then reports the first one's container as
 * COMPLETE with a SUCCEEDED exit status via {@code onContainersCompleted}.
 *
 * @throws InterruptedException if interrupted while sleeping between polls
 */
private void sendErrorMessage() throws InterruptedException {
  List<TaskState> activeStates = Arrays.asList(
      TaskState.CONTAINER_ALLOCATED,
      TaskState.CONTAINER_LAUNCHED,
      TaskState.CONTAINER_RUNNING);

  // Poll until enough tasks have reached an active container state.
  List<TaskStatus> active = statusManager.getTaskStatus(new HashSet<>(activeStates));
  while (active.size() < taskNum) {
    Thread.sleep(2000);
    active = statusManager.getTaskStatus(new HashSet<>(activeStates));
  }

  // Synthesize a COMPLETE/SUCCEEDED status for the first active container
  // and feed it through the completion callback.
  ContainerId containerId =
      ConverterUtils.toContainerId(active.get(0).getContainerId());
  ContainerStatus completed = Records.newRecord(ContainerStatus.class);
  completed.setContainerId(containerId);
  completed.setState(ContainerState.COMPLETE);
  completed.setExitStatus(ExitStatusKey.SUCCEEDED.toInt());
  onContainersCompleted(Collections.singletonList(completed));
}
示例6: newInstance
import org.apache.hadoop.yarn.util.Records; //导入依赖的package包/类
/**
 * Creates a {@link ContainerLaunchContext} populated with everything a
 * container needs at launch: local resources, environment, commands,
 * auxiliary-service data, security tokens, and application ACLs.
 *
 * @param localResources resources to localize before launch
 * @param environment    environment variables for the container process
 * @param commands       launch command list
 * @param serviceData    per-auxiliary-service payloads
 * @param tokens         security tokens for the container
 * @param acls           application access-control lists
 * @return the populated launch context record
 */
@Public
@Stable
public static ContainerLaunchContext newInstance(
    Map<String, LocalResource> localResources,
    Map<String, String> environment, List<String> commands,
    Map<String, ByteBuffer> serviceData, ByteBuffer tokens,
    Map<ApplicationAccessType, String> acls) {
  ContainerLaunchContext context = Records.newRecord(ContainerLaunchContext.class);
  context.setLocalResources(localResources);
  context.setEnvironment(environment);
  context.setCommands(commands);
  context.setServiceData(serviceData);
  context.setTokens(tokens);
  context.setApplicationACLs(acls);
  return context;
}
示例7: submitTasks
import org.apache.hadoop.yarn.util.Records; //导入依赖的package包/类
/**
 * Submits tasks by requesting one container per task from the RM.
 * Server tasks get the server memory/core sizing; all other roles get the
 * worker sizing. Each submitted task is queued on {@code pendingTasks} until
 * its container is granted.
 *
 * @param tasks
 *          a collection of tasks we want to ask container for
 */
private synchronized void submitTasks(Collection<TaskRecord> tasks) {
  for (TaskRecord r : tasks) {
    Resource resource = Records.newRecord(Resource.class);
    // BUG FIX: the original compared with `r.taskRole == "server"`, which
    // tests reference identity and only works when both strings are the
    // same interned literal. Use content equality (null-safe form).
    if ("server".equals(r.taskRole)) {
      resource.setMemory(serverMemoryMB);
      resource.setVirtualCores(serverCores);
    } else {
      resource.setMemory(workerMemoryMB);
      resource.setVirtualCores(workerCores);
    }
    Priority priority = Records.newRecord(Priority.class);
    priority.setPriority(this.appPriority);
    // No locality constraints: any node, any rack.
    r.containerRequest = new ContainerRequest(resource, null, null,
        priority);
    rmClient.addContainerRequest(r.containerRequest);
    pendingTasks.add(r);
  }
}
示例8: newInstance
import org.apache.hadoop.yarn.util.Records; //导入依赖的package包/类
/**
 * Creates an {@link AllocateRequest} carrying the AM's heartbeat payload:
 * the response id for duplicate detection, current progress, outstanding
 * resource asks, containers to release, blacklist updates, and container
 * resource-increase requests.
 *
 * @param responseID               id echoed from the previous allocate response
 * @param appProgress              application progress in [0, 1]
 * @param resourceAsk              outstanding resource requests
 * @param containersToBeReleased   containers the AM is giving back
 * @param resourceBlacklistRequest blacklist additions/removals
 * @param increaseRequests         container resource-increase requests
 * @return the populated allocate request record
 */
@Public
@Stable
public static AllocateRequest newInstance(int responseID, float appProgress,
    List<ResourceRequest> resourceAsk,
    List<ContainerId> containersToBeReleased,
    ResourceBlacklistRequest resourceBlacklistRequest,
    List<ContainerResourceIncreaseRequest> increaseRequests) {
  AllocateRequest request = Records.newRecord(AllocateRequest.class);
  request.setResponseId(responseID);
  request.setProgress(appProgress);
  request.setAskList(resourceAsk);
  request.setReleaseList(containersToBeReleased);
  request.setResourceBlacklistRequest(resourceBlacklistRequest);
  request.setIncreaseRequests(increaseRequests);
  return request;
}
示例9: getNewTaskID
import org.apache.hadoop.yarn.util.Records; //导入依赖的package包/类
/**
 * Allocates the next {@link TaskId} for this job: increments the shared
 * task counter and stamps the id with the job id and the mock task's type.
 */
private TaskId getNewTaskID() {
  TaskId id = Records.newRecord(TaskId.class);
  // Pre-increment so ids start at taskCounter + 1 and never repeat.
  id.setId(++taskCounter);
  id.setJobId(jobId);
  id.setTaskType(mockTask.getType());
  return id;
}
示例10: newInstance
import org.apache.hadoop.yarn.util.Records; //导入依赖的package包/类
/**
 * Copies an {@link AMRMTokenSecretManagerState} into a fresh record,
 * carrying over the current and next master keys.
 *
 * @param state the state to copy
 * @return a new record with the same master keys
 */
public static AMRMTokenSecretManagerState newInstance(
    AMRMTokenSecretManagerState state) {
  AMRMTokenSecretManagerState copy =
      Records.newRecord(AMRMTokenSecretManagerState.class);
  copy.setCurrentMasterKey(state.getCurrentMasterKey());
  copy.setNextMasterKey(state.getNextMasterKey());
  return copy;
}
示例11: newInstance
import org.apache.hadoop.yarn.util.Records; //导入依赖的package包/类
/**
 * Creates an {@link ApplicationSubmissionContext} describing everything the
 * RM needs to launch an application: identity, queue, AM container spec,
 * management mode, retry policy, and AM resource/label requirements.
 *
 * @param applicationId            id assigned by the RM
 * @param applicationName          human-readable application name
 * @param queue                    scheduler queue to submit into
 * @param amContainer              launch context for the AM container
 * @param isUnmanagedAM            whether the AM runs outside RM management
 * @param cancelTokensWhenComplete whether to cancel tokens on completion
 * @param maxAppAttempts           maximum AM attempts before failing the app
 * @param applicationType          application type tag (e.g. MAPREDUCE)
 * @param keepContainers           keep containers across AM attempts
 * @param appLabelExpression       node-label expression for the app
 * @param resourceRequest          resource request for the AM container
 * @return the populated submission context record
 */
@Public
@Stable
public static ApplicationSubmissionContext newInstance(
    ApplicationId applicationId, String applicationName, String queue,
    ContainerLaunchContext amContainer, boolean isUnmanagedAM,
    boolean cancelTokensWhenComplete, int maxAppAttempts,
    String applicationType, boolean keepContainers,
    String appLabelExpression, ResourceRequest resourceRequest) {
  ApplicationSubmissionContext context =
      Records.newRecord(ApplicationSubmissionContext.class);
  context.setApplicationId(applicationId);
  context.setApplicationName(applicationName);
  context.setQueue(queue);
  context.setAMContainerSpec(amContainer);
  context.setUnmanagedAM(isUnmanagedAM);
  context.setCancelTokensWhenComplete(cancelTokensWhenComplete);
  context.setMaxAppAttempts(maxAppAttempts);
  context.setApplicationType(applicationType);
  context.setKeepContainersAcrossApplicationAttempts(keepContainers);
  context.setNodeLabelExpression(appLabelExpression);
  context.setAMContainerResourceRequest(resourceRequest);
  return context;
}
示例12: getDelegationToken
import org.apache.hadoop.yarn.util.Records; //导入依赖的package包/类
/**
 * Fetches an RM delegation token on behalf of {@code loggedInUser}, naming
 * {@code renewerString} as the party allowed to renew it. The RPC runs
 * inside {@code doAs} so it carries the given user's credentials.
 *
 * @param loggedInUser    user whose credentials make the call
 * @param clientRMService client protocol handle to the RM
 * @param renewerString   renewer recorded in the issued token
 * @return the RM delegation token
 */
private org.apache.hadoop.yarn.api.records.Token getDelegationToken(
    final UserGroupInformation loggedInUser,
    final ApplicationClientProtocol clientRMService, final String renewerString)
    throws IOException, InterruptedException {
  return loggedInUser
      .doAs(new PrivilegedExceptionAction<org.apache.hadoop.yarn.api.records.Token>() {
        @Override
        public org.apache.hadoop.yarn.api.records.Token run()
            throws YarnException, IOException {
          GetDelegationTokenRequest tokenRequest =
              Records.newRecord(GetDelegationTokenRequest.class);
          tokenRequest.setRenewer(renewerString);
          return clientRMService.getDelegationToken(tokenRequest)
              .getRMDelegationToken();
        }
      });
}
示例13: renewDelegationToken
import org.apache.hadoop.yarn.util.Records; //导入依赖的package包/类
/**
 * Renews {@code dToken} against the RM as {@code loggedInUser} and returns
 * the token's next expiration time. The RPC runs inside {@code doAs} so it
 * carries the given user's credentials.
 *
 * @param loggedInUser    user whose credentials make the call
 * @param clientRMService client protocol handle to the RM
 * @param dToken          delegation token to renew
 * @return next expiration time of the renewed token (epoch millis)
 */
private long renewDelegationToken(final UserGroupInformation loggedInUser,
    final ApplicationClientProtocol clientRMService,
    final org.apache.hadoop.yarn.api.records.Token dToken)
    throws IOException, InterruptedException {
  return loggedInUser.doAs(new PrivilegedExceptionAction<Long>() {
    @Override
    public Long run() throws YarnException, IOException {
      RenewDelegationTokenRequest renewRequest =
          Records.newRecord(RenewDelegationTokenRequest.class);
      renewRequest.setDelegationToken(dToken);
      return clientRMService.renewDelegationToken(renewRequest)
          .getNextExpirationTime();
    }
  });
}
示例14: renewDelegationToken
import org.apache.hadoop.yarn.util.Records; //导入依赖的package包/类
/**
 * Renews an MR job-history-server delegation token on behalf of the current
 * user. Rejects the call unless the connection is authenticated with
 * Kerberos (or is a proxy over such a connection), reconstructs the wire
 * token, renews it via the JHS secret manager, and returns the new expiry.
 *
 * @throws IOException if the operation is not allowed or renewal fails
 */
@Override
public RenewDelegationTokenResponse renewDelegationToken(
    RenewDelegationTokenRequest request) throws IOException {
  if (!isAllowedDelegationTokenOp()) {
    throw new IOException(
        "Delegation Token can be renewed only with kerberos authentication");
  }

  // Rebuild a typed Token from the raw wire representation in the request.
  org.apache.hadoop.yarn.api.records.Token protoToken = request.getDelegationToken();
  Token<MRDelegationTokenIdentifier> token =
      new Token<MRDelegationTokenIdentifier>(
          protoToken.getIdentifier().array(), protoToken.getPassword()
              .array(), new Text(protoToken.getKind()), new Text(
              protoToken.getService()));

  String user = UserGroupInformation.getCurrentUser().getShortUserName();
  long nextExpTime = jhsDTSecretManager.renewToken(token, user);

  RenewDelegationTokenResponse response =
      Records.newRecord(RenewDelegationTokenResponse.class);
  response.setNextExpirationTime(nextExpTime);
  return response;
}
示例15: getDelegationToken
import org.apache.hadoop.yarn.util.Records; //导入依赖的package包/类
/**
 * Fetches a history-server delegation token as {@code loggedInUser}, naming
 * {@code renewerString} as the allowed renewer.
 *
 * @param loggedInUser  user whose credentials make the call
 * @param hsService     MR client protocol handle to the history server
 * @param renewerString renewer recorded in the issued token
 * @return the history-server delegation token
 */
private Token getDelegationToken(
    final UserGroupInformation loggedInUser,
    final MRClientProtocol hsService, final String renewerString)
    throws IOException, InterruptedException {
  // Get the delegation token directly as it is a little difficult to setup
  // the kerberos based rpc.
  return loggedInUser
      .doAs(new PrivilegedExceptionAction<Token>() {
        @Override
        public Token run() throws IOException {
          GetDelegationTokenRequest tokenRequest = Records
              .newRecord(GetDelegationTokenRequest.class);
          tokenRequest.setRenewer(renewerString);
          return hsService.getDelegationToken(tokenRequest).getDelegationToken();
        }
      });
}