This article collects typical usage examples of the Java method org.apache.hadoop.yarn.api.records.ContainerLaunchContext.setEnvironment. If you are wondering what ContainerLaunchContext.setEnvironment does, how it is used, or where it fits in practice, the curated code examples below may help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.yarn.api.records.ContainerLaunchContext.

The following presents 6 code examples of ContainerLaunchContext.setEnvironment, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Java code examples.
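Before the project-specific examples, here is a minimal, self-contained sketch of the general pattern (not taken from any of the projects below; the variable names and values are illustrative): build a Map from environment variable names to values, for example a CLASSPATH for the container, and attach it to the ContainerLaunchContext before handing the context to YARN.

import java.io.File;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.util.Records;

public class SetEnvironmentSketch {
  public static ContainerLaunchContext newContextWithEnvironment() {
    Map<String, String> env = new HashMap<>();
    // CLASSPATH for the container; File.pathSeparator assumes client and
    // cluster run on the same platform.
    env.put(Environment.CLASSPATH.name(),
        Environment.PWD.$$() + File.pathSeparator + "./*");
    env.put("APP_HOME", "/opt/myapp"); // hypothetical application-specific variable

    ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
    ctx.setEnvironment(env); // the map replaces any previously set environment
    return ctx;
  }
}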
Example 1: newContainerLaunchContext
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; // import the package/class this method depends on
public static ContainerLaunchContext newContainerLaunchContext(
    Map<String, LocalResource> localResources,
    Map<String, String> environment, List<String> commands,
    Map<String, ByteBuffer> serviceData, ByteBuffer tokens,
    Map<ApplicationAccessType, String> acls) {
  ContainerLaunchContext container = recordFactory
      .newRecordInstance(ContainerLaunchContext.class);
  container.setLocalResources(localResources);
  container.setEnvironment(environment);
  container.setCommands(commands);
  container.setServiceData(serviceData);
  container.setTokens(tokens);
  container.setApplicationACLs(acls);
  return container;
}
Example 2: createContainerContext
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; // import the package/class this method depends on
private ContainerLaunchContext createContainerContext() {
  ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
  amContainer.setLocalResources(setupEsYarnJar());
  amContainer.setEnvironment(setupEnv());
  amContainer.setCommands(setupCmd());
  return amContainer;
}
Example 3: launchContainer
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; // import the package/class this method depends on
private void launchContainer(Container container) {
  ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
  ctx.setEnvironment(setupEnv(appConfig));
  ctx.setLocalResources(setupEsZipResource(appConfig));
  ctx.setCommands(setupEsScript(appConfig));
  log.info("About to launch container for command: " + ctx.getCommands());

  // start the container through the NodeManager RPC client
  Map<String, ByteBuffer> startContainer = nmRpc.startContainer(container, ctx);
  log.info("Started container " + container);
}
Example 4: startAppMaster
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; // import the package/class this method depends on
private ApplicationReport startAppMaster(ApplicationSubmissionContext appContext) throws Exception {
  appContext.setMaxAppAttempts(MAX_ATTEMPT);

  Map<String, LocalResource> localResources = new HashMap<>();
  Set<Path> shippedPaths = new HashSet<>();
  collectLocalResources(localResources, shippedPaths);
  final ContainerLaunchContext amContainer = setupApplicationMasterContainer(false, true, false);
  amContainer.setLocalResources(localResources);

  final String classPath = localResources.keySet().stream().collect(Collectors.joining(File.pathSeparator));
  final String shippedFiles = shippedPaths.stream().map(Path::toString)
      .collect(Collectors.joining(","));

  // Setup CLASSPATH and environment variables for ApplicationMaster
  ApplicationId appId = appContext.getApplicationId();
  final Map<String, String> appMasterEnv = setUpAmEnvironment(
      appId,
      classPath,
      shippedFiles,
      getDynamicPropertiesEncoded()
  );
  amContainer.setEnvironment(appMasterEnv);

  // Set up resource type requirements for ApplicationMaster
  Resource capability = Records.newRecord(Resource.class);
  capability.setMemory(getFlinkConfiguration()
      .getInteger(JobManagerOptions.JOB_MANAGER_HEAP_MEMORY));
  capability.setVirtualCores(1);

  appContext.setApplicationName(job.name());
  appContext.setApplicationType(ATHENAX_APPLICATION_TYPE);
  appContext.setAMContainerSpec(amContainer);
  appContext.setResource(capability);
  appContext.setApplicationTags(Collections.singleton(job.metadata().serialize()));
  if (job.queue() != null) {
    appContext.setQueue(job.queue());
  }

  LOG.info("Submitting application master {}", appId);
  yarnClient.submitApplication(appContext);

  PollDeploymentStatus poll = new PollDeploymentStatus(appId);
  YARN_POLL_EXECUTOR.submit(poll);
  try {
    return poll.result.get();
  } catch (ExecutionException e) {
    LOG.warn("Failed to deploy {}, cause: {}", appId.toString(), e.getCause());
    yarnClient.killApplication(appId);
    throw (Exception) e.getCause();
  }
}
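The setUpAmEnvironment(...) helper is private to the surrounding deployer and is not shown here. As an assumed sketch of the idea (not the actual AthenaX code), it amounts to packing the classpath and any other per-application settings into the map that is then passed to setEnvironment:

// Hypothetical setUpAmEnvironment-style helper; only CLASSPATH is a standard
// YARN variable, the other entry is an illustrative assumption.
private static Map<String, String> buildAmEnvironment(String classPath, String shippedFiles) {
  Map<String, String> env = new HashMap<>();
  env.put(Environment.CLASSPATH.name(), classPath);
  env.put("SHIPPED_FILES", shippedFiles); // how shipped paths could be handed to the AM
  return env;
}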
Example 5: submitApp
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; // import the package/class this method depends on
private void submitApp()
    throws YarnException, InterruptedException, IOException {
  // ask for a new application
  GetNewApplicationRequest newAppRequest =
      Records.newRecord(GetNewApplicationRequest.class);
  GetNewApplicationResponse newAppResponse =
      rm.getClientRMService().getNewApplication(newAppRequest);
  appId = newAppResponse.getApplicationId();

  // submit the application
  final SubmitApplicationRequest subAppRequest =
      Records.newRecord(SubmitApplicationRequest.class);
  ApplicationSubmissionContext appSubContext =
      Records.newRecord(ApplicationSubmissionContext.class);
  appSubContext.setApplicationId(appId);
  appSubContext.setMaxAppAttempts(1);
  appSubContext.setQueue(queue);
  appSubContext.setPriority(Priority.newInstance(0));
  ContainerLaunchContext conLauContext =
      Records.newRecord(ContainerLaunchContext.class);
  conLauContext.setApplicationACLs(
      new HashMap<ApplicationAccessType, String>());
  conLauContext.setCommands(new ArrayList<String>());
  conLauContext.setEnvironment(new HashMap<String, String>());
  conLauContext.setLocalResources(new HashMap<String, LocalResource>());
  conLauContext.setServiceData(new HashMap<String, ByteBuffer>());
  appSubContext.setAMContainerSpec(conLauContext);
  appSubContext.setUnmanagedAM(true);
  subAppRequest.setApplicationSubmissionContext(appSubContext);
  UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
  ugi.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws YarnException {
      rm.getClientRMService().submitApplication(subAppRequest);
      return null;
    }
  });
  LOG.info(MessageFormat.format("Submit a new application {0}", appId));

  // wait until the application is ACCEPTED
  RMApp app = rm.getRMContext().getRMApps().get(appId);
  while (app.getState() != RMAppState.ACCEPTED) {
    Thread.sleep(10);
  }

  // wait until the application attempt reaches LAUNCHED
  // ("Unmanaged AM must register after the AM attempt reaches LAUNCHED state")
  this.appAttemptId = rm.getRMContext().getRMApps().get(appId)
      .getCurrentAppAttempt().getAppAttemptId();
  RMAppAttempt rmAppAttempt = rm.getRMContext().getRMApps().get(appId)
      .getCurrentAppAttempt();
  while (rmAppAttempt.getAppAttemptState() != RMAppAttemptState.LAUNCHED) {
    Thread.sleep(10);
  }
}
Example 6: run
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; // import the package/class this method depends on
private void run(String[] args) throws Exception {
  if (args.length == 0) {
    System.out.println("Usage: [options] [commands..]");
    System.out.println("options: [-file filename] [-appcp appClasspath]");
    return;
  }
  this.initArgs(args);

  // create and start the yarnClient
  YarnClient yarnClient = YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();

  // create an application via yarnClient
  YarnClientApplication app = yarnClient.createApplication();

  // set up the container launch context for the application master
  ContainerLaunchContext amContainer = Records
      .newRecord(ContainerLaunchContext.class);
  ApplicationSubmissionContext appContext = app
      .getApplicationSubmissionContext();
  ApplicationId appId = appContext.getApplicationId();

  // add a Ctrl+C signal handler
  CtrlCHandler handler = new CtrlCHandler(appId, yarnClient);
  Signal intSignal = new Signal("INT");
  Signal.handle(intSignal, handler);

  // set up the security tokens
  amContainer.setTokens(this.setupTokens());

  // set up cache files and environment variables
  amContainer.setLocalResources(this.setupCacheFiles(appId));
  amContainer.setEnvironment(this.getEnvironment());
  String cmd = Environment.JAVA_HOME.$$() + "/bin/java"
      + " -Xmx900m"
      + " org.apache.hadoop.yarn.dmlc.ApplicationMaster"
      + this.cacheFileArg + ' ' + this.appArgs + " 1>"
      + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
      + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr";
  LOG.debug(cmd);
  amContainer.setCommands(Collections.singletonList(cmd));

  // set up resource type requirements for the ApplicationMaster
  Resource capability = Records.newRecord(Resource.class);
  capability.setMemory(1024);
  capability.setVirtualCores(1);
  LOG.info("jobname=" + this.jobName + ",username=" + this.userName);

  appContext.setApplicationName(jobName + ":DMLC-YARN");
  appContext.setAMContainerSpec(amContainer);
  appContext.setResource(capability);
  appContext.setQueue(queue);
  //appContext.setUser(userName);

  // submit the application and poll until it reaches a terminal state
  LOG.info("Submitting application " + appId);
  yarnClient.submitApplication(appContext);

  ApplicationReport appReport = yarnClient.getApplicationReport(appId);
  YarnApplicationState appState = appReport.getYarnApplicationState();
  while (appState != YarnApplicationState.FINISHED
      && appState != YarnApplicationState.KILLED
      && appState != YarnApplicationState.FAILED) {
    Thread.sleep(100);
    appReport = yarnClient.getApplicationReport(appId);
    appState = appReport.getYarnApplicationState();
  }
  System.out.println("Application " + appId + " finished with"
      + " state " + appState + " at " + appReport.getFinishTime());
  if (!appReport.getFinalApplicationStatus().equals(
      FinalApplicationStatus.SUCCEEDED)) {
    System.err.println(appReport.getDiagnostics());
    System.out.println("Available queues:");
    for (QueueInfo q : yarnClient.getAllQueues()) {
      System.out.println(q.getQueueName());
    }
    yarnClient.killApplication(appId);
  }
}
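The getEnvironment() helper used above belongs to the dmlc-yarn client and is not reproduced on this page. A common pattern for such a helper (shown here as an assumed sketch, not the dmlc code, and assuming org.apache.hadoop.conf.Configuration and org.apache.hadoop.yarn.conf.YarnConfiguration are imported) is to derive the container CLASSPATH from the cluster's yarn.application.classpath setting:

// Hypothetical getEnvironment()-style helper: build the AM's CLASSPATH from
// yarn.application.classpath plus the container's working directory.
private Map<String, String> buildEnvironment(Configuration conf) {
  Map<String, String> env = new HashMap<>();
  StringBuilder classPath = new StringBuilder(Environment.PWD.$$())
      .append(File.pathSeparator).append("./*");
  for (String entry : conf.getStrings(
      YarnConfiguration.YARN_APPLICATION_CLASSPATH,
      YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
    classPath.append(File.pathSeparator).append(entry.trim());
  }
  env.put(Environment.CLASSPATH.name(), classPath.toString());
  return env;
}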