

Java ContainerLaunchContext.setTokens Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.yarn.api.records.ContainerLaunchContext.setTokens. If you are unsure what ContainerLaunchContext.setTokens does or how to call it, the selected code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.yarn.api.records.ContainerLaunchContext.


The sections below present 11 code examples of the ContainerLaunchContext.setTokens method, ordered by popularity by default.
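
All of the examples share the same basic pattern: serialize a Credentials object into a ByteBuffer and hand it to ContainerLaunchContext.setTokens. As a quick orientation, here is a minimal sketch of that pattern distilled from the examples below; it assumes the Credentials have already been populated elsewhere (for example with HDFS delegation tokens), and the class and method names are illustrative only.

import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.util.Records;

public class SetTokensSketch {
  // Serializes the given Credentials and attaches the resulting buffer to a
  // newly created ContainerLaunchContext via setTokens.
  public static ContainerLaunchContext withTokens(Credentials credentials) throws IOException {
    ContainerLaunchContext clc = Records.newRecord(ContainerLaunchContext.class);
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);  // write tokens and secret keys into the buffer
    ByteBuffer tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
    clc.setTokens(tokens);  // YARN passes this buffer to the launched container as its security tokens
    return clc;
  }
}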

Example 1: testAppRecoverPath

import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; // import the class this method depends on
@Test (timeout = 30000)
public void testAppRecoverPath() throws IOException {
  LOG.info("--- START: testAppRecoverPath ---");
  ApplicationSubmissionContext sub =
      Records.newRecord(ApplicationSubmissionContext.class);
  ContainerLaunchContext clc =
      Records.newRecord(ContainerLaunchContext.class);
  Credentials credentials = new Credentials();
  DataOutputBuffer dob = new DataOutputBuffer();
  credentials.writeTokenStorageToStream(dob);
  ByteBuffer securityTokens =
      ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  clc.setTokens(securityTokens);
  sub.setAMContainerSpec(clc);
  testCreateAppSubmittedRecovery(sub);
}
 
Developer: naver, Project: hadoop, Lines of code: 17, Source file: TestRMAppTransitions.java

Example 2: launchDummyTask

import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; // import the class this method depends on
private synchronized void launchDummyTask(Container container){
    ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
    String new_command = "./launcher.py";
    String cmd = new_command + " 1>"
        + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
        + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR
        + "/stderr";
    ctx.setCommands(Collections.singletonList(cmd));
    ctx.setTokens(setupTokens());
    ctx.setLocalResources(this.workerResources);
    synchronized (this){
        this.nmClient.startContainerAsync(container, ctx);
    }
}
 
Developer: Intel-bigdata, Project: MXNetOnYARN, Lines of code: 15, Source file: ApplicationMaster.java

Example 3: addRMToken

import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; // import the class this method depends on
/**
 * Adds RM delegation token to the given {@link ContainerLaunchContext} so that the AM can authenticate itself
 * with RM using the delegation token.
 */
protected void addRMToken(ContainerLaunchContext context, YarnClient yarnClient, ApplicationId appId) {
  if (!UserGroupInformation.isSecurityEnabled()) {
    return;
  }

  try {
    Credentials credentials = YarnUtils.decodeCredentials(context.getTokens());

    Configuration config = yarnClient.getConfig();
    Token<TokenIdentifier> token = ConverterUtils.convertFromYarn(
      yarnClient.getRMDelegationToken(new Text(YarnUtils.getYarnTokenRenewer(config))),
      YarnUtils.getRMAddress(config));

    LOG.debug("Added RM delegation token {} for application {}", token, appId);
    credentials.addToken(token.getService(), token);

    context.setTokens(YarnUtils.encodeCredentials(credentials));
  } catch (YarnException | IOException e) {
    throw new RuntimeException("Failed to acquire RM delegation token", e);
  }
}
 
Developer: apache, Project: twill, Lines of code: 26, Source file: Hadoop21YarnAppClient.java

Example 4: setTokensFor

import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; // import the class this method depends on
public static void setTokensFor(ContainerLaunchContext amContainer, List<Path> paths, Configuration conf) throws IOException {
	Credentials credentials = new Credentials();
	// for HDFS
	TokenCache.obtainTokensForNamenodes(credentials, paths.toArray(new Path[0]), conf);
	// for HBase
	obtainTokenForHBase(credentials, conf);
	// for user
	UserGroupInformation currUsr = UserGroupInformation.getCurrentUser();

	Collection<Token<? extends TokenIdentifier>> usrTok = currUsr.getTokens();
	for (Token<? extends TokenIdentifier> token : usrTok) {
		final Text id = new Text(token.getIdentifier());
		LOG.info("Adding user token " + id + " with " + token);
		credentials.addToken(id, token);
	}
	try (DataOutputBuffer dob = new DataOutputBuffer()) {
		credentials.writeTokenStorageToStream(dob);

		if (LOG.isDebugEnabled()) {
			LOG.debug("Wrote tokens. Credentials buffer length: " + dob.getLength());
		}

		ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
		amContainer.setTokens(securityTokens);
	}
}
 
Developer: axbaretto, Project: flink, Lines of code: 27, Source file: Utils.java

Example 5: newContainerLaunchContext

import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; // import the class this method depends on
public static ContainerLaunchContext newContainerLaunchContext(
    Map<String, LocalResource> localResources,
    Map<String, String> environment, List<String> commands,
    Map<String, ByteBuffer> serviceData, ByteBuffer tokens,
    Map<ApplicationAccessType, String> acls) {
  ContainerLaunchContext container = recordFactory
      .newRecordInstance(ContainerLaunchContext.class);
  container.setLocalResources(localResources);
  container.setEnvironment(environment);
  container.setCommands(commands);
  container.setServiceData(serviceData);
  container.setTokens(tokens);
  container.setApplicationACLs(acls);
  return container;
}
 
Developer: naver, Project: hadoop, Lines of code: 16, Source file: BuilderUtils.java

Example 6: setupTokens

import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; // import the class this method depends on
private void setupTokens(
    ContainerLaunchContext container, ContainerId containerID)
    throws IOException {
  Map<String, String> environment = container.getEnvironment();
  environment.put(ApplicationConstants.APPLICATION_WEB_PROXY_BASE_ENV,
      application.getWebProxyBase());
  // Set AppSubmitTime and MaxAppAttempts to be consumable by the AM.
  ApplicationId applicationId =
      application.getAppAttemptId().getApplicationId();
  environment.put(
      ApplicationConstants.APP_SUBMIT_TIME_ENV,
      String.valueOf(rmContext.getRMApps()
          .get(applicationId)
          .getSubmitTime()));
  environment.put(ApplicationConstants.MAX_APP_ATTEMPTS_ENV,
      String.valueOf(rmContext.getRMApps().get(
          applicationId).getMaxAppAttempts()));

  Credentials credentials = new Credentials();
  DataInputByteBuffer dibb = new DataInputByteBuffer();
  if (container.getTokens() != null) {
    // TODO: Don't do this kind of checks everywhere.
    dibb.reset(container.getTokens());
    credentials.readTokenStorageStream(dibb);
  }

  // Add AMRMToken
  Token<AMRMTokenIdentifier> amrmToken = createAndSetAMRMToken();
  if (amrmToken != null) {
    credentials.addToken(amrmToken.getService(), amrmToken);
  }
  DataOutputBuffer dob = new DataOutputBuffer();
  credentials.writeTokenStorageToStream(dob);
  container.setTokens(ByteBuffer.wrap(dob.getData(), 0, dob.getLength()));
}
 
Developer: naver, Project: hadoop, Lines of code: 36, Source file: AMLauncher.java

Example 7: addTimelineDelegationToken

import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; // import the class this method depends on
private void addTimelineDelegationToken(
    ContainerLaunchContext clc) throws YarnException, IOException {
  Credentials credentials = new Credentials();
  DataInputByteBuffer dibb = new DataInputByteBuffer();
  ByteBuffer tokens = clc.getTokens();
  if (tokens != null) {
    dibb.reset(tokens);
    credentials.readTokenStorageStream(dibb);
    tokens.rewind();
  }
  // If the timeline delegation token is already in the CLC, no need to add
  // one more
  for (org.apache.hadoop.security.token.Token<? extends TokenIdentifier> token : credentials
      .getAllTokens()) {
    if (token.getKind().equals(TimelineDelegationTokenIdentifier.KIND_NAME)) {
      return;
    }
  }
  org.apache.hadoop.security.token.Token<TimelineDelegationTokenIdentifier>
      timelineDelegationToken = getTimelineDelegationToken();
  if (timelineDelegationToken == null) {
    return;
  }
  credentials.addToken(timelineService, timelineDelegationToken);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Add timline delegation token into credentials: "
        + timelineDelegationToken);
  }
  DataOutputBuffer dob = new DataOutputBuffer();
  credentials.writeTokenStorageToStream(dob);
  tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  clc.setTokens(tokens);
}
 
Developer: naver, Project: hadoop, Lines of code: 34, Source file: YarnClientImpl.java

Example 8: setupTokens

import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; // import the class this method depends on
@Private
@VisibleForTesting
protected void setupTokens(
    ContainerLaunchContext container, ContainerId containerID)
    throws IOException {
  Map<String, String> environment = container.getEnvironment();
  environment.put(ApplicationConstants.APPLICATION_WEB_PROXY_BASE_ENV,
      application.getWebProxyBase());
  // Set AppSubmitTime to be consumable by the AM.
  ApplicationId applicationId =
      application.getAppAttemptId().getApplicationId();
  environment.put(
      ApplicationConstants.APP_SUBMIT_TIME_ENV,
      String.valueOf(rmContext.getRMApps()
          .get(applicationId)
          .getSubmitTime()));

  Credentials credentials = new Credentials();
  DataInputByteBuffer dibb = new DataInputByteBuffer();
  ByteBuffer tokens = container.getTokens();
  if (tokens != null) {
    // TODO: Don't do this kind of checks everywhere.
    dibb.reset(tokens);
    credentials.readTokenStorageStream(dibb);
    tokens.rewind();
  }

  // Add AMRMToken
  Token<AMRMTokenIdentifier> amrmToken = createAndSetAMRMToken();
  if (amrmToken != null) {
    credentials.addToken(amrmToken.getService(), amrmToken);
  }
  DataOutputBuffer dob = new DataOutputBuffer();
  credentials.writeTokenStorageToStream(dob);
  container.setTokens(ByteBuffer.wrap(dob.getData(), 0, dob.getLength()));
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 37, Source file: AMLauncher.java

Example 9: run

import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; // import the class this method depends on
private void run(String[] args) throws Exception {
    if (args.length == 0) {
        System.out.println("Usage: [options] [commands..]");
        System.out.println("options: [-file filename] [-appcp appClasspath]");
        return;
    }
    this.initArgs(args);
    // Create yarnClient
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    // Create application via yarnClient
    YarnClientApplication app = yarnClient.createApplication();

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records
            .newRecord(ContainerLaunchContext.class);
    ApplicationSubmissionContext appContext = app
            .getApplicationSubmissionContext();
    // Submit application
    ApplicationId appId = appContext.getApplicationId();

    //add ctrl+c signal handler
    CtrlCHandler handler = new CtrlCHandler(appId, yarnClient);
    Signal intSignal = new Signal("INT");
    Signal.handle(intSignal, handler);

    // setup security token
    amContainer.setTokens(this.setupTokens());
    // setup cache-files and environment variables
    amContainer.setLocalResources(this.setupCacheFiles(appId));
    amContainer.setEnvironment(this.getEnvironment());
    String cmd = Environment.JAVA_HOME.$$() + "/bin/java"
            + " -Xmx900m"
            + " org.apache.hadoop.yarn.dmlc.ApplicationMaster"
            + this.cacheFileArg + ' ' + this.appArgs + " 1>"
            + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
            + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr";

    LOG.debug(cmd);
    amContainer.setCommands(Collections.singletonList(cmd));

    // Set up resource type requirements for ApplicationMaster
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(1024);
    capability.setVirtualCores(1);
    LOG.info("jobname=" + this.jobName + ",username=" + this.userName);

    appContext.setApplicationName(jobName + ":DMLC-YARN");
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    appContext.setQueue(queue);
    //appContext.setUser(userName);
    LOG.info("Submitting application " + appId);
    yarnClient.submitApplication(appContext);

    ApplicationReport appReport = yarnClient.getApplicationReport(appId);
    YarnApplicationState appState = appReport.getYarnApplicationState();
    while (appState != YarnApplicationState.FINISHED
            && appState != YarnApplicationState.KILLED
            && appState != YarnApplicationState.FAILED) {
        Thread.sleep(100);
        appReport = yarnClient.getApplicationReport(appId);
        appState = appReport.getYarnApplicationState();
    }

    System.out.println("Application " + appId + " finished with"
            + " state " + appState + " at " + appReport.getFinishTime());
    if (!appReport.getFinalApplicationStatus().equals(
            FinalApplicationStatus.SUCCEEDED)) {
        System.err.println(appReport.getDiagnostics());
        System.out.println("Available queues:");
        for (QueueInfo q : yarnClient.getAllQueues()) {
          System.out.println(q.getQueueName());
        }

        yarnClient.killApplication(appId);
    }
}
 
Developer: Intel-bigdata, Project: MXNetOnYARN, Lines of code: 81, Source file: Client.java

Example 10: submitAppContext

import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; // import the class this method depends on
public ApplicationId submitAppContext() throws YarnException, IOException, InterruptedException {

    yarnClient.start();

    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();

    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();

    appContext.setApplicationName(yacopConfig.getName());

    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    FileSystem fs = FileSystem.get(conf);
    //upload the local docker image
    if (yacopConfig.isEngineLocalImage()) {
      boolean dockerImgUploaded = uploadDockerImage(fs, appId.toString(), yacopConfig.getEngineImage());
      if (dockerImgUploaded) {
        LOG.info("Local Docker image " + yacopConfig.getEngineImage() + " uploaded successfully");
      } else {
        LOG.info("Local Docker image " + yacopConfig.getEngineImage() + " upload failed, existing");
        System.exit(3);
      }
    }
    addToLocalResources(fs, appMasterJar, appMasterJarPath, appId.toString(), localResources, null);
    configFile = serializeObj(appId, yacopConfig);
    addToLocalResources(fs, configFile, configFilePath, appId.toString(), localResources, null);

    Map<String, String> env = prepareEnv();
    List<String> commands = prepareCommands();

    ContainerLaunchContext amContainer = ContainerLaunchContext.newInstance(localResources, env, commands, null, null, null);
    appContext.setAMContainerSpec(amContainer);
    Resource capability = Resource.newInstance(amMemory, amVCores);
    appContext.setResource(capability);

    // set security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
      Credentials credentials = new Credentials();
      String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
      if (tokenRenewer == null || tokenRenewer.length() == 0) {
        throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
      }

      // For now, only getting tokens for the default file-system.
      final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
      if (tokens != null) {
        for (Token<?> token : tokens) {
          LOG.info("Got dt for " + fs.getUri() + "; " + token);
        }
      }
      DataOutputBuffer dob = new DataOutputBuffer();
      credentials.writeTokenStorageToStream(dob);
      ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
      amContainer.setTokens(fsTokens);
    }

    Priority pri = Priority.newInstance(amPriority);
    appContext.setPriority(pri);
    appContext.setQueue(amQueue);

    yarnClient.submitApplication(appContext);
    return appId;
  }
 
Developer: intel-hadoop, Project: yacop, Lines of code: 65, Source file: ActionSubmitApp.java

Example 11: addRMToken

import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; // import the class this method depends on
/**
 * Overrides the parent method to add an RM delegation token to the given context. If YARN is running with an HA RM,
 * delegation tokens for each RM service will be added.
 */
protected void addRMToken(ContainerLaunchContext context, YarnClient yarnClient, ApplicationId appId) {
  if (!UserGroupInformation.isSecurityEnabled()) {
    return;
  }

  try {
    Text renewer = new Text(UserGroupInformation.getCurrentUser().getShortUserName());
    org.apache.hadoop.yarn.api.records.Token rmDelegationToken = yarnClient.getRMDelegationToken(renewer);

    // The following logic is copied from ClientRMProxy.getRMDelegationTokenService, which is not available in
    // YARN older than 2.4
    List<String> services = new ArrayList<>();
    if (HAUtil.isHAEnabled(configuration)) {
      // If HA is enabled, we need to enumerate all RM hosts
      // and add the corresponding service name to the token service
      // Copy the yarn conf since we need to modify it to get the RM addresses
      YarnConfiguration yarnConf = new YarnConfiguration(configuration);
      for (String rmId : HAUtil.getRMHAIds(configuration)) {
        yarnConf.set(YarnConfiguration.RM_HA_ID, rmId);
        InetSocketAddress address = yarnConf.getSocketAddr(YarnConfiguration.RM_ADDRESS,
                                                           YarnConfiguration.DEFAULT_RM_ADDRESS,
                                                           YarnConfiguration.DEFAULT_RM_PORT);
        services.add(SecurityUtil.buildTokenService(address).toString());
      }
    } else {
      services.add(SecurityUtil.buildTokenService(YarnUtils.getRMAddress(configuration)).toString());
    }

    Credentials credentials = YarnUtils.decodeCredentials(context.getTokens());

    // casting needed for later Hadoop version
    @SuppressWarnings("RedundantCast")
    Token<TokenIdentifier> token = ConverterUtils.convertFromYarn(rmDelegationToken, (InetSocketAddress) null);

    token.setService(new Text(Joiner.on(',').join(services)));
    credentials.addToken(new Text(token.getService()), token);

    LOG.debug("Added RM delegation token {} for application {}", token, appId);
    credentials.addToken(token.getService(), token);

    context.setTokens(YarnUtils.encodeCredentials(credentials));

  } catch (Exception e) {
    throw Throwables.propagate(e);
  }
}
 
Developer: apache, Project: twill, Lines of code: 51, Source file: Hadoop23YarnAppClient.java


Note: The org.apache.hadoop.yarn.api.records.ContainerLaunchContext.setTokens examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their respective developers, and copyright remains with the original authors; follow the corresponding project's License when redistributing or using the code, and do not republish without permission.