

Java ApplicationId Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.yarn.api.records.ApplicationId. If you are wondering what the ApplicationId class is for, how to use it, or what working examples look like, the curated class code examples below should help.


The ApplicationId class belongs to the org.apache.hadoop.yarn.api.records package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
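Before the examples, here is a minimal, self-contained sketch (written for this article, not taken from any of the projects below) showing how an ApplicationId is typically created and how attempt and container IDs are derived from it; the timestamp and counter values are arbitrary placeholders.

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;

public class ApplicationIdDemo {
  public static void main(String[] args) {
    // An ApplicationId combines the ResourceManager start time (cluster
    // timestamp) with a monotonically increasing sequence number.
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);

    // Attempt and container IDs are always derived from an ApplicationId.
    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId containerId = ContainerId.newContainerId(attemptId, 1);

    // toString() yields the familiar "application_<timestamp>_<id>" form.
    System.out.println(appId + " / " + attemptId + " / " + containerId);
  }
}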

Example 1: createStubbedJob

import org.apache.hadoop.yarn.api.records.ApplicationId; // import the required package/class
private static StubbedJob createStubbedJob(Configuration conf,
    Dispatcher dispatcher, int numSplits, AppContext appContext) {
  JobID jobID = JobID.forName("job_1234567890000_0001");
  JobId jobId = TypeConverter.toYarn(jobID);
  if (appContext == null) {
    appContext = mock(AppContext.class);
    when(appContext.hasSuccessfullyUnregistered()).thenReturn(true);
  }
  StubbedJob job = new StubbedJob(jobId,
      ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 0), 0),
      conf, dispatcher.getEventHandler(), true, "somebody", numSplits, appContext);
  dispatcher.register(JobEventType.class, job);
  EventHandler mockHandler = mock(EventHandler.class);
  dispatcher.register(TaskEventType.class, mockHandler);
  dispatcher.register(org.apache.hadoop.mapreduce.jobhistory.EventType.class,
      mockHandler);
  dispatcher.register(JobFinishEvent.Type.class, mockHandler);
  return job;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 20, Source: TestJobImpl.java

Example 2: warnUserPage

import org.apache.hadoop.yarn.api.records.ApplicationId; // import the required package/class
/**
 * Warn the user that the link may not be safe!
 * @param resp the http response
 * @param link the link to point to
 * @param user the user that owns the link.
 * @throws IOException on any error.
 */
private static void warnUserPage(HttpServletResponse resp, String link, 
    String user, ApplicationId id) throws IOException {
  //Set the cookie when we warn which overrides the query parameter
  //This is so that if a user passes in the approved query parameter without
  //having first visited this page then this page will still be displayed 
  resp.addCookie(makeCheckCookie(id, false));
  resp.setContentType(MimeType.HTML);
  Page p = new Page(resp.getWriter());
  p.html().
    h1("WARNING: The following page may not be safe!").
    h3().
    _("click ").a(link, "here").
    _(" to continue to an Application Master web interface owned by ", user).
    _().
  _();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 24, Source: WebAppProxyServlet.java

Example 3: ApplicationFinishedEvent

import org.apache.hadoop.yarn.api.records.ApplicationId; // import the required package/class
public ApplicationFinishedEvent(
    ApplicationId appId,
    String diagnosticsInfo,
    FinalApplicationStatus appStatus,
    YarnApplicationState state,
    ApplicationAttemptId latestAppAttemptId,
    long finishedTime,
    RMAppMetrics appMetrics) {
  super(SystemMetricsEventType.APP_FINISHED, finishedTime);
  this.appId = appId;
  this.diagnosticsInfo = diagnosticsInfo;
  this.appStatus = appStatus;
  this.latestAppAttemptId = latestAppAttemptId;
  this.state = state;
  this.appMetrics = appMetrics;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 17, Source: ApplicationFinishedEvent.java

Example 4: removeApplicationStateInternal

import org.apache.hadoop.yarn.api.records.ApplicationId; // import the required package/class
@Override
protected void removeApplicationStateInternal(ApplicationStateData appState)
    throws IOException {
  ApplicationId appId =
      appState.getApplicationSubmissionContext().getApplicationId();
  String appKey = getApplicationNodeKey(appId);
  try {
    WriteBatch batch = db.createWriteBatch();
    try {
      batch.delete(bytes(appKey));
      for (ApplicationAttemptId attemptId : appState.attempts.keySet()) {
        String attemptKey = getApplicationAttemptNodeKey(appKey, attemptId);
        batch.delete(bytes(attemptKey));
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("Removing state for app " + appId + " and "
            + appState.attempts.size() + " attempts" + " at " + appKey);
      }
      db.write(batch);
    } finally {
      batch.close();
    }
  } catch (DBException e) {
    throw new IOException(e);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 27, Source: LeveldbRMStateStore.java

Example 5: release

import org.apache.hadoop.yarn.api.records.ApplicationId; // import the required package/class
@Override
public void release(ApplicationId applicationId, String resourceKey)
    throws YarnException {
  ReleaseSharedCacheResourceRequest request = Records.newRecord(
      ReleaseSharedCacheResourceRequest.class);
  request.setAppId(applicationId);
  request.setResourceKey(resourceKey);
  try {
    // We do not care about the response because it is empty.
    this.scmClient.release(request);
  } catch (Exception e) {
    // Just catching IOException isn't enough.
    // RPC call can throw ConnectionException.
    throw new YarnException(e);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 17, Source: SharedCacheClientImpl.java

Example 6: startStoreWithResources

import org.apache.hadoop.yarn.api.records.ApplicationId; // import the required package/class
private Map<String, String> startStoreWithResources() throws Exception {
  Map<String, String> initialCachedResources = new HashMap<String, String>();
  int count = 10;
  for (int i = 0; i < count; i++) {
    String key = String.valueOf(i);
    String fileName = key + ".jar";
    initialCachedResources.put(key, fileName);
  }
  doReturn(new ArrayList<ApplicationId>()).when(checker)
      .getActiveApplications();
  doReturn(initialCachedResources).when(store).getInitialCachedResources(
      isA(FileSystem.class), isA(Configuration.class));
  this.store.init(new Configuration());
  this.store.start();
  return initialCachedResources;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 17, Source: TestInMemorySCMStore.java

Example 7: testNoDeletionofStagingOnReboot

import org.apache.hadoop.yarn.api.records.ApplicationId; // import the required package/class
@Test (timeout = 30000)
public void testNoDeletionofStagingOnReboot() throws IOException {
  conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
  fs = mock(FileSystem.class);
  when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
  String user = UserGroupInformation.getCurrentUser().getShortUserName();
  Path stagingDir = MRApps.getStagingAreaDir(conf, user);
  when(fs.exists(stagingDir)).thenReturn(true);
  ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(),
      0);
  ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
  ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
  Assert.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1);
  MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc,
      JobStateInternal.REBOOT, MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
  appMaster.init(conf);
  appMaster.start();
  //shutdown the job, not the lastRetry
  appMaster.shutDownJob();
  //test whether notifyIsLastAMRetry called
  Assert.assertEquals(false, ((TestMRApp)appMaster).getTestIsLastAMRetry());
  verify(fs, times(0)).delete(stagingJobPath, true);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 24, Source: TestStagingCleanup.java

Example 8: render

import org.apache.hadoop.yarn.api.records.ApplicationId; // import the required package/class
@Override
protected void render(Block html) {

  TBODY<TABLE<BODY<Hamlet>>> tableBody =
    html
      .body()
        .table("#applications")
          .thead()
            .tr()
              .td()._("ApplicationId")._()
              .td()._("ApplicationState")._()
            ._()
           ._()
           .tbody();
  for (Entry<ApplicationId, Application> entry : this.nmContext
      .getApplications().entrySet()) {
    AppInfo info = new AppInfo(entry.getValue());
    tableBody
      .tr()
        .td().a(url("application", info.getId()), info.getId())._()
        .td()._(info.getState())
        ._()
      ._();
  }
  tableBody._()._()._();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 27, Source: AllApplicationsPage.java

Example 9: testHandleRMHABeforeSubmitApplicationCallWithSavedApplicationState

import org.apache.hadoop.yarn.api.records.ApplicationId; // import the required package/class
@Test
public void
    testHandleRMHABeforeSubmitApplicationCallWithSavedApplicationState()
        throws Exception {
  // start two RMs, and transit rm1 to active, rm2 to standby
  startRMs();

  // get a new applicationId from rm1
  ApplicationId appId = rm1.getNewAppId().getApplicationId();

  // Do the failover
  explicitFailover();

  // submit the application with previous assigned applicationId
  // to current active rm: rm2
  RMApp app1 =
      rm2.submitApp(200, "", UserGroupInformation
          .getCurrentUser().getShortUserName(), null, false, null,
          configuration.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
              YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS), null, null,
          false, false, true, appId);

  // verify application submission
  verifySubmitApp(rm2, app1, appId);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 26, Source: TestSubmitApplicationWithRMHA.java

Example 10: testContainerReport

import org.apache.hadoop.yarn.api.records.ApplicationId; // import the required package/class
@Test
public void testContainerReport() throws IOException, YarnException {
  ApplicationId appId = ApplicationId.newInstance(0, 1);
  ApplicationAttemptId appAttemptId =
      ApplicationAttemptId.newInstance(appId, 1);
  ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
  GetContainerReportRequest request =
      GetContainerReportRequest.newInstance(containerId);
  GetContainerReportResponse response =
      clientService.getContainerReport(request);
  ContainerReport container = response.getContainerReport();
  Assert.assertNotNull(container);
  Assert.assertEquals(containerId, container.getContainerId());
  Assert.assertEquals("http://0.0.0.0:8188/applicationhistory/logs/" +
      "test host:100/container_0_0001_01_000001/" +
      "container_0_0001_01_000001/user1", container.getLogUrl());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 18, Source: TestApplicationHistoryClientService.java

Example 11: submitApplication

import org.apache.hadoop.yarn.api.records.ApplicationId; // import the required package/class
@SuppressWarnings("deprecation")
@Override
public void submitApplication(ApplicationSubmissionContext context)
    throws IOException {
  int waitingTime = conf.getInt(
      HPCConfiguration.YARN_APPLICATION_HPC_CLIENT_RS_MAX_WAIT_MS,
      HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_CLIENT_RS_MAX_WAIT_MS);
  int noOfTimes = conf.getInt(
      HPCConfiguration.YARN_APPLICATION_HPC_CLIENT_RS_RETRIES_MAX,
      HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_CLIENT_RS_RETRIES_MAX);
  ApplicationId applicationId = context.getApplicationId();
  String nodeName = checkAndWaitForResourcesToBeAvailable(
      applicationId, waitingTime, noOfTimes);
      
  HPCCommandExecutor.launchContainer(
      context.getAMContainerSpec(),
      ContainerId.newInstance(
          ApplicationAttemptId.newInstance(applicationId, 1), 1).toString(),
      context.getApplicationName(), conf, applicationId.getId(), nodeName);
  HPCCommandExecutor.setJobState(applicationId.getId(), "running::0", conf);
}
 
Developer ID: intel-hpdd, Project: scheduling-connector-for-hadoop, Lines of code: 22, Source: SlurmApplicationClient.java

Example 12: testUberDecision

import org.apache.hadoop.yarn.api.records.ApplicationId; // import the required package/class
private boolean testUberDecision(Configuration conf) {
  JobID jobID = JobID.forName("job_1234567890000_0001");
  JobId jobId = TypeConverter.toYarn(jobID);
  MRAppMetrics mrAppMetrics = MRAppMetrics.create();
  JobImpl job =
      new JobImpl(jobId, ApplicationAttemptId.newInstance(
        ApplicationId.newInstance(0, 0), 0), conf, mock(EventHandler.class),
        null, new JobTokenSecretManager(), new Credentials(), null, null,
        mrAppMetrics, null, true, null, 0, null, null, null, null);
  InitTransition initTransition = getInitTransition(2);
  JobEvent mockJobEvent = mock(JobEvent.class);
  initTransition.transition(job, mockJobEvent);
  boolean isUber = job.isUber();
  return isUber;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 16, Source: TestJobImpl.java

Example 13: toApplicationId

import org.apache.hadoop.yarn.api.records.ApplicationId; // import the required package/class
public static ApplicationId toApplicationId(
    String appIdStr) {
  Iterator<String> it = _split(appIdStr).iterator();
  if (!it.next().equals(APPLICATION_PREFIX)) {
    throw new IllegalArgumentException("Invalid ApplicationId prefix: "
        + appIdStr + ". The valid ApplicationId should start with prefix "
        + APPLICATION_PREFIX);
  }
  try {
    return toApplicationId(it);
  } catch (NumberFormatException n) {
    throw new IllegalArgumentException("Invalid ApplicationId: "
        + appIdStr, n);
  } catch (NoSuchElementException e) {
    throw new IllegalArgumentException("Invalid ApplicationId: "
        + appIdStr, e);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 19, Source: ConverterUtils.java
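For context, here is a short usage sketch (an illustration for this article, assuming a Hadoop 2.x classpath where ConverterUtils is available) showing how the parser above is typically invoked:

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.util.ConverterUtils;

public class ToApplicationIdDemo {
  public static void main(String[] args) {
    // Parses the canonical "application_<clusterTimestamp>_<sequenceNumber>" form.
    ApplicationId appId =
        ConverterUtils.toApplicationId("application_1234567890000_0001");
    // The two components can be read back individually.
    System.out.println(appId.getClusterTimestamp()); // 1234567890000
    System.out.println(appId.getId());               // 1
    // Strings without the "application_" prefix raise IllegalArgumentException.
  }
}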

Example 14: getNewApplication

import org.apache.hadoop.yarn.api.records.ApplicationId; // import the required package/class
@Override
public NewApplicationResponse getNewApplication() throws IOException {
  NewApplicationResponse response = new NewApplicationResponse();

  int priority = conf.getInt(YARN_APPLICATION_HPC_AM_PRIORITY,
      DEFAULT_YARN_APPLICATION_HPC_AM_PRIORITY);
  int amMemory = conf.getInt(YARN_APPLICATION_HPC_AM_RESOURCE_MEMORY_MB,
      DEFAULT_YARN_APPLICATION_HPC_AM_RESOURCE_MEMORY_MB);
  int cpus = conf.getInt(YARN_APPLICATION_HPC_AM_RESOURCE_CPU_VCORES,
      DEFAULT_YARN_APPLICATION_HPC_AM_RESOURCE_CPU_VCORES);

  SocketWrapper socket = SocketFactory.createSocket();
  String hostName = socket.getHostName();
  int port = socket.getPort();
  int jobid = PBSCommandExecutor.submitAndGetPBSJobId(conf, priority,
      amMemory, cpus, hostName, port);
  SocketCache.addSocket(jobid, socket);

  ApplicationId applicationId = ApplicationId.newInstance(
      getClusterTimestamp(), jobid);
  response.setApplicationId(applicationId);
  response.setMaxCapability(getMaxCapability());
  response.setMinCapability(getMinCapability());
  return response;
}
 
Developer ID: intel-hpdd, Project: scheduling-connector-for-hadoop, Lines of code: 26, Source: PBSApplicationClient.java

Example 15: waitForApplicationState

import org.apache.hadoop.yarn.api.records.ApplicationId; // import the required package/class
static void waitForApplicationState(ContainerManagerImpl containerManager,
    ApplicationId appID, ApplicationState finalState)
    throws InterruptedException {
  // Wait for app-finish
  Application app =
      containerManager.getContext().getApplications().get(appID);
  int timeout = 0;
  while (!(app.getApplicationState().equals(finalState))
      && timeout++ < 15) {
    LOG.info("Waiting for app to reach " + finalState
        + ".. Current state is "
        + app.getApplicationState());
    Thread.sleep(1000);
  }

  Assert.assertTrue("App is not in " + finalState + " yet!! Timedout!!",
      app.getApplicationState().equals(finalState));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 19, Source: BaseContainerManagerTest.java


Note: The org.apache.hadoop.yarn.api.records.ApplicationId class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please refer to the corresponding project's license before distributing or using the code, and do not republish without permission.