当前位置: 首页>>代码示例>>Java>>正文


Java BuilderUtils.newApplicationAttemptId方法代码示例

本文整理汇总了Java中org.apache.hadoop.yarn.server.utils.BuilderUtils.newApplicationAttemptId方法的典型用法代码示例。如果您正苦于以下问题:Java BuilderUtils.newApplicationAttemptId方法的具体用法?Java BuilderUtils.newApplicationAttemptId怎么用?Java BuilderUtils.newApplicationAttemptId使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.yarn.server.utils.BuilderUtils的用法示例。


在下文中一共展示了BuilderUtils.newApplicationAttemptId方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: addAppContainers

import org.apache.hadoop.yarn.server.utils.BuilderUtils; //导入方法依赖的package包/类
/**
 * Registers two mock containers (container numbers 1 and 2) for attempt 1 of
 * the given application, in both the NM context and the application itself.
 *
 * @param app the application to attach the containers to
 * @return a map whose keys and values are both the container-id strings,
 *         used by callers to look up expected ids
 * @throws IOException if container setup fails
 */
private HashMap<String, String> addAppContainers(Application app) 
    throws IOException {
  Dispatcher dispatcher = new AsyncDispatcher();
  ApplicationAttemptId attemptId = BuilderUtils.newApplicationAttemptId(
      app.getAppId(), 1);
  Container first = new MockContainer(attemptId, dispatcher, conf,
      app.getUser(), app.getAppId(), 1);
  Container second = new MockContainer(attemptId, dispatcher, conf,
      app.getUser(), app.getAppId(), 2);

  // Each container is visible both to the node manager context and to the
  // owning application.
  nmContext.getContainers().put(first.getContainerId(), first);
  nmContext.getContainers().put(second.getContainerId(), second);
  app.getContainers().put(first.getContainerId(), first);
  app.getContainers().put(second.getContainerId(), second);

  HashMap<String, String> idMap = new HashMap<String, String>();
  for (Container c : new Container[] { first, second }) {
    String id = c.getContainerId().toString();
    idMap.put(id, id);
  }
  return idMap;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:24,代码来源:TestNMWebServicesApps.java

示例2: getMockContainer

import org.apache.hadoop.yarn.server.utils.BuilderUtils; //导入方法依赖的package包/类
/**
 * Builds a Mockito-mocked container for attempt 1 of {@code appId} with the
 * given container number and owner; its credentials carry one token named
 * "tok&lt;id&gt;" and its toString() reports the container id.
 */
private static Container getMockContainer(ApplicationId appId, int id,
    String user) {
  ApplicationAttemptId attempt =
      BuilderUtils.newApplicationAttemptId(appId, 1);
  ContainerId containerId = BuilderUtils.newContainerId(attempt, id);

  Credentials credentials = new Credentials();
  credentials.addToken(new Text("tok" + id), getToken(id));

  Container mocked = mock(Container.class);
  when(mocked.getUser()).thenReturn(user);
  when(mocked.getContainerId()).thenReturn(containerId);
  when(mocked.getCredentials()).thenReturn(credentials);
  when(mocked.toString()).thenReturn(containerId.toString());
  return mocked;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:15,代码来源:TestResourceLocalizationService.java

示例3: createMockedContainer

import org.apache.hadoop.yarn.server.utils.BuilderUtils; //导入方法依赖的package包/类
/**
 * Creates a Mockito-mocked container with the given container number under
 * attempt 1 of {@code appId}; its launch context reports an empty ACL map.
 */
private Container createMockedContainer(ApplicationId appId, int containerId) {
  ApplicationAttemptId attempt =
      BuilderUtils.newApplicationAttemptId(appId, 1);
  ContainerId id = BuilderUtils.newContainerId(attempt, containerId);

  ContainerLaunchContext context = mock(ContainerLaunchContext.class);
  when(context.getApplicationACLs()).thenReturn(
      new HashMap<ApplicationAccessType, String>());

  Container container = mock(Container.class);
  when(container.getContainerId()).thenReturn(id);
  when(container.getLaunchContext()).thenReturn(context);
  return container;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:13,代码来源:TestApplication.java

示例4: setup

import org.apache.hadoop.yarn.server.utils.BuilderUtils; //导入方法依赖的package包/类
/**
 * Common fixture setup: mocks the deletion service and the application event
 * handler, registers the handler on a freshly created dispatcher, and
 * pre-builds the app / attempt / container ids used by the tests.
 */
@Before
@SuppressWarnings("unchecked") // raw EventHandler mock triggers an unchecked warning
public void setup() {
  mockDelService = mock(DeletionService.class);
  conf = new YarnConfiguration();
  dispatcher = createDispatcher(conf);
  appEventHandler = mock(EventHandler.class);
  dispatcher.register(ApplicationEventType.class, appEventHandler);
  // Fixed id chain: app (1234, 1) -> attempt 1 -> container 1.
  appId = BuilderUtils.newApplicationId(1234, 1);
  appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
  container11 = BuilderUtils.newContainerId(appAttemptId, 1);
  dirsHandler = new LocalDirsHandlerService();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:14,代码来源:TestNonAggregatingLogHandler.java

示例5: verifyTokenWithTamperedID

import org.apache.hadoop.yarn.server.utils.BuilderUtils; //导入方法依赖的package包/类
/**
 * Verifies that a client-to-AM token whose identifier carries a tampered
 * application-attempt id (same cluster timestamp, but app id 42 / attempt 43
 * substituted) is rejected for a remote user "me".
 */
private void verifyTokenWithTamperedID(final Configuration conf,
    final CustomAM am, Token<ClientToAMTokenIdentifier> token)
    throws IOException {
  // Malicious user, messes with appId
  UserGroupInformation ugi = UserGroupInformation.createRemoteUser("me");
  long clusterTimestamp =
      am.appAttemptId.getApplicationId().getClusterTimestamp();
  ApplicationAttemptId bogusAttemptId = BuilderUtils.newApplicationAttemptId(
      BuilderUtils.newApplicationId(clusterTimestamp, 42), 43);
  ClientToAMTokenIdentifier maliciousID = new ClientToAMTokenIdentifier(
      bogusAttemptId,
      UserGroupInformation.getCurrentUser().getShortUserName());

  verifyTamperedToken(conf, am, token, ugi, maliciousID);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:14,代码来源:TestClientToAMTokens.java

示例6: testAppAttemptMetrics

import org.apache.hadoop.yarn.server.utils.BuilderUtils; //导入方法依赖的package包/类
/**
 * Submitting one application and then adding two attempts for it must bump
 * the queue's apps-submitted metric exactly once (attempts do not count as
 * new submissions).
 */
@Test(timeout=5000)
public void testAppAttemptMetrics() throws Exception {
  AsyncDispatcher dispatcher = new InlineDispatcher();

  FifoScheduler scheduler = new FifoScheduler();
  RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
  RMContext rmContext = new RMContextImpl(dispatcher, null,
      null, null, null, null, null, null, null, writer, scheduler);
  ((RMContextImpl) rmContext).setSystemMetricsPublisher(
      mock(SystemMetricsPublisher.class));

  Configuration conf = new Configuration();
  scheduler.setRMContext(rmContext);
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, rmContext);

  QueueMetrics metrics = scheduler.getRootQueueMetrics();
  int submittedBefore = metrics.getAppsSubmitted();

  ApplicationId appId = BuilderUtils.newApplicationId(200, 1);
  scheduler.handle(new AppAddedSchedulerEvent(appId, "queue", "user"));

  // First attempt of the application ...
  scheduler.handle(new AppAttemptAddedSchedulerEvent(
      BuilderUtils.newApplicationAttemptId(appId, 1), false));
  // ... and a second attempt of the same application.
  scheduler.handle(new AppAttemptAddedSchedulerEvent(
      BuilderUtils.newApplicationAttemptId(appId, 2), false));

  Assert.assertEquals(1, metrics.getAppsSubmitted() - submittedBefore);
  scheduler.stop();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:39,代码来源:TestFifoScheduler.java

示例7: testBlackListNodes

import org.apache.hadoop.yarn.server.utils.BuilderUtils; //导入方法依赖的package包/类
/**
 * The FIFO scheduler must let an application attempt add a host to its
 * blacklist and remove it again via allocate() calls that request and
 * release no containers.
 */
@Test
public void testBlackListNodes() throws Exception {
  Configuration conf = new Configuration();
  conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
      ResourceScheduler.class);
  MockRM rm = new MockRM(conf);
  rm.start();
  FifoScheduler scheduler = (FifoScheduler) rm.getResourceScheduler();

  String host = "127.0.0.1";
  RMNode node =
      MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1, host);
  scheduler.handle(new NodeAddedSchedulerEvent(node));

  ApplicationId appId = BuilderUtils.newApplicationId(100, 1);
  ApplicationAttemptId attemptId =
      BuilderUtils.newApplicationAttemptId(appId, 1);

  createMockRMApp(attemptId, rm.getRMContext());

  scheduler.handle(new AppAddedSchedulerEvent(appId, "default", "user"));
  scheduler.handle(new AppAttemptAddedSchedulerEvent(attemptId, false));

  // Verify the blacklist can be updated independent of requesting containers
  scheduler.allocate(attemptId, Collections.<ResourceRequest>emptyList(),
      Collections.<ContainerId>emptyList(),
      Collections.singletonList(host), null);
  Assert.assertTrue(
      scheduler.getApplicationAttempt(attemptId).isBlacklisted(host));

  scheduler.allocate(attemptId, Collections.<ResourceRequest>emptyList(),
      Collections.<ContainerId>emptyList(), null,
      Collections.singletonList(host));
  Assert.assertFalse(
      scheduler.getApplicationAttempt(attemptId).isBlacklisted(host));
  rm.stop();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:40,代码来源:TestFifoScheduler.java

示例8: mockApp

import org.apache.hadoop.yarn.server.utils.BuilderUtils; //导入方法依赖的package包/类
/**
 * Returns a mocked AppSchedulingInfo owned by {@code user} whose attempt id
 * is attempt 1 of application (1, 1).
 */
private static AppSchedulingInfo mockApp(String user) {
  ApplicationAttemptId attemptId = BuilderUtils.newApplicationAttemptId(
      BuilderUtils.newApplicationId(1, 1), 1);
  AppSchedulingInfo info = mock(AppSchedulingInfo.class);
  when(info.getUser()).thenReturn(user);
  when(info.getApplicationAttemptId()).thenReturn(attemptId);
  return info;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:9,代码来源:TestQueueMetrics.java

示例9: testContainerLogDirs

import org.apache.hadoop.yarn.server.utils.BuilderUtils; //导入方法依赖的package包/类
/**
 * Verifies that ContainerLogsUtils.getContainerLogDirs returns plain local
 * paths (no "file:" scheme) for a running container, still resolves after the
 * container is removed from the NM context, and falls back to the
 * read-accessible (full-disk) log dirs when no good log dirs remain.
 */
@Test(timeout=30000)
public void testContainerLogDirs() throws IOException, YarnException {
  File absLogDir = new File("target",
    TestNMWebServer.class.getSimpleName() + "LogDir").getAbsoluteFile();
  String logdirwithFile = absLogDir.toURI().toString();
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.NM_LOG_DIRS, logdirwithFile);
  NodeHealthCheckerService healthChecker = createNodeHealthCheckerService(conf);
  healthChecker.init(conf);
  LocalDirsHandlerService dirsHandler = healthChecker.getDiskHandler();
  NMContext nmContext = new NodeManager.NMContext(null, null, dirsHandler,
      new ApplicationACLsManager(conf), new NMNullStateStoreService());
  // Add an application and the corresponding containers
  RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(conf);
  String user = "nobody";
  long clusterTimeStamp = 1234;
  ApplicationId appId = BuilderUtils.newApplicationId(recordFactory,
      clusterTimeStamp, 1);
  Application app = mock(Application.class);
  when(app.getUser()).thenReturn(user);
  when(app.getAppId()).thenReturn(appId);
  ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
      appId, 1);
  ContainerId container1 = BuilderUtils.newContainerId(recordFactory, appId,
      appAttemptId, 0);
  nmContext.getApplications().put(appId, app);

  MockContainer container =
      new MockContainer(appAttemptId, new AsyncDispatcher(), conf, user,
          appId, 1);
  container.setState(ContainerState.RUNNING);
  nmContext.getContainers().put(container1, container);
  List<File> files = null;
  // While the container is running the returned dirs must be plain local
  // paths, not "file:" URIs.
  files = ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
  Assert.assertTrue(!(files.get(0).toString().contains("file:")));

  // After container is completed, it is removed from nmContext
  nmContext.getContainers().remove(container1);
  Assert.assertNull(nmContext.getContainers().get(container1));
  // Log dirs must still resolve for a completed (removed) container.
  files = ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
  Assert.assertTrue(!(files.get(0).toString().contains("file:")));

  // Create a new context to check if correct container log dirs are fetched
  // on full disk.
  LocalDirsHandlerService dirsHandlerForFullDisk = spy(dirsHandler);
  // good log dirs are empty and nm log dir is in the full log dir list.
  when(dirsHandlerForFullDisk.getLogDirs()).
      thenReturn(new ArrayList<String>());
  when(dirsHandlerForFullDisk.getLogDirsForRead()).
      thenReturn(Arrays.asList(new String[] {absLogDir.getAbsolutePath()}));
  nmContext = new NodeManager.NMContext(null, null, dirsHandlerForFullDisk,
      new ApplicationACLsManager(conf), new NMNullStateStoreService());
  nmContext.getApplications().put(appId, app);
  container.setState(ContainerState.RUNNING);
  nmContext.getContainers().put(container1, container);
  // The full-disk dir list must still yield the container's log directory.
  List<File> dirs =
      ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
  File containerLogDir = new File(absLogDir, appId + "/" + container1);
  Assert.assertTrue(dirs.contains(containerLogDir));
}
 
开发者ID:naver,项目名称:hadoop,代码行数:61,代码来源:TestContainerLogsPage.java

示例10: verifyLocalFileDeletion

import org.apache.hadoop.yarn.server.utils.BuilderUtils; //导入方法依赖的package包/类
/**
 * Runs one application with one container through the log aggregation
 * service and verifies that: local container log files and the app log dir
 * are deleted, the aggregated log file exists at the remote path,
 * filesystems are closed on stop, and the application receives the
 * LOG_HANDLING_INITED and LOG_HANDLING_FINISHED events.
 */
private void verifyLocalFileDeletion(
    LogAggregationService logAggregationService) throws Exception {
  logAggregationService.init(this.conf);
  logAggregationService.start();

  ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);

  // AppLogDir should be created
  File app1LogDir =
      new File(localLogDir, ConverterUtils.toString(application1));
  app1LogDir.mkdir();
  logAggregationService
      .handle(new LogHandlerAppStartedEvent(
          application1, this.user, null,
          ContainerLogsRetentionPolicy.ALL_CONTAINERS, this.acls));

  ApplicationAttemptId appAttemptId =
      BuilderUtils.newApplicationAttemptId(application1, 1);
  ContainerId container11 = BuilderUtils.newContainerId(appAttemptId, 1);
  // Simulate log-file creation
  writeContainerLogs(app1LogDir, container11, new String[] { "stdout",
      "stderr", "syslog" });
  logAggregationService.handle(
      new LogHandlerContainerFinishedEvent(container11, 0));

  logAggregationService.handle(new LogHandlerAppFinishedEvent(
      application1));

  logAggregationService.stop();
  assertEquals(0, logAggregationService.getNumAggregators());
  // ensure filesystems were closed
  verify(logAggregationService).closeFileSystems(
      any(UserGroupInformation.class));
  // The deletion service must have been asked to remove the local app dir.
  verify(delSrvc).delete(eq(user), eq((Path) null),
    eq(new Path(app1LogDir.getAbsolutePath())));
  delSrvc.stop();

  // Every per-container log file must be gone locally.
  String containerIdStr = ConverterUtils.toString(container11);
  File containerLogDir = new File(app1LogDir, containerIdStr);
  for (String fileType : new String[] { "stdout", "stderr", "syslog" }) {
    File f = new File(containerLogDir, fileType);
    Assert.assertFalse("check "+f, f.exists());
  }

  Assert.assertFalse(app1LogDir.exists());

  // The aggregated log must exist at the remote node log file path.
  Path logFilePath =
      logAggregationService.getRemoteNodeLogFileForApp(application1,
          this.user);

  Assert.assertTrue("Log file [" + logFilePath + "] not found", new File(
      logFilePath.toUri().getPath()).exists());

  dispatcher.await();

  ApplicationEvent expectedEvents[] = new ApplicationEvent[]{
      new ApplicationEvent(
          appAttemptId.getApplicationId(),
          ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),
      new ApplicationEvent(
          appAttemptId.getApplicationId(),
          ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)
  };

  checkEvents(appEventHandler, expectedEvents, true, "getType",
      "getApplicationID");
}
 
开发者ID:naver,项目名称:hadoop,代码行数:68,代码来源:TestLogAggregationService.java

示例11: testLogAggregationForRealContainerLaunch

import org.apache.hadoop.yarn.server.utils.BuilderUtils; //导入方法依赖的package包/类
/**
 * End-to-end check: launches a real container running a shell script that
 * writes stdout/stderr/syslog files, waits for it to complete, and then
 * finishes the application so the container manager runs log aggregation.
 */
@Test
public void testLogAggregationForRealContainerLaunch() throws IOException,
    InterruptedException, YarnException {

  this.containerManager.start();


  // Script writes one line into each of the three standard log files.
  File scriptFile = new File(tmpDir, "scriptFile.sh");
  PrintWriter fileWriter = new PrintWriter(scriptFile);
  fileWriter.write("\necho Hello World! Stdout! > "
      + new File(localLogDir, "stdout"));
  fileWriter.write("\necho Hello World! Stderr! > "
      + new File(localLogDir, "stderr"));
  fileWriter.write("\necho Hello World! Syslog! > "
      + new File(localLogDir, "syslog"));
  fileWriter.close();

  ContainerLaunchContext containerLaunchContext =
      recordFactory.newRecordInstance(ContainerLaunchContext.class);
  // ////// Construct the Container-id
  ApplicationId appId = ApplicationId.newInstance(0, 0);
  ApplicationAttemptId appAttemptId =
      BuilderUtils.newApplicationAttemptId(appId, 1);
  ContainerId cId = BuilderUtils.newContainerId(appAttemptId, 0);

  // Localize the script as an application-visible file resource.
  URL resource_alpha =
      ConverterUtils.getYarnUrlFromPath(localFS
          .makeQualified(new Path(scriptFile.getAbsolutePath())));
  LocalResource rsrc_alpha =
      recordFactory.newRecordInstance(LocalResource.class);
  rsrc_alpha.setResource(resource_alpha);
  rsrc_alpha.setSize(-1);
  rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
  rsrc_alpha.setType(LocalResourceType.FILE);
  rsrc_alpha.setTimestamp(scriptFile.lastModified());
  String destinationFile = "dest_file";
  Map<String, LocalResource> localResources = 
      new HashMap<String, LocalResource>();
  localResources.put(destinationFile, rsrc_alpha);
  containerLaunchContext.setLocalResources(localResources);
  List<String> commands = new ArrayList<String>();
  commands.add("/bin/bash");
  commands.add(scriptFile.getAbsolutePath());
  containerLaunchContext.setCommands(commands);

  StartContainerRequest scRequest =
      StartContainerRequest.newInstance(containerLaunchContext,
        TestContainerManager.createContainerToken(
          cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user,
          context.getContainerTokenSecretManager()));
  List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
  list.add(scRequest);
  StartContainersRequest allRequests =
      StartContainersRequest.newInstance(list);
  this.containerManager.startContainers(allRequests);

  BaseContainerManagerTest.waitForContainerState(this.containerManager,
      cId, ContainerState.COMPLETE);

  // Finishing the application (reason ON_SHUTDOWN) triggers log handling.
  this.containerManager.handle(new CMgrCompletedAppsEvent(Arrays
      .asList(appId), CMgrCompletedAppsEvent.Reason.ON_SHUTDOWN));
  this.containerManager.stop();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:64,代码来源:TestLogAggregationService.java

示例12: testFailedDirsLocalFileDeletionAfterUpload

import org.apache.hadoop.yarn.server.utils.BuilderUtils; //导入方法依赖的package包/类
/**
 * Exercises log aggregation when the local log dirs are reported as failed
 * (simulated via TestNonAggregatingLogHandler.runMockedFailedDirs) and
 * verifies shutdown cleanup (no aggregators left, filesystems closed) plus
 * the LOG_HANDLING_INITED / LOG_HANDLING_FINISHED application events.
 */
@Test
public void testFailedDirsLocalFileDeletionAfterUpload() throws Exception {

  // setup conf and services
  DeletionService mockDelService = mock(DeletionService.class);
  File[] localLogDirs =
      TestNonAggregatingLogHandler.getLocalLogDirFiles(this.getClass()
        .getName(), 7);
  final List<String> localLogDirPaths =
      new ArrayList<String>(localLogDirs.length);
  for (int i = 0; i < localLogDirs.length; i++) {
    localLogDirPaths.add(localLogDirs[i].getAbsolutePath());
  }

  String localLogDirsString = StringUtils.join(localLogDirPaths, ",");

  this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDirsString);
  this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
    this.remoteRootLogDir.getAbsolutePath());
  this.conf.setLong(YarnConfiguration.NM_DISK_HEALTH_CHECK_INTERVAL_MS, 500);

  ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);
  ApplicationAttemptId appAttemptId =
      BuilderUtils.newApplicationAttemptId(application1, 1);

  this.dirsHandler = new LocalDirsHandlerService();
  LocalDirsHandlerService mockDirsHandler = mock(LocalDirsHandlerService.class);

  // Spy the service so its local FileContext can be swapped for a spied one.
  LogAggregationService logAggregationService =
      spy(new LogAggregationService(dispatcher, this.context, mockDelService,
        mockDirsHandler));
  AbstractFileSystem spylfs =
      spy(FileContext.getLocalFSFileContext().getDefaultFileSystem());
  FileContext lfs = FileContext.getFileContext(spylfs, conf);
  doReturn(lfs).when(logAggregationService).getLocalFileContext(
    isA(Configuration.class));

  logAggregationService.init(this.conf);
  logAggregationService.start();

  // Shared helper drives the failed-dirs scenario and the app lifecycle.
  TestNonAggregatingLogHandler.runMockedFailedDirs(logAggregationService,
    application1, user, mockDelService, mockDirsHandler, conf, spylfs, lfs,
    localLogDirs);

  logAggregationService.stop();
  assertEquals(0, logAggregationService.getNumAggregators());
  verify(logAggregationService).closeFileSystems(
    any(UserGroupInformation.class));

  ApplicationEvent expectedEvents[] =
      new ApplicationEvent[] {
          new ApplicationEvent(appAttemptId.getApplicationId(),
            ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),
          new ApplicationEvent(appAttemptId.getApplicationId(),
            ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED) };

  checkEvents(appEventHandler, expectedEvents, true, "getType",
    "getApplicationID");
}
 
开发者ID:naver,项目名称:hadoop,代码行数:60,代码来源:TestLogAggregationService.java

示例13: createAndGetApplicationReport

import org.apache.hadoop.yarn.server.utils.BuilderUtils; //导入方法依赖的package包/类
/**
 * Assembles an ApplicationReport snapshot of this application under the read
 * lock. When {@code allowAccess} is false every attempt-level field stays at
 * its UNAVAILABLE/default placeholder; otherwise tracking URLs, host, RPC
 * port, tokens, resource usage and diagnostics are filled from the current
 * attempt when one exists.
 *
 * @param clientUserName user requesting the report; used to mint the
 *        client-to-AM token and, for an unmanaged AM owned by the same user,
 *        to expose the AMRM token
 * @param allowAccess whether the caller may see attempt-level details
 * @return the populated report
 */
@Override
public ApplicationReport createAndGetApplicationReport(String clientUserName,
    boolean allowAccess) {
  this.readLock.lock();

  try {
    // Placeholder values used when access is denied or no attempt exists yet.
    ApplicationAttemptId currentApplicationAttemptId = null;
    org.apache.hadoop.yarn.api.records.Token clientToAMToken = null;
    String trackingUrl = UNAVAILABLE;
    String host = UNAVAILABLE;
    String origTrackingUrl = UNAVAILABLE;
    int rpcPort = -1;
    ApplicationResourceUsageReport appUsageReport =
        RMServerUtils.DUMMY_APPLICATION_RESOURCE_USAGE_REPORT;
    FinalApplicationStatus finishState = getFinalApplicationStatus();
    String diags = UNAVAILABLE;
    float progress = 0.0f;
    org.apache.hadoop.yarn.api.records.Token amrmToken = null;
    if (allowAccess) {
      trackingUrl = getDefaultProxyTrackingUrl();
      if (this.currentAttempt != null) {
        currentApplicationAttemptId = this.currentAttempt.getAppAttemptId();
        trackingUrl = this.currentAttempt.getTrackingUrl();
        origTrackingUrl = this.currentAttempt.getOriginalTrackingUrl();
        if (UserGroupInformation.isSecurityEnabled()) {
          // get a token so the client can communicate with the app attempt
          // NOTE: token may be unavailable if the attempt is not running
          Token<ClientToAMTokenIdentifier> attemptClientToAMToken =
              this.currentAttempt.createClientToken(clientUserName);
          if (attemptClientToAMToken != null) {
            clientToAMToken = BuilderUtils.newClientToAMToken(
                attemptClientToAMToken.getIdentifier(),
                attemptClientToAMToken.getKind().toString(),
                attemptClientToAMToken.getPassword(),
                attemptClientToAMToken.getService().toString());
          }
        }
        host = this.currentAttempt.getHost();
        rpcPort = this.currentAttempt.getRpcPort();
        appUsageReport = currentAttempt.getApplicationResourceUsageReport();
        progress = currentAttempt.getProgress();
      }
      diags = this.diagnostics.toString();

      // The AMRM token is exposed only for a LAUNCHED unmanaged AM, and only
      // to the user who owns the application.
      if (currentAttempt != null && 
          currentAttempt.getAppAttemptState() == RMAppAttemptState.LAUNCHED) {
        if (getApplicationSubmissionContext().getUnmanagedAM() &&
            clientUserName != null && getUser().equals(clientUserName)) {
          Token<AMRMTokenIdentifier> token = currentAttempt.getAMRMToken();
          if (token != null) {
            amrmToken = BuilderUtils.newAMRMToken(token.getIdentifier(),
                token.getKind().toString(), token.getPassword(),
                token.getService().toString());
          }
        }
      }

      RMAppMetrics rmAppMetrics = getRMAppMetrics();
      appUsageReport.setMemorySeconds(rmAppMetrics.getMemorySeconds());
      appUsageReport.setVcoreSeconds(rmAppMetrics.getVcoreSeconds());
      appUsageReport.setGcoreSeconds(rmAppMetrics.getGcoreSeconds());
    }

    // Reports always carry an attempt id; substitute a dummy one when none.
    if (currentApplicationAttemptId == null) {
      currentApplicationAttemptId = 
          BuilderUtils.newApplicationAttemptId(this.applicationId, 
              DUMMY_APPLICATION_ATTEMPT_NUMBER);
    }

    return BuilderUtils.newApplicationReport(this.applicationId,
        currentApplicationAttemptId, this.user, this.queue,
        this.name, host, rpcPort, clientToAMToken,
        createApplicationState(), diags,
        trackingUrl, this.startTime, this.finishTime, finishState,
        appUsageReport, origTrackingUrl, progress, this.applicationType, 
        amrmToken, applicationTags);
  } finally {
    this.readLock.unlock();
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:81,代码来源:RMAppImpl.java

示例14: testExpireWhileRunning

import org.apache.hadoop.yarn.server.utils.BuilderUtils; //导入方法依赖的package包/类
/**
 * Drives an RMContainer through NEW -> ALLOCATED -> ACQUIRED -> RUNNING and
 * then fires an EXPIRE event; while RUNNING the expire must be a no-op: the
 * state stays RUNNING and no containerFinished callbacks are made to the
 * history writer or metrics publisher.
 */
@Test
public void testExpireWhileRunning() {

  DrainDispatcher drainDispatcher = new DrainDispatcher();
  EventHandler<RMAppAttemptEvent> appAttemptEventHandler = mock(EventHandler.class);
  EventHandler generic = mock(EventHandler.class);
  drainDispatcher.register(RMAppAttemptEventType.class,
      appAttemptEventHandler);
  drainDispatcher.register(RMNodeEventType.class, generic);
  drainDispatcher.init(new YarnConfiguration());
  drainDispatcher.start();
  NodeId nodeId = BuilderUtils.newNodeId("host", 3425);
  ApplicationId appId = BuilderUtils.newApplicationId(1, 1);
  ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
      appId, 1);
  ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1);
  ContainerAllocationExpirer expirer = mock(ContainerAllocationExpirer.class);

  Resource resource = BuilderUtils.newResource(512, 1, 1);
  Priority priority = BuilderUtils.newPriority(5);

  Container container = BuilderUtils.newContainer(containerId, nodeId,
      "host:3465", resource, priority, null);

  // RMContext is mocked so the container uses our dispatcher and expirer.
  RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
  SystemMetricsPublisher publisher = mock(SystemMetricsPublisher.class);
  RMContext rmContext = mock(RMContext.class);
  when(rmContext.getDispatcher()).thenReturn(drainDispatcher);
  when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
  when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
  when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher);
  when(rmContext.getYarnConfiguration()).thenReturn(new YarnConfiguration());
  RMContainer rmContainer = new RMContainerImpl(container, appAttemptId,
      nodeId, "user", rmContext);

  // Freshly created container: NEW state, and creation has been reported to
  // the history writer and metrics publisher.
  assertEquals(RMContainerState.NEW, rmContainer.getState());
  assertEquals(resource, rmContainer.getAllocatedResource());
  assertEquals(nodeId, rmContainer.getAllocatedNode());
  assertEquals(priority, rmContainer.getAllocatedPriority());
  verify(writer).containerStarted(any(RMContainer.class));
  verify(publisher).containerCreated(any(RMContainer.class), anyLong());

  rmContainer.handle(new RMContainerEvent(containerId,
      RMContainerEventType.START));
  drainDispatcher.await();
  assertEquals(RMContainerState.ALLOCATED, rmContainer.getState());

  rmContainer.handle(new RMContainerEvent(containerId,
      RMContainerEventType.ACQUIRED));
  drainDispatcher.await();
  assertEquals(RMContainerState.ACQUIRED, rmContainer.getState());

  rmContainer.handle(new RMContainerEvent(containerId,
      RMContainerEventType.LAUNCHED));
  drainDispatcher.await();
  assertEquals(RMContainerState.RUNNING, rmContainer.getState());
  assertEquals("http://host:3465/node/containerlogs/container_1_0001_01_000001/user",
      rmContainer.getLogURL());

  // In RUNNING state. Verify EXPIRE and associated actions.
  reset(appAttemptEventHandler);
  ContainerStatus containerStatus = SchedulerUtils
      .createAbnormalContainerStatus(containerId,
          SchedulerUtils.EXPIRED_CONTAINER);
  rmContainer.handle(new RMContainerFinishedEvent(containerId,
      containerStatus, RMContainerEventType.EXPIRE));
  drainDispatcher.await();
  assertEquals(RMContainerState.RUNNING, rmContainer.getState());
  verify(writer, never()).containerFinished(any(RMContainer.class));
  verify(publisher, never()).containerFinished(any(RMContainer.class),
      anyLong());
}
 
开发者ID:naver,项目名称:hadoop,代码行数:73,代码来源:TestRMContainerImpl.java

示例15: testHeadroom

import org.apache.hadoop.yarn.server.utils.BuilderUtils; //导入方法依赖的package包/类
/**
 * Verifies FIFO scheduler headroom reporting: on a single 4 GB node, after
 * app 1 is allocated 1 GB and app 2 is allocated 2 GB, both applications
 * must see the remaining 1 GB as their allocation headroom.
 */
@Test (timeout = 50000)
public void testHeadroom() throws Exception {

  Configuration conf = new Configuration();
  conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
      ResourceScheduler.class);
  MockRM rm = new MockRM(conf);
  rm.start();
  FifoScheduler fs = (FifoScheduler) rm.getResourceScheduler();

  // Add a node
  RMNode n1 =
      MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1, "127.0.0.2");
  fs.handle(new NodeAddedSchedulerEvent(n1));

  // Add two applications
  ApplicationId appId1 = BuilderUtils.newApplicationId(100, 1);
  ApplicationAttemptId appAttemptId1 = BuilderUtils.newApplicationAttemptId(
      appId1, 1);
  createMockRMApp(appAttemptId1, rm.getRMContext());
  SchedulerEvent appEvent =
      new AppAddedSchedulerEvent(appId1, "queue", "user");
  fs.handle(appEvent);
  SchedulerEvent attemptEvent =
      new AppAttemptAddedSchedulerEvent(appAttemptId1, false);
  fs.handle(attemptEvent);

  ApplicationId appId2 = BuilderUtils.newApplicationId(200, 2);
  ApplicationAttemptId appAttemptId2 = BuilderUtils.newApplicationAttemptId(
      appId2, 1);
  createMockRMApp(appAttemptId2, rm.getRMContext());
  SchedulerEvent appEvent2 =
      new AppAddedSchedulerEvent(appId2, "queue", "user");
  fs.handle(appEvent2);
  SchedulerEvent attemptEvent2 =
      new AppAttemptAddedSchedulerEvent(appAttemptId2, false);
  fs.handle(attemptEvent2);

  List<ContainerId> emptyId = new ArrayList<ContainerId>();
  List<ResourceRequest> emptyAsk = new ArrayList<ResourceRequest>();

  // Set up resource requests

  // Ask for a 1 GB container for app 1
  List<ResourceRequest> ask1 = new ArrayList<ResourceRequest>();
  ask1.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),
      ResourceRequest.ANY, BuilderUtils.newResource(GB, 1), 1));
  fs.allocate(appAttemptId1, ask1, emptyId, null, null);

  // Ask for a 2 GB container for app 2
  List<ResourceRequest> ask2 = new ArrayList<ResourceRequest>();
  ask2.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),
      ResourceRequest.ANY, BuilderUtils.newResource(2 * GB, 1), 1));
  fs.allocate(appAttemptId2, ask2, emptyId, null, null);

  // Trigger container assignment
  fs.handle(new NodeUpdateSchedulerEvent(n1));

  // Get the allocation for the applications and verify headroom
  Allocation allocation1 = fs.allocate(appAttemptId1, emptyAsk, emptyId, null, null);
  Assert.assertEquals("Allocation headroom", 1 * GB,
      allocation1.getResourceLimit().getMemory());

  Allocation allocation2 = fs.allocate(appAttemptId2, emptyAsk, emptyId, null, null);
  Assert.assertEquals("Allocation headroom", 1 * GB,
      allocation2.getResourceLimit().getMemory());

  rm.stop();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:70,代码来源:TestFifoScheduler.java


注:本文中的org.apache.hadoop.yarn.server.utils.BuilderUtils.newApplicationAttemptId方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。