

Java Priority.newInstance Method Code Examples

This article collects and summarizes typical usage examples of the Java method org.apache.hadoop.yarn.api.records.Priority.newInstance. If you are wondering how Priority.newInstance is used, what it does, or what real-world calls look like, the curated code examples below may help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.yarn.api.records.Priority.


The following presents 15 code examples of the Priority.newInstance method, sorted by popularity by default.
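Before the examples, here is a minimal, hedged sketch of the method itself: Priority.newInstance(int) builds a Priority record that an application master attaches to its container/resource requests. The class name PriorityNewInstanceSketch, the 1024 MB / 1 vcore capability, and the priority value 0 are illustrative assumptions rather than code from the projects listed below.

import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;

public class PriorityNewInstanceSketch {
  public static void main(String[] args) {
    // Create the Priority record; for requests within one application,
    // a lower number conventionally denotes a higher priority.
    Priority priority = Priority.newInstance(0);

    // Attach it to a location-agnostic (ANY) request for one 1024 MB / 1 vcore container.
    Resource capability = Resource.newInstance(1024, 1);
    ResourceRequest request =
        ResourceRequest.newInstance(priority, ResourceRequest.ANY, capability, 1);

    System.out.println("Requested priority: " + request.getPriority().getPriority());
  }
}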

Example 1: handle

import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
@Override
public void handle(ContainerAllocatorEvent event) {
  ContainerId cId =
      ContainerId.newContainerId(getContext().getApplicationAttemptId(),
        containerCount++);
  NodeId nodeId = NodeId.newInstance(NM_HOST, NM_PORT);
  Resource resource = Resource.newInstance(1234, 2, 2);
  ContainerTokenIdentifier containerTokenIdentifier =
      new ContainerTokenIdentifier(cId, nodeId.toString(), "user",
      resource, System.currentTimeMillis() + 10000, 42, 42,
      Priority.newInstance(0), 0);
  Token containerToken = newContainerToken(nodeId, "password".getBytes(),
        containerTokenIdentifier);
  Container container = Container.newInstance(cId, nodeId,
      NM_HOST + ":" + NM_HTTP_PORT, resource, null, containerToken);
  JobID id = TypeConverter.fromYarn(applicationId);
  JobId jobId = TypeConverter.toYarn(id);
  getContext().getEventHandler().handle(new JobHistoryEvent(jobId, 
      new NormalizedResourceEvent(
          org.apache.hadoop.mapreduce.TaskType.REDUCE,
      100)));
  getContext().getEventHandler().handle(new JobHistoryEvent(jobId, 
      new NormalizedResourceEvent(
          org.apache.hadoop.mapreduce.TaskType.MAP,
      100)));
  getContext().getEventHandler().handle(
      new TaskAttemptContainerAssignedEvent(event.getAttemptID(),
          container, null));
}
 
Developer: naver, Project: hadoop, Lines: 30, Source: MRApp.java

Example 2: createContainerToken

import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
public static Token createContainerToken(ContainerId cId, long rmIdentifier,
    NodeId nodeId, String user,
    NMContainerTokenSecretManager containerTokenSecretManager,
    LogAggregationContext logAggregationContext)
    throws IOException {
  Resource r = BuilderUtils.newResource(1024, 1);
  ContainerTokenIdentifier containerTokenIdentifier =
      new ContainerTokenIdentifier(cId, nodeId.toString(), user, r,
        System.currentTimeMillis() + 100000L, 123, rmIdentifier,
        Priority.newInstance(0), 0, logAggregationContext);
  Token containerToken =
      BuilderUtils
        .newContainerToken(nodeId, containerTokenSecretManager
          .retrievePassword(containerTokenIdentifier),
          containerTokenIdentifier);
  return containerToken;
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: TestContainerManager.java

Example 3: setupContainerAskForRM

import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
/**
 * Setup the request that will be sent to the RM for the container ask.
 *
 * @return the setup ResourceRequest to be sent to RM
 */
private ContainerRequest setupContainerAskForRM() {
  // setup requirements for hosts
  // using * as any host will do for the distributed shell app
  // set the priority for the request
  // TODO - what is the range for priority? how to decide?
  Priority pri = Priority.newInstance(requestPriority);

  // Set up resource type requirements
  // For now, memory and CPU are supported so we set memory and cpu requirements
  Resource capability = Resource.newInstance(containerMemory,
    containerVirtualCores, containerGpuCores);

  ContainerRequest request = new ContainerRequest(capability, null, null,
      pri);
  LOG.info("Requested container ask: " + request.toString());
  return request;
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: ApplicationMaster.java

Example 4: testFillInRacks

import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
@Test
public void testFillInRacks() {
  AMRMClientImpl<ContainerRequest> client =
      new AMRMClientImpl<ContainerRequest>();
  
  Configuration conf = new Configuration();
  conf.setClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      MyResolver.class, DNSToSwitchMapping.class);
  client.init(conf);
 
  Resource capability = Resource.newInstance(1024, 1, 1);
  ContainerRequest request =
      new ContainerRequest(capability, new String[] {"host1", "host2"},
          new String[] {"/rack2"}, Priority.newInstance(1));
  client.addContainerRequest(request);
  verifyResourceRequest(client, request, "host1", true);
  verifyResourceRequest(client, request, "host2", true);
  verifyResourceRequest(client, request, "/rack1", true);
  verifyResourceRequest(client, request, "/rack2", true);
  verifyResourceRequest(client, request, ResourceRequest.ANY, true);
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: TestAMRMClientContainerRequest.java

Example 5: testDifferentLocalityRelaxationSamePriority

import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
@Test (expected = InvalidContainerRequestException.class)
public void testDifferentLocalityRelaxationSamePriority() {
  AMRMClientImpl<ContainerRequest> client =
      new AMRMClientImpl<ContainerRequest>();
  Configuration conf = new Configuration();
  conf.setClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      MyResolver.class, DNSToSwitchMapping.class);
  client.init(conf);
  
  Resource capability = Resource.newInstance(1024, 1, 1);
  ContainerRequest request1 =
      new ContainerRequest(capability, new String[] {"host1", "host2"},
          null, Priority.newInstance(1), false);
  client.addContainerRequest(request1);
  ContainerRequest request2 =
      new ContainerRequest(capability, new String[] {"host3"},
          null, Priority.newInstance(1), true);
  client.addContainerRequest(request2);
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: TestAMRMClientContainerRequest.java

Example 6: testLocalityRelaxationDifferentLevels

import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
@Test (expected = InvalidContainerRequestException.class)
public void testLocalityRelaxationDifferentLevels() {
  AMRMClientImpl<ContainerRequest> client =
      new AMRMClientImpl<ContainerRequest>();
  Configuration conf = new Configuration();
  conf.setClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      MyResolver.class, DNSToSwitchMapping.class);
  client.init(conf);
  
  Resource capability = Resource.newInstance(1024, 1, 1);
  ContainerRequest request1 =
      new ContainerRequest(capability, new String[] {"host1", "host2"},
          null, Priority.newInstance(1), false);
  client.addContainerRequest(request1);
  ContainerRequest request2 =
      new ContainerRequest(capability, null,
          new String[] {"rack1"}, Priority.newInstance(1), true);
  client.addContainerRequest(request2);
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: TestAMRMClientContainerRequest.java

Example 7: setupContainerAskForRM

import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
/**
 * Setup the request that will be sent to the RM for the container ask.
 *
 * @return the setup ResourceRequest to be sent to RM
 */
private ContainerRequest setupContainerAskForRM() {
  // Set up resource type requirements
  // For now, memory and CPU are supported so we set memory and cpu requirements
  Resource capability = Resource.newInstance(containerMemory, containerVCores);
  Priority priority = Priority.newInstance(0);

  return new ContainerRequest(capability, null, null, priority);
}
 
Developer: Intel-bigdata, Project: TensorFlowOnYARN, Lines: 14, Source: ApplicationMaster.java

Example 8: newContainerToken

import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
public static Token newContainerToken(ContainerId cId, String host,
    int port, String user, Resource r, long expiryTime, int masterKeyId,
    byte[] password, long rmIdentifier) throws IOException {
  ContainerTokenIdentifier identifier =
      new ContainerTokenIdentifier(cId, host + ":" + port, user, r,
        expiryTime, masterKeyId, rmIdentifier, Priority.newInstance(0), 0);
  return newContainerToken(BuilderUtils.newNodeId(host, port), password,
      identifier);
}
 
Developer: naver, Project: hadoop, Lines: 10, Source: BuilderUtils.java

Example 9: createContainerTokenId

import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
private static ContainerTokenIdentifier createContainerTokenId(
    ContainerId cid, NodeId nodeId, String user,
    NMContainerTokenSecretManager secretMgr) throws IOException {
  long rmid = cid.getApplicationAttemptId().getApplicationId()
      .getClusterTimestamp();
  ContainerTokenIdentifier ctid = new ContainerTokenIdentifier(cid,
      nodeId.toString(), user, BuilderUtils.newResource(1024, 1),
      System.currentTimeMillis() + 100000L,
      secretMgr.getCurrentKey().getKeyId(), rmid,
      Priority.newInstance(0), 0);
  Token token = BuilderUtils.newContainerToken(nodeId,
      secretMgr.createPassword(ctid), ctid);
  return BuilderUtils.newContainerTokenIdentifier(token);
}
 
Developer: naver, Project: hadoop, Lines: 15, Source: TestNMContainerTokenSecretManager.java

Example 10: FSAppAttempt

import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
public FSAppAttempt(FairScheduler scheduler,
    ApplicationAttemptId applicationAttemptId, String user, FSLeafQueue queue,
    ActiveUsersManager activeUsersManager, RMContext rmContext) {
  super(applicationAttemptId, user, queue, activeUsersManager, rmContext);

  this.scheduler = scheduler;
  this.startTime = scheduler.getClock().getTime();
  this.priority = Priority.newInstance(1);
  this.resourceWeights = new ResourceWeights();
}
 
Developer: naver, Project: hadoop, Lines: 11, Source: FSAppAttempt.java

Example 11: testInvalidValidWhenOldRemoved

import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
@Test
public void testInvalidValidWhenOldRemoved() {
  AMRMClientImpl<ContainerRequest> client =
      new AMRMClientImpl<ContainerRequest>();
  Configuration conf = new Configuration();
  conf.setClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      MyResolver.class, DNSToSwitchMapping.class);
  client.init(conf);
  
  Resource capability = Resource.newInstance(1024, 1, 1);
  ContainerRequest request1 =
      new ContainerRequest(capability, new String[] {"host1", "host2"},
          null, Priority.newInstance(1), false);
  client.addContainerRequest(request1);
  
  client.removeContainerRequest(request1);

  ContainerRequest request2 =
      new ContainerRequest(capability, new String[] {"host3"},
          null, Priority.newInstance(1), true);
  client.addContainerRequest(request2);
  
  client.removeContainerRequest(request2);
  
  ContainerRequest request3 =
      new ContainerRequest(capability, new String[] {"host1", "host2"},
          null, Priority.newInstance(1), false);
  client.addContainerRequest(request3);
  
  client.removeContainerRequest(request3);
  
  ContainerRequest request4 =
      new ContainerRequest(capability, null,
          new String[] {"rack1"}, Priority.newInstance(1), true);
  client.addContainerRequest(request4);

}
 
Developer: naver, Project: hadoop, Lines: 39, Source: TestAMRMClientContainerRequest.java

Example 12: addContainerRequest

import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
private void addContainerRequest(TaskStatusLocator locator, ContainerRequest request) {
  nextContainerRequestPriority = Priority.newInstance(nextContainerRequestPriority.getPriority() + 1);
  taskContainerRequests.put(locator, request);
  priorityLocators.put(request.getPriority(), locator);
}
 
Developer: Microsoft, Project: pai, Lines: 6, Source: StatusManager.java

Example 13: testContainerTokenIdentifier

import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
@Test
public void testContainerTokenIdentifier() throws IOException {
  ContainerId containerID = ContainerId.newContainerId(
      ApplicationAttemptId.newInstance(ApplicationId.newInstance(
          1, 1), 1), 1);
  String hostName = "host0";
  String appSubmitter = "usr0";
  Resource r = Resource.newInstance(1024, 1);
  long expiryTimeStamp = 1000;
  int masterKeyId = 1;
  long rmIdentifier = 1;
  Priority priority = Priority.newInstance(1);
  long creationTime = 1000;
  
  ContainerTokenIdentifier token = new ContainerTokenIdentifier(
      containerID, hostName, appSubmitter, r, expiryTimeStamp, 
      masterKeyId, rmIdentifier, priority, creationTime);
  
  ContainerTokenIdentifier anotherToken = new ContainerTokenIdentifier();
  
  byte[] tokenContent = token.getBytes();
  DataInputBuffer dib = new DataInputBuffer();
  dib.reset(tokenContent, tokenContent.length);
  anotherToken.readFields(dib);
  
  // verify the whole record equals with original record
  Assert.assertEquals("Token is not the same after serialization " +
      "and deserialization.", token, anotherToken);
  
  Assert.assertEquals(
      "ContainerID from proto is not the same with original token",
      anotherToken.getContainerID(), containerID);
  
  Assert.assertEquals(
      "Hostname from proto is not the same with original token",
      anotherToken.getNmHostAddress(), hostName);
  
  Assert.assertEquals(
      "ApplicationSubmitter from proto is not the same with original token",
      anotherToken.getApplicationSubmitter(), appSubmitter);
  
  Assert.assertEquals(
      "Resource from proto is not the same with original token",
      anotherToken.getResource(), r);
  
  Assert.assertEquals(
      "expiryTimeStamp from proto is not the same with original token",
      anotherToken.getExpiryTimeStamp(), expiryTimeStamp);
  
  Assert.assertEquals(
      "KeyId from proto is not the same with original token",
      anotherToken.getMasterKeyId(), masterKeyId);
  
  Assert.assertEquals(
      "RMIdentifier from proto is not the same with original token",
      anotherToken.getRMIdentifier(), rmIdentifier);
  
  Assert.assertEquals(
      "Priority from proto is not the same with original token",
      anotherToken.getPriority(), priority);
  
  Assert.assertEquals(
      "CreationTime from proto is not the same with original token",
      anotherToken.getCreationTime(), creationTime);
  
  Assert.assertNull(anotherToken.getLogAggregationContext());
}
 
Developer: naver, Project: hadoop, Lines: 68, Source: TestYARNTokenIdentifier.java

Example 14: test

import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
private void test(String rpcClass) throws Exception {
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.IPC_RPC_IMPL, rpcClass);
  YarnRPC rpc = YarnRPC.create(conf);
  String bindAddr = "localhost:0";
  InetSocketAddress addr = NetUtils.createSocketAddr(bindAddr);
  Server server = rpc.getServer(ContainerManagementProtocol.class, 
          new DummyContainerManager(), addr, conf, null, 1);
  server.start();
  RPC.setProtocolEngine(conf, ContainerManagementProtocolPB.class, ProtobufRpcEngine.class);
  ContainerManagementProtocol proxy = (ContainerManagementProtocol) 
      rpc.getProxy(ContainerManagementProtocol.class, 
          NetUtils.getConnectAddress(server), conf);
  ContainerLaunchContext containerLaunchContext = 
      recordFactory.newRecordInstance(ContainerLaunchContext.class);

  ApplicationId applicationId = ApplicationId.newInstance(0, 0);
  ApplicationAttemptId applicationAttemptId =
      ApplicationAttemptId.newInstance(applicationId, 0);
  ContainerId containerId =
      ContainerId.newContainerId(applicationAttemptId, 100);
  NodeId nodeId = NodeId.newInstance("localhost", 1234);
  Resource resource = Resource.newInstance(1234, 2);
  ContainerTokenIdentifier containerTokenIdentifier =
      new ContainerTokenIdentifier(containerId, "localhost", "user",
        resource, System.currentTimeMillis() + 10000, 42, 42,
        Priority.newInstance(0), 0);
  Token containerToken = newContainerToken(nodeId, "password".getBytes(),
        containerTokenIdentifier);

  StartContainerRequest scRequest =
      StartContainerRequest.newInstance(containerLaunchContext,
        containerToken);
  List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
  list.add(scRequest);
  StartContainersRequest allRequests =
      StartContainersRequest.newInstance(list);
  proxy.startContainers(allRequests);

  List<ContainerId> containerIds = new ArrayList<ContainerId>();
  containerIds.add(containerId);
  GetContainerStatusesRequest gcsRequest =
      GetContainerStatusesRequest.newInstance(containerIds);
  GetContainerStatusesResponse response =
      proxy.getContainerStatuses(gcsRequest);
  List<ContainerStatus> statuses = response.getContainerStatuses();

  //test remote exception
  boolean exception = false;
  try {
    StopContainersRequest stopRequest =
        recordFactory.newRecordInstance(StopContainersRequest.class);
    stopRequest.setContainerIds(containerIds);
    proxy.stopContainers(stopRequest);
  } catch (YarnException e) {
    exception = true;
    Assert.assertTrue(e.getMessage().contains(EXCEPTION_MSG));
    Assert.assertTrue(e.getMessage().contains(EXCEPTION_CAUSE));
    System.out.println("Test Exception is " + e.getMessage());
  } catch (Exception ex) {
    ex.printStackTrace();
  }
  Assert.assertTrue(exception);
  
  server.stop();
  Assert.assertNotNull(statuses.get(0));
  Assert.assertEquals(ContainerState.RUNNING, statuses.get(0).getState());
}
 
Developer: naver, Project: hadoop, Lines: 69, Source: TestRPC.java

Example 15: testRPCTimeout

import org.apache.hadoop.yarn.api.records.Priority; // import the package/class this method depends on
private void testRPCTimeout(String rpcClass) throws Exception {
  Configuration conf = new Configuration();
  // set timeout low for the test
  conf.setInt("yarn.rpc.nm-command-timeout", 3000);

  conf.set(YarnConfiguration.IPC_RPC_IMPL, rpcClass);
  YarnRPC rpc = YarnRPC.create(conf);
  String bindAddr = "localhost:0";
  InetSocketAddress addr = NetUtils.createSocketAddr(bindAddr);
  Server server = rpc.getServer(ContainerManagementProtocol.class,
      new DummyContainerManager(), addr, conf, null, 1);
  server.start();
  try {

    ContainerManagementProtocol proxy = (ContainerManagementProtocol) rpc.getProxy(
        ContainerManagementProtocol.class,
        server.getListenerAddress(), conf);
    ContainerLaunchContext containerLaunchContext = recordFactory
        .newRecordInstance(ContainerLaunchContext.class);

    ApplicationId applicationId = ApplicationId.newInstance(0, 0);
    ApplicationAttemptId applicationAttemptId =
        ApplicationAttemptId.newInstance(applicationId, 0);
    ContainerId containerId =
        ContainerId.newContainerId(applicationAttemptId, 100);
    NodeId nodeId = NodeId.newInstance("localhost", 1234);
    Resource resource = Resource.newInstance(1234, 2, 3);
    ContainerTokenIdentifier containerTokenIdentifier =
        new ContainerTokenIdentifier(containerId, "localhost", "user",
          resource, System.currentTimeMillis() + 10000, 42, 42,
          Priority.newInstance(0), 0);
    Token containerToken =
        TestRPC.newContainerToken(nodeId, "password".getBytes(),
          containerTokenIdentifier);

    StartContainerRequest scRequest =
        StartContainerRequest.newInstance(containerLaunchContext,
          containerToken);
    List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
    list.add(scRequest);
    StartContainersRequest allRequests =
        StartContainersRequest.newInstance(list);
    try {
      proxy.startContainers(allRequests);
    } catch (Exception e) {
      LOG.info(StringUtils.stringifyException(e));
      Assert.assertEquals("Error, exception is not: "
          + SocketTimeoutException.class.getName(),
          SocketTimeoutException.class.getName(), e.getClass().getName());
      return;
    }
  } finally {
    server.stop();
  }

  Assert.fail("timeout exception should have occurred!");
}
 
Developer: naver, Project: hadoop, Lines: 58, Source: TestContainerLaunchRPC.java


Note: The org.apache.hadoop.yarn.api.records.Priority.newInstance method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution or use should follow each project's license. Please do not republish without permission.