

Java Resource.newInstance Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.yarn.api.records.Resource.newInstance. If you are wondering what exactly Resource.newInstance does, how to call it, or what working examples look like, the curated code examples below may help. You can also explore further usage examples of org.apache.hadoop.yarn.api.records.Resource, the class this method belongs to.


The following presents 15 code examples of the Resource.newInstance method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
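Before the collected snippets, here is a minimal standalone sketch of the most basic call, assuming the stock Apache Hadoop 2.x API in which Resource.newInstance(memory, vCores) takes a memory size in MB and a number of virtual cores; the ResourceNewInstanceDemo class name is invented purely for illustration. Note that several examples further down call a three-argument overload that comes from forked Hadoop builds carrying an extra GPU-core field (visible in the gCores/gcoreSeconds strings in Example 6), not from stock Hadoop.

import org.apache.hadoop.yarn.api.records.Resource;

public class ResourceNewInstanceDemo {
  public static void main(String[] args) {
    // Build a resource record asking for 1024 MB of memory and 1 virtual core.
    Resource capability = Resource.newInstance(1024, 1);

    // The record exposes plain getters; getMemory() is the Hadoop 2.x accessor.
    System.out.println("memory=" + capability.getMemory()
        + ", vCores=" + capability.getVirtualCores());
  }
}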

Example 1: run

import org.apache.hadoop.yarn.api.records.Resource; // import the package/class that this method depends on
public boolean run() throws Exception {
  YarnClientApplication app = createApplication();
  ApplicationId appId = app.getNewApplicationResponse().getApplicationId();

  // Copy the application jar to the filesystem
  FileSystem fs = FileSystem.get(conf);
  String appIdStr = appId.toString();
  Path dstJarPath = Utils.copyLocalFileToDfs(fs, appIdStr, new Path(tfJar), Constants.TF_JAR_NAME);
  Path dstLibPath = Utils.copyLocalFileToDfs(fs, appIdStr, new Path(tfLib),
      Constants.TF_LIB_NAME);
  Map<String, Path> files = new HashMap<>();
  files.put(Constants.TF_JAR_NAME, dstJarPath);
  Map<String, LocalResource> localResources = Utils.makeLocalResources(fs, files);
  Map<String, String> javaEnv = Utils.setJavaEnv(conf);
  String command = makeAppMasterCommand(dstLibPath.toString(), dstJarPath.toString());
  LOG.info("Make ApplicationMaster command: " + command);
  ContainerLaunchContext launchContext = ContainerLaunchContext.newInstance(
      localResources, javaEnv, Lists.newArrayList(command), null, null, null);
  Resource resource = Resource.newInstance(amMemory, amVCores);
  submitApplication(app, appName, launchContext, resource, amQueue);
  return awaitApplication(appId);
}
 
Developer ID: Intel-bigdata, Project: TensorFlowOnYARN, Lines of code: 23, Source file: LaunchCluster.java

Example 2: testDifferentLocalityRelaxationSamePriority

import org.apache.hadoop.yarn.api.records.Resource; // import the package/class that this method depends on
@Test (expected = InvalidContainerRequestException.class)
public void testDifferentLocalityRelaxationSamePriority() {
  AMRMClientImpl<ContainerRequest> client =
      new AMRMClientImpl<ContainerRequest>();
  Configuration conf = new Configuration();
  conf.setClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      MyResolver.class, DNSToSwitchMapping.class);
  client.init(conf);
  
  Resource capability = Resource.newInstance(1024, 1, 1);
  ContainerRequest request1 =
      new ContainerRequest(capability, new String[] {"host1", "host2"},
          null, Priority.newInstance(1), false);
  client.addContainerRequest(request1);
  ContainerRequest request2 =
      new ContainerRequest(capability, new String[] {"host3"},
          null, Priority.newInstance(1), true);
  client.addContainerRequest(request2);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 21, Source file: TestAMRMClientContainerRequest.java

Example 3: setUp

import org.apache.hadoop.yarn.api.records.Resource; // import the package/class that this method depends on
@Before
public void setUp() {
  clock = mock(Clock.class);
  plan = mock(Plan.class);
  rSystem = mock(ReservationSystem.class);
  plans.put(PLAN_NAME, plan);
  rrValidator = new ReservationInputValidator(clock);
  when(clock.getTime()).thenReturn(1L);
  ResourceCalculator rCalc = new DefaultResourceCalculator();
  Resource resource = Resource.newInstance(10240, 10, 10);
  when(plan.getResourceCalculator()).thenReturn(rCalc);
  when(plan.getTotalCapacity()).thenReturn(resource);
  when(rSystem.getQueueForReservation(any(ReservationId.class))).thenReturn(
      PLAN_NAME);
  when(rSystem.getPlan(PLAN_NAME)).thenReturn(plan);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 17, Source file: TestReservationInputValidator.java

Example 4: mockApp

import org.apache.hadoop.yarn.api.records.Resource; // import the package/class that this method depends on
FiCaSchedulerApp mockApp(int qid, int id, int used, int pending, int reserved,
    int gran) {
  FiCaSchedulerApp app = mock(FiCaSchedulerApp.class);

  ApplicationId appId = ApplicationId.newInstance(TS, id);
  ApplicationAttemptId appAttId = ApplicationAttemptId.newInstance(appId, 0);
  when(app.getApplicationId()).thenReturn(appId);
  when(app.getApplicationAttemptId()).thenReturn(appAttId);

  int cAlloc = 0;
  Resource unit = Resource.newInstance(gran, 0, 0);
  List<RMContainer> cReserved = new ArrayList<RMContainer>();
  for (int i = 0; i < reserved; i += gran) {
    cReserved.add(mockContainer(appAttId, cAlloc, unit, priority.CONTAINER
        .getValue()));
    ++cAlloc;
  }
  when(app.getReservedContainers()).thenReturn(cReserved);

  List<RMContainer> cLive = new ArrayList<RMContainer>();
  for (int i = 0; i < used; i += gran) {
    if (setAMContainer && i == 0) {
      cLive.add(mockContainer(appAttId, cAlloc, unit, priority.AMCONTAINER
          .getValue()));
    } else if (setLabeledContainer && i == 1) {
      cLive.add(mockContainer(appAttId, cAlloc, unit,
          priority.LABELEDCONTAINER.getValue()));
      ++used;
    } else {
      cLive.add(mockContainer(appAttId, cAlloc, unit, priority.CONTAINER
          .getValue()));
    }
    ++cAlloc;
  }
  when(app.getLiveContainers()).thenReturn(cLive);
  return app;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 39, Source file: TestProportionalCapacityPreemptionPolicy.java

Example 5: PSAgentManager

import org.apache.hadoop.yarn.api.records.Resource; // import the package/class that this method depends on
public PSAgentManager(AMContext context) {
  this.context = context;
  Configuration conf = context.getConf();

  int psAgentMemory =
      conf.getInt(AngelConf.ANGEL_PSAGENT_MERMORY_MB,
          AngelConf.DEFAULT_ANGEL_PSAGENT_MERMORY_MB);
  int psAgentVcores =
      conf.getInt(AngelConf.ANGEL_PSAGENT_CPU_VCORES,
          AngelConf.DEFAULT_ANGEL_PSAGENT_CPU_VCORES);
  int priority =
      conf.getInt(AngelConf.ANGEL_PSAGENT_PRIORITY,
          AngelConf.DEFAULT_ANGEL_PSAGENT_PRIORITY);

  maxAttemptNum =
      conf.getInt(AngelConf.ANGEL_PSAGENT_MAX_ATTEMPTS,
          AngelConf.DEFAULT_ANGEL_PSAGENT_MAX_ATTEMPTS);

  LOG.info("psagent priority = " + priority);
  psAgentResource = Resource.newInstance(psAgentMemory, psAgentVcores);
  psAgentPriority.setPriority(priority);

  psAgentMap = new ConcurrentHashMap<PSAgentId, AMPSAgent>();
  successPSAgentMap = new ConcurrentHashMap<PSAgentId, AMPSAgent>();
  killedPSAgentMap = new ConcurrentHashMap<PSAgentId, AMPSAgent>();
  failedPSAgentMap = new ConcurrentHashMap<PSAgentId, AMPSAgent>();
}
 
Developer ID: Tencent, Project: angel, Lines of code: 28, Source file: PSAgentManager.java

Example 6: testEscapeApplicationSummary

import org.apache.hadoop.yarn.api.records.Resource; // import the package/class that this method depends on
@Test (timeout = 30000)
public void testEscapeApplicationSummary() {
  RMApp app = mock(RMAppImpl.class);
  when(app.getApplicationId()).thenReturn(
      ApplicationId.newInstance(100L, 1));
  when(app.getName()).thenReturn("Multiline\n\n\r\rAppName");
  when(app.getUser()).thenReturn("Multiline\n\n\r\rUserName");
  when(app.getQueue()).thenReturn("Multiline\n\n\r\rQueueName");
  when(app.getState()).thenReturn(RMAppState.RUNNING);
  when(app.getApplicationType()).thenReturn("MAPREDUCE");
  RMAppMetrics metrics =
      new RMAppMetrics(Resource.newInstance(1234, 56, 56), 10, 1, 16384, 64, 64);
  when(app.getRMAppMetrics()).thenReturn(metrics);

  RMAppManager.ApplicationSummary.SummaryBuilder summary =
      new RMAppManager.ApplicationSummary().createAppSummary(app);
  String msg = summary.toString();
  LOG.info("summary: " + msg);
  Assert.assertFalse(msg.contains("\n"));
  Assert.assertFalse(msg.contains("\r"));

  String escaped = "\\n\\n\\r\\r";
  Assert.assertTrue(msg.contains("Multiline" + escaped + "AppName"));
  Assert.assertTrue(msg.contains("Multiline" + escaped + "UserName"));
  Assert.assertTrue(msg.contains("Multiline" + escaped + "QueueName"));
  Assert.assertTrue(msg.contains("memorySeconds=16384"));
  Assert.assertTrue(msg.contains("vcoreSeconds=64"));
  Assert.assertTrue(msg.contains("gcoreSeconds=64"));
  Assert.assertTrue(msg.contains("preemptedAMContainers=1"));
  Assert.assertTrue(msg.contains("preemptedNonAMContainers=10"));
  Assert.assertTrue(msg.contains("preemptedResources=<memory:1234\\, vCores:56\\, gCores:56>"));
  Assert.assertTrue(msg.contains("applicationType=MAPREDUCE"));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 34, Source file: TestAppManager.java

Example 7: testZeroAlloaction

import org.apache.hadoop.yarn.api.records.Resource; // import the package/class that this method depends on
@Test
public void testZeroAlloaction() {
  ResourceCalculator resCalc = new DefaultResourceCalculator();
  Resource minAlloc = Resource.newInstance(1, 1, 1);
  RLESparseResourceAllocation rleSparseVector =
      new RLESparseResourceAllocation(resCalc, minAlloc);
  rleSparseVector.addInterval(new ReservationInterval(0, Long.MAX_VALUE),
      ReservationRequest.newInstance(Resource.newInstance(0, 0, 0), (0)));
  LOG.info(rleSparseVector.toString());
  Assert.assertEquals(Resource.newInstance(0, 0, 0),
      rleSparseVector.getCapacityAtTime(new Random().nextLong()));
  Assert.assertTrue(rleSparseVector.isEmpty());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 14, Source file: TestRLESparseResourceAllocation.java

Example 8: getTotalPendingRequests

import org.apache.hadoop.yarn.api.records.Resource; // import the package/class that this method depends on
public synchronized Resource getTotalPendingRequests() {
  Resource ret = Resource.newInstance(0, 0, 0);
  for (ResourceRequest rr : appSchedulingInfo.getAllResourceRequests()) {
    // to avoid double counting we count only "ANY" resource requests
    if (ResourceRequest.isAnyLocation(rr.getResourceName())){
      Resources.addTo(ret,
          Resources.multiply(rr.getCapability(), rr.getNumContainers()));
    }
  }
  return ret;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 12, Source file: FiCaSchedulerApp.java

Example 9: getMinCapability

import org.apache.hadoop.yarn.api.records.Resource; // import the package/class that this method depends on
protected Resource getMinCapability() {
  int minAllocMemory = conf.getInt(
      YARN_APPLICATION_HPC_SCHEDULER_MINIMUM_ALLOCATION_MB,
      DEFAULT_YARN_APPLICATION_HPC_SCHEDULER_MINIMUM_ALLOCATION_MB);
  int minAllocCPUs = conf.getInt(
      YARN_APPLICATION_HPC_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
      DEFAULT_YARN_APPLICATION_HPC_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
  return Resource.newInstance(minAllocMemory, minAllocCPUs);
}
 
Developer ID: intel-hpdd, Project: scheduling-connector-for-hadoop, Lines of code: 10, Source file: PBSApplicationClient.java

Example 10: getResourcesAtTime

import org.apache.hadoop.yarn.api.records.Resource; // import the package/class that this method depends on
@Override
public Resource getResourcesAtTime(long tick) {
  if (tick < startTime || tick >= endTime) {
    return Resource.newInstance(0, 0, 0);
  }
  return Resources.clone(resourcesOverTime.getCapacityAtTime(tick));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 8, Source file: InMemoryReservationAllocation.java

Example 11: testInvalidValidWhenOldRemoved

import org.apache.hadoop.yarn.api.records.Resource; // import the package/class that this method depends on
@Test
public void testInvalidValidWhenOldRemoved() {
  AMRMClientImpl<ContainerRequest> client =
      new AMRMClientImpl<ContainerRequest>();
  Configuration conf = new Configuration();
  conf.setClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      MyResolver.class, DNSToSwitchMapping.class);
  client.init(conf);
  
  Resource capability = Resource.newInstance(1024, 1, 1);
  ContainerRequest request1 =
      new ContainerRequest(capability, new String[] {"host1", "host2"},
          null, Priority.newInstance(1), false);
  client.addContainerRequest(request1);
  
  client.removeContainerRequest(request1);

  ContainerRequest request2 =
      new ContainerRequest(capability, new String[] {"host3"},
          null, Priority.newInstance(1), true);
  client.addContainerRequest(request2);
  
  client.removeContainerRequest(request2);
  
  ContainerRequest request3 =
      new ContainerRequest(capability, new String[] {"host1", "host2"},
          null, Priority.newInstance(1), false);
  client.addContainerRequest(request3);
  
  client.removeContainerRequest(request3);
  
  ContainerRequest request4 =
      new ContainerRequest(capability, null,
          new String[] {"rack1"}, Priority.newInstance(1), true);
  client.addContainerRequest(request4);

}
 
Developer ID: naver, Project: hadoop, Lines of code: 39, Source file: TestAMRMClientContainerRequest.java

Example 12: setup

import org.apache.hadoop.yarn.api.records.Resource; // import the package/class that this method depends on
@BeforeClass
public static void setup() throws Exception {
  // start minicluster
  conf = new YarnConfiguration();
  conf.setLong(
    YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,
    rolling_interval_sec);
  conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, am_expire_ms);
  conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 100);
  conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
  yarnCluster = new MiniYARNCluster(TestAMRMClient.class.getName(), nodeCount, 1, 1);
  yarnCluster.init(conf);
  yarnCluster.start();

  // start rm client
  yarnClient = YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();

  // get node info
  nodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
  
  priority = Priority.newInstance(1);
  priority2 = Priority.newInstance(2);
  capability = Resource.newInstance(1024, 1, 1);

  node = nodeReports.get(0).getNodeId().getHost();
  rack = nodeReports.get(0).getRackName();
  nodes = new String[]{ node };
  racks = new String[]{ rack };
}
 
Developer ID: naver, Project: hadoop, Lines of code: 32, Source file: TestAMRMClient.java

Example 13: getRunningNode

import org.apache.hadoop.yarn.api.records.Resource; // import the package/class that this method depends on
private RMNodeImpl getRunningNode(String nmVersion) {
  NodeId nodeId = BuilderUtils.newNodeId("localhost", 0);
  Resource capability = Resource.newInstance(4096, 4, 4);
  RMNodeImpl node = new RMNodeImpl(nodeId, rmContext, null, 0, 0,
      null, capability, nmVersion);
  node.handle(new RMNodeStartedEvent(node.getNodeID(), null, null));
  Assert.assertEquals(NodeState.RUNNING, node.getState());
  return node;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 10, Source file: TestRMNodeTransitions.java

Example 14: FSLeafQueue

import org.apache.hadoop.yarn.api.records.Resource; // import the package/class that this method depends on
public FSLeafQueue(String name, FairScheduler scheduler,
    FSParentQueue parent) {
  super(name, scheduler, parent);
  this.lastTimeAtMinShare = scheduler.getClock().getTime();
  this.lastTimeAtFairShareThreshold = scheduler.getClock().getTime();
  activeUsersManager = new ActiveUsersManager(getMetrics());
  amResourceUsage = Resource.newInstance(0, 0, 0);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 9, Source file: FSLeafQueue.java

Example 15: testUpdateMaxAllocationUsesTotal

import org.apache.hadoop.yarn.api.records.Resource; // import the package/class that this method depends on
@Test
public void testUpdateMaxAllocationUsesTotal() throws IOException {
  final int configuredMaxVCores = 20;
  final int configuredMaxMemory = 10 * 1024;
  Resource configuredMaximumResource = Resource.newInstance(
      configuredMaxMemory, configuredMaxVCores, configuredMaxVCores);

  configureScheduler();
  YarnConfiguration conf = getConf();
  conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
      configuredMaxVCores);
  conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
      configuredMaxMemory);
  conf.setLong(
      YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS,
      0);

  MockRM rm = new MockRM(conf);
  try {
    rm.start();
    AbstractYarnScheduler scheduler = (AbstractYarnScheduler) rm
        .getResourceScheduler();

    Resource emptyResource = Resource.newInstance(0, 0, 0);
    Resource fullResource1 = Resource.newInstance(1024, 5, 5);
    Resource fullResource2 = Resource.newInstance(2048, 10, 10);

    SchedulerNode mockNode1 = mock(SchedulerNode.class);
    when(mockNode1.getNodeID()).thenReturn(NodeId.newInstance("foo", 8080));
    when(mockNode1.getAvailableResource()).thenReturn(emptyResource);
    when(mockNode1.getTotalResource()).thenReturn(fullResource1);

    SchedulerNode mockNode2 = mock(SchedulerNode.class);
    when(mockNode2.getNodeID()).thenReturn(NodeId.newInstance("bar", 8081));
    when(mockNode2.getAvailableResource()).thenReturn(emptyResource);
    when(mockNode2.getTotalResource()).thenReturn(fullResource2);

    verifyMaximumResourceCapability(configuredMaximumResource, scheduler);

    scheduler.nodes = new HashMap<NodeId, SchedulerNode>();

    scheduler.nodes.put(mockNode1.getNodeID(), mockNode1);
    scheduler.updateMaximumAllocation(mockNode1, true);
    verifyMaximumResourceCapability(fullResource1, scheduler);

    scheduler.nodes.put(mockNode2.getNodeID(), mockNode2);
    scheduler.updateMaximumAllocation(mockNode2, true);
    verifyMaximumResourceCapability(fullResource2, scheduler);

    scheduler.nodes.remove(mockNode2.getNodeID());
    scheduler.updateMaximumAllocation(mockNode2, false);
    verifyMaximumResourceCapability(fullResource1, scheduler);

    scheduler.nodes.remove(mockNode1.getNodeID());
    scheduler.updateMaximumAllocation(mockNode1, false);
    verifyMaximumResourceCapability(configuredMaximumResource, scheduler);
  } finally {
    rm.stop();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 61, Source file: TestAbstractYarnScheduler.java


Note: The org.apache.hadoop.yarn.api.records.Resource.newInstance method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.