This article collects typical usage examples of the Java method org.apache.hadoop.yarn.api.records.Resource.setVirtualCores. If you are wondering what Resource.setVirtualCores does, how to call it, or want to see it in context, the curated code samples below may help. You can also explore the other members of its declaring class, org.apache.hadoop.yarn.api.records.Resource.
Below are 14 code examples of Resource.setVirtualCores, ordered by popularity.
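Before the examples, here is a minimal sketch of the typical call site, assuming a stock YARN client application; the class name ResourceExample and the specific values are illustrative only, not taken from any example below. Note that several examples also call Resource.setGpuCores, which is not part of vanilla Apache Hadoop and only exists in GPU-extended YARN forks.

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.Records;

public class ResourceExample {
  // Build a capability asking for 1 GiB of memory and 2 virtual cores.
  public static Resource oneGbTwoCores() {
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(1024);    // memory in MB
    capability.setVirtualCores(2); // number of virtual CPU cores
    return capability;
  }
}

Such a Resource is then attached to a container request or an ApplicationSubmissionContext, as the examples below show.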
Example 1: testRegisterNodeManagerRequestPBImpl
import org.apache.hadoop.yarn.api.records.Resource; // import the package/class this method depends on
/**
 * Test RegisterNodeManagerRequestPBImpl.
 */
@Test
public void testRegisterNodeManagerRequestPBImpl() {
  RegisterNodeManagerRequestPBImpl original = new RegisterNodeManagerRequestPBImpl();
  original.setHttpPort(8080);
  original.setNodeId(getNodeId());
  Resource resource = recordFactory.newRecordInstance(Resource.class);
  resource.setMemory(10000);
  resource.setVirtualCores(2);
  resource.setGpuCores(3);
  original.setResource(resource);

  RegisterNodeManagerRequestPBImpl copy = new RegisterNodeManagerRequestPBImpl(
      original.getProto());
  assertEquals(8080, copy.getHttpPort());
  assertEquals(9090, copy.getNodeId().getPort());
  assertEquals(10000, copy.getResource().getMemory());
  assertEquals(2, copy.getResource().getVirtualCores());
  assertEquals(3, copy.getResource().getGpuCores());
}
Example 2: testSubmitApplicationOnHA
import org.apache.hadoop.yarn.api.records.Resource; // import the package/class this method depends on
@Test(timeout = 15000)
public void testSubmitApplicationOnHA() throws Exception {
  ApplicationSubmissionContext appContext =
      Records.newRecord(ApplicationSubmissionContext.class);
  appContext.setApplicationId(cluster.createFakeAppId());
  ContainerLaunchContext amContainer =
      Records.newRecord(ContainerLaunchContext.class);
  appContext.setAMContainerSpec(amContainer);
  Resource capability = Records.newRecord(Resource.class);
  capability.setMemory(10);
  capability.setVirtualCores(1);
  capability.setGpuCores(1);
  appContext.setResource(capability);
  ApplicationId appId = client.submitApplication(appContext);
  Assert.assertTrue(getActiveRM().getRMContext().getRMApps()
      .containsKey(appId));
}
Example 3: submitTasks
import org.apache.hadoop.yarn.api.records.Resource; // import the package/class this method depends on
/**
 * Submit tasks to request containers for them.
 * A sketch of how the resulting allocations might be consumed follows this example.
 *
 * @param tasks
 *          a collection of tasks we want to request containers for
 */
private synchronized void submitTasks(Collection<TaskRecord> tasks) {
  for (TaskRecord r : tasks) {
    Resource resource = Records.newRecord(Resource.class);
    // compare the role name by value, not by reference
    if ("server".equals(r.taskRole)) {
      resource.setMemory(serverMemoryMB);
      resource.setVirtualCores(serverCores);
    } else {
      resource.setMemory(workerMemoryMB);
      resource.setVirtualCores(workerCores);
    }
    Priority priority = Records.newRecord(Priority.class);
    priority.setPriority(this.appPriority);
    r.containerRequest = new ContainerRequest(resource, null, null,
        priority);
    rmClient.addContainerRequest(r.containerRequest);
    pendingTasks.add(r);
  }
}
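The requests added via rmClient.addContainerRequest only take effect once the ResourceManager grants containers. Below is a minimal sketch of consuming those grants with the synchronous AMRMClient API; rmClient, nmClient and buildLaunchContext(...) are assumptions standing in for the application's own fields and launch-context construction, which the original example does not show.

import java.util.List;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.Container;

// Poll the ResourceManager and launch whatever containers it has granted so far.
// rmClient is the AMRMClient used above; nmClient and buildLaunchContext(...) are
// hypothetical stand-ins for the NodeManager client and launch-spec construction.
private void launchGrantedContainers() throws Exception {
  AllocateResponse response = rmClient.allocate(0.0f); // progress hint in [0, 1]
  List<Container> granted = response.getAllocatedContainers();
  for (Container container : granted) {
    // container.getResource() reflects what the scheduler actually granted,
    // which may differ from the Resource built in submitTasks(...)
    nmClient.startContainer(container, buildLaunchContext(container));
  }
}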
Example 4: createResource
import org.apache.hadoop.yarn.api.records.Resource; // import the package/class this method depends on
public static Resource createResource(int memory, int cores, int gcores) {
  Resource resource = Records.newRecord(Resource.class);
  resource.setMemory(memory);
  resource.setVirtualCores(cores);
  resource.setGpuCores(gcores);
  return resource;
}
Example 5: multiplyAndRoundDown
import org.apache.hadoop.yarn.api.records.Resource; // import the package/class this method depends on
public static Resource multiplyAndRoundDown(Resource lhs, double by) {
  Resource out = clone(lhs);
  out.setMemory((int) (lhs.getMemory() * by));
  out.setVirtualCores((int) (lhs.getVirtualCores() * by));
  out.setGpuCores((int) (lhs.getGpuCores() * by));
  return out;
}
Example 6: newResource
import org.apache.hadoop.yarn.api.records.Resource; // import the package/class this method depends on
public static Resource newResource(int memory, int vCores) {
  Resource resource = recordFactory.newRecordInstance(Resource.class);
  resource.setMemory(memory);
  resource.setVirtualCores(vCores);
  resource.setGpuCores(0);
  return resource;
}
Example 7: setResourceValue
import org.apache.hadoop.yarn.api.records.Resource; // import the package/class this method depends on
private static void setResourceValue(int val, Resource resource, ResourceType type) {
  switch (type) {
    case MEMORY:
      resource.setMemory(val);
      break;
    case CPU:
      resource.setVirtualCores(val);
      break;
    case GPU:
      resource.setGpuCores(val);
      break;
    default:
      throw new IllegalArgumentException("Invalid resource");
  }
}
Example 8: startAppMaster
import org.apache.hadoop.yarn.api.records.Resource; // import the package/class this method depends on
private ApplicationReport startAppMaster(ApplicationSubmissionContext appContext) throws Exception {
  appContext.setMaxAppAttempts(MAX_ATTEMPT);

  Map<String, LocalResource> localResources = new HashMap<>();
  Set<Path> shippedPaths = new HashSet<>();
  collectLocalResources(localResources, shippedPaths);
  final ContainerLaunchContext amContainer = setupApplicationMasterContainer(false, true, false);
  amContainer.setLocalResources(localResources);

  final String classPath = localResources.keySet().stream().collect(Collectors.joining(File.pathSeparator));
  final String shippedFiles = shippedPaths.stream().map(Path::toString)
      .collect(Collectors.joining(","));

  // Setup CLASSPATH and environment variables for ApplicationMaster
  ApplicationId appId = appContext.getApplicationId();
  final Map<String, String> appMasterEnv = setUpAmEnvironment(
      appId,
      classPath,
      shippedFiles,
      getDynamicPropertiesEncoded()
  );
  amContainer.setEnvironment(appMasterEnv);

  // Set up resource type requirements for ApplicationMaster
  Resource capability = Records.newRecord(Resource.class);
  capability.setMemory(getFlinkConfiguration()
      .getInteger(JobManagerOptions.JOB_MANAGER_HEAP_MEMORY));
  capability.setVirtualCores(1);

  appContext.setApplicationName(job.name());
  appContext.setApplicationType(ATHENAX_APPLICATION_TYPE);
  appContext.setAMContainerSpec(amContainer);
  appContext.setResource(capability);
  appContext.setApplicationTags(Collections.singleton(job.metadata().serialize()));
  if (job.queue() != null) {
    appContext.setQueue(job.queue());
  }

  LOG.info("Submitting application master {}", appId);
  yarnClient.submitApplication(appContext);

  PollDeploymentStatus poll = new PollDeploymentStatus(appId);
  YARN_POLL_EXECUTOR.submit(poll);

  try {
    return poll.result.get();
  } catch (ExecutionException e) {
    LOG.warn("Failed to deploy {}, cause: {}", appId.toString(), e.getCause());
    yarnClient.killApplication(appId);
    throw (Exception) e.getCause();
  }
}
Example 9: addTo
import org.apache.hadoop.yarn.api.records.Resource; // import the package/class this method depends on
public static Resource addTo(Resource lhs, Resource rhs) {
  lhs.setMemory(lhs.getMemory() + rhs.getMemory());
  lhs.setVirtualCores(lhs.getVirtualCores() + rhs.getVirtualCores());
  lhs.setGpuCores(lhs.getGpuCores() + rhs.getGpuCores());
  return lhs;
}
Example 10: subtractFrom
import org.apache.hadoop.yarn.api.records.Resource; // import the package/class this method depends on
public static Resource subtractFrom(Resource lhs, Resource rhs) {
  lhs.setMemory(lhs.getMemory() - rhs.getMemory());
  lhs.setVirtualCores(lhs.getVirtualCores() - rhs.getVirtualCores());
  lhs.setGpuCores(lhs.getGpuCores() - rhs.getGpuCores());
  return lhs;
}
Example 11: multiplyTo
import org.apache.hadoop.yarn.api.records.Resource; // import the package/class this method depends on
public static Resource multiplyTo(Resource lhs, double by) {
  lhs.setMemory((int) (lhs.getMemory() * by));
  lhs.setVirtualCores((int) (lhs.getVirtualCores() * by));
  lhs.setGpuCores((int) (lhs.getGpuCores() * by));
  return lhs;
}
Example 12: testNames
import org.apache.hadoop.yarn.api.records.Resource; // import the package/class this method depends on
@Test public void testNames() {
  DefaultMetricsSystem.initialize("NodeManager");
  NodeManagerMetrics metrics = NodeManagerMetrics.create();
  Resource total = Records.newRecord(Resource.class);
  total.setMemory(8 * GiB);
  total.setVirtualCores(16);
  total.setGpuCores(16);
  Resource resource = Records.newRecord(Resource.class);
  resource.setMemory(512); // 512 MiB
  resource.setVirtualCores(2);
  resource.setGpuCores(1);

  metrics.addResource(total);

  for (int i = 10; i-- > 0;) {
    // allocate 10 containers (allocatedGB: 5 GiB, availableGB: 3 GiB)
    metrics.launchedContainer();
    metrics.allocateContainer(resource);
  }

  metrics.initingContainer();
  metrics.endInitingContainer();
  metrics.runningContainer();
  metrics.endRunningContainer();

  // release 3 containers (allocatedGB: 3.5 GiB, availableGB: 4.5 GiB)
  metrics.completedContainer();
  metrics.releaseContainer(resource);
  metrics.failedContainer();
  metrics.releaseContainer(resource);
  metrics.killedContainer();
  metrics.releaseContainer(resource);

  metrics.initingContainer();
  metrics.runningContainer();

  Assert.assertTrue(!metrics.containerLaunchDuration.changed());
  metrics.addContainerLaunchDuration(1);
  Assert.assertTrue(metrics.containerLaunchDuration.changed());

  // availableGB is expected to be floored,
  // while allocatedGB is expected to be ceiled:
  // allocatedGB: 3.5 GB allocated memory is shown as 4 GB
  // availableGB: 4.5 GB available memory is shown as 4 GB
  checkMetrics(10, 1, 1, 1, 1, 1, 4, 7, 4, 14, 2, 7, 9);
}
Example 13: testNodeRegistrationWithMinimumAllocations
import org.apache.hadoop.yarn.api.records.Resource; // import the package/class this method depends on
@Test
public void testNodeRegistrationWithMinimumAllocations() throws Exception {
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, "2048");
  conf.set(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, "4");
  conf.set(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_GCORES, "4");
  rm = new MockRM(conf);
  rm.start();

  ResourceTrackerService resourceTrackerService
      = rm.getResourceTrackerService();
  RegisterNodeManagerRequest req = Records.newRecord(
      RegisterNodeManagerRequest.class);
  NodeId nodeId = BuilderUtils.newNodeId("host", 1234);
  req.setNodeId(nodeId);

  Resource capability = BuilderUtils.newResource(1024, 1, 1);
  req.setResource(capability);
  RegisterNodeManagerResponse response1 =
      resourceTrackerService.registerNodeManager(req);
  Assert.assertEquals(NodeAction.SHUTDOWN, response1.getNodeAction());

  capability.setMemory(2048);
  capability.setVirtualCores(1);
  capability.setGpuCores(1);
  req.setResource(capability);
  RegisterNodeManagerResponse response2 =
      resourceTrackerService.registerNodeManager(req);
  Assert.assertEquals(NodeAction.SHUTDOWN, response2.getNodeAction());

  capability.setMemory(1024);
  capability.setVirtualCores(4);
  capability.setGpuCores(4);
  req.setResource(capability);
  RegisterNodeManagerResponse response3 =
      resourceTrackerService.registerNodeManager(req);
  Assert.assertEquals(NodeAction.SHUTDOWN, response3.getNodeAction());

  capability.setMemory(2048);
  capability.setVirtualCores(4);
  capability.setGpuCores(4);
  req.setResource(capability);
  RegisterNodeManagerResponse response4 =
      resourceTrackerService.registerNodeManager(req);
  Assert.assertEquals(NodeAction.NORMAL, response4.getNodeAction());
}
Example 14: run
import org.apache.hadoop.yarn.api.records.Resource; // import the package/class this method depends on
private void run(String[] args) throws Exception {
  if (args.length == 0) {
    System.out.println("Usage: [options] [commands..]");
    System.out.println("options: [-file filename] [-appcp appClasspath]");
    return;
  }
  this.initArgs(args);

  // Create yarnClient
  YarnClient yarnClient = YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();

  // Create application via yarnClient
  YarnClientApplication app = yarnClient.createApplication();

  // Set up the container launch context for the application master
  ContainerLaunchContext amContainer = Records
      .newRecord(ContainerLaunchContext.class);
  ApplicationSubmissionContext appContext = app
      .getApplicationSubmissionContext();
  ApplicationId appId = appContext.getApplicationId();

  // add Ctrl+C signal handler
  CtrlCHandler handler = new CtrlCHandler(appId, yarnClient);
  Signal intSignal = new Signal("INT");
  Signal.handle(intSignal, handler);

  // set up security tokens
  amContainer.setTokens(this.setupTokens());
  // set up cache files and environment variables
  amContainer.setLocalResources(this.setupCacheFiles(appId));
  amContainer.setEnvironment(this.getEnvironment());
  String cmd = Environment.JAVA_HOME.$$() + "/bin/java"
      + " -Xmx900m"
      + " org.apache.hadoop.yarn.dmlc.ApplicationMaster"
      + this.cacheFileArg + ' ' + this.appArgs + " 1>"
      + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
      + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr";
  LOG.debug(cmd);
  amContainer.setCommands(Collections.singletonList(cmd));

  // Set up resource type requirements for ApplicationMaster
  Resource capability = Records.newRecord(Resource.class);
  capability.setMemory(1024);
  capability.setVirtualCores(1);
  LOG.info("jobname=" + this.jobName + ",username=" + this.userName);

  appContext.setApplicationName(jobName + ":DMLC-YARN");
  appContext.setAMContainerSpec(amContainer);
  appContext.setResource(capability);
  appContext.setQueue(queue);
  //appContext.setUser(userName);

  // Submit application
  LOG.info("Submitting application " + appId);
  yarnClient.submitApplication(appContext);

  ApplicationReport appReport = yarnClient.getApplicationReport(appId);
  YarnApplicationState appState = appReport.getYarnApplicationState();
  while (appState != YarnApplicationState.FINISHED
      && appState != YarnApplicationState.KILLED
      && appState != YarnApplicationState.FAILED) {
    Thread.sleep(100);
    appReport = yarnClient.getApplicationReport(appId);
    appState = appReport.getYarnApplicationState();
  }

  System.out.println("Application " + appId + " finished with"
      + " state " + appState + " at " + appReport.getFinishTime());
  if (!appReport.getFinalApplicationStatus().equals(
      FinalApplicationStatus.SUCCEEDED)) {
    System.err.println(appReport.getDiagnostics());
    System.out.println("Available queues:");
    for (QueueInfo q : yarnClient.getAllQueues()) {
      System.out.println(q.getQueueName());
    }
    yarnClient.killApplication(appId);
  }
}