本文整理匯總了Java中org.apache.hadoop.yarn.api.records.Resource.setMemory方法的典型用法代碼示例。如果您正苦於以下問題:Java Resource.setMemory方法的具體用法?Java Resource.setMemory怎麽用?Java Resource.setMemory使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類org.apache.hadoop.yarn.api.records.Resource
的用法示例。
在下文中一共展示了Resource.setMemory方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: submitTasks
import org.apache.hadoop.yarn.api.records.Resource; //導入方法依賴的package包/類
/**
* submit tasks to request containers for the tasks
*
* @param tasks
* a collection of tasks we want to ask container for
*/
/**
 * Submits tasks by creating one container request per task and queueing it
 * with the ResourceManager client; each submitted task is also tracked in
 * {@code pendingTasks}.
 *
 * Resource sizing depends on the task's role: "server" tasks use the server
 * memory/core settings, all other roles use the worker settings.
 *
 * @param tasks
 *          a collection of tasks we want to ask containers for
 */
private synchronized void submitTasks(Collection<TaskRecord> tasks) {
  for (TaskRecord r : tasks) {
    Resource resource = Records.newRecord(Resource.class);
    // Bug fix: compare the role by content, not reference identity —
    // "==" on Strings only matches interned instances, so a taskRole read
    // from config or the wire would silently fall into the worker branch.
    // The constant goes first so a null taskRole still selects the worker
    // branch (as the original "==" did) instead of throwing NPE.
    if ("server".equals(r.taskRole)) {
      resource.setMemory(serverMemoryMB);
      resource.setVirtualCores(serverCores);
    } else {
      resource.setMemory(workerMemoryMB);
      resource.setVirtualCores(workerCores);
    }
    Priority priority = Records.newRecord(Priority.class);
    priority.setPriority(this.appPriority);
    r.containerRequest = new ContainerRequest(resource, null, null,
        priority);
    rmClient.addContainerRequest(r.containerRequest);
    pendingTasks.add(r);
  }
}
示例2: testRegisterNodeManagerRequestPBImpl
import org.apache.hadoop.yarn.api.records.Resource; //導入方法依賴的package包/類
/**
* Test RegisterNodeManagerRequestPBImpl.
*/
@Test
public void testRegisterNodeManagerRequestPBImpl() {
RegisterNodeManagerRequestPBImpl original = new RegisterNodeManagerRequestPBImpl();
original.setHttpPort(8080);
original.setNodeId(getNodeId());
Resource resource = recordFactory.newRecordInstance(Resource.class);
resource.setMemory(10000);
resource.setVirtualCores(2);
resource.setGpuCores(3);
original.setResource(resource);
RegisterNodeManagerRequestPBImpl copy = new RegisterNodeManagerRequestPBImpl(
original.getProto());
assertEquals(8080, copy.getHttpPort());
assertEquals(9090, copy.getNodeId().getPort());
assertEquals(10000, copy.getResource().getMemory());
assertEquals(2, copy.getResource().getVirtualCores());
assertEquals(3, copy.getResource().getGpuCores());
}
示例3: getHeadroom
import org.apache.hadoop.yarn.api.records.Resource; //導入方法依賴的package包/類
/**
 * Computes this application's current headroom from the queue's current
 * limit and the cluster resource, clamping negative memory to zero.
 *
 * @return the available headroom for this application
 */
public Resource getHeadroom() {
  Resource currentQueueLimit;
  Resource cluster;
  // Snapshot both values under the lock so they are mutually consistent.
  synchronized (queueResourceLimitsInfo) {
    currentQueueLimit = queueResourceLimitsInfo.getQueueCurrentLimit();
    cluster = queueResourceLimitsInfo.getClusterResource();
  }
  Resource headroom = queue.getHeadroom(user, currentQueueLimit, cluster,
      application, required);
  // An application slightly over its limit would yield negative memory;
  // report zero instead.
  if (headroom.getMemory() < 0) {
    headroom.setMemory(0);
  }
  return headroom;
}
示例4: createResourceReq
import org.apache.hadoop.yarn.api.records.Resource; //導入方法依賴的package包/類
/**
 * Builds a {@code ResourceRequest} for the given resource name, memory size
 * (MB), priority, container count, and optional node-label expression.
 *
 * @param resource the resource (location) name, e.g. host or "*"
 * @param memory requested memory in MB
 * @param priority request priority value
 * @param containers number of containers requested
 * @param labelExpression node-label expression, or null to omit
 * @return the populated request record
 */
public ResourceRequest createResourceReq(String resource, int memory, int priority,
    int containers, String labelExpression) throws Exception {
  Priority prio = Records.newRecord(Priority.class);
  prio.setPriority(priority);
  Resource capability = Records.newRecord(Resource.class);
  capability.setMemory(memory);
  ResourceRequest request = Records.newRecord(ResourceRequest.class);
  request.setResourceName(resource);
  request.setNumContainers(containers);
  request.setPriority(prio);
  request.setCapability(capability);
  // Only attach a label expression when one was supplied.
  if (labelExpression != null) {
    request.setNodeLabelExpression(labelExpression);
  }
  return request;
}
示例5: testSubmitApplicationOnHA
import org.apache.hadoop.yarn.api.records.Resource; //導入方法依賴的package包/類
/**
 * Verifies that an application submitted while the cluster is in HA mode is
 * registered with the active ResourceManager.
 */
@Test(timeout = 15000)
public void testSubmitApplicationOnHA() throws Exception {
  ApplicationSubmissionContext context =
      Records.newRecord(ApplicationSubmissionContext.class);
  context.setApplicationId(cluster.createFakeAppId());
  ContainerLaunchContext launchContext =
      Records.newRecord(ContainerLaunchContext.class);
  context.setAMContainerSpec(launchContext);
  // Minimal AM resource ask: 10 MB, one vcore, one GPU core.
  Resource ask = Records.newRecord(Resource.class);
  ask.setMemory(10);
  ask.setVirtualCores(1);
  ask.setGpuCores(1);
  context.setResource(ask);
  ApplicationId submittedId = client.submitApplication(context);
  Assert.assertTrue(getActiveRM().getRMContext().getRMApps()
      .containsKey(submittedId));
}
示例6: createResource
import org.apache.hadoop.yarn.api.records.Resource; //導入方法依賴的package包/類
/**
 * Creates a {@code Resource} record with the given memory (MB), virtual
 * cores, and GPU cores.
 *
 * @param memory memory in MB
 * @param cores number of virtual cores
 * @param gcores number of GPU cores
 * @return the populated resource record
 */
public static Resource createResource(int memory, int cores, int gcores) {
  Resource r = Records.newRecord(Resource.class);
  r.setGpuCores(gcores);
  r.setVirtualCores(cores);
  r.setMemory(memory);
  return r;
}
示例7: multiplyAndRoundDown
import org.apache.hadoop.yarn.api.records.Resource; //導入方法依賴的package包/類
/**
 * Returns a new {@code Resource} with every dimension of {@code lhs}
 * multiplied by {@code by} and truncated to an int; {@code lhs} itself is
 * left untouched.
 *
 * @param lhs the resource to scale (not modified)
 * @param by the scaling factor
 * @return a scaled copy of {@code lhs}
 */
public static Resource multiplyAndRoundDown(Resource lhs, double by) {
  // Compute all scaled values first, then write them into a fresh copy.
  int mem = (int) (lhs.getMemory() * by);
  int vcores = (int) (lhs.getVirtualCores() * by);
  int gcores = (int) (lhs.getGpuCores() * by);
  Resource scaled = clone(lhs);
  scaled.setMemory(mem);
  scaled.setVirtualCores(vcores);
  scaled.setGpuCores(gcores);
  return scaled;
}
示例8: newResource
import org.apache.hadoop.yarn.api.records.Resource; //導入方法依賴的package包/類
/**
 * Creates a {@code Resource} record with the given memory (MB) and virtual
 * cores; GPU cores are always initialized to zero.
 *
 * @param memory memory in MB
 * @param vCores number of virtual cores
 * @return the populated resource record
 */
public static Resource newResource(int memory, int vCores) {
  Resource r = recordFactory.newRecordInstance(Resource.class);
  r.setGpuCores(0);
  r.setVirtualCores(vCores);
  r.setMemory(memory);
  return r;
}
示例9: setResourceValue
import org.apache.hadoop.yarn.api.records.Resource; //導入方法依賴的package包/類
/**
 * Writes {@code val} into the field of {@code resource} selected by
 * {@code type} (memory, CPU virtual cores, or GPU cores).
 *
 * @throws IllegalArgumentException if the resource type is not recognized
 */
private static void setResourceValue(int val, Resource resource, ResourceType type) {
  switch (type) {
    case GPU:
      resource.setGpuCores(val);
      break;
    case CPU:
      resource.setVirtualCores(val);
      break;
    case MEMORY:
      resource.setMemory(val);
      break;
    default:
      throw new IllegalArgumentException("Invalid resource");
  }
}
示例10: testFromYarn
import org.apache.hadoop.yarn.api.records.Resource; //導入方法依賴的package包/類
/**
 * Checks that {@code TypeConverter.fromYarn} maps an ApplicationReport's
 * timing and state fields onto the resulting JobStatus.
 */
@Test
public void testFromYarn() throws Exception {
  final int startTime = 612354;
  final int finishTime = 612355;
  final YarnApplicationState runningState = YarnApplicationState.RUNNING;
  ApplicationId appId = ApplicationId.newInstance(0, 0);
  ApplicationReport report = Records.newRecord(ApplicationReport.class);
  report.setApplicationId(appId);
  report.setYarnApplicationState(runningState);
  report.setStartTime(startTime);
  report.setFinishTime(finishTime);
  report.setUser("TestTypeConverter-user");
  // Attach a usage report where needed/reserved/used all share one Resource.
  Resource res = Records.newRecord(Resource.class);
  res.setMemory(2048);
  ApplicationResourceUsageReport usage = Records
      .newRecord(ApplicationResourceUsageReport.class);
  usage.setNeededResources(res);
  usage.setNumReservedContainers(1);
  usage.setNumUsedContainers(3);
  usage.setReservedResources(res);
  usage.setUsedResources(res);
  report.setApplicationResourceUsageReport(usage);
  JobStatus status = TypeConverter.fromYarn(report, "dummy-jobfile");
  Assert.assertEquals(startTime, status.getStartTime());
  Assert.assertEquals(finishTime, status.getFinishTime());
  Assert.assertEquals(runningState.toString(), status.getState().toString());
}
示例11: startAppMaster
import org.apache.hadoop.yarn.api.records.Resource; //導入方法依賴的package包/類
/**
 * Launches the ApplicationMaster described by the given submission context:
 * stages local resources, builds the AM container environment and resource
 * ask, submits the application to YARN, and polls until deployment resolves.
 *
 * @param appContext the submission context to populate and submit
 * @return the application report produced by the deployment poller
 * @throws Exception if submission or deployment fails; on deployment
 *         failure the application is killed before the cause is rethrown
 */
private ApplicationReport startAppMaster(ApplicationSubmissionContext appContext) throws Exception {
appContext.setMaxAppAttempts(MAX_ATTEMPT);
// Stage files the AM needs and record which paths were shipped.
Map<String, LocalResource> localResources = new HashMap<>();
Set<Path> shippedPaths = new HashSet<>();
collectLocalResources(localResources, shippedPaths);
final ContainerLaunchContext amContainer = setupApplicationMasterContainer(false, true, false);
amContainer.setLocalResources(localResources);
// Classpath is the staged resource keys; shipped files are passed comma-separated.
final String classPath = localResources.keySet().stream().collect(Collectors.joining(File.pathSeparator));
final String shippedFiles = shippedPaths.stream().map(Path::toString)
.collect(Collectors.joining(","));
// Setup CLASSPATH and environment variables for ApplicationMaster
ApplicationId appId = appContext.getApplicationId();
final Map<String, String> appMasterEnv = setUpAmEnvironment(
appId,
classPath,
shippedFiles,
getDynamicPropertiesEncoded()
);
amContainer.setEnvironment(appMasterEnv);
// Set up resource type requirements for ApplicationMaster
Resource capability = Records.newRecord(Resource.class);
// AM memory comes from the Flink JobManager heap setting; one vcore is fixed.
capability.setMemory(getFlinkConfiguration()
.getInteger(JobManagerOptions.JOB_MANAGER_HEAP_MEMORY));
capability.setVirtualCores(1);
appContext.setApplicationName(job.name());
appContext.setApplicationType(ATHENAX_APPLICATION_TYPE);
appContext.setAMContainerSpec(amContainer);
appContext.setResource(capability);
// Job metadata is carried as a single serialized application tag.
appContext.setApplicationTags(Collections.singleton(job.metadata().serialize()));
if (job.queue() != null) {
appContext.setQueue(job.queue());
}
LOG.info("Submitting application master {}", appId);
yarnClient.submitApplication(appContext);
// Poll deployment status asynchronously and block on its result.
PollDeploymentStatus poll = new PollDeploymentStatus(appId);
YARN_POLL_EXECUTOR.submit(poll);
try {
return poll.result.get();
} catch (ExecutionException e) {
// Deployment failed: clean up the half-started application, then rethrow
// the underlying cause rather than the ExecutionException wrapper.
LOG.warn("Failed to deploy {}, cause: {}", appId.toString(), e.getCause());
yarnClient.killApplication(appId);
throw (Exception) e.getCause();
}
}
示例12: addTo
import org.apache.hadoop.yarn.api.records.Resource; //導入方法依賴的package包/類
/**
 * Adds {@code rhs} into {@code lhs} in place (memory, virtual cores, and
 * GPU cores) and returns the mutated {@code lhs}.
 *
 * @param lhs the resource that is modified
 * @param rhs the resource whose values are added
 * @return {@code lhs} after mutation
 */
public static Resource addTo(Resource lhs, Resource rhs) {
  int memSum = lhs.getMemory() + rhs.getMemory();
  int vcoreSum = lhs.getVirtualCores() + rhs.getVirtualCores();
  int gcoreSum = lhs.getGpuCores() + rhs.getGpuCores();
  lhs.setMemory(memSum);
  lhs.setVirtualCores(vcoreSum);
  lhs.setGpuCores(gcoreSum);
  return lhs;
}
示例13: subtractFrom
import org.apache.hadoop.yarn.api.records.Resource; //導入方法依賴的package包/類
/**
 * Subtracts {@code rhs} from {@code lhs} in place (memory, virtual cores,
 * and GPU cores) and returns the mutated {@code lhs}. The result is not
 * clamped: dimensions may go negative.
 *
 * @param lhs the resource that is modified
 * @param rhs the resource whose values are subtracted
 * @return {@code lhs} after mutation
 */
public static Resource subtractFrom(Resource lhs, Resource rhs) {
  int memDiff = lhs.getMemory() - rhs.getMemory();
  int vcoreDiff = lhs.getVirtualCores() - rhs.getVirtualCores();
  int gcoreDiff = lhs.getGpuCores() - rhs.getGpuCores();
  lhs.setMemory(memDiff);
  lhs.setVirtualCores(vcoreDiff);
  lhs.setGpuCores(gcoreDiff);
  return lhs;
}
示例14: multiplyTo
import org.apache.hadoop.yarn.api.records.Resource; //導入方法依賴的package包/類
/**
 * Scales every dimension of {@code lhs} by {@code by} in place, truncating
 * each product to an int, and returns the mutated {@code lhs}.
 *
 * @param lhs the resource that is modified
 * @param by the scaling factor
 * @return {@code lhs} after mutation
 */
public static Resource multiplyTo(Resource lhs, double by) {
  int scaledMem = (int) (lhs.getMemory() * by);
  int scaledVcores = (int) (lhs.getVirtualCores() * by);
  int scaledGcores = (int) (lhs.getGpuCores() * by);
  lhs.setMemory(scaledMem);
  lhs.setVirtualCores(scaledVcores);
  lhs.setGpuCores(scaledGcores);
  return lhs;
}
示例15: testNames
import org.apache.hadoop.yarn.api.records.Resource; //導入方法依賴的package包/類
/**
 * Exercises NodeManagerMetrics end to end: registers total node capacity,
 * allocates and releases containers, and checks the aggregated counters and
 * the rounding behavior of the GB-denominated gauges. The final checkMetrics
 * call depends on the exact sequence of calls above it.
 */
@Test public void testNames() {
DefaultMetricsSystem.initialize("NodeManager");
NodeManagerMetrics metrics = NodeManagerMetrics.create();
// Total node capacity: 8 GiB memory, 16 vcores, 16 GPU cores.
Resource total = Records.newRecord(Resource.class);
total.setMemory(8*GiB);
total.setVirtualCores(16);
total.setGpuCores(16);
// Per-container ask: 512 MiB, 2 vcores, 1 GPU core.
Resource resource = Records.newRecord(Resource.class);
resource.setMemory(512); //512MiB
resource.setVirtualCores(2);
resource.setGpuCores(1);
metrics.addResource(total);
for (int i = 10; i-- > 0;) {
// allocate 10 containers(allocatedGB: 5GiB, availableGB: 3GiB)
metrics.launchedContainer();
metrics.allocateContainer(resource);
}
// Drive one container through the initing -> running lifecycle transitions.
metrics.initingContainer();
metrics.endInitingContainer();
metrics.runningContainer();
metrics.endRunningContainer();
// Releasing 3 containers(allocatedGB: 3.5GiB, availableGB: 4.5GiB)
// via the three terminal paths: completed, failed, killed.
metrics.completedContainer();
metrics.releaseContainer(resource);
metrics.failedContainer();
metrics.releaseContainer(resource);
metrics.killedContainer();
metrics.releaseContainer(resource);
metrics.initingContainer();
metrics.runningContainer();
// Launch-duration metric must only change once a duration is recorded.
Assert.assertTrue(!metrics.containerLaunchDuration.changed());
metrics.addContainerLaunchDuration(1);
Assert.assertTrue(metrics.containerLaunchDuration.changed());
// availableGB is expected to be floored,
// while allocatedGB is expected to be ceiled.
// allocatedGB: 3.5GB allocated memory is shown as 4GB
// availableGB: 4.5GB available memory is shown as 4GB
checkMetrics(10, 1, 1, 1, 1, 1, 4, 7, 4, 14, 2, 7, 9);
}