This article collects typical usage examples of the Java class org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor. If you are unsure what LinuxContainerExecutor is for or how to use it, the curated examples below may help.
The LinuxContainerExecutor class belongs to the org.apache.hadoop.yarn.server.nodemanager package. Ten code examples are listed below, ordered by popularity by default.
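Before the examples, here is a minimal usage sketch of how the snippets below are typically exercised: a resource-handler object is configured and then initialized with a LinuxContainerExecutor. The handler class name CustomCgroupsLCEResourceHandler is taken from the test examples further down; everything else in this sketch is an assumption about the surrounding setup, not part of the listing.
// Minimal usage sketch (assumed setup; mirrors the test examples below).
YarnConfiguration conf = new YarnConfiguration();
LinuxContainerExecutor lce = new LinuxContainerExecutor();
CustomCgroupsLCEResourceHandler handler = new CustomCgroupsLCEResourceHandler();
handler.setConf(conf);  // the handlers in these examples are configured this way
handler.init(lce);      // mounts/locates the cpu cgroup controller (see Examples 3-5)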
Example 1: init
import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor; // import the required package/class
@VisibleForTesting
void init(LinuxContainerExecutor lce, ResourceCalculatorPlugin plugin)
    throws IOException {
  initConfig();
  // mount cgroups if requested
  if (cgroupMount && cgroupMountPath != null) {
    ArrayList<String> cgroupKVs = new ArrayList<String>();
    cgroupKVs.add(CONTROLLER_CPU + "=" + cgroupMountPath + "/" +
        CONTROLLER_CPU);
    lce.mountCgroups(cgroupKVs, cgroupPrefix);
  }
  initializeControllerPaths();
  // cap overall usage to the number of cores allocated to YARN
  yarnProcessors = NodeManagerHardwareUtils.getContainersCores(plugin, conf);
  int systemProcessors = plugin.getNumProcessors();
  if (systemProcessors != (int) yarnProcessors) {
    LOG.info("YARN containers restricted to " + yarnProcessors + " cores");
    int[] limits = getOverallLimits(yarnProcessors);
    updateCgroup(CONTROLLER_CPU, "", CPU_PERIOD_US, String.valueOf(limits[0]));
    updateCgroup(CONTROLLER_CPU, "", CPU_QUOTA_US, String.valueOf(limits[1]));
  } else if (cpuLimitsExist()) {
    LOG.info("Removing CPU constraints for YARN containers.");
    updateCgroup(CONTROLLER_CPU, "", CPU_QUOTA_US, String.valueOf(-1));
  }
}
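Examples 1 and 2 cap overall CPU usage by writing CFS period/quota values computed by getOverallLimits(), whose body is not shown in this listing. The sketch below is an illustration only (an assumption, not Hadoop's actual implementation) of how a fractional core count can be mapped to a period/quota pair, following the limits[0] = period, limits[1] = quota convention used above.
// Illustrative sketch only -- not the real getOverallLimits() from Hadoop.
// It maps a fractional core count to cpu.cfs_period_us / cpu.cfs_quota_us,
// assuming 1000us as the minimum period and 1s as the maximum quota.
static int[] computeOverallLimits(float yarnProcessors) {
  final int maxQuotaUs = 1000 * 1000;  // upper bound written to cpu.cfs_quota_us
  final int minPeriodUs = 1000;        // lower bound for cpu.cfs_period_us
  int periodUs = (int) (maxQuotaUs / yarnProcessors);
  int quotaUs = maxQuotaUs;
  if (periodUs < minPeriodUs) {
    // More cores than 1s/1ms allows: pin the period and scale the quota instead.
    periodUs = minPeriodUs;
    quotaUs = (int) (minPeriodUs * yarnProcessors);
  }
  return new int[] { periodUs, quotaUs };  // { period, quota }, as consumed above
}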
Example 2: init
import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor; // import the required package/class
@VisibleForTesting
void init(LinuxContainerExecutor lce, ResourceCalculatorPlugin plugin)
    throws IOException {
  initConfig();
  // mount cgroups if requested
  if (cgroupMount && cgroupMountPath != null) {
    ArrayList<String> cgroupKVs = new ArrayList<String>();
    cgroupKVs.add(CONTROLLER_CPU + "=" + cgroupMountPath + "/" +
        CONTROLLER_CPU);
    lce.mountCgroups(cgroupKVs, cgroupPrefix);
  }
  initializeControllerPaths();
  nodeVCores = NodeManagerHardwareUtils.getVCores(plugin, conf);
  // cap overall usage to the number of cores allocated to YARN
  yarnProcessors = NodeManagerHardwareUtils.getContainersCPUs(plugin, conf);
  int systemProcessors = NodeManagerHardwareUtils.getNodeCPUs(plugin, conf);
  if (systemProcessors != (int) yarnProcessors) {
    LOG.info("YARN containers restricted to " + yarnProcessors + " cores");
    int[] limits = getOverallLimits(yarnProcessors);
    updateCgroup(CONTROLLER_CPU, "", CPU_PERIOD_US, String.valueOf(limits[0]));
    updateCgroup(CONTROLLER_CPU, "", CPU_QUOTA_US, String.valueOf(limits[1]));
  } else if (cpuLimitsExist()) {
    LOG.info("Removing CPU constraints for YARN containers.");
    updateCgroup(CONTROLLER_CPU, "", CPU_QUOTA_US, String.valueOf(-1));
  }
}
Example 3: init
import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor; // import the required package/class
public synchronized void init(LinuxContainerExecutor lce) throws IOException {
  this.cgroupPrefix = conf.get(YarnConfiguration.
      NM_LINUX_CONTAINER_CGROUPS_HIERARCHY, "/hadoop-yarn");
  this.cgroupMount = conf.getBoolean(YarnConfiguration.
      NM_LINUX_CONTAINER_CGROUPS_MOUNT, false);
  this.cgroupMountPath = conf.get(YarnConfiguration.
      NM_LINUX_CONTAINER_CGROUPS_MOUNT_PATH, null);
  // remove extra /'s at end or start of cgroupPrefix
  if (cgroupPrefix.charAt(0) == '/') {
    cgroupPrefix = cgroupPrefix.substring(1);
  }
  int len = cgroupPrefix.length();
  if (cgroupPrefix.charAt(len - 1) == '/') {
    cgroupPrefix = cgroupPrefix.substring(0, len - 1);
  }
  // mount cgroups if requested
  if (cgroupMount && cgroupMountPath != null) {
    ArrayList<String> cgroupKVs = new ArrayList<String>();
    cgroupKVs.add(CONTROLLER_CPU + "=" + cgroupMountPath + "/" +
        CONTROLLER_CPU);
    lce.mountCgroups(cgroupKVs, cgroupPrefix);
  }
  initializeControllerPaths();
}
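Example 3 reads three NodeManager settings before deciding whether to mount cgroups. For reference, they can also be set programmatically as in the sketch below; the values shown are only illustrative, not recommendations.
YarnConfiguration conf = new YarnConfiguration();
// cgroup hierarchy under which YARN creates per-container groups
conf.set(YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_HIERARCHY, "/hadoop-yarn");
// whether the NodeManager should mount the cgroup controllers itself
conf.setBoolean(YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_MOUNT, true);
// mount point used when the mount flag above is enabled
conf.set(YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_MOUNT_PATH, "/sys/fs/cgroup");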
Example 4: init
import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor; // import the required package/class
public void init(LinuxContainerExecutor lce) throws IOException {
  initConfig();
  // mount cgroups if requested
  if (cgroupMount && cgroupMountPath != null) {
    ArrayList<String> cgroupKVs = new ArrayList<String>();
    cgroupKVs.add(CONTROLLER_CPU + "=" + cgroupMountPath + "/" +
        CONTROLLER_CPU);
    lce.mountCgroups(cgroupKVs, cgroupPrefix);
  }
  initializeControllerPaths();
}
Example 5: init
import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor; // import the required package/class
public void init(LinuxContainerExecutor lce) {
}
Example 6: testInit
import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor; // import the required package/class
@Test
public void testInit() throws IOException {
  LinuxContainerExecutor mockLCE = new MockLinuxContainerExecutor();
  CustomCgroupsLCEResourceHandler handler =
      new CustomCgroupsLCEResourceHandler();
  YarnConfiguration conf = new YarnConfiguration();
  final int numProcessors = 4;
  ResourceCalculatorPlugin plugin =
      Mockito.mock(ResourceCalculatorPlugin.class);
  Mockito.doReturn(numProcessors).when(plugin).getNumProcessors();
  handler.setConf(conf);
  handler.initConfig();
  // create mock cgroup
  File cgroupMountDir = createMockCgroupMount(cgroupDir);
  // create mock mtab
  File mockMtab = createMockMTab(cgroupDir);
  // setup our handler and call init()
  handler.setMtabFile(mockMtab.getAbsolutePath());
  // check values
  // in this case, we're using all the cpu, so the files
  // shouldn't exist (because init won't create them)
  handler.init(mockLCE, plugin);
  File periodFile = new File(cgroupMountDir, "cpu.cfs_period_us");
  File quotaFile = new File(cgroupMountDir, "cpu.cfs_quota_us");
  Assert.assertFalse(periodFile.exists());
  Assert.assertFalse(quotaFile.exists());
  // subset of cpu being used, files should be created
  conf.setInt(YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT, 75);
  handler.limits[0] = 100 * 1000;
  handler.limits[1] = 1000 * 1000;
  handler.init(mockLCE, plugin);
  int period = readIntFromFile(periodFile);
  int quota = readIntFromFile(quotaFile);
  Assert.assertEquals(100 * 1000, period);
  Assert.assertEquals(1000 * 1000, quota);
  // set cpu back to 100, quota should be -1
  conf.setInt(YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT, 100);
  handler.limits[0] = 100 * 1000;
  handler.limits[1] = 1000 * 1000;
  handler.init(mockLCE, plugin);
  quota = readIntFromFile(quotaFile);
  Assert.assertEquals(-1, quota);
  FileUtils.deleteQuietly(cgroupDir);
}
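The tests in Examples 6-8 rely on a readIntFromFile(File) helper that is not part of this listing. Below is a minimal sketch of what such a helper might look like (an assumption, shown only so the assertions above are easier to follow; it is not necessarily the project's actual implementation).
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
// Hypothetical helper: reads the first line of a file and parses it as an int,
// returning -1 if the file is empty.
private static int readIntFromFile(File file) throws IOException {
  try (BufferedReader reader = new BufferedReader(new FileReader(file))) {
    String line = reader.readLine();
    return line == null ? -1 : Integer.parseInt(line.trim());
  }
}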
Example 7: testInit
import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor; // import the required package/class
@Test
public void testInit() throws IOException {
  LinuxContainerExecutor mockLCE = new MockLinuxContainerExecutor();
  CustomCgroupsLCEResourceHandler handler =
      new CustomCgroupsLCEResourceHandler();
  YarnConfiguration conf = new YarnConfiguration();
  final int numProcessors = 4;
  ResourceCalculatorPlugin plugin =
      Mockito.mock(ResourceCalculatorPlugin.class);
  Mockito.doReturn(numProcessors).when(plugin).getNumProcessors();
  Mockito.doReturn(numProcessors).when(plugin).getNumCores();
  handler.setConf(conf);
  handler.initConfig();
  // create mock cgroup
  File cpuCgroupMountDir = TestCGroupsHandlerImpl.createMockCgroupMount(
      cgroupDir, "cpu");
  // create mock mtab
  File mockMtab = TestCGroupsHandlerImpl.createMockMTab(cgroupDir);
  // setup our handler and call init()
  handler.setMtabFile(mockMtab.getAbsolutePath());
  // check values
  // in this case, we're using all the cpu, so the files
  // shouldn't exist (because init won't create them)
  handler.init(mockLCE, plugin);
  File periodFile = new File(cpuCgroupMountDir, "cpu.cfs_period_us");
  File quotaFile = new File(cpuCgroupMountDir, "cpu.cfs_quota_us");
  Assert.assertFalse(periodFile.exists());
  Assert.assertFalse(quotaFile.exists());
  // subset of cpu being used, files should be created
  conf.setInt(YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT, 75);
  handler.limits[0] = 100 * 1000;
  handler.limits[1] = 1000 * 1000;
  handler.init(mockLCE, plugin);
  int period = readIntFromFile(periodFile);
  int quota = readIntFromFile(quotaFile);
  Assert.assertEquals(100 * 1000, period);
  Assert.assertEquals(1000 * 1000, quota);
  // set cpu back to 100, quota should be -1
  conf.setInt(YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT, 100);
  handler.limits[0] = 100 * 1000;
  handler.limits[1] = 1000 * 1000;
  handler.init(mockLCE, plugin);
  quota = readIntFromFile(quotaFile);
  Assert.assertEquals(-1, quota);
  FileUtils.deleteQuietly(cgroupDir);
}
Example 8: testInit
import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor; // import the required package/class
@Test
public void testInit() throws IOException {
  LinuxContainerExecutor mockLCE = new MockLinuxContainerExecutor();
  CustomCgroupsLCEResourceHandler handler =
      new CustomCgroupsLCEResourceHandler();
  YarnConfiguration conf = new YarnConfiguration();
  final int numProcessors = 4;
  ResourceCalculatorPlugin plugin =
      Mockito.mock(ResourceCalculatorPlugin.class);
  Mockito.doReturn(numProcessors).when(plugin).getNumProcessors();
  handler.setConf(conf);
  handler.initConfig();
  // create mock cgroup
  File cgroupDir = createMockCgroup();
  File cgroupMountDir = createMockCgroupMount(cgroupDir);
  // create mock mtab
  File mockMtab = createMockMTab(cgroupDir);
  // setup our handler and call init()
  handler.setMtabFile(mockMtab.getAbsolutePath());
  // check values
  // in this case, we're using all the cpu, so the files
  // shouldn't exist (because init won't create them)
  handler.init(mockLCE, plugin);
  File periodFile = new File(cgroupMountDir, "cpu.cfs_period_us");
  File quotaFile = new File(cgroupMountDir, "cpu.cfs_quota_us");
  Assert.assertFalse(periodFile.exists());
  Assert.assertFalse(quotaFile.exists());
  // subset of cpu being used, files should be created
  conf.setInt(YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT, 75);
  handler.limits[0] = 100 * 1000;
  handler.limits[1] = 1000 * 1000;
  handler.init(mockLCE, plugin);
  int period = readIntFromFile(periodFile);
  int quota = readIntFromFile(quotaFile);
  Assert.assertEquals(100 * 1000, period);
  Assert.assertEquals(1000 * 1000, quota);
  // set cpu back to 100, quota should be -1
  conf.setInt(YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT, 100);
  handler.limits[0] = 100 * 1000;
  handler.limits[1] = 1000 * 1000;
  handler.init(mockLCE, plugin);
  quota = readIntFromFile(quotaFile);
  Assert.assertEquals(-1, quota);
  FileUtils.deleteQuietly(cgroupDir);
}
Example 9: init
import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor; // import the required package/class
public void init(LinuxContainerExecutor lce) throws IOException {
  this.init(lce,
      ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf));
}
Example 10: init
import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor; // import the required package/class
void init(LinuxContainerExecutor lce) throws IOException;