This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster. If you are wondering what MiniMRYarnCluster is for or how to use it, the selected code examples below may help.
The MiniMRYarnCluster class belongs to the org.apache.hadoop.mapreduce.v2 package. Nine code examples of the class are shown below, ordered by popularity by default.
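Before the individual examples, here is a minimal sketch of the lifecycle that every example below follows: construct the cluster with a test identifier and a NodeManager count, init() it with a Configuration, start() it, run jobs against its configuration, and stop() it when done. The identifier and the counts are illustrative:

Configuration conf = new Configuration();
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME); // run MapReduce on YARN
MiniMRYarnCluster mrCluster = new MiniMRYarnCluster("MyTest", 1); // test name, 1 NodeManager
mrCluster.init(conf);   // init() drives serviceInit() internally
mrCluster.start();
// ... submit jobs against mrCluster.getConfig() ...
mrCluster.stop();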
Example 1: setUp
import org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster; // import the dependent package/class
@BeforeClass
public static void setUp() throws Exception {
  final Configuration conf = new Configuration();
  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
  conf.set(YarnConfiguration.RM_PRINCIPAL,
      "jt_id/" + SecurityUtil.HOSTNAME_PATTERN + "@APACHE.ORG");

  // Bring up a mini HDFS cluster.
  final MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
  builder.checkExitOnShutdown(true);
  builder.numDataNodes(numSlaves);
  builder.format(true);
  builder.racks(null);
  dfsCluster = builder.build();

  // Bring up a mini MR-on-YARN cluster next to it.
  mrCluster = new MiniMRYarnCluster(TestBinaryTokenFile.class.getName(), noOfNMs);
  mrCluster.init(conf);
  mrCluster.start();

  // Start the NameNode's delegation-token secret manager so tokens can be issued.
  NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads();

  FileSystem fs = dfsCluster.getFileSystem();
  p1 = new Path("file1");
  p1 = fs.makeQualified(p1);
}
Example 2: testJobWithNonNormalizedCapabilities
import org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster; // import the dependent package/class
/**
 * Ensure that nothing is broken after normalization was removed
 * from the MR ApplicationMaster side.
 * @throws Exception
 */
@Test
public void testJobWithNonNormalizedCapabilities() throws Exception {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
        + " not found. Not running test.");
    return;
  }

  JobConf jobConf = new JobConf(mrCluster.getConfig());
  // Deliberately request memory sizes that are not multiples of the
  // scheduler's minimum allocation.
  jobConf.setInt("mapreduce.map.memory.mb", 700);
  jobConf.setInt("mapred.reduce.memory.mb", 1500);

  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(jobConf);
  Job job = sleepJob.createJob(3, 2, 1000, 1, 500, 1);
  job.setJarByClass(SleepJob.class);
  job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
  job.submit();
  boolean completed = job.waitForCompletion(true);
  Assert.assertTrue("Job should be completed", completed);
  Assert.assertEquals("Job should be finished successfully",
      JobStatus.State.SUCCEEDED, job.getJobState());
}
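The memory requests above (700 MB and 1500 MB) are deliberately not multiples of YARN's minimum allocation increment (1024 MB by default), so the test verifies that such non-normalized capabilities are still handled correctly after request normalization was moved out of the MR ApplicationMaster.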
Example 3: setUp
import org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster; // import the dependent package/class
@BeforeClass
public static void setUp() throws Exception {
  final Configuration conf = new Configuration();
  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
  conf.set(YarnConfiguration.RM_PRINCIPAL,
      "jt_id/" + SecurityUtil.HOSTNAME_PATTERN + "@APACHE.ORG");
  final MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
  builder.checkExitOnShutdown(true);
  builder.numDataNodes(numSlaves);
  builder.format(true);
  builder.racks(null);
  dfsCluster = builder.build();
  mrCluster = new MiniMRYarnCluster(TestBinaryTokenFile.class.getName(),
      noOfNMs, false);
  mrCluster.init(conf);
  mrCluster.start();
  NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads();
  FileSystem fs = dfsCluster.getFileSystem();
  p1 = new Path("file1");
  p1 = fs.makeQualified(p1);
}
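The only difference from Example 1 is the third constructor argument; in Hadoop's source that boolean parameter (named enableAHS there) controls whether the YARN Application History Server is brought up alongside the cluster, so passing false skips it.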
Example 4: tearDown
import org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster; // import the dependent package/class
@After
public void tearDown() {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
        + " not found. Not running test.");
    return;
  }
  if (mrCluster != null) {
    mrCluster.stop();
  }
}
Example 5: stopMiniMRYarnCluster
import org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster; // import the dependent package/class
private void stopMiniMRYarnCluster(MiniMRYarnCluster miniMRYarnCluster) {
  try {
    if (miniMRYarnCluster != null) {
      miniMRYarnCluster.stop();
    }
  } catch (Exception ignored) {
    // nothing we can do
    ignored.printStackTrace();
  }
}
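Swallowing the exception here is a deliberate teardown pattern: a failure while stopping the mini cluster should not mask the result of the test that just ran, so the stack trace is printed and shutdown continues.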
Example 6: create
import org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster; // import the dependent package/class
public static MiniMRClientCluster create(Class<?> caller, String identifier,
    int noOfNMs, Configuration conf) throws IOException {
  if (conf == null) {
    conf = new Configuration();
  }

  FileSystem fs = FileSystem.get(conf);
  Path testRootDir = new Path("target", identifier + "-tmpDir")
      .makeQualified(fs);
  Path appJar = new Path(testRootDir, "MRAppJar.jar");

  // Copy MRAppJar and make it private.
  Path appMasterJar = new Path(MiniMRYarnCluster.APPJAR);
  fs.copyFromLocalFile(appMasterJar, appJar);
  fs.setPermission(appJar, new FsPermission("744"));

  Job job = Job.getInstance(conf);
  job.addFileToClassPath(appJar);

  Path callerJar = new Path(JarFinder.getJar(caller));
  Path remoteCallerJar = new Path(testRootDir, callerJar.getName());
  fs.copyFromLocalFile(callerJar, remoteCallerJar);
  fs.setPermission(remoteCallerJar, new FsPermission("744"));
  job.addFileToClassPath(remoteCallerJar);

  MiniMRYarnCluster miniMRYarnCluster = new MiniMRYarnCluster(identifier,
      noOfNMs);
  job.getConfiguration().set("minimrclientcluster.caller.name",
      identifier);
  job.getConfiguration().setInt("minimrclientcluster.nodemanagers.number",
      noOfNMs);
  miniMRYarnCluster.init(job.getConfiguration());
  miniMRYarnCluster.start();

  return new MiniMRYarnClusterAdapter(miniMRYarnCluster);
}
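This is the factory method from Hadoop's MiniMRClientCluster test framework: it stages the MR AppMaster jar and the caller's jar on the filesystem, then wraps the started MiniMRYarnCluster in an adapter. A minimal usage sketch, assuming the method lives on a class named MiniMRClientClusterFactory as in Hadoop, and with MyMapReduceTest as a hypothetical test class:

MiniMRClientCluster cluster = MiniMRClientClusterFactory.create(
    MyMapReduceTest.class, "MyMapReduceTest", 1, new Configuration());
try {
  Configuration clusterConf = cluster.getConfig(); // submit jobs against this
  // ... run the job under test ...
} finally {
  cluster.stop();
}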
Example 7: getInstanceConfig
import org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster; // import the dependent package/class
public static synchronized Configuration getInstanceConfig() throws Exception {
  if (conf == null) {
    // Stand up an embedded ZooKeeper server on an ephemeral port.
    File zooRoot = File.createTempFile("hbase-zookeeper", "");
    zooRoot.delete();
    ZooKeeperServer zookeeper = new ZooKeeperServer(zooRoot, zooRoot, 2000);
    ServerCnxnFactory factory = ServerCnxnFactory.createFactory(
        new InetSocketAddress("localhost", 0), 5000);
    factory.startup(zookeeper);

    YarnConfiguration yconf = new YarnConfiguration();
    String argLine = System.getProperty("argLine");
    if (argLine != null) {
      yconf.set("yarn.app.mapreduce.am.command-opts",
          argLine.replace("jacoco.exec", "jacocoMR.exec"));
    }
    yconf.setBoolean(MRConfig.MAPREDUCE_MINICLUSTER_CONTROL_RESOURCE_MONITORING, false);
    yconf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
        ResourceScheduler.class);

    MiniMRYarnCluster miniCluster = new MiniMRYarnCluster("testCluster");
    miniCluster.init(yconf);
    yconf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, true);
    miniCluster.start();

    // Point HBase at the mini cluster and the embedded ZooKeeper.
    File hbaseRoot = File.createTempFile("hbase-root", "");
    hbaseRoot.delete();
    conf = HBaseConfiguration.create(miniCluster.getConfig());
    conf.set(HConstants.HBASE_DIR, hbaseRoot.toURI().toURL().toString());
    conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, factory.getLocalPort());
    conf.set("hbase.master.hostname", "localhost");
    conf.set("hbase.regionserver.hostname", "localhost");
    conf.setInt("hbase.master.info.port", -1);
    conf.set("hbase.fs.tmp.dir",
        new File(System.getProperty("java.io.tmpdir")).toURI().toURL().toString());

    LocalHBaseCluster cluster = new LocalHBaseCluster(conf);
    cluster.startup();
  }
  return conf;
}
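The synchronized keyword plus the null check make this a lazily initialized singleton: the first caller pays the cost of starting ZooKeeper, the MiniMRYarnCluster, and a LocalHBaseCluster wired together through one shared Configuration, and every later caller simply receives that same Configuration.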
Example 8: start
import org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster; // import the dependent package/class
@Override
public void start() throws Exception {
  LOG.info("MR: Starting MiniMRYarnCluster");
  configure();
  miniMRYarnCluster = new MiniMRYarnCluster(testName, numNodeManagers);
  // init() runs the service's serviceInit() internally; calling
  // serviceInit() directly as well would initialize the cluster twice
  // (and it is protected in AbstractService anyway).
  miniMRYarnCluster.init(configuration);
  miniMRYarnCluster.start();
}
Example 9: setUp
import org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster; // import the dependent package/class
@Override
public void setUp() throws Exception {
  super.setUp();
  Configuration conf = new Configuration(getConfig());
  mrCluster = new MiniMRYarnCluster(this.getClass().getName(), numDatanode,
      false);
  conf.set("fs.defaultFS", fs.getUri().toString());
  mrCluster.init(conf);
  mrCluster.start();
}