本文整理汇总了Java中org.apache.hadoop.mapred.MRCaching.TestResult类的典型用法代码示例。如果您正苦于以下问题:Java TestResult类的具体用法?Java TestResult怎么用?Java TestResult使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
TestResult类属于org.apache.hadoop.mapred.MRCaching包,在下文中一共展示了TestResult类的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testWithDFS
import org.apache.hadoop.mapred.MRCaching.TestResult; //导入依赖的package包/类
/**
 * Runs the word-count job twice against a mini DFS + MR cluster with the
 * distributed cache populated, asserting on each pass that the cached
 * archives were localized correctly.
 *
 * @throws IOException if cluster startup, cache setup, or the job fails
 */
public void testWithDFS() throws IOException {
  MiniMRCluster mrCluster = null;
  MiniDFSCluster dfsCluster = null;
  FileSystem fs = null;
  // Shared word-count input for both job submissions.
  final String input = "The quick brown fox\nhas many silly\n"
      + "red fox sox\n";
  try {
    JobConf conf = new JobConf();
    dfsCluster = new MiniDFSCluster.Builder(conf).build();
    fs = dfsCluster.getFileSystem();
    mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 4);
    MRCaching.setupCache("/cachedir", fs);
    // First pass: word count with caching enabled.
    TestResult result = MRCaching.launchMRCache("/testing/wc/input",
                                                "/testing/wc/output",
                                                "/cachedir",
                                                mrCluster.createJobConf(),
                                                input);
    assertTrue("Archives not matching", result.isOutputOk);
    // Second pass (originally the symlink variant of the cache job).
    result = MRCaching.launchMRCache("/testing/wc/input",
                                     "/testing/wc/output",
                                     "/cachedir",
                                     mrCluster.createJobConf(),
                                     input);
    assertTrue("Archives not matching", result.isOutputOk);
  } finally {
    // Tear down in reverse dependency order; each guard tolerates a
    // partially-constructed environment.
    if (fs != null) {
      fs.close();
    }
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
    if (mrCluster != null) {
      mrCluster.shutdown();
    }
  }
}
示例2: testWithDFS
import org.apache.hadoop.mapred.MRCaching.TestResult; //导入依赖的package包/类
/**
 * Runs the word-count job twice (plain and symlink cache variants) on a
 * mini DFS + MR cluster and verifies the distributed-cache archives were
 * localized correctly each time.
 *
 * @throws IOException if cluster startup, cache setup, or the job fails
 */
public void testWithDFS() throws IOException {
  MiniMRCluster mr = null;
  MiniDFSCluster dfs = null;
  FileSystem fileSys = null;
  try {
    JobConf conf = new JobConf();
    dfs = new MiniDFSCluster(conf, 1, true, null);
    fileSys = dfs.getFileSystem();
    // FileSystem.getName() is deprecated; getUri().toString() is the
    // documented replacement and is what the other examples here use.
    mr = new MiniMRCluster(2, fileSys.getUri().toString(), 4);
    MRCaching.setupCache("/cachedir", fileSys);
    // run the wordcount example with caching (no symlinks)
    TestResult ret = MRCaching.launchMRCache("/testing/wc/input",
                                             "/testing/wc/output",
                                             "/cachedir",
                                             mr.createJobConf(),
                                             "The quick brown fox\nhas many silly\n"
                                             + "red fox sox\n", false);
    assertTrue("Archives not matching", ret.isOutputOk);
    // launch MR cache with symlinks
    ret = MRCaching.launchMRCache("/testing/wc/input",
                                  "/testing/wc/output",
                                  "/cachedir",
                                  mr.createJobConf(),
                                  "The quick brown fox\nhas many silly\n"
                                  + "red fox sox\n", true);
    assertTrue("Archives not matching", ret.isOutputOk);
  } finally {
    // Release resources in reverse order of acquisition.
    if (fileSys != null) {
      fileSys.close();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
    if (mr != null) {
      mr.shutdown();
    }
  }
}
示例3: testWithDFS
import org.apache.hadoop.mapred.MRCaching.TestResult; //导入依赖的package包/类
/**
 * Same cache test as the plain-DFS variant, but forces the HDFS client
 * implementation to ChecksumDistributedFileSystem before starting the
 * cluster, then runs the word-count cache job with and without symlinks.
 *
 * @throws IOException if cluster startup, cache setup, or the job fails
 */
public void testWithDFS() throws IOException {
  MiniMRCluster mr = null;
  MiniDFSCluster dfs = null;
  FileSystem fileSys = null;
  try {
    JobConf conf = new JobConf();
    // Swap in the checksummed DFS implementation for this test.
    conf.set("fs.hdfs.impl",
             "org.apache.hadoop.hdfs.ChecksumDistributedFileSystem");
    dfs = new MiniDFSCluster(conf, 1, true, null);
    fileSys = dfs.getFileSystem();
    // FileSystem.getName() is deprecated; getUri().toString() is the
    // documented replacement and is what the other examples here use.
    mr = new MiniMRCluster(2, fileSys.getUri().toString(), 4);
    MRCaching.setupCache("/cachedir", fileSys);
    // run the wordcount example with caching (no symlinks)
    TestResult ret = MRCaching.launchMRCache("/testing/wc/input",
                                             "/testing/wc/output",
                                             "/cachedir",
                                             mr.createJobConf(),
                                             "The quick brown fox\nhas many silly\n"
                                             + "red fox sox\n", false);
    assertTrue("Archives not matching", ret.isOutputOk);
    // launch MR cache with symlinks
    ret = MRCaching.launchMRCache("/testing/wc/input",
                                  "/testing/wc/output",
                                  "/cachedir",
                                  mr.createJobConf(),
                                  "The quick brown fox\nhas many silly\n"
                                  + "red fox sox\n", true);
    assertTrue("Archives not matching", ret.isOutputOk);
  } finally {
    // Release resources in reverse order of acquisition.
    if (fileSys != null) {
      fileSys.close();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
    if (mr != null) {
      mr.shutdown();
    }
  }
}
示例4: testWithDFS
import org.apache.hadoop.mapred.MRCaching.TestResult; //导入依赖的package包/类
/**
 * Launches the cached word-count job twice on a mini DFS + MR cluster and
 * checks after each run that the distributed-cache archives matched.
 *
 * @throws IOException if cluster startup, cache setup, or the job fails
 */
public void testWithDFS() throws IOException {
  MiniMRCluster mrCluster = null;
  MiniDFSCluster dfsCluster = null;
  FileSystem fs = null;
  // Identical input text is fed to both submissions.
  final String input = "The quick brown fox\nhas many silly\n"
      + "red fox sox\n";
  try {
    JobConf conf = new JobConf();
    dfsCluster = new MiniDFSCluster(conf, 1, true, null);
    fs = dfsCluster.getFileSystem();
    mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 4);
    MRCaching.setupCache("/cachedir", fs);
    // First run: word count with the distributed cache.
    TestResult result = MRCaching.launchMRCache("/testing/wc/input",
                                                "/testing/wc/output",
                                                "/cachedir",
                                                mrCluster.createJobConf(),
                                                input);
    assertTrue("Archives not matching", result.isOutputOk);
    // Second run (originally the symlink variant of the cache job).
    result = MRCaching.launchMRCache("/testing/wc/input",
                                     "/testing/wc/output",
                                     "/cachedir",
                                     mrCluster.createJobConf(),
                                     input);
    assertTrue("Archives not matching", result.isOutputOk);
  } finally {
    // Null-guarded teardown so a failure during setup still cleans up.
    if (fs != null) {
      fs.close();
    }
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
    if (mrCluster != null) {
      mrCluster.shutdown();
    }
  }
}
示例5: testWithDFS
import org.apache.hadoop.mapred.MRCaching.TestResult; //导入依赖的package包/类
/**
 * JUnit entry point: runs the cached word-count job twice on a mini
 * DFS + MR cluster, asserting both times that the cache archives matched.
 *
 * @throws IOException if cluster startup, cache setup, or the job fails
 */
@Test
public void testWithDFS() throws IOException {
  MiniMRCluster mrCluster = null;
  MiniDFSCluster dfsCluster = null;
  FileSystem fs = null;
  // Word-count input shared by both submissions.
  final String input = "The quick brown fox\nhas many silly\n"
      + "red fox sox\n";
  try {
    JobConf conf = new JobConf();
    dfsCluster = new MiniDFSCluster.Builder(conf).build();
    fs = dfsCluster.getFileSystem();
    mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 4);
    MRCaching.setupCache("/cachedir", fs);
    // First pass: word count with the distributed cache.
    TestResult result = MRCaching.launchMRCache("/testing/wc/input",
                                                "/testing/wc/output",
                                                "/cachedir",
                                                mrCluster.createJobConf(),
                                                input);
    assertTrue("Archives not matching", result.isOutputOk);
    // Second pass (originally the symlink variant of the cache job).
    result = MRCaching.launchMRCache("/testing/wc/input",
                                     "/testing/wc/output",
                                     "/cachedir",
                                     mrCluster.createJobConf(),
                                     input);
    assertTrue("Archives not matching", result.isOutputOk);
  } finally {
    // Null-guarded teardown so a failure during setup still cleans up.
    if (fs != null) {
      fs.close();
    }
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
    if (mrCluster != null) {
      mrCluster.shutdown();
    }
  }
}
示例6: testWithDFS
import org.apache.hadoop.mapred.MRCaching.TestResult; //导入依赖的package包/类
/**
 * Runs the word-count cache job on a mini DFS + MR cluster twice — once
 * without and once with symlinks — verifying the cached archives each time.
 *
 * @throws IOException if cluster startup, cache setup, or the job fails
 */
public void testWithDFS() throws IOException {
  MiniMRCluster mrCluster = null;
  MiniDFSCluster dfsCluster = null;
  FileSystem fs = null;
  // Identical input text for both the plain and symlink runs.
  final String input = "The quick brown fox\nhas many silly\n"
      + "red fox sox\n";
  try {
    JobConf conf = new JobConf();
    dfsCluster = new MiniDFSCluster(conf, 1, true, null);
    fs = dfsCluster.getFileSystem();
    mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 4);
    MRCaching.setupCache("/cachedir", fs);
    // Plain run: distributed cache without symlinks.
    TestResult result = MRCaching.launchMRCache("/testing/wc/input",
                                                "/testing/wc/output",
                                                "/cachedir",
                                                mrCluster.createJobConf(),
                                                input, false);
    assertTrue("Archives not matching", result.isOutputOk);
    // Symlink run: same job with cache symlinks enabled.
    result = MRCaching.launchMRCache("/testing/wc/input",
                                     "/testing/wc/output",
                                     "/cachedir",
                                     mrCluster.createJobConf(),
                                     input, true);
    assertTrue("Archives not matching", result.isOutputOk);
  } finally {
    // Null-guarded teardown so a failure during setup still cleans up.
    if (fs != null) {
      fs.close();
    }
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
    if (mrCluster != null) {
      mrCluster.shutdown();
    }
  }
}
示例7: testWithLocal
import org.apache.hadoop.mapred.MRCaching.TestResult; //导入依赖的package包/类
/**
 * Runs PI estimation plus the cached word-count job on a local-filesystem
 * mini MR cluster, then validates the task-report fetchers and the job
 * counters, and finally exercises custom formats and secondary sort.
 *
 * @throws IOException            on job or filesystem failure
 * @throws InterruptedException   if job execution is interrupted
 * @throws ClassNotFoundException if a job class cannot be resolved
 */
public void testWithLocal()
    throws IOException, InterruptedException, ClassNotFoundException {
  MiniMRCluster cluster = null;
  try {
    cluster = new MiniMRCluster(2, "file:///", 3);
    // Run cleanup inline so that the existence of the cleanup
    // directories can be validated afterwards.
    cluster.setInlineCleanupThreads();
    TestMiniMRWithDFS.runPI(cluster, cluster.createJobConf());
    // Word-count with the distributed cache enabled.
    JobConf jobConf = cluster.createJobConf();
    TestResult result = MRCaching.launchMRCache(TEST_ROOT_DIR + "/wc/input",
                                                TEST_ROOT_DIR + "/wc/output",
                                                TEST_ROOT_DIR + "/cachedir",
                                                jobConf,
                                                "The quick brown fox\n"
                                                + "has many silly\n"
                                                + "red fox sox\n");
    // Cached archives must have been localized correctly.
    assertTrue("Failed test archives not matching", result.isOutputOk);
    // Exercise each of the task-report fetchers against the finished job.
    JobClient jobClient = new JobClient(jobConf);
    JobID jobId = result.job.getID();
    assertEquals("number of setups", 2,
                 jobClient.getSetupTaskReports(jobId).length);
    assertEquals("number of maps", 1,
                 jobClient.getMapTaskReports(jobId).length);
    assertEquals("number of reduces", 1,
                 jobClient.getReduceTaskReports(jobId).length);
    assertEquals("number of cleanups", 2,
                 jobClient.getCleanupTaskReports(jobId).length);
    // Counter checks: 3 input lines in, 9 distinct words out.
    Counters counters = result.job.getCounters();
    assertEquals("number of map inputs", 3,
                 counters.getCounter(Task.Counter.MAP_INPUT_RECORDS));
    assertEquals("number of reduce outputs", 9,
                 counters.getCounter(Task.Counter.REDUCE_OUTPUT_RECORDS));
    runCustomFormats(cluster);
    runSecondarySort(cluster.createJobConf());
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
示例8: testWithLocal
import org.apache.hadoop.mapred.MRCaching.TestResult; //导入依赖的package包/类
/**
 * Runs PI estimation plus the cached word-count job on a local-filesystem
 * mini MR cluster, then validates the task-report fetchers and the job
 * counters, and finally exercises custom formats and secondary sort.
 *
 * @throws IOException            on job or filesystem failure
 * @throws InterruptedException   if job execution is interrupted
 * @throws ClassNotFoundException if a job class cannot be resolved
 */
public void testWithLocal()
    throws IOException, InterruptedException, ClassNotFoundException {
  MiniMRCluster cluster = null;
  try {
    cluster = new MiniMRCluster(2, "file:///", 3);
    TestMiniMRWithDFS.runPI(cluster, cluster.createJobConf());
    // Word-count with the distributed cache enabled.
    JobConf jobConf = cluster.createJobConf();
    TestResult result = MRCaching.launchMRCache(TEST_ROOT_DIR + "/wc/input",
                                                TEST_ROOT_DIR + "/wc/output",
                                                TEST_ROOT_DIR + "/cachedir",
                                                jobConf,
                                                "The quick brown fox\n"
                                                + "has many silly\n"
                                                + "red fox sox\n");
    // Cached archives must have been localized correctly.
    assertTrue("Failed test archives not matching", result.isOutputOk);
    // Exercise each of the task-report fetchers against the finished job.
    JobClient jobClient = new JobClient(jobConf);
    JobID jobId = result.job.getID();
    assertEquals("number of setups", 2,
                 jobClient.getSetupTaskReports(jobId).length);
    assertEquals("number of maps", 1,
                 jobClient.getMapTaskReports(jobId).length);
    assertEquals("number of reduces", 1,
                 jobClient.getReduceTaskReports(jobId).length);
    assertEquals("number of cleanups", 2,
                 jobClient.getCleanupTaskReports(jobId).length);
    // Counter checks: 3 input lines in, 9 distinct words out.
    Counters counters = result.job.getCounters();
    assertEquals("number of map inputs", 3,
                 counters.getCounter(Task.Counter.MAP_INPUT_RECORDS));
    assertEquals("number of reduce outputs", 9,
                 counters.getCounter(Task.Counter.REDUCE_OUTPUT_RECORDS));
    runCustomFormats(cluster);
    runSecondarySort(cluster.createJobConf());
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
示例9: testWithLocal
import org.apache.hadoop.mapred.MRCaching.TestResult; //导入依赖的package包/类
public void testWithLocal()
throws IOException, InterruptedException, ClassNotFoundException {
MiniMRCluster mr = null;
try {
mr = new MiniMRCluster(2, "file:///", 3);
// make cleanup inline sothat validation of existence of these directories
// can be done
mr.setInlineCleanupThreads();
TestMiniMRWithDFS.runPI(mr, mr.createJobConf());
// run the wordcount example with caching
JobConf job = mr.createJobConf();
TestResult ret = MRCaching.launchMRCache(TEST_ROOT_DIR + "/wc/input",
TEST_ROOT_DIR + "/wc/output",
TEST_ROOT_DIR + "/cachedir",
job,
"The quick brown fox\n"
+ "has many silly\n"
+ "red fox sox\n");
// assert the number of lines read during caching
assertTrue("Failed test archives not matching", ret.isOutputOk);
// test the task report fetchers
JobClient client = new JobClient(job);
JobID jobid = ret.job.getID();
TaskReport[] reports;
reports = client.getSetupTaskReports(jobid);
assertEquals("number of setups", 2, reports.length);
reports = client.getMapTaskReports(jobid);
assertEquals("number of maps", 1, reports.length);
reports = client.getReduceTaskReports(jobid);
assertEquals("number of reduces", 1, reports.length);
reports = client.getCleanupTaskReports(jobid);
assertEquals("number of cleanups", 2, reports.length);
Counters counters = ret.job.getCounters();
assertEquals("number of map inputs", 3,
counters.getCounter(TaskCounter.MAP_INPUT_RECORDS));
assertEquals("number of reduce outputs", 9,
counters.getCounter(TaskCounter.REDUCE_OUTPUT_RECORDS));
runCustomFormats(mr);
runSecondarySort(mr.createJobConf());
} finally {
if (mr != null) { mr.shutdown(); }
}
}