

Java TestResult Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapred.MRCaching.TestResult. If you are wondering what the TestResult class does, how to use it, or what real code using it looks like, the curated examples below should help.


The TestResult class belongs to the org.apache.hadoop.mapred.MRCaching package. Nine code examples of the class are shown below, ordered by popularity by default.
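Note that none of the examples shows the declaration of TestResult itself; they only call MRCaching.launchMRCache(...) and then read the returned object's isOutputOk flag and job handle. As a quick orientation, here is a minimal sketch of what the class looks like, inferred purely from that usage (the enclosing class name MRCachingSketch is invented for illustration); the actual declaration nested inside org.apache.hadoop.mapred.MRCaching may differ.

import org.apache.hadoop.mapred.RunningJob;

// Inferred sketch only: reconstructed from how the examples use ret.isOutputOk
// and ret.job, not copied from Hadoop's test sources.
public class MRCachingSketch {
  public static class TestResult {
    public RunningJob job;      // handle to the MapReduce job that launchMRCache submitted
    public boolean isOutputOk;  // whether the job's output matched the expected word counts

    public TestResult(RunningJob job, boolean isOutputOk) {
      this.job = job;
      this.isOutputOk = isOutputOk;
    }
  }
}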

Example 1: testWithDFS

import org.apache.hadoop.mapred.MRCaching.TestResult; // import the required package/class
public void testWithDFS() throws IOException {
  MiniMRCluster mr = null;
  MiniDFSCluster dfs = null;
  FileSystem fileSys = null;
  try {
    JobConf conf = new JobConf();
    dfs = new MiniDFSCluster.Builder(conf).build();
    fileSys = dfs.getFileSystem();
    mr = new MiniMRCluster(2, fileSys.getUri().toString(), 4);
    MRCaching.setupCache("/cachedir", fileSys);
    // run the wordcount example with caching
    TestResult ret = MRCaching.launchMRCache("/testing/wc/input",
                                          "/testing/wc/output",
                                          "/cachedir",
                                          mr.createJobConf(),
                                          "The quick brown fox\nhas many silly\n"
                                          + "red fox sox\n");
    assertTrue("Archives not matching", ret.isOutputOk);
    // launch MR cache with symlinks
    ret = MRCaching.launchMRCache("/testing/wc/input",
                                  "/testing/wc/output",
                                  "/cachedir",
                                  mr.createJobConf(),
                                  "The quick brown fox\nhas many silly\n"
                                  + "red fox sox\n");
    assertTrue("Archives not matching", ret.isOutputOk);
  } finally {
    if (fileSys != null) {
      fileSys.close();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
    if (mr != null) {
      mr.shutdown();
    }
  }
}
 
Author: naver, Project: hadoop, Lines: 39, Source file: TestMiniMRDFSCaching.java

Example 2: testWithDFS

import org.apache.hadoop.mapred.MRCaching.TestResult; // import the required package/class
public void testWithDFS() throws IOException {
  MiniMRCluster mr = null;
  MiniDFSCluster dfs = null;
  FileSystem fileSys = null;
  try {
    JobConf conf = new JobConf();
    dfs = new MiniDFSCluster(conf, 1, true, null);
    fileSys = dfs.getFileSystem();
    mr = new MiniMRCluster(2, fileSys.getName(), 4);
    MRCaching.setupCache("/cachedir", fileSys);
    // run the wordcount example with caching
    TestResult ret = MRCaching.launchMRCache("/testing/wc/input",
                                          "/testing/wc/output",
                                          "/cachedir",
                                          mr.createJobConf(),
                                          "The quick brown fox\nhas many silly\n"
                                          + "red fox sox\n", false);
    assertTrue("Archives not matching", ret.isOutputOk);
    // launch MR cache with symlinks
    ret = MRCaching.launchMRCache("/testing/wc/input",
                                  "/testing/wc/output",
                                  "/cachedir",
                                  mr.createJobConf(),
                                  "The quick brown fox\nhas many silly\n"
                                  + "red fox sox\n", true);
    assertTrue("Archives not matching", ret.isOutputOk);
  } finally {
    if (fileSys != null) {
      fileSys.close();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
    if (mr != null) {
      mr.shutdown();
    }
  }
}
 
Author: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 39, Source file: TestMiniMRDFSCaching.java

Example 3: testWithDFS

import org.apache.hadoop.mapred.MRCaching.TestResult; // import the required package/class
public void testWithDFS() throws IOException {
  MiniMRCluster mr = null;
  MiniDFSCluster dfs = null;
  FileSystem fileSys = null;
  try {
    JobConf conf = new JobConf();
    conf.set("fs.hdfs.impl",
             "org.apache.hadoop.hdfs.ChecksumDistributedFileSystem");      
    dfs = new MiniDFSCluster(conf, 1, true, null);
    fileSys = dfs.getFileSystem();
    mr = new MiniMRCluster(2, fileSys.getName(), 4);
    MRCaching.setupCache("/cachedir", fileSys);
    // run the wordcount example with caching
    TestResult ret = MRCaching.launchMRCache("/testing/wc/input",
                                          "/testing/wc/output",
                                          "/cachedir",
                                          mr.createJobConf(),
                                          "The quick brown fox\nhas many silly\n"
                                          + "red fox sox\n", false);
    assertTrue("Archives not matching", ret.isOutputOk);
    // launch MR cache with symlinks
    ret = MRCaching.launchMRCache("/testing/wc/input",
                                  "/testing/wc/output",
                                  "/cachedir",
                                  mr.createJobConf(),
                                  "The quick brown fox\nhas many silly\n"
                                  + "red fox sox\n", true);
    assertTrue("Archives not matching", ret.isOutputOk);
  } finally {
    if (fileSys != null) {
      fileSys.close();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
    if (mr != null) {
      mr.shutdown();
    }
  }
}
 
Author: rhli, Project: hadoop-EAR, Lines: 41, Source file: TestMiniMRDFSCaching.java

Example 4: testWithDFS

import org.apache.hadoop.mapred.MRCaching.TestResult; // import the required package/class
public void testWithDFS() throws IOException {
  MiniMRCluster mr = null;
  MiniDFSCluster dfs = null;
  FileSystem fileSys = null;
  try {
    JobConf conf = new JobConf();
    dfs = new MiniDFSCluster(conf, 1, true, null);
    fileSys = dfs.getFileSystem();
    mr = new MiniMRCluster(2, fileSys.getUri().toString(), 4);
    MRCaching.setupCache("/cachedir", fileSys);
    // run the wordcount example with caching
    TestResult ret = MRCaching.launchMRCache("/testing/wc/input",
                                          "/testing/wc/output",
                                          "/cachedir",
                                          mr.createJobConf(),
                                          "The quick brown fox\nhas many silly\n"
                                          + "red fox sox\n");
    assertTrue("Archives not matching", ret.isOutputOk);
    // launch MR cache with symlinks
    ret = MRCaching.launchMRCache("/testing/wc/input",
                                  "/testing/wc/output",
                                  "/cachedir",
                                  mr.createJobConf(),
                                  "The quick brown fox\nhas many silly\n"
                                  + "red fox sox\n");
    assertTrue("Archives not matching", ret.isOutputOk);
  } finally {
    if (fileSys != null) {
      fileSys.close();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
    if (mr != null) {
      mr.shutdown();
    }
  }
}
 
Author: ict-carch, Project: hadoop-plus, Lines: 39, Source file: TestMiniMRDFSCaching.java

Example 5: testWithDFS

import org.apache.hadoop.mapred.MRCaching.TestResult; // import the required package/class
@Test
public void testWithDFS() throws IOException {
  MiniMRCluster mr = null;
  MiniDFSCluster dfs = null;
  FileSystem fileSys = null;
  try {
    JobConf conf = new JobConf();
    dfs = new MiniDFSCluster.Builder(conf).build();
    fileSys = dfs.getFileSystem();
    mr = new MiniMRCluster(2, fileSys.getUri().toString(), 4);
    MRCaching.setupCache("/cachedir", fileSys);
    // run the wordcount example with caching
    TestResult ret = MRCaching.launchMRCache("/testing/wc/input",
                                          "/testing/wc/output",
                                          "/cachedir",
                                          mr.createJobConf(),
                                          "The quick brown fox\nhas many silly\n"
                                          + "red fox sox\n");
    assertTrue("Archives not matching", ret.isOutputOk);
    // launch MR cache with symlinks
    ret = MRCaching.launchMRCache("/testing/wc/input",
                                  "/testing/wc/output",
                                  "/cachedir",
                                  mr.createJobConf(),
                                  "The quick brown fox\nhas many silly\n"
                                  + "red fox sox\n");
    assertTrue("Archives not matching", ret.isOutputOk);
  } finally {
    if (fileSys != null) {
      fileSys.close();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
    if (mr != null) {
      mr.shutdown();
    }
  }
}
 
Author: hopshadoop, Project: hops, Lines: 40, Source file: TestMiniMRDFSCaching.java

Example 6: testWithDFS

import org.apache.hadoop.mapred.MRCaching.TestResult; // import the required package/class
public void testWithDFS() throws IOException {
  MiniMRCluster mr = null;
  MiniDFSCluster dfs = null;
  FileSystem fileSys = null;
  try {
    JobConf conf = new JobConf();
    dfs = new MiniDFSCluster(conf, 1, true, null);
    fileSys = dfs.getFileSystem();
    mr = new MiniMRCluster(2, fileSys.getUri().toString(), 4);
    MRCaching.setupCache("/cachedir", fileSys);
    // run the wordcount example with caching
    TestResult ret = MRCaching.launchMRCache("/testing/wc/input",
                                          "/testing/wc/output",
                                          "/cachedir",
                                          mr.createJobConf(),
                                          "The quick brown fox\nhas many silly\n"
                                          + "red fox sox\n", false);
    assertTrue("Archives not matching", ret.isOutputOk);
    // launch MR cache with symlinks
    ret = MRCaching.launchMRCache("/testing/wc/input",
                                  "/testing/wc/output",
                                  "/cachedir",
                                  mr.createJobConf(),
                                  "The quick brown fox\nhas many silly\n"
                                  + "red fox sox\n", true);
    assertTrue("Archives not matching", ret.isOutputOk);
  } finally {
    if (fileSys != null) {
      fileSys.close();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
    if (mr != null) {
      mr.shutdown();
    }
  }
}
 
Author: rekhajoshm, Project: mapreduce-fork, Lines: 39, Source file: TestMiniMRDFSCaching.java

Example 7: testWithLocal

import org.apache.hadoop.mapred.MRCaching.TestResult; // import the required package/class
public void testWithLocal()
    throws IOException, InterruptedException, ClassNotFoundException {
  MiniMRCluster mr = null;
  try {
    mr = new MiniMRCluster(2, "file:///", 3);
    // make cleanup inline so that validation of existence of these directories
    // can be done
    mr.setInlineCleanupThreads();

    TestMiniMRWithDFS.runPI(mr, mr.createJobConf());

    // run the wordcount example with caching
    JobConf job = mr.createJobConf();
    TestResult ret = MRCaching.launchMRCache(TEST_ROOT_DIR + "/wc/input",
                                          TEST_ROOT_DIR + "/wc/output", 
                                          TEST_ROOT_DIR + "/cachedir",
                                          job,
                                          "The quick brown fox\n" 
                                          + "has many silly\n"
                                          + "red fox sox\n");
    // assert the number of lines read during caching
    assertTrue("Failed test archives not matching", ret.isOutputOk);
    // test the task report fetchers
    JobClient client = new JobClient(job);
    JobID jobid = ret.job.getID();
    TaskReport[] reports;
    reports = client.getSetupTaskReports(jobid);
    assertEquals("number of setups", 2, reports.length);
    reports = client.getMapTaskReports(jobid);
    assertEquals("number of maps", 1, reports.length);
    reports = client.getReduceTaskReports(jobid);
    assertEquals("number of reduces", 1, reports.length);
    reports = client.getCleanupTaskReports(jobid);
    assertEquals("number of cleanups", 2, reports.length);
    Counters counters = ret.job.getCounters();
    assertEquals("number of map inputs", 3, 
                 counters.getCounter(Task.Counter.MAP_INPUT_RECORDS));
    assertEquals("number of reduce outputs", 9, 
                 counters.getCounter(Task.Counter.REDUCE_OUTPUT_RECORDS));
    runCustomFormats(mr);
    runSecondarySort(mr.createJobConf());
  } finally {
    if (mr != null) { mr.shutdown(); }
  }
}
 
Author: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 46, Source file: TestMiniMRLocalFS.java

Example 8: testWithLocal

import org.apache.hadoop.mapred.MRCaching.TestResult; // import the required package/class
public void testWithLocal()
    throws IOException, InterruptedException, ClassNotFoundException {
  MiniMRCluster mr = null;
  try {
    mr = new MiniMRCluster(2, "file:///", 3);
    TestMiniMRWithDFS.runPI(mr, mr.createJobConf());

    // run the wordcount example with caching
    JobConf job = mr.createJobConf();
    TestResult ret = MRCaching.launchMRCache(TEST_ROOT_DIR + "/wc/input",
                                          TEST_ROOT_DIR + "/wc/output", 
                                          TEST_ROOT_DIR + "/cachedir",
                                          job,
                                          "The quick brown fox\n" 
                                          + "has many silly\n"
                                          + "red fox sox\n");
    // assert the number of lines read during caching
    assertTrue("Failed test archives not matching", ret.isOutputOk);
    // test the task report fetchers
    JobClient client = new JobClient(job);
    JobID jobid = ret.job.getID();
    TaskReport[] reports;
    reports = client.getSetupTaskReports(jobid);
    assertEquals("number of setups", 2, reports.length);
    reports = client.getMapTaskReports(jobid);
    assertEquals("number of maps", 1, reports.length);
    reports = client.getReduceTaskReports(jobid);
    assertEquals("number of reduces", 1, reports.length);
    reports = client.getCleanupTaskReports(jobid);
    assertEquals("number of cleanups", 2, reports.length);
    Counters counters = ret.job.getCounters();
    assertEquals("number of map inputs", 3, 
                 counters.getCounter(Task.Counter.MAP_INPUT_RECORDS));
    assertEquals("number of reduce outputs", 9, 
                 counters.getCounter(Task.Counter.REDUCE_OUTPUT_RECORDS));
    runCustomFormats(mr);
    runSecondarySort(mr.createJobConf());
  } finally {
    if (mr != null) { mr.shutdown(); }
  }
}
 
Author: rhli, Project: hadoop-EAR, Lines: 42, Source file: TestMiniMRLocalFS.java

Example 9: testWithLocal

import org.apache.hadoop.mapred.MRCaching.TestResult; // import the required package/class
public void testWithLocal() 
    throws IOException, InterruptedException, ClassNotFoundException {
  MiniMRCluster mr = null;
  try {
    mr = new MiniMRCluster(2, "file:///", 3);
    // make cleanup inline so that validation of existence of these directories
    // can be done
    mr.setInlineCleanupThreads();

    TestMiniMRWithDFS.runPI(mr, mr.createJobConf());

    // run the wordcount example with caching
    JobConf job = mr.createJobConf();
    TestResult ret = MRCaching.launchMRCache(TEST_ROOT_DIR + "/wc/input",
                                          TEST_ROOT_DIR + "/wc/output", 
                                          TEST_ROOT_DIR + "/cachedir",
                                          job,
                                          "The quick brown fox\n" 
                                          + "has many silly\n"
                                          + "red fox sox\n");
    // assert the number of lines read during caching
    assertTrue("Failed test archives not matching", ret.isOutputOk);
    // test the task report fetchers
    JobClient client = new JobClient(job);
    JobID jobid = ret.job.getID();
    TaskReport[] reports;
    reports = client.getSetupTaskReports(jobid);
    assertEquals("number of setups", 2, reports.length);
    reports = client.getMapTaskReports(jobid);
    assertEquals("number of maps", 1, reports.length);
    reports = client.getReduceTaskReports(jobid);
    assertEquals("number of reduces", 1, reports.length);
    reports = client.getCleanupTaskReports(jobid);
    assertEquals("number of cleanups", 2, reports.length);
    Counters counters = ret.job.getCounters();
    assertEquals("number of map inputs", 3, 
                 counters.getCounter(TaskCounter.MAP_INPUT_RECORDS));
    assertEquals("number of reduce outputs", 9, 
                 counters.getCounter(TaskCounter.REDUCE_OUTPUT_RECORDS));
    runCustomFormats(mr);
    runSecondarySort(mr.createJobConf());
  } finally {
    if (mr != null) { mr.shutdown(); }
  }
}
 
Author: rekhajoshm, Project: mapreduce-fork, Lines: 46, Source file: TestMiniMRLocalFS.java


Note: The org.apache.hadoop.mapred.MRCaching.TestResult examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and copyright in the source code remains with those authors; please consult each project's license before distributing or using the code. Do not reproduce this article without permission.