当前位置: 首页>>代码示例>>Java>>正文


Java NullOutputFormat类代码示例

本文整理汇总了Java中org.apache.hadoop.mapreduce.lib.output.NullOutputFormat的典型用法代码示例。如果您正苦于以下问题:Java NullOutputFormat类的具体用法?Java NullOutputFormat怎么用?Java NullOutputFormat使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。


NullOutputFormat类属于org.apache.hadoop.mapreduce.lib.output包,在下文中一共展示了NullOutputFormat类的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: createJob

import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; //导入依赖的package包/类
/**
 * Builds the credentials test job: a single synthetic (sleep-style) map task
 * feeding one reducer, with all output discarded via {@link NullOutputFormat}.
 *
 * @return the fully configured, unsubmitted {@link Job}
 * @throws IOException if the job instance cannot be created
 */
public Job createJob() 
throws IOException {
  Configuration conf = getConf();
  conf.setInt(MRJobConfig.NUM_MAPS, 1);
  Job job = Job.getInstance(conf, "test");
  // Fix: the original called setNumReduceTasks(1) twice; one call suffices.
  job.setNumReduceTasks(1);
  job.setJarByClass(CredentialsTestJob.class);
  job.setMapperClass(CredentialsTestJob.CredentialsTestMapper.class);
  job.setMapOutputKeyClass(IntWritable.class);
  job.setMapOutputValueClass(NullWritable.class);
  job.setReducerClass(CredentialsTestJob.CredentialsTestReducer.class);
  job.setInputFormatClass(SleepJob.SleepInputFormat.class);
  job.setPartitionerClass(SleepJob.SleepJobPartitioner.class);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setSpeculativeExecution(false);
  job.setJobName("test job");
  // The path is literally named "ignored": presumably the custom input format
  // synthesizes its own splits, but an input path must still be registered.
  FileInputFormat.addInputPath(job, new Path("ignored"));
  return job;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:21,代码来源:CredentialsTestJob.java

示例2: createJob

import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; //导入依赖的package包/类
/**
 * Configures a job whose map and/or reduce tasks can be made to fail on demand.
 *
 * @param failMappers  when true, the mappers are instructed (via conf) to fail
 * @param failReducers when true, the reducers are instructed (via conf) to fail
 * @param inputFile    text input consumed by the mappers
 * @return the configured, unsubmitted {@link Job}
 * @throws IOException if the job instance cannot be created
 */
public Job createJob(boolean failMappers, boolean failReducers, Path inputFile) 
    throws IOException {
  // Record the requested failure modes where the tasks can read them.
  Configuration configuration = getConf();
  configuration.setBoolean(FAIL_MAP, failMappers);
  configuration.setBoolean(FAIL_REDUCE, failReducers);

  Job failJob = Job.getInstance(configuration, "fail");
  failJob.setJobName("Fail job");
  failJob.setJarByClass(FailJob.class);
  failJob.setSpeculativeExecution(false);

  // Map side: plain text in, (LongWritable, NullWritable) pairs out.
  failJob.setInputFormatClass(TextInputFormat.class);
  failJob.setMapperClass(FailMapper.class);
  failJob.setMapOutputKeyClass(LongWritable.class);
  failJob.setMapOutputValueClass(NullWritable.class);

  // Reduce side: results are irrelevant, so discard all output.
  failJob.setReducerClass(FailReducer.class);
  failJob.setOutputFormatClass(NullOutputFormat.class);

  FileInputFormat.addInputPath(failJob, inputFile);
  return failJob;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:19,代码来源:FailJob.java

示例3: runTest

import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; //导入依赖的package包/类
/** Runs one spill/collection scenario in-process and asserts it completes. */
private static void runTest(String name, Job job) throws Exception {
  // Execute entirely under the local framework so the map-output collection
  // and spill machinery runs inside this JVM.
  job.getConfiguration().set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
  job.getConfiguration().set("fs.defaultFS", "file:///");
  job.getConfiguration().setInt(MRJobConfig.IO_SORT_FACTOR, 1000);
  job.getConfiguration().setInt("test.mapcollection.num.maps", 1);

  // Single synthetic map feeding a single reducer; all output is discarded.
  job.setNumReduceTasks(1);
  job.setInputFormatClass(FakeIF.class);
  job.setMapperClass(Mapper.class);
  job.setMapOutputKeyClass(KeyWritable.class);
  job.setMapOutputValueClass(ValWritable.class);
  job.setSortComparatorClass(VariableComparator.class);
  job.setReducerClass(SpillReducer.class);
  job.setOutputFormatClass(NullOutputFormat.class);

  LOG.info("Running " + name);
  assertTrue("Job failed!", job.waitForCompletion(false));
}
 
开发者ID:naver,项目名称:hadoop,代码行数:18,代码来源:TestMapCollection.java

示例4: createJob

import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; //导入依赖的package包/类
/**
 * Builds a sleep job: mappers and reducers that idle for configurable
 * durations, useful for exercising the framework without real work.
 *
 * @param numMapper        number of map tasks
 * @param numReducer       number of reduce tasks
 * @param mapSleepTime     total sleep time per map task (ms)
 * @param mapSleepCount    number of sleep intervals per map task
 * @param reduceSleepTime  total sleep time per reduce task (ms)
 * @param reduceSleepCount number of sleep intervals per reduce task
 * @return the configured, unsubmitted {@link Job}
 * @throws IOException if the job instance cannot be created
 */
public Job createJob(int numMapper, int numReducer, 
                     long mapSleepTime, int mapSleepCount, 
                     long reduceSleepTime, int reduceSleepCount) 
    throws IOException {
  // Stash the sleep parameters in the configuration for the tasks to read.
  Configuration sleepConf = getConf();
  sleepConf.setInt(MRJobConfig.NUM_MAPS, numMapper);
  sleepConf.setLong(MAP_SLEEP_TIME, mapSleepTime);
  sleepConf.setInt(MAP_SLEEP_COUNT, mapSleepCount);
  sleepConf.setLong(REDUCE_SLEEP_TIME, reduceSleepTime);
  sleepConf.setInt(REDUCE_SLEEP_COUNT, reduceSleepCount);

  Job sleepJob = Job.getInstance(sleepConf, "sleep");
  sleepJob.setJobName("Sleep job");
  sleepJob.setJarByClass(SleepJob.class);
  sleepJob.setNumReduceTasks(numReducer);
  sleepJob.setSpeculativeExecution(false);

  sleepJob.setInputFormatClass(SleepInputFormat.class);
  sleepJob.setMapperClass(SleepMapper.class);
  sleepJob.setMapOutputKeyClass(IntWritable.class);
  sleepJob.setMapOutputValueClass(NullWritable.class);
  sleepJob.setPartitionerClass(SleepJobPartitioner.class);
  sleepJob.setReducerClass(SleepReducer.class);
  sleepJob.setOutputFormatClass(NullOutputFormat.class);

  // Input path is required by FileInputFormat but literally named "ignored";
  // the sleep input format generates its own synthetic splits.
  FileInputFormat.addInputPath(sleepJob, new Path("ignored"));
  return sleepJob;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:26,代码来源:SleepJob.java

示例5: call

import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; //导入依赖的package包/类
// Configures and submits the distributed-cache data-generation job,
// performing the submission under the login user's credentials.
@Override
public Job call() throws IOException, InterruptedException,
                         ClassNotFoundException {
  // Run setup/submission as the login user so submission uses that
  // user's security context.
  UserGroupInformation ugi = UserGroupInformation.getLoginUser();
  ugi.doAs( new PrivilegedExceptionAction <Job>() {
     public Job run() throws IOException, ClassNotFoundException,
                             InterruptedException {
      // NOTE(review): 'job' is not declared in this block — presumably a
      // field of the enclosing class, mutated here and returned below;
      // confirm against the full source.
      job.setMapperClass(GenDCDataMapper.class);
      job.setNumReduceTasks(0); // map-only job
      job.setMapOutputKeyClass(NullWritable.class);
      job.setMapOutputValueClass(BytesWritable.class);
      job.setInputFormatClass(GenDCDataFormat.class);
      job.setOutputFormatClass(NullOutputFormat.class); // output is discarded
      job.setJarByClass(GenerateDistCacheData.class);
      try {
        // Path is literally named "ignored" — presumably unused by the
        // custom input format, but a path must still be registered.
        FileInputFormat.addInputPath(job, new Path("ignored"));
      } catch (IOException e) {
        // Best-effort: log and continue rather than aborting submission.
        LOG.error("Error while adding input path ", e);
      }
      job.submit(); // asynchronous submit; does not wait for completion
      return job;
    }
  });
  return job;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:26,代码来源:GenerateDistCacheData.java

示例6: testInputFormat

import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; //导入依赖的package包/类
/**
 * Runs a map-only job with the given input format and verifies, via job
 * counters, that exactly the expected rows/columns/values were observed.
 */
void testInputFormat(Class<? extends InputFormat> clazz)
    throws IOException, InterruptedException, ClassNotFoundException {
  final Job job = MapreduceTestingShim.createJob(UTIL.getConfiguration());
  job.setNumReduceTasks(0);
  job.setMapperClass(ExampleVerifier.class);
  job.setInputFormatClass(clazz);
  job.setOutputFormatClass(NullOutputFormat.class);

  LOG.debug("submitting job.");
  assertTrue("job failed!", job.waitForCompletion(true));

  // Counter groups are keyed by the test class name plus a category suffix.
  String prefix = TestTableInputFormat.class.getName();
  assertEquals("Saw the wrong number of instances of the filtered-for row.", 2,
      job.getCounters().findCounter(prefix + ":row", "aaa").getValue());
  assertEquals("Saw any instances of the filtered out row.", 0,
      job.getCounters().findCounter(prefix + ":row", "bbb").getValue());
  assertEquals("Saw the wrong number of instances of columnA.", 1,
      job.getCounters().findCounter(prefix + ":family", "columnA").getValue());
  assertEquals("Saw the wrong number of instances of columnB.", 1,
      job.getCounters().findCounter(prefix + ":family", "columnB").getValue());
  assertEquals("Saw the wrong count of values for the filtered-for row.", 2,
      job.getCounters().findCounter(prefix + ":value", "value aaa").getValue());
  assertEquals("Saw the wrong count of values for the filtered-out row.", 0,
      job.getCounters().findCounter(prefix + ":value", "value bbb").getValue());
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:24,代码来源:TestTableInputFormat.java

示例7: runTestOnTable

import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; //导入依赖的package包/类
/**
 * Runs a map-only time-range scan job over the test table; output is
 * discarded ({@link NullOutputFormat}) since only completion matters.
 * The job's temp directory is deleted regardless of outcome.
 *
 * @throws InterruptedException   if the job is interrupted
 * @throws ClassNotFoundException if a job class cannot be loaded
 * @throws IllegalStateException  if job setup or submission fails with I/O
 */
private void runTestOnTable() throws InterruptedException, ClassNotFoundException {
    Job job = null;
    try {
        Configuration conf = graph.configuration().toHBaseConfiguration();
        job = Job.getInstance(conf, "test123");
        job.setOutputFormatClass(NullOutputFormat.class); // map-only; drop output
        job.setNumReduceTasks(0);
        Scan scan = new Scan();
        scan.addColumn(FAMILY_NAME, COLUMN_NAME);
        scan.setTimeRange(MINSTAMP, MAXSTAMP);
        scan.setMaxVersions();
        TableMapReduceUtil.initTableMapperJob(TABLE_NAME.getNameAsString(),
                scan, ProcessTimeRangeMapper.class, Text.class, Text.class, job,
                true, TableInputFormat.class);
        job.waitForCompletion(true);
    } catch (IOException e) {
        // Fix: the original swallowed this with printStackTrace(), letting the
        // test pass silently on setup/submission failure. Surface it instead.
        throw new IllegalStateException("time-range scan job failed", e);
    } finally {
        if (job != null) {
            FileUtil.fullyDelete(
                    new File(job.getConfiguration().get("hadoop.tmp.dir")));
        }
    }
}
 
开发者ID:rayokota,项目名称:hgraphdb,代码行数:26,代码来源:TableInputFormatTest.java

示例8: runTestOnTable

import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; //导入依赖的package包/类
/**
 * Runs a map-only time-range scan job against a 1-node mini MR cluster;
 * output is discarded ({@link NullOutputFormat}). The mini cluster is shut
 * down and the job's temp directory deleted regardless of outcome.
 *
 * @throws IOException            if the mini cluster cannot be started
 * @throws InterruptedException   if the job is interrupted
 * @throws ClassNotFoundException if a job class cannot be loaded
 * @throws IllegalStateException  if job setup or submission fails with I/O
 */
private void runTestOnTable()
throws IOException, InterruptedException, ClassNotFoundException {
  UTIL.startMiniMapReduceCluster(1);
  Job job = null;
  try {
    job = new Job(UTIL.getConfiguration(), "test123");
    job.setOutputFormatClass(NullOutputFormat.class); // map-only; drop output
    job.setNumReduceTasks(0);
    Scan scan = new Scan();
    scan.addColumn(FAMILY_NAME, COLUMN_NAME);
    scan.setTimeRange(MINSTAMP, MAXSTAMP);
    scan.setMaxVersions();
    TableMapReduceUtil.initTableMapperJob(Bytes.toString(TABLE_NAME),
      scan, ProcessTimeRangeMapper.class, Text.class, Text.class, job);
    job.waitForCompletion(true);
  } catch (IOException e) {
    // Fix: the original swallowed this with printStackTrace(), letting the
    // test pass silently on setup/submission failure. Surface it instead.
    throw new IllegalStateException("time-range scan job failed", e);
  } finally {
    UTIL.shutdownMiniMapReduceCluster();
    if (job != null) {
      FileUtil.fullyDelete(
        new File(job.getConfiguration().get("hadoop.tmp.dir")));
    }
  }
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:27,代码来源:TestTimeRangeMapRed.java

示例9: testOneRemoteJT

import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; //导入依赖的package包/类
/**
 * Verifies that a job forced into remote-JT mode completes successfully on a
 * single-tracker mini Corona cluster.
 */
public void testOneRemoteJT() throws Exception {
  LOG.info("Starting testOneRemoteJT");

  // Bring up a one-tracker cluster on a single rack.
  String[] rackNames = "/rack-1".split(",");
  String[] trackerHosts = "tracker-1".split(",");
  corona = new MiniCoronaCluster.Builder().numTaskTrackers(1).racks(rackNames)
      .hosts(trackerHosts).build();

  // Point the job at the Corona job tracker and force remote execution.
  Configuration jobConf = corona.createJobConf();
  jobConf.set("mapred.job.tracker", "corona");
  jobConf.set("mapred.job.tracker.class", CoronaJobTracker.class.getName());
  jobConf.set("test.locations", "tracker-1");
  jobConf.setBoolean("mapred.coronajobtracker.forceremote", true);

  // Map-only test job whose output is discarded.
  Job testJob = new Job(jobConf);
  testJob.setMapperClass(TstJob.TestMapper.class);
  testJob.setInputFormatClass(TstJob.TestInputFormat.class);
  testJob.setOutputFormatClass(NullOutputFormat.class);
  testJob.setNumReduceTasks(0);
  testJob.getConfiguration().set("io.sort.record.pct", "0.50");
  testJob.getConfiguration().set("io.sort.mb", "25");

  boolean completed = testJob.waitForCompletion(true);
  assertTrue("Job did not succeed", completed);
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:23,代码来源:TestMiniCoronaFederatedJT.java

示例10: getVertexJobWithDefaultMapper

import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; //导入依赖的package包/类
/**
 * Creates a map-only partitioned-vertex-scan job over Cassandra input;
 * every key/value class is NullWritable and all output is discarded.
 *
 * @param c base Hadoop configuration for the job
 * @return the configured, unsubmitted {@link Job}
 * @throws IOException if the job instance cannot be created
 */
private Job getVertexJobWithDefaultMapper(org.apache.hadoop.conf.Configuration c) throws IOException {
    Job scanJob = Job.getInstance(c);
    scanJob.setJobName("testPartitionedVertexScan");
    scanJob.setJarByClass(HadoopScanMapper.class);

    // No reducers; the scan runs entirely in the map phase.
    scanJob.setNumReduceTasks(0);

    // Neither keys nor values carry data anywhere in this job.
    scanJob.setMapOutputKeyClass(NullWritable.class);
    scanJob.setMapOutputValueClass(NullWritable.class);
    scanJob.setOutputKeyClass(NullWritable.class);
    scanJob.setOutputValueClass(NullWritable.class);

    scanJob.setInputFormatClass(CassandraInputFormat.class);
    scanJob.setOutputFormatClass(NullOutputFormat.class);
    return scanJob;
}
 
开发者ID:graben1437,项目名称:titan1withtp3.1,代码行数:17,代码来源:CassandraScanJobIT.java

示例11: createJob

import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; //导入依赖的package包/类
/**
 * Builds a sleep job: mappers and reducers that idle for configurable
 * durations, useful for exercising the framework without real work.
 *
 * @param numMapper        number of map tasks
 * @param numReducer       number of reduce tasks
 * @param mapSleepTime     total sleep time per map task (ms)
 * @param mapSleepCount    number of sleep intervals per map task
 * @param reduceSleepTime  total sleep time per reduce task (ms)
 * @param reduceSleepCount number of sleep intervals per reduce task
 * @return the configured, unsubmitted {@link Job}
 * @throws IOException if the job instance cannot be created
 */
public Job createJob(int numMapper, int numReducer, 
                     long mapSleepTime, int mapSleepCount, 
                     long reduceSleepTime, int reduceSleepCount) 
    throws IOException {
  Configuration conf = getConf();
  conf.setLong(MAP_SLEEP_TIME, mapSleepTime);
  conf.setLong(REDUCE_SLEEP_TIME, reduceSleepTime);
  conf.setInt(MAP_SLEEP_COUNT, mapSleepCount);
  conf.setInt(REDUCE_SLEEP_COUNT, reduceSleepCount);
  conf.setInt(MRJobConfig.NUM_MAPS, numMapper);
  Job job = Job.getInstance(conf, "sleep");
  // Fix: the original called setNumReduceTasks(numReducer) twice; once suffices.
  job.setNumReduceTasks(numReducer);
  job.setJarByClass(SleepJob.class);
  job.setMapperClass(SleepMapper.class);
  job.setMapOutputKeyClass(IntWritable.class);
  job.setMapOutputValueClass(NullWritable.class);
  job.setReducerClass(SleepReducer.class);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setInputFormatClass(SleepInputFormat.class);
  job.setPartitionerClass(SleepJobPartitioner.class);
  job.setSpeculativeExecution(false);
  job.setJobName("Sleep job");
  // Input path is required by FileInputFormat but literally named "ignored";
  // the sleep input format generates its own synthetic splits.
  FileInputFormat.addInputPath(job, new Path("ignored"));
  return job;
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:27,代码来源:SleepJob.java

示例12: main

import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; //导入依赖的package包/类
/**
 * Entry point: configures and runs the "Top popular airports" job over the
 * input at args[0]. Note that NullOutputFormat is set, so reducer output is
 * discarded; exit status reflects job success.
 */
public static void main(String[] args) throws Exception {
    Job airportJob = getInstance(new Configuration(), "Top popular airports");
    airportJob.setJarByClass(TopPopularAirportAll.class);

    // Map/reduce pipeline with 20 reducers.
    airportJob.setMapperClass(TokenizerMapper.class);
    airportJob.setReducerClass(SumReducer.class);
    airportJob.setNumReduceTasks(20);

    airportJob.setOutputKeyClass(Text.class);
    airportJob.setOutputValueClass(IntWritable.class);
    airportJob.setOutputFormatClass(NullOutputFormat.class);

    addInputPath(airportJob, new Path(args[0]));
    exit(airportJob.waitForCompletion(true) ? 0 : 1);
}
 
开发者ID:kgrodzicki,项目名称:cloud-computing-specialization,代码行数:18,代码来源:TopPopularAirportAll.java

示例13: runTestOnTable

import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; //导入依赖的package包/类
/**
 * Runs a map-only time-range scan job against a mini MR cluster; output is
 * discarded ({@link NullOutputFormat}). The mini cluster is shut down and
 * the job's temp directory deleted regardless of outcome.
 *
 * @throws IOException            if the mini cluster cannot be started
 * @throws InterruptedException   if the job is interrupted
 * @throws ClassNotFoundException if a job class cannot be loaded
 * @throws IllegalStateException  if job setup or submission fails with I/O
 */
private void runTestOnTable()
throws IOException, InterruptedException, ClassNotFoundException {
  UTIL.startMiniMapReduceCluster();
  Job job = null;
  try {
    job = new Job(UTIL.getConfiguration(), "test123");
    job.setOutputFormatClass(NullOutputFormat.class); // map-only; drop output
    job.setNumReduceTasks(0);
    Scan scan = new Scan();
    scan.addColumn(FAMILY_NAME, COLUMN_NAME);
    scan.setTimeRange(MINSTAMP, MAXSTAMP);
    scan.setMaxVersions();
    TableMapReduceUtil.initTableMapperJob(TABLE_NAME,
      scan, ProcessTimeRangeMapper.class, Text.class, Text.class, job);
    job.waitForCompletion(true);
  } catch (IOException e) {
    // Fix: the original swallowed this with printStackTrace(), letting the
    // test pass silently on setup/submission failure. Surface it instead.
    throw new IllegalStateException("time-range scan job failed", e);
  } finally {
    UTIL.shutdownMiniMapReduceCluster();
    if (job != null) {
      FileUtil.fullyDelete(
        new File(job.getConfiguration().get("hadoop.tmp.dir")));
    }
  }
}
 
开发者ID:grokcoder,项目名称:pbase,代码行数:27,代码来源:TestTimeRangeMapRed.java

示例14: runTestOnTable

import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; //导入依赖的package包/类
/**
 * Runs a map-only time-range scan job against a mini MR cluster; output is
 * discarded ({@link NullOutputFormat}). The mini cluster is shut down and
 * the job's temp directory deleted regardless of outcome.
 *
 * @throws IOException            if the mini cluster cannot be started
 * @throws InterruptedException   if the job is interrupted
 * @throws ClassNotFoundException if a job class cannot be loaded
 * @throws IllegalStateException  if job setup or submission fails with I/O
 */
private void runTestOnTable()
throws IOException, InterruptedException, ClassNotFoundException {
  UTIL.startMiniMapReduceCluster();
  Job job = null;
  try {
    job = new Job(UTIL.getConfiguration(), "test123");
    job.setOutputFormatClass(NullOutputFormat.class); // map-only; drop output
    job.setNumReduceTasks(0);
    Scan scan = new Scan();
    scan.addColumn(FAMILY_NAME, COLUMN_NAME);
    scan.setTimeRange(MINSTAMP, MAXSTAMP);
    scan.setMaxVersions();
    TableMapReduceUtil.initTableMapperJob(Bytes.toString(TABLE_NAME),
      scan, ProcessTimeRangeMapper.class, Text.class, Text.class, job);
    job.waitForCompletion(true);
  } catch (IOException e) {
    // Fix: the original swallowed this with printStackTrace(), letting the
    // test pass silently on setup/submission failure. Surface it instead.
    throw new IllegalStateException("time-range scan job failed", e);
  } finally {
    UTIL.shutdownMiniMapReduceCluster();
    if (job != null) {
      FileUtil.fullyDelete(
        new File(job.getConfiguration().get("hadoop.tmp.dir")));
    }
  }
}
 
开发者ID:tenggyut,项目名称:HIndex,代码行数:27,代码来源:TestTimeRangeMapRed.java


注:本文中的org.apache.hadoop.mapreduce.lib.output.NullOutputFormat类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。