

Java GenericCounter Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.counters.GenericCounter. If you are wondering what GenericCounter is used for, how to use it, or what real code using it looks like, the curated class code examples below may help.


The GenericCounter class belongs to the org.apache.hadoop.mapreduce.counters package. Nine code examples using GenericCounter are shown below, sorted by popularity by default.
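
For orientation before the examples, here is a minimal sketch of the basic GenericCounter API. It is not taken from any of the projects listed below; the class name and counter names are made up for illustration, and it only uses the constructors seen in the examples that follow plus the accessor/mutator methods of the Counter interface that GenericCounter implements.

import org.apache.hadoop.mapreduce.counters.GenericCounter;

public class GenericCounterDemo {
  public static void main(String[] args) {
    // Internal name, human-readable display name, and initial value;
    // the no-arg and two-arg constructors also exist, as the examples below show.
    GenericCounter rowsWritten = new GenericCounter("ROWS_WRITTEN", "Rows written", 0L);

    rowsWritten.increment(5L);  // add to the current value
    rowsWritten.setValue(42L);  // or overwrite it outright

    System.out.println(rowsWritten.getName());        // ROWS_WRITTEN
    System.out.println(rowsWritten.getDisplayName()); // Rows written
    System.out.println(rowsWritten.getValue());       // 42
  }
}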

Example 1: setUp

import org.apache.hadoop.mapreduce.counters.GenericCounter; // import the required package/class
@BeforeMethod(alwaysRun = true)
public void setUp() {
  conf = new Configuration();
  conf.setIfUnset(MRJobConstants.TO_INTERMEDIATE_DATA_FORMAT,
      CSVIntermediateDataFormat.class.getName());
  jobContextMock = mock(TaskAttemptContext.class);
  GenericCounter counter = new GenericCounter("test", "test-me");
  when(((TaskAttemptContext) jobContextMock).getCounter(SqoopCounters.ROWS_WRITTEN)).thenReturn(counter);
  org.apache.hadoop.mapred.JobConf testConf = new org.apache.hadoop.mapred.JobConf();
  when(jobContextMock.getConfiguration()).thenReturn(testConf);
}
 
Developer ID: vybs, Project: sqoop-on-spark, Lines of code: 12, Source file: TestSqoopLoader.java

Example 2: open

import org.apache.hadoop.mapreduce.counters.GenericCounter; // import the required package/class
@Override
public void open() throws HyracksDataException {
    first = true;
    groupStarted = false;
    group = new ArrayList<>();
    bPtr = 0;
    group.add(new VSizeFrame(ctx));
    fta = new FrameTupleAppender();
    keyCounter = new GenericCounter();
    valueCounter = new GenericCounter();
}
 
Developer ID: apache, Project: incubator-asterixdb-hyracks, Lines of code: 12, Source file: ReduceWriter.java

Example 3: Counter

import org.apache.hadoop.mapreduce.counters.GenericCounter; // import the required package/class
public Counter() {
  this(new GenericCounter());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 4, Source file: Counters.java

Example 4: newCounter

import org.apache.hadoop.mapreduce.counters.GenericCounter; // import the required package/class
@Override
protected Counter newCounter(String counterName, String displayName,
                             long value) {
  return new Counter(new GenericCounter(counterName, displayName, value));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 6, Source file: Counters.java

Example 5: newCounter

import org.apache.hadoop.mapreduce.counters.GenericCounter; // import the required package/class
@Override
protected Counter newCounter(String name, String displayName, long value) {
  return new GenericCounter(name, displayName, value);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 5, Source file: Counters.java

Example 6: testLoadJobLoadReducer

import org.apache.hadoop.mapreduce.counters.GenericCounter; // import the required package/class
@Test (timeout=3000)
public void testLoadJobLoadReducer() throws Exception {
  LoadJob.LoadReducer test = new LoadJob.LoadReducer();

  Configuration conf = new Configuration();
  conf.setInt(JobContext.NUM_REDUCES, 2);
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(FileOutputFormat.COMPRESS, true);

  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
  TaskAttemptID taskid = new TaskAttemptID();

  RawKeyValueIterator input = new FakeRawKeyValueIterator();

  Counter counter = new GenericCounter();
  Counter inputValueCounter = new GenericCounter();
  LoadRecordWriter output = new LoadRecordWriter();

  OutputCommitter committer = new CustomOutputCommitter();

  StatusReporter reporter = new DummyReporter();
  RawComparator<GridmixKey> comparator = new FakeRawComparator();

  ReduceContext<GridmixKey, GridmixRecord, NullWritable, GridmixRecord> reduceContext = new ReduceContextImpl<GridmixKey, GridmixRecord, NullWritable, GridmixRecord>(
          conf, taskid, input, counter, inputValueCounter, output, committer,
          reporter, comparator, GridmixKey.class, GridmixRecord.class);
  // read one key/value pair up front before running the reducer
  reduceContext.nextKeyValue();
  org.apache.hadoop.mapreduce.Reducer<GridmixKey, GridmixRecord, NullWritable, GridmixRecord>.Context context = new WrappedReducer<GridmixKey, GridmixRecord, NullWritable, GridmixRecord>()
          .getReducerContext(reduceContext);

  // test.setup(context);
  test.run(context);
  // 9 records have been read (one fewer because of the read above)
  assertEquals(9, counter.getValue());
  assertEquals(10, inputValueCounter.getValue());
  assertEquals(1, output.getData().size());
  GridmixRecord record = output.getData().values().iterator()
          .next();

  assertEquals(1593, record.getSize());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 44, Source file: TestGridMixClasses.java

Example 7: testSleepReducer

import org.apache.hadoop.mapreduce.counters.GenericCounter; // import the required package/class
@Test (timeout=3000)
public void testSleepReducer() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(JobContext.NUM_REDUCES, 2);
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(FileOutputFormat.COMPRESS, true);

  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
  TaskAttemptID taskId = new TaskAttemptID();

  RawKeyValueIterator input = new FakeRawKeyValueReducerIterator();

  Counter counter = new GenericCounter();
  Counter inputValueCounter = new GenericCounter();
  RecordWriter<NullWritable, NullWritable> output = new LoadRecordReduceWriter();

  OutputCommitter committer = new CustomOutputCommitter();

  StatusReporter reporter = new DummyReporter();
  RawComparator<GridmixKey> comparator = new FakeRawComparator();

  ReduceContext<GridmixKey, NullWritable, NullWritable, NullWritable> reducecontext = new ReduceContextImpl<GridmixKey, NullWritable, NullWritable, NullWritable>(
          conf, taskId, input, counter, inputValueCounter, output, committer,
          reporter, comparator, GridmixKey.class, NullWritable.class);
  org.apache.hadoop.mapreduce.Reducer<GridmixKey, NullWritable, NullWritable, NullWritable>.Context context = new WrappedReducer<GridmixKey, NullWritable, NullWritable, NullWritable>()
          .getReducerContext(reducecontext);

  SleepReducer test = new SleepReducer();
  long start = System.currentTimeMillis();
  test.setup(context);
  long sleeper = context.getCurrentKey().getReduceOutputBytes();
  // status has been changed
  assertEquals("Sleeping... " + sleeper + " ms left", context.getStatus());
  // should sleep 0.9 sec

  assertTrue(System.currentTimeMillis() >= (start + sleeper));
  test.cleanup(context);
  // status has been changed again

  assertEquals("Slept for " + sleeper, context.getStatus());

}
 
Developer ID: naver, Project: hadoop, Lines of code: 44, Source file: TestGridMixClasses.java

Example 8: testLoadJobLoadReducer

import org.apache.hadoop.mapreduce.counters.GenericCounter; // import the required package/class
@Test (timeout=1000)
public void testLoadJobLoadReducer() throws Exception {
  LoadJob.LoadReducer test = new LoadJob.LoadReducer();

  Configuration conf = new Configuration();
  conf.setInt(JobContext.NUM_REDUCES, 2);
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(FileOutputFormat.COMPRESS, true);

  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
  TaskAttemptID taskid = new TaskAttemptID();

  RawKeyValueIterator input = new FakeRawKeyValueIterator();

  Counter counter = new GenericCounter();
  Counter inputValueCounter = new GenericCounter();
  LoadRecordWriter output = new LoadRecordWriter();

  OutputCommitter committer = new CustomOutputCommitter();

  StatusReporter reporter = new DummyReporter();
  RawComparator<GridmixKey> comparator = new FakeRawComparator();

  ReduceContext<GridmixKey, GridmixRecord, NullWritable, GridmixRecord> reduceContext = new ReduceContextImpl<GridmixKey, GridmixRecord, NullWritable, GridmixRecord>(
          conf, taskid, input, counter, inputValueCounter, output, committer,
          reporter, comparator, GridmixKey.class, GridmixRecord.class);
  // read one key/value pair up front before running the reducer
  reduceContext.nextKeyValue();
  org.apache.hadoop.mapreduce.Reducer<GridmixKey, GridmixRecord, NullWritable, GridmixRecord>.Context context = new WrappedReducer<GridmixKey, GridmixRecord, NullWritable, GridmixRecord>()
          .getReducerContext(reduceContext);

  // test.setup(context);
  test.run(context);
  // 9 records have been read (one fewer because of the read above)
  assertEquals(9, counter.getValue());
  assertEquals(10, inputValueCounter.getValue());
  assertEquals(1, output.getData().size());
  GridmixRecord record = output.getData().values().iterator()
          .next();

  assertEquals(1593, record.getSize());
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 44, Source file: TestGridMixClasses.java

Example 9: testSleepReducer

import org.apache.hadoop.mapreduce.counters.GenericCounter; // import the required package/class
@Test (timeout=1000)
public void testSleepReducer() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(JobContext.NUM_REDUCES, 2);
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(FileOutputFormat.COMPRESS, true);

  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
  TaskAttemptID taskId = new TaskAttemptID();

  RawKeyValueIterator input = new FakeRawKeyValueReducerIterator();

  Counter counter = new GenericCounter();
  Counter inputValueCounter = new GenericCounter();
  RecordWriter<NullWritable, NullWritable> output = new LoadRecordReduceWriter();

  OutputCommitter committer = new CustomOutputCommitter();

  StatusReporter reporter = new DummyReporter();
  RawComparator<GridmixKey> comparator = new FakeRawComparator();

  ReduceContext<GridmixKey, NullWritable, NullWritable, NullWritable> reducecontext = new ReduceContextImpl<GridmixKey, NullWritable, NullWritable, NullWritable>(
          conf, taskId, input, counter, inputValueCounter, output, committer,
          reporter, comparator, GridmixKey.class, NullWritable.class);
  org.apache.hadoop.mapreduce.Reducer<GridmixKey, NullWritable, NullWritable, NullWritable>.Context context = new WrappedReducer<GridmixKey, NullWritable, NullWritable, NullWritable>()
          .getReducerContext(reducecontext);

  SleepReducer test = new SleepReducer();
  long start = System.currentTimeMillis();
  test.setup(context);
  long sleeper = context.getCurrentKey().getReduceOutputBytes();
  // status has been changed
  assertEquals("Sleeping... " + sleeper + " ms left", context.getStatus());
  // should sleep 0.9 sec

  assertTrue(System.currentTimeMillis() >= (start + sleeper));
  test.cleanup(context);
  // status has been changed again

  assertEquals("Slept for " + sleeper, context.getStatus());

}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 44, Source file: TestGridMixClasses.java


Note: The org.apache.hadoop.mapreduce.counters.GenericCounter class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.