

Java Reporter Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapred.Reporter. If you have been wondering what exactly the Reporter class does, how to use it, or what real-world usage looks like, the curated examples below should help.


The Reporter class belongs to the org.apache.hadoop.mapred package. Fifteen code examples of the class are shown below, sorted by popularity by default.
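
Before diving into the examples, the following minimal sketch (written for this article rather than taken from any of the projects cited below; the class name, counter group, and status strings are illustrative assumptions) shows the typical role of Reporter in the old mapred API: incrementing custom counters, publishing a status message, and signaling liveness to the framework.

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

// Minimal sketch: a mapper that uses Reporter for counters, status, and liveness.
public class LineCountMapper extends MapReduceBase
    implements Mapper<LongWritable, Text, Text, LongWritable> {

  private static final LongWritable ONE = new LongWritable(1);

  @Override
  public void map(LongWritable key, Text value,
      OutputCollector<Text, LongWritable> output, Reporter reporter)
      throws IOException {
    // Increment a custom counter; the group/counter names here are arbitrary.
    reporter.incrCounter("LineCountMapper", "lines", 1);
    // The status string appears in the task UI and also counts as a liveness signal.
    reporter.setStatus("processing offset " + key.get());
    output.collect(value, ONE);
  }
}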

Example 1: map

import org.apache.hadoop.mapred.Reporter; // import the required package/class
@Override
public void map(ImmutableBytesWritable key, Result value,
    OutputCollector<NullWritable,NullWritable> output,
    Reporter reporter) throws IOException {
  for (Cell cell : value.listCells()) {
    reporter.getCounter(TestTableInputFormat.class.getName() + ":row",
        Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()))
        .increment(1L);
    reporter.getCounter(TestTableInputFormat.class.getName() + ":family",
        Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()))
        .increment(1L);
    reporter.getCounter(TestTableInputFormat.class.getName() + ":value",
        Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()))
        .increment(1L);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: TestTableInputFormat.java

Example 2: doIO

import org.apache.hadoop.mapred.Reporter; // import the required package/class
@Override // IOMapperBase
public Long doIO(Reporter reporter, 
                   String name, 
                   long totalSize // in bytes
                 ) throws IOException {
  OutputStream out = (OutputStream)this.stream;
  // write to the file
  long nrRemaining;
  for (nrRemaining = totalSize; nrRemaining > 0; nrRemaining -= bufferSize) {
    int curSize = (bufferSize < nrRemaining) ? bufferSize : (int)nrRemaining;
    out.write(buffer, 0, curSize);
    reporter.setStatus("writing " + name + "@" + 
                       (totalSize - nrRemaining) + "/" + totalSize 
                       + " ::host = " + hostName);
  }
  return Long.valueOf(totalSize);
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: TestDFSIO.java
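
Note the setStatus call inside the write loop: besides surfacing progress in the web UI, each status update doubles as a liveness signal, which keeps long-running I/O tasks from being killed once they exceed the task timeout (mapred.task.timeout in this API).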

Example 3: reduce

import org.apache.hadoop.mapred.Reporter; // import the required package/class
public void reduce(Object key, Iterator values,
                   OutputCollector output, Reporter reporter) throws IOException {
  if (this.reporter == null) {
    this.reporter = reporter;
  }

  SortedMap<Object, ResetableIterator> groups = regroup(key, values, reporter);
  Object[] tags = groups.keySet().toArray();
  ResetableIterator[] groupValues = new ResetableIterator[tags.length];
  for (int i = 0; i < tags.length; i++) {
    groupValues[i] = groups.get(tags[i]);
  }
  joinAndCollect(tags, groupValues, key, output, reporter);
  addLongValue("groupCount", 1);
  for (int i = 0; i < tags.length; i++) {
    groupValues[i].close();
  }
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: DataJoinReducerBase.java

Example 4: testInitNextRecordReader

import org.apache.hadoop.mapred.Reporter; // import the required package/class
@SuppressWarnings("unchecked")
@Test
public void testInitNextRecordReader() throws IOException{
  JobConf conf = new JobConf();
  Path[] paths = new Path[3];
  long[] fileLength = new long[3];
  File[] files = new File[3];
  LongWritable key = new LongWritable(1);
  Text value = new Text();
  try {
    for (int i = 0; i < 3; i++) {
      fileLength[i] = i;
      File dir = new File(outDir.toString());
      dir.mkdir();
      files[i] = new File(dir,"testfile"+i);
      FileWriter fileWriter = new FileWriter(files[i]);
      fileWriter.close();
      paths[i] = new Path(outDir+"/testfile"+i);
    }
    CombineFileSplit combineFileSplit = new CombineFileSplit(conf, paths, fileLength);
    Reporter reporter = Mockito.mock(Reporter.class);
    CombineFileRecordReader cfrr = new CombineFileRecordReader(conf, combineFileSplit,
      reporter,  TextRecordReaderWrapper.class);
    verify(reporter).progress();
    Assert.assertFalse(cfrr.next(key,value));
    verify(reporter, times(3)).progress();
  } finally {
    FileUtil.fullyDelete(new File(outDir.toString()));
  }

}
 
Developer: naver, Project: hadoop, Lines: 32, Source: TestCombineFileRecordReader.java
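
The progress() expectations reflect how CombineFileRecordReader reports progress each time it initializes the reader for the next chunk: once for the first file during construction, and then, since Mockito counts invocations cumulatively, a total of three times after next() has walked through all three files.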

Example 5: map

import org.apache.hadoop.mapred.Reporter; // import the required package/class
@Override
public void map(ImmutableBytesWritable row, Result result,
    OutputCollector<ImmutableBytesWritable, Put> outCollector,
    Reporter reporter) throws IOException {
  String rowKey = Bytes.toString(result.getRow());
  final ImmutableBytesWritable pKey = new ImmutableBytesWritable(
      Bytes.toBytes(PRESIDENT_PATTERN));
  final ImmutableBytesWritable aKey = new ImmutableBytesWritable(
      Bytes.toBytes(ACTOR_PATTERN));
  ImmutableBytesWritable outKey = null;

  if (rowKey.startsWith(PRESIDENT_PATTERN)) {
    outKey = pKey;
  } else if (rowKey.startsWith(ACTOR_PATTERN)) {
    outKey = aKey;
  } else {
    throw new AssertionError("unexpected rowKey");
  }

  String name = Bytes.toString(result.getValue(COLUMN_FAMILY,
      COLUMN_QUALIFIER));
  outCollector.collect(outKey, new Put(Bytes.toBytes("rowKey2")).add(
      COLUMN_FAMILY, COLUMN_QUALIFIER, Bytes.toBytes(name)));
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: TestTableMapReduceUtil.java

Example 6: testDBInputFormat

import org.apache.hadoop.mapred.Reporter; // import the required package/class
/**
 * Tests the DBInputFormat class. The class should split the result into chunks.
 * @throws Exception
 */
@Test(timeout = 10000)
public void testDBInputFormat() throws Exception {
  JobConf configuration = new JobConf();
  setupDriver(configuration);
  
  DBInputFormat<NullDBWritable> format = new DBInputFormat<NullDBWritable>();
  format.setConf(configuration);
  DBInputFormat.DBInputSplit splitter = new DBInputFormat.DBInputSplit(1, 10);
  Reporter reporter = mock(Reporter.class);
  RecordReader<LongWritable, NullDBWritable> reader = format.getRecordReader(
      splitter, configuration, reporter);

  configuration.setInt(MRJobConfig.NUM_MAPS, 3);
  InputSplit[] lSplits = format.getSplits(configuration, 3);
  assertEquals(5, lSplits[0].getLength());
  assertEquals(3, lSplits.length);

  // test the reader: a few simple checks
  assertEquals(LongWritable.class, reader.createKey().getClass());
  assertEquals(0, reader.getPos());
  assertEquals(0, reader.getProgress(), 0.001);
  reader.close();
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: TestDBInputFormat.java
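
The setupDriver helper is elided in the excerpt above; the sketch below shows the kind of configuration such a helper typically performs (the JDBC driver class and connection URL are illustrative assumptions, not taken from the test).

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.lib.db.DBConfiguration;

// Sketch of a setupDriver-style helper; driver class and URL are hypothetical.
public class DBInputSetup {
  static void setupDriver(JobConf configuration) {
    DBConfiguration.configureDB(configuration,
        "org.hsqldb.jdbcDriver",     // hypothetical JDBC driver class
        "jdbc:hsqldb:mem:testdb");   // hypothetical in-memory database URL
  }
}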

Example 7: shouldCollectPredefinedTimes

import org.apache.hadoop.mapred.Reporter; // import the required package/class
@Test
@SuppressWarnings({ "deprecation", "unchecked" })
public void shouldCollectPredefinedTimes() throws IOException {
  int recordNumber = 999;
  Result resultMock = mock(Result.class);
  IdentityTableMap identityTableMap = null;
  try {
    Reporter reporterMock = mock(Reporter.class);
    identityTableMap = new IdentityTableMap();
    ImmutableBytesWritable bytesWritableMock = mock(ImmutableBytesWritable.class);
    OutputCollector<ImmutableBytesWritable, Result> outputCollectorMock =
        mock(OutputCollector.class);

    for (int i = 0; i < recordNumber; i++)
      identityTableMap.map(bytesWritableMock, resultMock, outputCollectorMock,
          reporterMock);

    verify(outputCollectorMock, times(recordNumber)).collect(
        Mockito.any(ImmutableBytesWritable.class), Mockito.any(Result.class));
  } finally {
    if (identityTableMap != null)
      identityTableMap.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: TestIdentityTableMap.java

Example 8: reduce

import org.apache.hadoop.mapred.Reporter; // import the required package/class
/**
 * Process all of the keys and values. Start up the application if we haven't
 * started it yet.
 */
public void reduce(K2 key, Iterator<V2> values, 
                   OutputCollector<K3, V3> output, Reporter reporter
                   ) throws IOException {
  isOk = false;
  startApplication(output, reporter);
  downlink.reduceKey(key);
  while (values.hasNext()) {
    downlink.reduceValue(values.next());
  }
  if(skipping) {
    //flush the streams on every record input if running in skip mode
    //so that we don't buffer other records surrounding a bad record.
    downlink.flush();
  }
  isOk = true;
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: PipesReducer.java

Example 9: startApplication

import org.apache.hadoop.mapred.Reporter; // import the required package/class
@SuppressWarnings("unchecked")
private void startApplication(OutputCollector<K3, V3> output, Reporter reporter) throws IOException {
  if (application == null) {
    try {
      LOG.info("starting application");
      application = 
        new Application<K2, V2, K3, V3>(
            job, null, output, reporter, 
            (Class<? extends K3>) job.getOutputKeyClass(), 
            (Class<? extends V3>) job.getOutputValueClass());
      downlink = application.getDownlink();
    } catch (InterruptedException ie) {
      throw new RuntimeException("interrupted", ie);
    }
    int reduce=0;
    downlink.runReduce(reduce, Submitter.getIsJavaRecordWriter(job));
  }
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: PipesReducer.java

Example 10: reduce

import org.apache.hadoop.mapred.Reporter; // import the required package/class
public void reduce(IntWritable key, Iterator<Text> values,
    OutputCollector<Text, Text> out,
    Reporter reporter) throws IOException {
  keyVal = key.get();
  while(values.hasNext()) {
    Text value = values.next();
    String towrite = value.toString() + "\n";
    indexStream.write(towrite.getBytes(Charsets.UTF_8));
    written++;
    if (written > numIndexes - 1) {
      // every 1000 indexes we report status
      reporter.setStatus("Creating index for archives");
      reporter.progress();
      endIndex = keyVal;
      String masterWrite = startIndex + " " + endIndex + " " + startPos
                          + " " + indexStream.getPos() + " \n";
      outStream.write(masterWrite.getBytes(Charsets.UTF_8));
      startPos = indexStream.getPos();
      startIndex = endIndex;
      written = 0;
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: HadoopArchives.java
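
Each line written to the master index records the key range (startIndex to endIndex) and the byte range (startPos up to the current offset of indexStream) covered by the preceding block of entries. This gives the archive a two-level lookup structure: the master index locates the block, and the per-entry index file locates the file within it.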

Example 11: reduce

import org.apache.hadoop.mapred.Reporter; // import the required package/class
/** Combines values for a given key.
 * @param key the key, expected to be a Text object whose prefix indicates
 * the type of aggregation to apply to the values
 * @param values the values to combine
 * @param output the collector for combined values
 */
public void reduce(Text key, Iterator<Text> values,
                   OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
  String keyStr = key.toString();
  int pos = keyStr.indexOf(ValueAggregatorDescriptor.TYPE_SEPARATOR);
  String type = keyStr.substring(0, pos);
  ValueAggregator aggregator = ValueAggregatorBaseDescriptor
    .generateValueAggregator(type);
  while (values.hasNext()) {
    aggregator.addNextValue(values.next());
  }
  Iterator outputs = aggregator.getCombinerOutput().iterator();

  while (outputs.hasNext()) {
    Object v = outputs.next();
    if (v instanceof Text) {
      output.collect(key, (Text)v);
    } else {
      output.collect(key, new Text(v.toString()));
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 28, Source: ValueAggregatorCombiner.java

Example 12: getSample

import org.apache.hadoop.mapred.Reporter; // import the required package/class
/**
 * For each split sampled, emit when the ratio of the number of records
 * retained to the total record count is less than the specified
 * frequency.
 */
@SuppressWarnings("unchecked") // ArrayList::toArray doesn't preserve type
public K[] getSample(InputFormat<K,V> inf, JobConf job) throws IOException {
  InputSplit[] splits = inf.getSplits(job, job.getNumMapTasks());
  ArrayList<K> samples = new ArrayList<K>();
  int splitsToSample = Math.min(maxSplitsSampled, splits.length);
  int splitStep = splits.length / splitsToSample;
  long records = 0;
  long kept = 0;
  for (int i = 0; i < splitsToSample; ++i) {
    RecordReader<K,V> reader = inf.getRecordReader(splits[i * splitStep],
        job, Reporter.NULL);
    K key = reader.createKey();
    V value = reader.createValue();
    while (reader.next(key, value)) {
      ++records;
      if ((double) kept / records < freq) {
        ++kept;
        samples.add(key);
        key = reader.createKey();
      }
    }
    reader.close();
  }
  return (K[])samples.toArray();
}
 
Developer: naver, Project: hadoop, Lines: 31, Source: InputSampler.java
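
This getSample implementation matches InputSampler's interval sampler; note the use of Reporter.NULL when no live task context exists. In practice the samples feed a partition file for total-order sorting. Below is a hedged wiring sketch (job setup for input/output paths and formats is omitted; the frequency and split-count arguments are assumed values):

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.lib.InputSampler;
import org.apache.hadoop.mapred.lib.TotalOrderPartitioner;

// Sketch: wiring a sampler into a total-order sort job (old mapred API).
public class TotalOrderSetup {
  public static void configure(JobConf job) throws Exception {
    job.setPartitionerClass(TotalOrderPartitioner.class);
    // Keep keys at a 0.1 frequency, sampling at most 10 splits (assumed values).
    InputSampler.Sampler<Text, Text> sampler =
        new InputSampler.IntervalSampler<Text, Text>(0.1, 10);
    // Compute partition boundaries from the sample and write them to the
    // partition file that TotalOrderPartitioner reads at task startup.
    InputSampler.writePartitionFile(job, sampler);
  }
}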

Example 13: map

import org.apache.hadoop.mapred.Reporter; // import the required package/class
/**
 * Pass the key and the reversed value to reduce.
 */
public void map(ImmutableBytesWritable key, Result value,
  OutputCollector<ImmutableBytesWritable, Put> output,
  Reporter reporter)
throws IOException {
  output.collect(key, TestTableMapReduceBase.map(key, value));
}
 
Developer: fengchen8086, Project: ditb, Lines: 10, Source: TestTableMapReduce.java

Example 14: copyData

import org.apache.hadoop.mapred.Reporter; // import the required package/class
public void copyData(Path input, FSDataInputStream fsin, 
    FSDataOutputStream fout, Reporter reporter) throws IOException {
  try {
    for (int cbread = 0; (cbread = fsin.read(buffer)) >= 0;) {
      fout.write(buffer, 0, cbread);
      reporter.progress();
    }
  } finally {
    fsin.close();
  }
}
 
Developer: naver, Project: hadoop, Lines: 12, Source: HadoopArchives.java

Example 15: testCopyFromHostCompressFailure

import org.apache.hadoop.mapred.Reporter; // import the required package/class
@SuppressWarnings("unchecked")
@Test(timeout=10000) 
public void testCopyFromHostCompressFailure() throws Exception {
  InMemoryMapOutput<Text, Text> immo = mock(InMemoryMapOutput.class);

  Fetcher<Text,Text> underTest = new FakeFetcher<Text,Text>(job, id, ss, mm,
      r, metrics, except, key, connection);

  String replyHash = SecureShuffleUtils.generateHash(encHash.getBytes(), key);
  
  when(connection.getResponseCode()).thenReturn(200);
  when(connection.getHeaderField(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH))
      .thenReturn(replyHash);
  ShuffleHeader header = new ShuffleHeader(map1ID.toString(), 10, 10, 1);
  ByteArrayOutputStream bout = new ByteArrayOutputStream();
  header.write(new DataOutputStream(bout));
  ByteArrayInputStream in = new ByteArrayInputStream(bout.toByteArray());
  when(connection.getInputStream()).thenReturn(in);
  when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME))
      .thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
  when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION))
      .thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
  when(mm.reserve(any(TaskAttemptID.class), anyLong(), anyInt()))
      .thenReturn(immo);
  
  doThrow(new java.lang.InternalError()).when(immo)
      .shuffle(any(MapHost.class), any(InputStream.class), anyLong(), 
          anyLong(), any(ShuffleClientMetrics.class), any(Reporter.class));

  underTest.copyFromHost(host);
     
  verify(connection)
      .addRequestProperty(SecureShuffleUtils.HTTP_HEADER_URL_HASH, 
        encHash);
  verify(ss, times(1)).copyFailed(map1ID, host, true, false);
}
 
Developer: naver, Project: hadoop, Lines: 37, Source: TestFetcher.java


Note: The org.apache.hadoop.mapred.Reporter examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.