
Java OutputCollector Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapred.OutputCollector. If you have been wondering what the OutputCollector class does, how to use it, or where to find usage examples, the curated class code examples below may help.


The OutputCollector class belongs to the org.apache.hadoop.mapred package. A total of 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
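For orientation before the examples: OutputCollector is the callback interface through which old-API (org.apache.hadoop.mapred) mappers and reducers emit key/value pairs, and it declares a single method, collect(K key, V value). Below is a minimal word-count style mapper sketch showing the typical call site; the class and variable names are illustrative and do not come from the examples that follow.

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

// Illustrative mapper: tokenizes each input line and emits (word, 1)
// through the OutputCollector supplied by the framework.
public class WordCountMapper extends MapReduceBase
    implements Mapper<LongWritable, Text, Text, IntWritable> {

  private static final IntWritable ONE = new IntWritable(1);
  private final Text word = new Text();

  @Override
  public void map(LongWritable key, Text value,
      OutputCollector<Text, IntWritable> output, Reporter reporter)
      throws IOException {
    StringTokenizer tokens = new StringTokenizer(value.toString());
    while (tokens.hasMoreTokens()) {
      word.set(tokens.nextToken());
      output.collect(word, ONE); // one pair per word; the framework handles partitioning
    }
  }
}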

Example 1: map

import org.apache.hadoop.mapred.OutputCollector; // import the required package/class
@Override
public void map(ImmutableBytesWritable key, Result value,
    OutputCollector<NullWritable,NullWritable> output,
    Reporter reporter) throws IOException {
  for (Cell cell : value.listCells()) {
    reporter.getCounter(TestTableInputFormat.class.getName() + ":row",
        Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()))
        .increment(1L);
    reporter.getCounter(TestTableInputFormat.class.getName() + ":family",
        Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()))
        .increment(1L);
    reporter.getCounter(TestTableInputFormat.class.getName() + ":value",
        Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()))
        .increment(1L);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 17, Source: TestTableInputFormat.java

Example 2: sinkConfInit

import org.apache.hadoop.mapred.OutputCollector; // import the required package/class
@Override
public void sinkConfInit(FlowProcess<JobConf> flowProcess, Tap<JobConf, RecordReader, OutputCollector> tap, JobConf conf) {

    conf.setOutputFormat(EsOutputFormat.class);
    // define an output dir to prevent Cascading from setting up a TempHfs and overriding the OutputFormat
    Settings set = loadSettings(conf, false);

    Log log = LogFactory.getLog(EsTap.class);
    InitializationUtils.setValueWriterIfNotSet(set, CascadingValueWriter.class, log);
    InitializationUtils.setValueReaderIfNotSet(set, JdkValueReader.class, log);
    InitializationUtils.setBytesConverterIfNeeded(set, CascadingLocalBytesConverter.class, log);
    InitializationUtils.setFieldExtractorIfNotSet(set, CascadingFieldExtractor.class, log);

    // NB: we need to set this property even though it is not being used - and since a URI causes problems, use only the resource/file
    //conf.set("mapred.output.dir", set.getTargetUri() + "/" + set.getTargetResource());
    HadoopCfgUtils.setFileOutputFormatDir(conf, set.getResourceWrite());
    HadoopCfgUtils.setOutputCommitterClass(conf, EsOutputFormat.EsOldAPIOutputCommitter.class.getName());

    if (log.isTraceEnabled()) {
        log.trace("Initialized (sink) configuration " + HadoopCfgUtils.asProperties(conf));
    }
}
 
Developer ID: xushjie1987, Project: es-hadoop-v2.2.0, Lines: 23, Source: EsHadoopScheme.java

Example 3: reduce

import org.apache.hadoop.mapred.OutputCollector; // import the required package/class
@Test
public void reduce() {
    MaxTemperatureMapRed.MaxTemperatureReduce maxTemperatureReduce = new MaxTemperatureMapRed.MaxTemperatureReduce();
    try {
        List<IntWritable> list = new ArrayList<IntWritable>();
        list.add(new IntWritable(12));
        list.add(new IntWritable(31));
        list.add(new IntWritable(45));
        list.add(new IntWritable(23));
        list.add(new IntWritable(21));
        maxTemperatureReduce.reduce(new Text("1901"), list.iterator(), new OutputCollector<Text, IntWritable>() {
            @Override
            public void collect(final Text text, final IntWritable intWritable) throws IOException {
                log.info(text.toString() + "  " + intWritable.get());
            }
        }, null);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
Developer ID: mumuhadoop, Project: mumu-mapreduce, Lines: 21, Source: MaxTemperatureMapRedTest.java
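Since OutputCollector declares a single abstract method, a Java 8+ test can also pass a lambda that captures the emitted pairs and asserts on them, instead of only logging as Example 3 does. A minimal sketch of that variation, reusing the reducer and input list from Example 3 inside the same try block, and assuming the reducer emits a single (year, max) pair and that JUnit's assertEquals is imported:

// Capture emitted values so the test can assert on the reducer's output.
final List<Integer> collected = new ArrayList<>();
maxTemperatureReduce.reduce(new Text("1901"), list.iterator(),
    (text, intWritable) -> collected.add(intWritable.get()),
    null);
assertEquals(1, collected.size());
assertEquals(45, (int) collected.get(0)); // 45 is the maximum of the inputs above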

Example 4: collectStats

import org.apache.hadoop.mapred.OutputCollector; // import the required package/class
@Override // IOMapperBase
void collectStats(OutputCollector<Text, Text> output, 
                  String name,
                  long execTime, 
                  Long objSize) throws IOException {
  long totalSize = objSize.longValue();
  float ioRateMbSec = (float)totalSize * 1000 / (execTime * MEGA);
  LOG.info("Number of bytes processed = " + totalSize);
  LOG.info("Exec time = " + execTime);
  LOG.info("IO rate = " + ioRateMbSec);
  
  output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "tasks"),
      new Text(String.valueOf(1)));
  output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "size"),
      new Text(String.valueOf(totalSize)));
  output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "time"),
      new Text(String.valueOf(execTime)));
  output.collect(new Text(AccumulatingReducer.VALUE_TYPE_FLOAT + "rate"),
      new Text(String.valueOf(ioRateMbSec*1000)));
  output.collect(new Text(AccumulatingReducer.VALUE_TYPE_FLOAT + "sqrate"),
      new Text(String.valueOf(ioRateMbSec*ioRateMbSec*1000)));
}
 
Developer ID: naver, Project: hadoop, Lines: 23, Source: TestDFSIO.java
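A note on what Example 4 emits: the "rate" and "sqrate" values are the per-task throughput and squared throughput (scaled by 1000). AccumulatingReducer sums them across tasks, which is enough for TestDFSIO to recover the mean and standard deviation of per-task throughput when it analyzes the results.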

Example 5: reduce

import org.apache.hadoop.mapred.OutputCollector; // import the required package/class
/**
 * Process all of the keys and values. Start up the application if we haven't
 * started it yet.
 */
public void reduce(K2 key, Iterator<V2> values, 
                   OutputCollector<K3, V3> output, Reporter reporter
                   ) throws IOException {
  isOk = false;
  startApplication(output, reporter);
  downlink.reduceKey(key);
  while (values.hasNext()) {
    downlink.reduceValue(values.next());
  }
  if(skipping) {
    //flush the streams on every record input if running in skip mode
    //so that we don't buffer other records surrounding a bad record.
    downlink.flush();
  }
  isOk = true;
}
 
Developer ID: naver, Project: hadoop, Lines: 21, Source: PipesReducer.java

Example 6: startApplication

import org.apache.hadoop.mapred.OutputCollector; // import the required package/class
@SuppressWarnings("unchecked")
private void startApplication(OutputCollector<K3, V3> output, Reporter reporter) throws IOException {
  if (application == null) {
    try {
      LOG.info("starting application");
      application = 
        new Application<K2, V2, K3, V3>(
            job, null, output, reporter, 
            (Class<? extends K3>) job.getOutputKeyClass(), 
            (Class<? extends V3>) job.getOutputValueClass());
      downlink = application.getDownlink();
    } catch (InterruptedException ie) {
      throw new RuntimeException("interrupted", ie);
    }
    int reduce=0;
    downlink.runReduce(reduce, Submitter.getIsJavaRecordWriter(job));
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 19, Source: PipesReducer.java

Example 7: reduce

import org.apache.hadoop.mapred.OutputCollector; // import the required package/class
/** Combines values for a given key.  
 * @param key the key is expected to be a Text object, whose prefix indicates
 * the type of aggregation to aggregate the values. 
 * @param values the values to combine
 * @param output to collect combined values
 */
public void reduce(Text key, Iterator<Text> values,
                   OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
  String keyStr = key.toString();
  int pos = keyStr.indexOf(ValueAggregatorDescriptor.TYPE_SEPARATOR);
  String type = keyStr.substring(0, pos);
  ValueAggregator aggregator = ValueAggregatorBaseDescriptor
    .generateValueAggregator(type);
  while (values.hasNext()) {
    aggregator.addNextValue(values.next());
  }
  Iterator outputs = aggregator.getCombinerOutput().iterator();

  while (outputs.hasNext()) {
    Object v = outputs.next();
    if (v instanceof Text) {
      output.collect(key, (Text)v);
    } else {
      output.collect(key, new Text(v.toString()));
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 28, Source: ValueAggregatorCombiner.java
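For context on the key format Example 7 parses (this note is not part of the quoted source): keys produced by the Hadoop aggregate library carry their aggregation type as a prefix, joined to the field name by ValueAggregatorDescriptor.TYPE_SEPARATOR. A hypothetical key might be assembled like this:

// Hypothetical illustration of the prefixed key format the combiner splits apart.
String aggregatorType = "LongValueSum";  // one of the built-in aggregator type names
String field = "bytes_read";             // an application-chosen field name (made up here)
Text key = new Text(aggregatorType + ValueAggregatorDescriptor.TYPE_SEPARATOR + field);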

Example 8: reduce

import org.apache.hadoop.mapred.OutputCollector; // import the required package/class
public void reduce(IntWritable key, Iterator<Text> values,
    OutputCollector<Text, Text> out,
    Reporter reporter) throws IOException {
  keyVal = key.get();
  while(values.hasNext()) {
    Text value = values.next();
    String towrite = value.toString() + "\n";
    indexStream.write(towrite.getBytes(Charsets.UTF_8));
    written++;
    if (written > numIndexes -1) {
      // every 1000 indexes we report status
      reporter.setStatus("Creating index for archives");
      reporter.progress();
      endIndex = keyVal;
      String masterWrite = startIndex + " " + endIndex + " " + startPos 
                          +  " " + indexStream.getPos() + " \n" ;
      outStream.write(masterWrite.getBytes(Charsets.UTF_8));
      startPos = indexStream.getPos();
      startIndex = endIndex;
      written = 0;
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 24, Source: HadoopArchives.java

Example 9: map

import org.apache.hadoop.mapred.OutputCollector; // import the required package/class
/** Run a FileOperation */
public void map(Text key, FileOperation value,
    OutputCollector<WritableComparable<?>, Text> out, Reporter reporter
    ) throws IOException {
  try {
    value.run(jobconf);
    ++succeedcount;
    reporter.incrCounter(Counter.SUCCEED, 1);
  } catch (IOException e) {
    ++failcount;
    reporter.incrCounter(Counter.FAIL, 1);

    String s = "FAIL: " + value + ", " + StringUtils.stringifyException(e);
    out.collect(null, new Text(s));
    LOG.info(s);
  } finally {
    reporter.setStatus(getCountString());
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 20, Source: DistCh.java

Example 10: map

import org.apache.hadoop.mapred.OutputCollector; // import the required package/class
public void map(Object key, Object value,
                OutputCollector output, Reporter reporter) throws IOException {
  if (this.reporter == null) {
    this.reporter = reporter;
  }
  addLongValue("totalCount", 1);
  TaggedMapOutput aRecord = generateTaggedMapOutput(value);
  if (aRecord == null) {
    addLongValue("discardedCount", 1);
    return;
  }
  Text groupKey = generateGroupKey(aRecord);
  if (groupKey == null) {
    addLongValue("nullGroupKeyCount", 1);
    return;
  }
  output.collect(groupKey, aRecord);
  addLongValue("collectedCount", 1);
}
 
Developer ID: naver, Project: hadoop, Lines: 20, Source: DataJoinMapperBase.java

Example 11: reduce

import org.apache.hadoop.mapred.OutputCollector; // import the required package/class
public void reduce(Object key, Iterator values,
                   OutputCollector output, Reporter reporter) throws IOException {
  if (this.reporter == null) {
    this.reporter = reporter;
  }

  SortedMap<Object, ResetableIterator> groups = regroup(key, values, reporter);
  Object[] tags = groups.keySet().toArray();
  ResetableIterator[] groupValues = new ResetableIterator[tags.length];
  for (int i = 0; i < tags.length; i++) {
    groupValues[i] = groups.get(tags[i]);
  }
  joinAndCollect(tags, groupValues, key, output, reporter);
  addLongValue("groupCount", 1);
  for (int i = 0; i < tags.length; i++) {
    groupValues[i].close();
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 19, Source: DataJoinReducerBase.java

Example 12: joinAndCollect

import org.apache.hadoop.mapred.OutputCollector; // import the required package/class
/**
 * Perform the actual join recursively.
 * 
 * @param tags
 *          a list of input tags
 * @param values
 *          a list of value lists, each corresponding to one input source
 * @param pos
 *          indicating the next value list to be joined
 * @param partialList
 *          a list of values, each from one value list considered so far.
 * @param key
 * @param output
 * @throws IOException
 */
private void joinAndCollect(Object[] tags, ResetableIterator[] values,
                            int pos, Object[] partialList, Object key,
                            OutputCollector output, Reporter reporter) throws IOException {

  if (values.length == pos) {
    // get a value from each source. Combine them
    TaggedMapOutput combined = combine(tags, partialList);
    collect(key, combined, output, reporter);
    return;
  }
  ResetableIterator nextValues = values[pos];
  nextValues.reset();
  while (nextValues.hasNext()) {
    Object v = nextValues.next();
    partialList[pos] = v;
    joinAndCollect(tags, values, pos + 1, partialList, key, output, reporter);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 34, Source: DataJoinReducerBase.java

Example 13: reduce

import org.apache.hadoop.mapred.OutputCollector; // import the required package/class
@Override
public void reduce(ImmutableBytesWritable key, Iterator<Put> values,
    OutputCollector<ImmutableBytesWritable, Put> output, Reporter reporter)
    throws IOException {
  String strKey = Bytes.toString(key.get());
  List<Put> result = new ArrayList<Put>();
  while (values.hasNext())
    result.add(values.next());

  if (relation.keySet().contains(strKey)) {
    Set<String> set = relation.get(strKey);
    if (set != null) {
      assertEquals(set.size(), result.size());
    } else {
      throwAccertionError("Test infrastructure error: set is null");
    }
  } else {
    throwAccertionError("Test infrastructure error: key not found in map");
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 21, Source: TestTableMapReduceUtil.java

Example 14: map

import org.apache.hadoop.mapred.OutputCollector; // import the required package/class
@Override
public void map(ImmutableBytesWritable row, Result result,
    OutputCollector<ImmutableBytesWritable, Put> outCollector,
    Reporter reporter) throws IOException {
  String rowKey = Bytes.toString(result.getRow());
  final ImmutableBytesWritable pKey = new ImmutableBytesWritable(
      Bytes.toBytes(PRESIDENT_PATTERN));
  final ImmutableBytesWritable aKey = new ImmutableBytesWritable(
      Bytes.toBytes(ACTOR_PATTERN));
  ImmutableBytesWritable outKey = null;

  if (rowKey.startsWith(PRESIDENT_PATTERN)) {
    outKey = pKey;
  } else if (rowKey.startsWith(ACTOR_PATTERN)) {
    outKey = aKey;
  } else {
    throw new AssertionError("unexpected rowKey");
  }

  String name = Bytes.toString(result.getValue(COLUMN_FAMILY,
      COLUMN_QUALIFIER));
  outCollector.collect(outKey, new Put(Bytes.toBytes("rowKey2")).add(
      COLUMN_FAMILY, COLUMN_QUALIFIER, Bytes.toBytes(name)));
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 25, Source: TestTableMapReduceUtil.java

Example 15: shouldCollectPredefinedTimes

import org.apache.hadoop.mapred.OutputCollector; // import the required package/class
@Test
@SuppressWarnings({ "deprecation", "unchecked" })
public void shouldCollectPredefinedTimes() throws IOException {
  int recordNumber = 999;
  Result resultMock = mock(Result.class);
  IdentityTableMap identityTableMap = null;
  try {
    Reporter reporterMock = mock(Reporter.class);
    identityTableMap = new IdentityTableMap();
    ImmutableBytesWritable bytesWritableMock = mock(ImmutableBytesWritable.class);
    OutputCollector<ImmutableBytesWritable, Result> outputCollectorMock =
        mock(OutputCollector.class);

    for (int i = 0; i < recordNumber; i++)
      identityTableMap.map(bytesWritableMock, resultMock, outputCollectorMock,
          reporterMock);

    verify(outputCollectorMock, times(recordNumber)).collect(
        Mockito.any(ImmutableBytesWritable.class), Mockito.any(Result.class));
  } finally {
    if (identityTableMap != null)
      identityTableMap.close();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 25, Source: TestIdentityTableMap.java


Note: The org.apache.hadoop.mapred.OutputCollector class examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with the original authors. Consult the corresponding project's License before distributing or using the code; do not republish without permission.