Java Writer Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapred.IFile.Writer. If you are wondering what the Writer class does, how to use it, or what real-world usage looks like, the curated code samples below should help.


The Writer class belongs to the org.apache.hadoop.mapred.IFile package. Eleven code examples of the class are presented below, ordered by popularity by default.
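
All eleven examples share the same Writer lifecycle: construct it over an output stream, append key/value pairs, then close it and read the length counters. Here is a minimal, hedged sketch of that lifecycle; `fs` (a FileSystem), `conf` (a Configuration), and the output path are stand-ins for illustration, and the trailing boolean mirrors its use in Examples 9 and 10, where it appears to make the Writer responsible for closing the stream:

FSDataOutputStream out = fs.create(new Path("/tmp/example.ifile")); // hypothetical path
IFile.Writer<Text, Text> writer = new IFile.Writer<Text, Text>(
    conf, out, Text.class, Text.class,
    null,   // no compression codec
    null,   // no spilled-records counter
    true);  // writer closes the stream itself, as in Examples 9 and 10
try {
  writer.append(new Text("key"), new Text("value"));
} finally {
  writer.close();
}
// The length getters report final values once the writer is closed,
// which is what Example 1 relies on.
long raw = writer.getRawLength();
long compressed = writer.getCompressedLength();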

Example 1: spillSingleRecord

import org.apache.hadoop.mapred.IFile.Writer; // import the required package/class
/**
 * Handles the degenerate case where serialization fails to fit in
 * the in-memory buffer, so we must spill the record from collect
 * directly to a spill file. Consider this "losing".
 */
private void spillSingleRecord(final K key, final V value,
                               int partition) throws IOException {
  for (int i = 0; i < partitions; ++i) {
    // A spill file is created for every partition, but only the
    // matching partition receives the record.
    IFile.Writer<K, V> writer = null;
    try {
      // Create a new codec, don't care!
      writer = getFileWriter(i);
      if (i == partition) {
        writer.append(key, value);
      }
      writer.close();
      // The lengths are only final after close().
      mapOutputByteCounter.increment(writer.getRawLength());
      fileOutputByteCounter.increment(writer.getCompressedLength());
      writer = null;                       // signal a successful close
    } catch (IOException e) {
      if (null != writer) writer.close();  // close only if not already closed
      throw e;
    }
  }
}
 
Developer: intel-hpdd, Project: lustre-connector-for-hadoop, Lines: 27, Source: LustreFsOutputCollector.java

Example 2: writePartition

import org.apache.hadoop.mapred.IFile.Writer; // import the required package/class
public long writePartition(RawKeyValueIterator kvIter, int partition) throws IOException {
  Writer<K, V> writer = getFileWriter(partition);
  try {
    if (combinerRunner == null) {
      // No combiner: stream the sorted records straight to the spill file.
      Merger.writeFile(kvIter, writer, reporter, job);
    } else {
      // Run the combiner, collecting its output into the same writer.
      combineCollector.setWriter(writer);
      try {
        combinerRunner.combine(kvIter, combineCollector);
      } catch (Throwable t) {
        // Rewrap any combiner failure as an IOException.
        throw ((t instanceof IOException) ? (IOException) t : new IOException(t));
      }
    }
  } finally {
    writer.close();
    if (combineCollector != null) {
      combineCollector.setWriter(null);
    }
  }
  return writer.getCompressedLength();
}
 
Developer: intel-hpdd, Project: lustre-connector-for-hadoop, Lines: 22, Source: LustreFsOutputCollector.java
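
A hedged call-site sketch for the method above: during a spill it would presumably be invoked once per partition. Both `numPartitions` and `iteratorForPartition` are hypothetical stand-ins, not part of the original class.

long totalCompressed = 0;
for (int p = 0; p < numPartitions; p++) {               // hypothetical partition count
  RawKeyValueIterator kvIter = iteratorForPartition(p); // hypothetical helper
  totalCompressed += writePartition(kvIter, p);         // the writer is closed inside
}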

Example 3: testCustomCollect

import org.apache.hadoop.mapred.IFile.Writer; // import the required package/class
@Test
public void testCustomCollect() throws Throwable {
  //mock creation
  TaskReporter mockTaskReporter = mock(TaskReporter.class);

  @SuppressWarnings("unchecked")
  Writer<String, Integer> mockWriter = mock(Writer.class);

  Configuration conf = new Configuration();
  conf.set(MRJobConfig.COMBINE_RECORDS_BEFORE_PROGRESS, "2");
  
  coc = new CombineOutputCollector<String, Integer>(outCounter, mockTaskReporter, conf);
  coc.setWriter(mockWriter);
  verify(mockTaskReporter, never()).progress();

  coc.collect("dummy", 1);
  verify(mockTaskReporter, never()).progress();
  
  coc.collect("dummy", 2);
  verify(mockTaskReporter, times(1)).progress();
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: TestCombineOutputCollector.java
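
This test (like the similar ones below) relies on Mockito's static helpers; the surrounding test class would carry imports along these lines:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;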

Example 4: testDefaultCollect

import org.apache.hadoop.mapred.IFile.Writer; // import the required package/class
@Test
public void testDefaultCollect() throws Throwable {
  //mock creation
  TaskReporter mockTaskReporter = mock(TaskReporter.class);

  @SuppressWarnings("unchecked")
  Writer<String, Integer> mockWriter = mock(Writer.class);

  Configuration conf = new Configuration();
  
  coc = new CombineOutputCollector<String, Integer>(outCounter, mockTaskReporter, conf);
  coc.setWriter(mockWriter);
  verify(mockTaskReporter, never()).progress();

  for (int i = 0; i < Task.DEFAULT_COMBINE_RECORDS_BEFORE_PROGRESS; i++) {
    coc.collect("dummy", i);
  }
  verify(mockTaskReporter, times(1)).progress();
  for (int i = 0; i < Task.DEFAULT_COMBINE_RECORDS_BEFORE_PROGRESS; i++) {
    coc.collect("dummy", i);
  }
  verify(mockTaskReporter, times(2)).progress();
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: TestCombineOutputCollector.java

Example 5: writeFile

import org.apache.hadoop.mapred.IFile.Writer; // import the required package/class
public static <K extends Object, V extends Object>
  void writeFile(RawKeyValueIterator records, Writer<K, V> writer, 
                 Progressable progressable, Configuration conf) 
  throws IOException {
    // Report progress every `progressBar` records (default 10000) so the
    // framework does not time the task out during a long merge.
    long progressBar = conf.getLong(JobContext.RECORDS_BEFORE_PROGRESS,
        10000);
    long recordCtr = 0;
    while (records.next()) {
      writer.append(records.getKey(), records.getValue());

      if (((recordCtr++) % progressBar) == 0) {
        progressable.progress();
      }
    }
}
 
Developer: naver, Project: hadoop, Lines: 16, Source: Merger.java
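
writeFile is typically fed the RawKeyValueIterator produced by Merger.merge, exactly as Example 11 does. A hedged call-site sketch, assuming `conf`, `fs`, `segments`, `tmpDir`, `comparator`, `reporter`, `writer`, and the key/value classes are set up as in that example:

RawKeyValueIterator rIter = Merger.merge(conf, fs, keyClass, valClass,
    segments, segments.size(), tmpDir, comparator, reporter,
    null, null, null);          // no read/write counters, no merge-phase Progress
Merger.writeFile(rIter, writer, reporter, conf);
writer.close();                 // finalize the IFile before reading its lengths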

Example 6: testCustomCollect

import org.apache.hadoop.mapred.IFile.Writer; // import the required package/class
@Test
public void testCustomCollect() throws Throwable {
  //mock creation
  TaskReporter mockTaskReporter = mock(TaskReporter.class);
  Counters.Counter outCounter = new Counters.Counter();
  Writer<String, Integer> mockWriter = mock(Writer.class);

  Configuration conf = new Configuration();
  conf.set("mapred.combine.recordsBeforeProgress", "2");
  
  coc = new CombineOutputCollector<String, Integer>(outCounter, mockTaskReporter, conf);
  coc.setWriter(mockWriter);
  verify(mockTaskReporter, never()).progress();

  coc.collect("dummy", 1);
  verify(mockTaskReporter, never()).progress();
  
  coc.collect("dummy", 2);
  verify(mockTaskReporter, times(1)).progress();
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 21, Source: TestCombineOutputCollector.java

Example 7: testDefaultCollect

import org.apache.hadoop.mapred.IFile.Writer; // import the required package/class
@Test
public void testDefaultCollect() throws Throwable {
  //mock creation
  TaskReporter mockTaskReporter = mock(TaskReporter.class);
  Counters.Counter outCounter = new Counters.Counter();
  Writer<String, Integer> mockWriter = mock(Writer.class);

  Configuration conf = new Configuration();
  
  coc = new CombineOutputCollector<String, Integer>(outCounter, mockTaskReporter, conf);
  coc.setWriter(mockWriter);
  verify(mockTaskReporter, never()).progress();

  for (int i = 0; i < Task.DEFAULT_MR_COMBINE_RECORDS_BEFORE_PROGRESS; i++) {
    coc.collect("dummy", i);
  }
  verify(mockTaskReporter, times(1)).progress();
  for (int i = 0; i < Task.DEFAULT_MR_COMBINE_RECORDS_BEFORE_PROGRESS; i++) {
    coc.collect("dummy", i);
  }
  verify(mockTaskReporter, times(2)).progress();
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 23, Source: TestCombineOutputCollector.java

Example 8: writeFile

import org.apache.hadoop.mapred.IFile.Writer; // import the required package/class
public static <K extends Object, V extends Object>
  void writeFile(RawKeyValueIterator records, Writer<K, V> writer, 
                 Progressable progressable, Configuration conf) 
  throws IOException {
    long progressBar = conf.getLong("mapred.merge.recordsBeforeProgress",
        10000);
    long recordCtr = 0;
    while (records.next()) {
      writer.append(records.getKey(), records.getValue());

      if (((recordCtr++) % progressBar) == 0) {
        progressable.progress();
      }
    }
}
 
Developer: mammothcm, Project: mammoth, Lines: 17, Source: Merger.java
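
Examples 5 and 8 are the same method from different Hadoop lineages; only the configuration key differs. A hedged way to set the progress interval that covers both spellings:

conf.setLong("mapred.merge.recordsBeforeProgress", 1000L); // literal key read in Example 8
conf.setLong(JobContext.RECORDS_BEFORE_PROGRESS, 1000L);   // constant read in Example 5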

Example 9: getFileWriter

import org.apache.hadoop.mapred.IFile.Writer; // import the required package/class
public Writer<K, V> getFileWriter(int partition) throws IOException {
  int mapid = getTaskID().getTaskID().getId();
  int spillIndex = spillIndices[partition]++;
  Path path = new Path(SharedFsPlugins.getTempPath(job, getTaskID().getJobID()),
      String.format(SharedFsPlugins.MAP_OUTPUT, partition, mapid, spillIndex));
  return new Writer<K, V>(job, lustrefs.create(path), keyClass, valClass,
                          codec, spilledRecordsCounter, true);
}
 
Developer: intel-hpdd, Project: lustre-connector-for-hadoop, Lines: 8, Source: LustreFsOutputCollector.java

Example 10: createSpillFile

import org.apache.hadoop.mapred.IFile.Writer; // import the required package/class
private Writer<K,V> createSpillFile() throws IOException {
  Path tmp =
      new Path(MRJobConfig.OUTPUT + "/backup_" + tid.getId() + "_"
          + (spillNumber++) + ".out");

  LOG.info("Created file: " + tmp);

  file = lDirAlloc.getLocalPathForWrite(tmp.toUri().getPath(), 
      -1, conf);
  FSDataOutputStream out = fs.create(file);
  // Encrypts the stream when intermediate-data encryption is enabled.
  out = CryptoUtils.wrapIfNecessary(conf, out);
  // Null key/value classes: this writer is fed already-serialized (raw) records.
  return new Writer<K, V>(conf, out, null, null, null, null, true);
}
 
Developer: naver, Project: hadoop, Lines: 14, Source: BackupStore.java
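
For completeness, a hedged sketch of reading such a spill back with IFile.Reader, the Writer's counterpart. Because createSpillFile may wrap the stream with CryptoUtils, this plain-path variant assumes intermediate-data encryption is disabled; `conf`, `fs`, and `file` are the fields from the example above.

IFile.Reader<K, V> reader = new IFile.Reader<K, V>(conf, fs, file,
    null /* codec */, null /* reads counter */);
DataInputBuffer key = new DataInputBuffer();
DataInputBuffer value = new DataInputBuffer();
while (reader.nextRawKey(key)) {   // false once the stream is exhausted
  reader.nextRawValue(value);
  // deserialize the raw bytes in `key`/`value` as needed
}
reader.close();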

Example 11: merge

import org.apache.hadoop.mapred.IFile.Writer; // import the required package/class
@Override
public void merge(List<InMemoryMapOutput<K, V>> inputs) throws IOException {
  if (inputs == null || inputs.size() == 0) {
    return;
  }

  TaskAttemptID dummyMapId = inputs.get(0).getMapId(); 
  List<Segment<K, V>> inMemorySegments = new ArrayList<Segment<K, V>>();
  long mergeOutputSize = 
    createInMemorySegments(inputs, inMemorySegments, 0);
  int noInMemorySegments = inMemorySegments.size();
  
  InMemoryMapOutput<K, V> mergedMapOutputs = 
    unconditionalReserve(dummyMapId, mergeOutputSize, false);
  
  Writer<K, V> writer = 
    new InMemoryWriter<K, V>(mergedMapOutputs.getArrayStream());
  
  LOG.info("Initiating Memory-to-Memory merge with " + noInMemorySegments +
           " segments of total-size: " + mergeOutputSize);

  RawKeyValueIterator rIter = 
    Merger.merge(jobConf, rfs,
                 (Class<K>)jobConf.getMapOutputKeyClass(),
                 (Class<V>)jobConf.getMapOutputValueClass(),
                 inMemorySegments, inMemorySegments.size(),
                 new Path(reduceId.toString()),
                 (RawComparator<K>)jobConf.getOutputKeyComparator(),
                 reporter, null, null, null);
  Merger.writeFile(rIter, writer, reporter, jobConf);
  writer.close();

  LOG.info(reduceId +  
           " Memory-to-Memory merge of the " + noInMemorySegments +
           " files in-memory complete.");

  // Note the output of the merge
  closeInMemoryMergedFile(mergedMapOutputs);
}
 
Developer: naver, Project: hadoop, Lines: 40, Source: MergeManagerImpl.java


Note: the org.apache.hadoop.mapred.IFile.Writer examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use are subject to each project's license. Please do not reproduce without permission.