

Java OutputFormat.getRecordWriter Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapred.OutputFormat.getRecordWriter. If you are wondering what OutputFormat.getRecordWriter does and how to call it in practice, the curated examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.mapred.OutputFormat.


The following presents 5 code examples of OutputFormat.getRecordWriter, drawn from open-source projects and ordered by popularity.
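
Before the project-specific examples, here is a minimal, self-contained sketch of the call pattern they all share, written against the built-in org.apache.hadoop.mapred.TextOutputFormat. The output directory, file name, and task attempt id are placeholder values chosen for illustration, and the snippet assumes a local Hadoop client classpath.

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputFormat;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextOutputFormat;

public class GetRecordWriterSketch {
  public static void main(String[] args) throws IOException {
    JobConf job = new JobConf();
    // File-based output formats resolve the target file against the job output
    // directory and the current task attempt, so both are configured up front.
    FileOutputFormat.setOutputPath(job, new Path("/tmp/getrecordwriter-demo"));
    job.set("mapreduce.task.attempt.id", "attempt_200707121733_0001_m_000000_0");
    FileSystem fs = FileSystem.get(job);

    OutputFormat<NullWritable, Text> outputFormat = new TextOutputFormat<>();
    // getRecordWriter(FileSystem, JobConf, String name, Progressable) hands back
    // the writer a task would use for the output file called "name".
    RecordWriter<NullWritable, Text> writer =
        outputFormat.getRecordWriter(fs, job, "part-00000", Reporter.NULL);
    try {
      writer.write(NullWritable.get(), new Text("hello"));
    } finally {
      // Reporter.NULL is a no-op Progressable/Reporter, as in the examples below.
      writer.close(Reporter.NULL);
    }
  }
}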

Example 1: testWriteBufferData

import org.apache.hadoop.mapred.OutputFormat; // import the package/class this method depends on
@Test(enabled = true)
public void testWriteBufferData() throws Exception {
  NullWritable nada = NullWritable.get();
  MneDurableOutputSession<DurableBuffer<?>> sess =
      new MneDurableOutputSession<DurableBuffer<?>>(null, m_conf,
          MneConfigHelper.DEFAULT_OUTPUT_CONFIG_PREFIX);
  MneDurableOutputValue<DurableBuffer<?>> mdvalue =
      new MneDurableOutputValue<DurableBuffer<?>>(sess);
  OutputFormat<NullWritable, MneDurableOutputValue<DurableBuffer<?>>> outputFormat =
      new MneOutputFormat<MneDurableOutputValue<DurableBuffer<?>>>();
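  // Obtain the writer from MneOutputFormat; the file name and progress
  // reporter arguments are passed as null in this test.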
  RecordWriter<NullWritable, MneDurableOutputValue<DurableBuffer<?>>> writer =
      outputFormat.getRecordWriter(m_fs, m_conf, null, null);
  DurableBuffer<?> dbuf = null;
  Checksum cs = new CRC32();
  cs.reset();
  for (int i = 0; i < m_reccnt; ++i) {
    dbuf = genupdDurableBuffer(sess, cs);
    Assert.assertNotNull(dbuf);
    writer.write(nada, mdvalue.of(dbuf));
  }
  m_checksum = cs.getValue();
  writer.close(null);
  sess.close();
}
 
Developer: apache; Project: mnemonic; Lines of code: 25; Source: MneMapredBufferDataTest.java

Example 2: HadoopV1OutputCollector

import org.apache.hadoop.mapred.OutputFormat; // import the package/class this method depends on
/**
 * @param jobConf Job configuration.
 * @param taskCtx Task context.
 * @param directWrite Direct write flag.
 * @param fileName File name.
 * @throws IOException In case of IO exception.
 */
HadoopV1OutputCollector(JobConf jobConf, HadoopTaskContext taskCtx, boolean directWrite,
    @Nullable String fileName, TaskAttemptID attempt) throws IOException {
    this.jobConf = jobConf;
    this.taskCtx = taskCtx;
    this.attempt = attempt;

    if (directWrite) {
        jobConf.set("mapreduce.task.attempt.id", attempt.toString());

        OutputFormat outFormat = jobConf.getOutputFormat();

        // The FileSystem argument is passed as null; the configured output
        // format resolves the target from the JobConf and the file name.
        writer = outFormat.getRecordWriter(null, jobConf, fileName, Reporter.NULL);
    }
    else
        writer = null;
}
 
Developer: apache; Project: ignite; Lines of code: 24; Source: HadoopV1OutputCollector.java

Example 3: testMROutput

import org.apache.hadoop.mapred.OutputFormat; // import the package/class this method depends on
@Test
public void testMROutput() throws Exception {
  JobConf job = new JobConf(conf);
  Properties properties = new Properties();
  StructObjectInspector inspector;
  synchronized (TestOrcFile.class) {
    inspector = (StructObjectInspector)
        ObjectInspectorFactory.getReflectionObjectInspector(NestedRow.class,
            ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
  }
  SerDe serde = new OrcSerde();
  OutputFormat<?, ?> outFormat = new OrcOutputFormat();
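  // The full path of the test file is passed as the "name" argument, so the
  // ORC writer creates the file directly at testFilePath.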
  RecordWriter writer =
      outFormat.getRecordWriter(fs, conf, testFilePath.toString(),
          Reporter.NULL);
  writer.write(NullWritable.get(),
      serde.serialize(new NestedRow(1,2,3), inspector));
  writer.write(NullWritable.get(),
      serde.serialize(new NestedRow(4,5,6), inspector));
  writer.write(NullWritable.get(),
      serde.serialize(new NestedRow(7,8,9), inspector));
  writer.close(Reporter.NULL);
  serde = new OrcSerde();
  properties.setProperty("columns", "z,r");
  properties.setProperty("columns.types", "int:struct<x:int,y:int>");
  serde.initialize(conf, properties);
  inspector = (StructObjectInspector) serde.getObjectInspector();
  InputFormat<?,?> in = new OrcInputFormat();
  FileInputFormat.setInputPaths(conf, testFilePath.toString());
  InputSplit[] splits = in.getSplits(conf, 1);
  assertEquals(1, splits.length);
  conf.set("hive.io.file.readcolumn.ids", "1");
  org.apache.hadoop.mapred.RecordReader reader =
      in.getRecordReader(splits[0], conf, Reporter.NULL);
  Object key = reader.createKey();
  Object value = reader.createValue();
  int rowNum = 0;
  List<? extends StructField> fields = inspector.getAllStructFieldRefs();
  StructObjectInspector inner = (StructObjectInspector)
      fields.get(1).getFieldObjectInspector();
  List<? extends StructField> inFields = inner.getAllStructFieldRefs();
  IntObjectInspector intInspector =
      (IntObjectInspector) inFields.get(0).getFieldObjectInspector();
  while (reader.next(key, value)) {
    assertEquals(null, inspector.getStructFieldData(value, fields.get(0)));
    Object sub = inspector.getStructFieldData(value, fields.get(1));
    assertEquals(3*rowNum+1, intInspector.get(inner.getStructFieldData(sub,
        inFields.get(0))));
    assertEquals(3*rowNum+2, intInspector.get(inner.getStructFieldData(sub,
        inFields.get(1))));
    rowNum += 1;
  }
  assertEquals(3, rowNum);
  reader.close();
}
 
Developer: facebookarchive; Project: hive-dwrf; Lines of code: 56; Source: TestInputOutputFormat.java

Example 4: testMROutput2

import org.apache.hadoop.mapred.OutputFormat; // import the package/class this method depends on
@Test
public void testMROutput2() throws Exception {
  JobConf job = new JobConf(conf);
  // Test that you can set the output directory using this config
  job.set("mapred.work.output.dir", testFilePath.getParent().toString());
  Properties properties = new Properties();
  StructObjectInspector inspector;
  synchronized (TestOrcFile.class) {
    inspector = (StructObjectInspector)
        ObjectInspectorFactory.getReflectionObjectInspector(StringRow.class,
            ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
  }
  SerDe serde = new OrcSerde();
  OutputFormat<?, ?> outFormat = new OrcOutputFormat();
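  // Only the bare file name is passed here; the writer resolves it against
  // the mapred.work.output.dir configured above.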
  RecordWriter writer =
      outFormat.getRecordWriter(fs, job, testFilePath.getName(),
          Reporter.NULL);
  writer.write(NullWritable.get(),
      serde.serialize(new StringRow("a"), inspector));
  writer.close(Reporter.NULL);
  serde = new OrcSerde();
  properties.setProperty("columns", "col");
  properties.setProperty("columns.types", "string");
  serde.initialize(conf, properties);
  inspector = (StructObjectInspector) serde.getObjectInspector();
  InputFormat<?,?> in = new OrcInputFormat();
  FileInputFormat.setInputPaths(conf, testFilePath.toString());
  InputSplit[] splits = in.getSplits(conf, 1);
  assertEquals(1, splits.length);
  org.apache.hadoop.mapred.RecordReader reader =
      in.getRecordReader(splits[0], conf, Reporter.NULL);
  Object key = reader.createKey();
  Object value = reader.createValue();
  int rowNum = 0;
  List<? extends StructField> fields = inspector.getAllStructFieldRefs();
  reader.next(key, value);
  assertEquals("a",
      ((StringObjectInspector) fields.get(0).getFieldObjectInspector()).getPrimitiveJavaObject(
          inspector.getStructFieldData(value, fields.get(0))));
  reader.close();

}
 
Developer: facebookarchive; Project: hive-dwrf; Lines of code: 43; Source: TestInputOutputFormat.java

Example 5: getRecordWriter

import org.apache.hadoop.mapred.OutputFormat; // import the package/class this method depends on
public RecordWriter<K, V> getRecordWriter( FileSystem fs, JobConf job, String name, Progressable progress )
  throws IOException
{
  String outputFilename = getOutputFilename( job );

  OutputFormat<K,V> of = getOutputFormat( job );

  // Delegate to the wrapped OutputFormat, substituting a per-map output file name.
  return of.getRecordWriter( fs, job, outputFilename, progress );
  
}
 
Developer: iipc; Project: webarchive-commons; Lines of code: 11; Source: PerMapOutputFormat.java


Note: The org.apache.hadoop.mapred.OutputFormat.getRecordWriter examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Please consult the corresponding project's license before redistributing or using the code; do not reproduce without permission.