This article collects typical usage examples of the Java class org.apache.hadoop.mapred.OutputFormat. If you are wondering what OutputFormat is for, or how to use it in practice, the curated class code examples below may help.
The OutputFormat class belongs to the org.apache.hadoop.mapred package. A total of 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
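Before the examples, here is a minimal sketch of the org.apache.hadoop.mapred.OutputFormat contract itself: getRecordWriter(...) hands back the per-task RecordWriter, and checkOutputSpecs(...) validates the output configuration before the job runs. The class name ToyOutputFormat and the tab-separated record layout are hypothetical, chosen only for illustration; treat this as a sketch, not production code.

import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputFormat;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.Progressable;

// Hypothetical example class: writes each record as a "key<TAB>value" line.
public class ToyOutputFormat implements OutputFormat<Text, LongWritable> {
  @Override
  public RecordWriter<Text, LongWritable> getRecordWriter(
      FileSystem ignored, JobConf job, String name, Progressable progress) throws IOException {
    // Resolve this task's output file under the job's configured output directory.
    Path file = FileOutputFormat.getTaskOutputPath(job, name);
    FileSystem fs = file.getFileSystem(job);
    DataOutputStream out = fs.create(file, progress);
    return new RecordWriter<Text, LongWritable>() {
      @Override
      public void write(Text key, LongWritable value) throws IOException {
        out.writeBytes(key.toString() + '\t' + value.get() + '\n');
      }
      @Override
      public void close(Reporter reporter) throws IOException {
        out.close();
      }
    };
  }

  @Override
  public void checkOutputSpecs(FileSystem ignored, JobConf job) throws IOException {
    // Fail fast before the job starts if no output directory was configured.
    if (FileOutputFormat.getOutputPath(job) == null) {
      throw new IOException("Output directory not set in JobConf");
    }
  }
}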
Example 1: addDependencyJars
import org.apache.hadoop.mapred.OutputFormat; // import the required package/class
/**
 * @see org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil#addDependencyJars(org.apache.hadoop.mapreduce.Job)
 */
public static void addDependencyJars(JobConf job) throws IOException {
  org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addHBaseDependencyJars(job);
  org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(
      job,
      // when making changes here, consider also mapreduce.TableMapReduceUtil
      // pull job classes
      job.getMapOutputKeyClass(),
      job.getMapOutputValueClass(),
      job.getOutputKeyClass(),
      job.getOutputValueClass(),
      job.getPartitionerClass(),
      job.getClass("mapred.input.format.class", TextInputFormat.class, InputFormat.class),
      job.getClass("mapred.output.format.class", TextOutputFormat.class, OutputFormat.class),
      job.getCombinerClass());
}
Example 2: testWriteBufferData
import org.apache.hadoop.mapred.OutputFormat; // import the required package/class
@Test(enabled = true)
public void testWriteBufferData() throws Exception {
  NullWritable nada = NullWritable.get();
  MneDurableOutputSession<DurableBuffer<?>> sess =
      new MneDurableOutputSession<DurableBuffer<?>>(null, m_conf,
          MneConfigHelper.DEFAULT_OUTPUT_CONFIG_PREFIX);
  MneDurableOutputValue<DurableBuffer<?>> mdvalue =
      new MneDurableOutputValue<DurableBuffer<?>>(sess);
  OutputFormat<NullWritable, MneDurableOutputValue<DurableBuffer<?>>> outputFormat =
      new MneOutputFormat<MneDurableOutputValue<DurableBuffer<?>>>();
  RecordWriter<NullWritable, MneDurableOutputValue<DurableBuffer<?>>> writer =
      outputFormat.getRecordWriter(m_fs, m_conf, null, null);
  DurableBuffer<?> dbuf = null;
  Checksum cs = new CRC32();
  cs.reset();
  for (int i = 0; i < m_reccnt; ++i) {
    dbuf = genupdDurableBuffer(sess, cs);
    Assert.assertNotNull(dbuf);
    writer.write(nada, mdvalue.of(dbuf));
  }
  m_checksum = cs.getValue();
  writer.close(null);
  sess.close();
}
Example 3: testOpen
import org.apache.hadoop.mapred.OutputFormat; // import the required package/class
@Test
public void testOpen() throws Exception {
  OutputFormat<String, Long> dummyOutputFormat = mock(DummyOutputFormat.class);
  DummyOutputCommitter outputCommitter = mock(DummyOutputCommitter.class);
  JobConf jobConf = Mockito.spy(new JobConf());
  when(jobConf.getOutputCommitter()).thenReturn(outputCommitter);
  HadoopOutputFormat<String, Long> outputFormat = new HadoopOutputFormat<>(dummyOutputFormat, jobConf);
  outputFormat.open(1, 1);
  verify(jobConf, times(2)).getOutputCommitter();
  verify(outputCommitter, times(1)).setupJob(any(JobContext.class));
  verify(dummyOutputFormat, times(1)).getRecordWriter(any(FileSystem.class), any(JobConf.class), anyString(), any(Progressable.class));
}
Example 4: testCloseWithTaskCommit
import org.apache.hadoop.mapred.OutputFormat; // import the required package/class
@Test
public void testCloseWithTaskCommit() throws Exception {
  OutputFormat<String, Long> dummyOutputFormat = mock(DummyOutputFormat.class);
  DummyOutputCommitter outputCommitter = mock(DummyOutputCommitter.class);
  when(outputCommitter.needsTaskCommit(any(TaskAttemptContext.class))).thenReturn(true);
  DummyRecordWriter recordWriter = mock(DummyRecordWriter.class);
  JobConf jobConf = mock(JobConf.class);
  HadoopOutputFormat<String, Long> outputFormat = new HadoopOutputFormat<>(dummyOutputFormat, jobConf);
  outputFormat.recordWriter = recordWriter;
  outputFormat.outputCommitter = outputCommitter;
  outputFormat.close();
  verify(recordWriter, times(1)).close(any(Reporter.class));
  verify(outputCommitter, times(1)).commitTask(any(TaskAttemptContext.class));
}
Example 5: testCloseWithoutTaskCommit
import org.apache.hadoop.mapred.OutputFormat; // import the required package/class
@Test
public void testCloseWithoutTaskCommit() throws Exception {
  OutputFormat<String, Long> dummyOutputFormat = mock(DummyOutputFormat.class);
  DummyOutputCommitter outputCommitter = mock(DummyOutputCommitter.class);
  when(outputCommitter.needsTaskCommit(any(TaskAttemptContext.class))).thenReturn(false);
  DummyRecordWriter recordWriter = mock(DummyRecordWriter.class);
  JobConf jobConf = mock(JobConf.class);
  HadoopOutputFormat<String, Long> outputFormat = new HadoopOutputFormat<>(dummyOutputFormat, jobConf);
  outputFormat.recordWriter = recordWriter;
  outputFormat.outputCommitter = outputCommitter;
  outputFormat.close();
  verify(recordWriter, times(1)).close(any(Reporter.class));
  verify(outputCommitter, times(0)).commitTask(any(TaskAttemptContext.class));
}
Example 6: testOpen
import org.apache.hadoop.mapred.OutputFormat; // import the required package/class
@Test
public void testOpen() throws Exception {
  OutputFormat<String, Long> dummyOutputFormat = mock(DummyOutputFormat.class);
  DummyOutputCommitter outputCommitter = mock(DummyOutputCommitter.class);
  JobConf jobConf = spy(new JobConf());
  when(jobConf.getOutputCommitter()).thenReturn(outputCommitter);
  HadoopOutputFormat<String, Long> outputFormat = new HadoopOutputFormat<>(dummyOutputFormat, jobConf);
  outputFormat.open(1, 1);
  verify(jobConf, times(2)).getOutputCommitter();
  verify(outputCommitter, times(1)).setupJob(any(JobContext.class));
  verify(dummyOutputFormat, times(1)).getRecordWriter(any(FileSystem.class), any(JobConf.class), anyString(), any(Progressable.class));
}
Example 7: get
import org.apache.hadoop.mapred.OutputFormat; // import the required package/class
@Override @Nonnull
public List<Processor> get(int count) {
  return processorList = range(0, count).mapToObj(i -> {
    try {
      String uuid = context.jetInstance().getCluster().getLocalMember().getUuid();
      TaskAttemptID taskAttemptID = new TaskAttemptID("jet-node-" + uuid, jobContext.getJobID().getId(),
          JOB_SETUP, i, 0);
      jobConf.set("mapred.task.id", taskAttemptID.toString());
      jobConf.setInt("mapred.task.partition", i);
      TaskAttemptContextImpl taskAttemptContext = new TaskAttemptContextImpl(jobConf, taskAttemptID);
      @SuppressWarnings("unchecked")
      OutputFormat<K, V> outFormat = jobConf.getOutputFormat();
      RecordWriter<K, V> recordWriter = outFormat.getRecordWriter(
          null, jobConf, uuid + '-' + valueOf(i), Reporter.NULL);
      return new WriteHdfsP<>(
          recordWriter, taskAttemptContext, outputCommitter, extractKeyFn, extractValueFn);
    } catch (IOException e) {
      throw new JetException(e);
    }
  }).collect(toList());
}
Example 8: HadoopV1OutputCollector
import org.apache.hadoop.mapred.OutputFormat; // import the required package/class
/**
 * @param jobConf Job configuration.
 * @param taskCtx Task context.
 * @param directWrite Direct write flag.
 * @param fileName File name.
 * @param attempt Task attempt ID.
 * @throws IOException If an I/O error occurs.
 */
HadoopV1OutputCollector(JobConf jobConf, HadoopTaskContext taskCtx, boolean directWrite,
    @Nullable String fileName, TaskAttemptID attempt) throws IOException {
  this.jobConf = jobConf;
  this.taskCtx = taskCtx;
  this.attempt = attempt;
  if (directWrite) {
    jobConf.set("mapreduce.task.attempt.id", attempt.toString());
    OutputFormat outFormat = jobConf.getOutputFormat();
    writer = outFormat.getRecordWriter(null, jobConf, fileName, Reporter.NULL);
  }
  else
    writer = null;
}
Example 9: addDependencyJars
import org.apache.hadoop.mapred.OutputFormat; // import the required package/class
/**
 * @see org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil#addDependencyJars(Job)
 */
public static void addDependencyJars(JobConf job) throws IOException {
  org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(
      job,
      org.apache.zookeeper.ZooKeeper.class,
      com.google.common.base.Function.class,
      com.google.protobuf.Message.class,
      job.getMapOutputKeyClass(),
      job.getMapOutputValueClass(),
      job.getOutputKeyClass(),
      job.getOutputValueClass(),
      job.getPartitionerClass(),
      job.getClass("mapred.input.format.class", TextInputFormat.class, InputFormat.class),
      job.getClass("mapred.output.format.class", TextOutputFormat.class, OutputFormat.class),
      job.getCombinerClass());
}
Example 10: setOutputFormatClass
import org.apache.hadoop.mapred.OutputFormat; // import the required package/class
/**
 * Set the underlying output format for LazyOutputFormat.
 * @param job the {@link JobConf} to modify
 * @param theClass the underlying class
 */
@SuppressWarnings("unchecked")
public static void setOutputFormatClass(JobConf job,
    Class<? extends OutputFormat> theClass) {
  job.setOutputFormat(LazyOutputFormat.class);
  job.setClass("mapreduce.output.lazyoutputformat.outputformat", theClass, OutputFormat.class);
}
Example 11: getBaseOutputFormat
import org.apache.hadoop.mapred.OutputFormat; // import the required package/class
@SuppressWarnings("unchecked")
private void getBaseOutputFormat(JobConf job) throws IOException {
  baseOut = ReflectionUtils.newInstance(
      job.getClass("mapreduce.output.lazyoutputformat.outputformat", null, OutputFormat.class),
      job);
  if (baseOut == null) {
    throw new IOException("Output format not set for LazyOutputFormat");
  }
}
Example 12: LazyRecordWriter
import org.apache.hadoop.mapred.OutputFormat; // import the required package/class
public LazyRecordWriter(JobConf job, OutputFormat of, String name,
    Progressable progress) throws IOException {
  this.of = of;
  this.job = job;
  this.name = name;
  this.progress = progress;
}
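The constructor above deliberately does nothing but store its arguments: the point of the pattern is that the real RecordWriter is created only when the first record arrives, so a task that never emits a record never creates an output file. A simplified sketch of how that deferred creation typically looks (paraphrased assumption, not the verbatim Hadoop source):

private RecordWriter<K, V> rawWriter; // created lazily on first write

public void write(K key, V value) throws IOException {
  if (rawWriter == null) {
    // First record: only now ask the wrapped format for a real writer,
    // which is what actually creates the output file.
    rawWriter = of.getRecordWriter(FileSystem.get(job), job, name, progress);
  }
  rawWriter.write(key, value);
}

public void close(Reporter reporter) throws IOException {
  if (rawWriter != null) {
    rawWriter.close(reporter);
  }
}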
Example 13: testWriteRecord
import org.apache.hadoop.mapred.OutputFormat; // import the required package/class
@Test
public void testWriteRecord() throws Exception {
  OutputFormat<String, Long> dummyOutputFormat = mock(DummyOutputFormat.class);
  DummyRecordWriter recordWriter = mock(DummyRecordWriter.class);
  JobConf jobConf = mock(JobConf.class);
  HadoopOutputFormat<String, Long> outputFormat = new HadoopOutputFormat<>(dummyOutputFormat, jobConf);
  outputFormat.recordWriter = recordWriter;
  outputFormat.writeRecord(new Tuple2<>("key", 1L));
  verify(recordWriter, times(1)).write(anyString(), anyLong());
}
Example 14: testFinalizeGlobal
import org.apache.hadoop.mapred.OutputFormat; // import the required package/class
@Test
public void testFinalizeGlobal() throws Exception {
  OutputFormat<String, Long> dummyOutputFormat = mock(DummyOutputFormat.class);
  DummyOutputCommitter outputCommitter = mock(DummyOutputCommitter.class);
  JobConf jobConf = Mockito.spy(new JobConf());
  when(jobConf.getOutputCommitter()).thenReturn(outputCommitter);
  HadoopOutputFormat<String, Long> outputFormat = new HadoopOutputFormat<>(dummyOutputFormat, jobConf);
  outputFormat.finalizeGlobal(1);
  verify(outputCommitter, times(1)).commitJob(any(JobContext.class));
}
Example 15: testFinalizeGlobal
import org.apache.hadoop.mapred.OutputFormat; // import the required package/class
@Test
public void testFinalizeGlobal() throws Exception {
  OutputFormat<String, Long> dummyOutputFormat = mock(DummyOutputFormat.class);
  DummyOutputCommitter outputCommitter = mock(DummyOutputCommitter.class);
  JobConf jobConf = spy(new JobConf());
  when(jobConf.getOutputCommitter()).thenReturn(outputCommitter);
  HadoopOutputFormat<String, Long> outputFormat = new HadoopOutputFormat<>(dummyOutputFormat, jobConf);
  outputFormat.finalizeGlobal(1);
  verify(outputCommitter, times(1)).commitJob(any(JobContext.class));
}