本文整理汇总了Java中org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader类的典型用法代码示例。如果您正苦于以下问题:Java CombineFileRecordReader类的具体用法?Java CombineFileRecordReader怎么用?Java CombineFileRecordReader使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
CombineFileRecordReader类属于org.apache.hadoop.mapreduce.lib.input包,在下文中一共展示了CombineFileRecordReader类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: createRecordReader
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader; //导入依赖的package包/类
/**
 * Creates a combined record reader that delegates each file of the split
 * to a CombineFileLineRecordReader.
 *
 * @param split   the input split; expected to be a CombineFileSplit
 * @param context the current task attempt context
 * @return a CombineFileRecordReader over all files in the split
 */
@Override
public RecordReader<Text, Text> createRecordReader(InputSplit split,
        TaskAttemptContext context) {
    try {
        return new CombineFileRecordReader<Text, Text>(
                (CombineFileSplit) split, context,
                CombineFileLineRecordReader.class);
    } catch (IOException e) {
        // Fail fast with the cause attached instead of printing the stack
        // trace and returning null, which would surface later as an obscure
        // NullPointerException inside the MapReduce framework.
        throw new RuntimeException("Failed to create record reader for split " + split, e);
    }
}
示例2: createRecordReader
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader; //导入依赖的package包/类
/**
 * Builds a record reader that hands each file of the combined split to a
 * DelegatingAvroRecordReader.
 *
 * @param split   the input split; expected to be a CombineFileSplit
 * @param context the current task attempt context
 * @return a CombineFileRecordReader wrapping DelegatingAvroRecordReader
 * @throws IOException if the underlying reader cannot be created
 */
@Override
@SuppressWarnings({ "unchecked", "rawtypes" })
public RecordReader<AvroKey<T>, NullWritable> createRecordReader(InputSplit split,
        TaskAttemptContext context) throws IOException {
    // Raw construction is unavoidable here; the delegate class fixes the
    // actual key/value types at runtime.
    CombineFileSplit combineSplit = (CombineFileSplit) split;
    return new CombineFileRecordReader(combineSplit, context, DelegatingAvroRecordReader.class);
}
示例3: createRecordReader
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader; //导入依赖的package包/类
/**
* Return a CombineFileRecordReader
*/
/**
 * Returns a CombineFileRecordReader whose per-file reader is
 * BinaryFileRecordReader.
 *
 * @param split   the input split; must be a CombineFileSplit
 * @param context the current task attempt context
 * @return the combining record reader
 * @throws IllegalArgumentException if the split is not a CombineFileSplit
 * @throws IOException if the underlying reader cannot be created
 */
@Override
public CombineFileRecordReader<Text, BytesWritable> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException {
    if (split instanceof CombineFileSplit) {
        CombineFileSplit combineSplit = (CombineFileSplit) split;
        return new CombineFileRecordReader<Text, BytesWritable>(combineSplit, context, BinaryFileRecordReader.class);
    }
    throw new IllegalArgumentException("Input must be a Directory!");
}
示例4: createRecordReader
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader; //导入依赖的package包/类
/**
 * Creates a combined Avro record reader. If no reader schema was configured
 * via AvroJob, the writer schema embedded in each file is used and a warning
 * is logged.
 *
 * @param inputSplit the input split; expected to be a CombineFileSplit
 * @param context    the current task attempt context
 * @return a CombineFileRecordReader delegating to CombinedAvroKeyRecordReader
 * @throws IOException if the underlying reader cannot be created
 */
@SuppressWarnings("unchecked")
@Override
public RecordReader<AvroKey<T>, NullWritable> createRecordReader(InputSplit inputSplit,
        TaskAttemptContext context) throws IOException
{
    if (null == AvroJob.getInputKeySchema(context.getConfiguration())) {
        LOG.warn("Reader schema was not set. Use AvroJob.setInputKeySchema() if desired.");
        LOG.info("Using a reader schema equal to the writer schema.");
    }
    // The unchecked cast is required because the delegate class cannot carry
    // the generic parameter T.
    Object readerClass = CombinedAvroKeyRecordReader.class;
    return new CombineFileRecordReader<AvroKey<T>, NullWritable>(
            (CombineFileSplit) inputSplit, context,
            (Class<? extends RecordReader<AvroKey<T>, NullWritable>>) readerClass);
}
示例5: createRecordReader
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader; //导入依赖的package包/类
/**
 * Creates the combining reader; each underlying file is read by an
 * MDSCombineSpreadReader.
 *
 * @param split   the input split; expected to be a CombineFileSplit
 * @param context the current task attempt context
 * @return the combining record reader
 * @throws IOException if the underlying reader cannot be created
 */
@Override
public RecordReader<NullWritable, Spread> createRecordReader(final InputSplit split, final TaskAttemptContext context) throws IOException {
    final CombineFileSplit combineSplit = (CombineFileSplit) split;
    return new CombineFileRecordReader<NullWritable, Spread>(combineSplit, context, MDSCombineSpreadReader.class);
}
示例6: createRecordReader
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader; //导入依赖的package包/类
/**
 * Creates a record reader that delegates each file in the combined split
 * to a CombineFileLineRecordReader keyed by WordOffset.
 *
 * @param split   the input split; expected to be a CombineFileSplit
 * @param context the current task attempt context
 * @return the combining record reader
 * @throws IOException if the underlying reader cannot be created
 */
@Override // added: this implements InputFormat#createRecordReader
public RecordReader<WordOffset, Text> createRecordReader(InputSplit split,
        TaskAttemptContext context) throws IOException {
    return new CombineFileRecordReader<WordOffset, Text>(
            (CombineFileSplit) split, context, CombineFileLineRecordReader.class);
}
示例7: createRecordReader
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader; //导入依赖的package包/类
/**
 * Combined-split reader for Avro generic records; per-file reads are
 * delegated to AvroKeyCombineFileRecordReader.
 *
 * @param split the input split; expected to be a CombineFileSplit
 * @param cx    the current task attempt context
 * @return the combining record reader
 * @throws IOException if the underlying reader cannot be created
 */
@Override
public RecordReader<AvroKey<GenericRecord>, NullWritable> createRecordReader(InputSplit split, TaskAttemptContext cx)
        throws IOException {
    final CombineFileSplit combineSplit = (CombineFileSplit) split;
    return new CombineFileRecordReader<AvroKey<GenericRecord>, NullWritable>(
            combineSplit, cx, AvroKeyCombineFileRecordReader.class);
}
示例8: createRecordReader
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader; //导入依赖的package包/类
/**
 * Creates the record reader for a combined split, reading each file via a
 * FileLineWritableRecordReader.
 *
 * @param split   the input split; expected to be a CombineFileSplit
 * @param context the current task attempt context
 * @return the combining record reader
 * @throws IOException if the underlying reader cannot be created
 */
@Override
public RecordReader<FileLineWritable, Text> createRecordReader(
        InputSplit split, TaskAttemptContext context) throws IOException {
    final CombineFileSplit fileSplit = (CombineFileSplit) split;
    return new CombineFileRecordReader<FileLineWritable, Text>(
            fileSplit, context, FileLineWritableRecordReader.class);
}
示例9: createRecordReader
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader; //导入依赖的package包/类
/**
 * Returns a CombineFileRecordReader; the per-file RecordReader class is
 * supplied to the CombineFileRecordReader constructor.
 *
 * NOTE(review): CombineFileRecordReader.class itself is passed as the
 * per-file reader class. CombineFileRecordReader does not provide the
 * (CombineFileSplit, TaskAttemptContext, Integer) constructor that
 * CombineFileRecordReader requires of its delegate, so this presumably
 * fails at reader-construction time — confirm the intended delegate class
 * (e.g. a per-file line reader) and substitute it here. Also note the raw
 * (ungenerified) constructor call.
 */
@Override
public RecordReader<Text, Text> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException {
CombineFileRecordReader<Text, Text> recordReader = new CombineFileRecordReader((CombineFileSplit) split, context, CombineFileRecordReader.class);
return recordReader;
}
示例10: createRecordReader
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader; //导入依赖的package包/类
/**
 * Builds a CombineFileRecordReader that uses RecordReaderText for each
 * file in the combined split.
 *
 * @param split   the input split; expected to be a CombineFileSplit
 * @param context the current task attempt context
 * @return the combining record reader
 * @throws IOException if the underlying reader cannot be created
 */
@Override
public RecordReader<RecordKey, Text> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException {
    final CombineFileSplit combineSplit = (CombineFileSplit) split;
    return new CombineFileRecordReader<RecordKey, Text>(combineSplit, context, RecordReaderText.class);
}
示例11: createRecordReader
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader; //导入依赖的package包/类
/**
 * Creates a combined sequence-file reader; each file is handled by a
 * CombineSequenceFileRecordReader. The raw-type construction is suppressed
 * because the delegate class fixes the K/V types only at runtime.
 *
 * @param split   the input split; expected to be a CombineFileSplit
 * @param context the current task attempt context
 * @return the combining record reader
 * @throws IOException if the underlying reader cannot be created
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
@Override
public RecordReader<K, V> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException {
    final CombineFileSplit combineSplit = (CombineFileSplit) split;
    return new CombineFileRecordReader(combineSplit, context, CombineSequenceFileRecordReader.class);
}
示例12: createRecordReader
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader; //导入依赖的package包/类
/**
 * Record-reader factory: each file of the combined split is handled by an
 * AvroKeyCombineFileRecordReader.
 *
 * @param split the input split; expected to be a CombineFileSplit
 * @param cx    the current task attempt context
 * @return the combining record reader
 * @throws IOException if the underlying reader cannot be created
 */
@Override
public RecordReader<AvroKey<GenericRecord>, NullWritable> createRecordReader(InputSplit split, TaskAttemptContext cx)
        throws IOException {
    return new CombineFileRecordReader<AvroKey<GenericRecord>, NullWritable>(
            (CombineFileSplit) split, cx, AvroKeyCombineFileRecordReader.class);
}
示例13: createRecordReader
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader; //导入依赖的package包/类
/**
 * Creates a reader over a combined split of sequence files, wrapping each
 * file in a SequenceFileRecordReaderWrapper.
 *
 * @param split   the input split; expected to be a CombineFileSplit
 * @param context the current task attempt context
 * @return the combining record reader
 * @throws IOException if the underlying reader cannot be created
 */
@Override
public RecordReader<Writable, Text> createRecordReader(InputSplit split, TaskAttemptContext context)
        throws IOException {
    final CombineFileSplit combineSplit = (CombineFileSplit) split;
    return new CombineFileRecordReader<Writable, Text>(combineSplit, context, SequenceFileRecordReaderWrapper.class);
}
示例14: createRecordReader
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader; //导入依赖的package包/类
/**
 * Creates a record reader delegating each file of the combined split to a
 * CFRecordReader.
 *
 * @param split   the input split; expected to be a CombineFileSplit
 * @param context the current task attempt context
 * @return the combining record reader
 * @throws IOException if the underlying reader cannot be created
 */
@Override // added: this implements InputFormat#createRecordReader
public RecordReader<FileLineWritable, Text> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException {
    return new CombineFileRecordReader<FileLineWritable, Text>((CombineFileSplit) split, context, CFRecordReader.class);
}
示例15: createRecordReader
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader; //导入依赖的package包/类
/**
 * Creates a combined-split reader; each file is read by a MyLineRecordReader.
 *
 * @param inputSplit         the input split; expected to be a CombineFileSplit
 * @param taskAttemptContext the current task attempt context
 * @return the combining record reader
 * @throws IOException if the underlying reader cannot be created
 */
@Override
public RecordReader<LongWritable, Text> createRecordReader(org.apache.hadoop.mapreduce.InputSplit inputSplit,
        org.apache.hadoop.mapreduce.TaskAttemptContext taskAttemptContext) throws IOException {
    final CombineFileSplit combineSplit = (CombineFileSplit) inputSplit;
    return new CombineFileRecordReader<LongWritable, Text>(combineSplit, taskAttemptContext, MyLineRecordReader.class);
}