This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader. If you are wondering what SequenceFileRecordReader is for, how to use it, or what real-world code that uses it looks like, the curated examples below should help.
SequenceFileRecordReader belongs to the org.apache.hadoop.mapreduce.lib.input package. The code examples below are drawn from open-source projects and are sorted by popularity by default.
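Before the project examples, here is a minimal, self-contained sketch of the reader's typical lifecycle outside of a running job: construct it, initialize it against a FileSplit, then iterate with nextKeyValue(). The file path and the Text/IntWritable key and value types are hypothetical assumptions; they must match whatever the sequence file was actually written with.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

public class SequenceFileReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path path = new Path("/tmp/data.seq"); // hypothetical input file
    long length = FileSystem.get(conf).getFileStatus(path).getLen();

    // A split covering the whole file; host hints are unnecessary here.
    FileSplit split = new FileSplit(path, 0, length, null);

    // Key/value types are assumed; they must match the file's metadata.
    SequenceFileRecordReader<Text, IntWritable> reader =
        new SequenceFileRecordReader<>();
    reader.initialize(split, new TaskAttemptContextImpl(conf, new TaskAttemptID()));
    try {
      while (reader.nextKeyValue()) {
        System.out.println(reader.getCurrentKey() + "\t" + reader.getCurrentValue());
      }
    } finally {
      reader.close();
    }
  }
}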
Example 1: createChildReader

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; // import the required package/class

/**
 * Actually instantiate the user's chosen RecordReader implementation.
 */
@SuppressWarnings("unchecked")
private void createChildReader() throws IOException, InterruptedException {
  LOG.debug("ChildSplit operates on: " + split.getPath(index));
  Configuration conf = context.getConfiguration();

  // Determine the file format we're reading.
  Class rrClass;
  if (ExportJobBase.isSequenceFiles(conf, split.getPath(index))) {
    rrClass = SequenceFileRecordReader.class;
  } else {
    rrClass = LineRecordReader.class;
  }

  // Create the appropriate record reader.
  this.rr = (RecordReader<LongWritable, Object>)
      ReflectionUtils.newInstance(rrClass, conf);
}
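Note that ReflectionUtils.newInstance only constructs the child reader; it is not yet bound to any file. The caller still has to invoke initialize() with a single-file FileSplit carved out of the combined split, which is exactly what the initialize() method in Example 2 does.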
Example 2: initialize

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; // import the required package/class

@SuppressWarnings("unchecked")
@Override
public void initialize(InputSplit curSplit, TaskAttemptContext curContext)
    throws IOException, InterruptedException {
  this.split = (CombineFileSplit) curSplit;
  this.context = curContext;

  if (null == rr) {
    rr = ReflectionUtils.newInstance(SequenceFileRecordReader.class,
        context.getConfiguration());
  }

  // Bind the underlying reader to the index-th file of the combined split.
  FileSplit fileSplit = new FileSplit(this.split.getPath(index),
      this.split.getOffset(index), this.split.getLength(index),
      this.split.getLocations());
  this.rr.initialize(fileSplit, this.context);
}
Example 3: openForRead

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; // import the required package/class

private void openForRead(TaskAttemptContext taskAttemptContext)
    throws IOException, InterruptedException {
  reader = new SequenceFileRecordReader<K, V>();
  reader.initialize(new FileSplit(chunkFilePath, 0,
      DistCpUtils.getFileSize(chunkFilePath, configuration), null),
      taskAttemptContext);
}
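The FileSplit here spans the entire chunk file, from offset 0 through its full size, so a single pass over the reader visits every record in the chunk. Example 9 below shows the accessor through which such a reader is handed out.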
Example 4: createRecordReader

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; // import the required package/class

/**
 * Returns a reader for this split of the distributed cache file list.
 */
@Override
public RecordReader<LongWritable, BytesWritable> createRecordReader(
    InputSplit split, final TaskAttemptContext taskContext)
    throws IOException, InterruptedException {
  return new SequenceFileRecordReader<LongWritable, BytesWritable>();
}
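Returning an uninitialized reader is the standard InputFormat contract: the MapReduce framework calls initialize(split, context) on the returned reader before the first nextKeyValue(), so createRecordReader itself does not need to touch the split.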
Example 5: openForRead

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; // import the required package/class

private void openForRead(TaskAttemptContext taskAttemptContext)
    throws IOException, InterruptedException {
  reader = new SequenceFileRecordReader<K, V>();
  reader.initialize(new FileSplit(chunkFilePath, 0,
      DistCpUtils.getFileSize(chunkFilePath, chunkContext.getConfiguration()),
      null), taskAttemptContext);
}
Example 6: CombineSequenceFileRecordReader

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; // import the required package/class

@SuppressWarnings("unchecked")
public CombineSequenceFileRecordReader(CombineFileSplit split,
    TaskAttemptContext context, Integer index)
    throws IOException, InterruptedException {
  this.index = index;
  this.split = split; // already a CombineFileSplit; no cast needed
  this.context = context;
  this.rr = ReflectionUtils.newInstance(SequenceFileRecordReader.class,
      context.getConfiguration());
}
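A constructor with the signature (CombineFileSplit, TaskAttemptContext, Integer) is exactly what Hadoop's CombineFileRecordReader expects to find via reflection. The sketch below shows one plausible way to wire Example 6's class into an input format; the class name CombineSequenceFileInputFormat and its generic parameters are assumptions, not taken from the original project.

import java.io.IOException;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;

// Hypothetical wiring: the framework-provided CombineFileRecordReader wrapper
// constructs one CombineSequenceFileRecordReader per file in the combined
// split, passing (split, context, index) to the three-argument constructor.
public class CombineSequenceFileInputFormat<K, V> extends CombineFileInputFormat<K, V> {

  @SuppressWarnings({"unchecked", "rawtypes"})
  @Override
  public RecordReader<K, V> createRecordReader(InputSplit split, TaskAttemptContext context)
      throws IOException {
    return new CombineFileRecordReader<K, V>((CombineFileSplit) split, context,
        (Class) CombineSequenceFileRecordReader.class);
  }
}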
Example 7: createRecordReader

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; // import the required package/class

@Override
public RecordReader<NullWritable, V> createRecordReader(final InputSplit split,
    final TaskAttemptContext context) throws IOException, InterruptedException {
  final SequenceFileRecordReader<NullWritable, V> reader =
      new SequenceFileRecordReader<NullWritable, V>();
  reader.initialize(split, context);
  return reader;
}
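Unlike Example 4, this variant initializes the reader eagerly inside createRecordReader, which makes it immediately usable if the caller consumes it directly; when handed back to the MapReduce framework, the framework will still issue its own initialize() call before reading.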
Example 8: openForRead

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; // import the required package/class

private void openForRead(TaskAttemptContext taskAttemptContext)
    throws IOException, InterruptedException {
  reader = new SequenceFileRecordReader<>();
  reader.initialize(new FileSplit(chunkFilePath, 0,
      getFileSize(chunkFilePath, configuration), null), taskAttemptContext);
}
Example 9: getReader

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; // import the required package/class

/**
 * Getter for the record-reader, opened to the chunk-file.
 * @return Opened Sequence-file reader.
 */
public SequenceFileRecordReader<K, V> getReader() {
  assert reader != null : "Reader un-initialized!";
  return reader;
}
Example 10: prepareToRead

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; // import the required package/class

@SuppressWarnings("unchecked")
@Override
public void prepareToRead(RecordReader reader, PigSplit split) throws IOException {
  // Cache the framework-supplied reader; getNext() will pull records from it.
  this.reader = (SequenceFileRecordReader) reader;
}
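For context, a getNext() implementation that would pair with this prepareToRead() might look like the following sketch. Rendering the key and value via toString() is an assumption for illustration, not something taken from the original loader.

import java.io.IOException;
import org.apache.pig.data.Tuple;
import org.apache.pig.data.TupleFactory;

@Override
public Tuple getNext() throws IOException {
  try {
    if (!reader.nextKeyValue()) {
      return null; // end of this split
    }
    // Two-field tuple: (key, value), both rendered as chararrays.
    Tuple t = TupleFactory.getInstance().newTuple(2);
    t.set(0, reader.getCurrentKey().toString());
    t.set(1, reader.getCurrentValue().toString());
    return t;
  } catch (InterruptedException e) {
    throw new IOException("Interrupted while reading sequence file", e);
  }
}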
Example 11: VectorSequenceFileRecordReader

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; // import the required package/class

public VectorSequenceFileRecordReader() {
  // recordReader = new SequenceFileRecordReader<LongWritable, ArrayListWritable>();
  recordReader =
      new SequenceFileRecordReader<LongWritable, NumberListWritable<Double>>();
  pair = new PairOfByteBuffers();
}
Example 12: getRecordReader

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; // import the required package/class

@Override
public RecordReader<RecordKey, Text> getRecordReader(InputSplit split,
    TaskAttemptContext context, Integer index) {
  return new SequenceFileRecordReader<>();
}
Example 13: SequenceFileAsJSONRecordBatchReader

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; // import the required package/class

public SequenceFileAsJSONRecordBatchReader() throws IOException {
  sequenceFileRecordReader =
      new SequenceFileRecordReader<WritableComparable<?>, Writable>();
}
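Parameterizing the inner reader with WritableComparable<?> keys and Writable values keeps it agnostic to the concrete record classes in the file, which suits a batch reader whose job is to re-render arbitrary sequence-file records as JSON.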