当前位置: 首页>>代码示例>>Java>>正文


Java SequenceFileRecordReader类代码示例

本文整理汇总了Java中org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader的典型用法代码示例。如果您正苦于以下问题:Java SequenceFileRecordReader类的具体用法?Java SequenceFileRecordReader怎么用?Java SequenceFileRecordReader使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。


SequenceFileRecordReader类属于org.apache.hadoop.mapreduce.lib.input包,在下文中一共展示了SequenceFileRecordReader类的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: createChildReader

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; //导入依赖的package包/类
/**
 * Instantiate the user's chosen RecordReader implementation.
 */
@SuppressWarnings("unchecked")
private void createChildReader() throws IOException, InterruptedException {
  LOG.debug("ChildSplit operates on: " + split.getPath(index));

  Configuration conf = context.getConfiguration();

  // Pick the reader class from the file format of this split's path:
  // sequence files use SequenceFileRecordReader, everything else is
  // treated as line-oriented text.
  Class readerClass = ExportJobBase.isSequenceFiles(conf, split.getPath(index))
      ? SequenceFileRecordReader.class
      : LineRecordReader.class;

  // Instantiate reflectively so either reader class can be used here.
  this.rr = (RecordReader<LongWritable, Object>)
      ReflectionUtils.newInstance(readerClass, conf);
}
 
开发者ID:aliyun,项目名称:aliyun-maxcompute-data-collectors,代码行数:22,代码来源:CombineShimRecordReader.java

示例2: initialize

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; //导入依赖的package包/类
/**
 * Prepares this reader to consume the file at {@code index} inside the
 * supplied combine split, lazily creating the delegate reader if needed.
 */
@SuppressWarnings("unchecked")
@Override
public void initialize(InputSplit curSplit, TaskAttemptContext curContext) throws IOException, InterruptedException {
	this.split = (CombineFileSplit) curSplit;
	this.context = curContext;

	// Build the delegate lazily; a prior call may already have created it.
	if (rr == null) {
		rr = ReflectionUtils.newInstance(SequenceFileRecordReader.class, context.getConfiguration());
	}

	// Carve the single file this reader owns out of the combined split
	// and hand it to the delegate reader.
	this.rr.initialize(
			new FileSplit(this.split.getPath(index), this.split.getOffset(index),
					this.split.getLength(index), this.split.getLocations()),
			this.context);
}
 
开发者ID:openimaj,项目名称:openimaj,代码行数:17,代码来源:CombineSequenceFileRecordReader.java

示例3: openForRead

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; //导入依赖的package包/类
/**
 * Opens the chunk file for reading by initializing a sequence-file
 * reader over its entire length.
 */
private void openForRead(TaskAttemptContext taskAttemptContext)
        throws IOException, InterruptedException {
  long chunkLength = DistCpUtils.getFileSize(chunkFilePath, configuration);
  reader = new SequenceFileRecordReader<K, V>();
  reader.initialize(new FileSplit(chunkFilePath, 0, chunkLength, null),
          taskAttemptContext);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:8,代码来源:DynamicInputChunk.java

示例4: createRecordReader

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; //导入依赖的package包/类
/**
 * Returns a reader for this split of the distributed cache file list.
 * The returned reader is uninitialized; the framework initializes it.
 */
@Override
public RecordReader<LongWritable, BytesWritable> createRecordReader(
    InputSplit split, final TaskAttemptContext taskContext)
    throws IOException, InterruptedException {
  final SequenceFileRecordReader<LongWritable, BytesWritable> reader =
      new SequenceFileRecordReader<LongWritable, BytesWritable>();
  return reader;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:10,代码来源:GenerateDistCacheData.java

示例5: openForRead

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; //导入依赖的package包/类
/**
 * Opens the chunk file for reading: a fresh sequence-file reader is
 * initialized over the full extent of the chunk.
 */
private void openForRead(TaskAttemptContext taskAttemptContext)
        throws IOException, InterruptedException {
  long size = DistCpUtils.getFileSize(chunkFilePath, chunkContext.getConfiguration());
  reader = new SequenceFileRecordReader<K, V>();
  reader.initialize(new FileSplit(chunkFilePath, 0, size, null), taskAttemptContext);
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:8,代码来源:DynamicInputChunk.java

示例6: CombineSequenceFileRecordReader

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; //导入依赖的package包/类
/**
 * Creates a reader for the file at position {@code index} inside the
 * combined split. The delegate reader is constructed immediately, but
 * callers must still invoke {@code initialize} before reading records.
 *
 * @param split   the combined split containing the target file
 * @param context task attempt context supplying the configuration
 * @param index   position of the file within the combined split
 */
@SuppressWarnings("unchecked")
public CombineSequenceFileRecordReader(CombineFileSplit split, TaskAttemptContext context, Integer index) throws IOException, InterruptedException {
	this.index = index;
	// The parameter is already declared as CombineFileSplit, so the
	// original (CombineFileSplit) cast was redundant and is removed.
	this.split = split;
	this.context = context;

	this.rr = ReflectionUtils.newInstance(SequenceFileRecordReader.class, context.getConfiguration());
}
 
开发者ID:openimaj,项目名称:openimaj,代码行数:9,代码来源:CombineSequenceFileRecordReader.java

示例7: createRecordReader

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; //导入依赖的package包/类
/**
 * Creates and eagerly initializes a sequence-file record reader for the
 * given split, ready to produce values immediately.
 */
@Override
public RecordReader<NullWritable, V> createRecordReader(final InputSplit split, final TaskAttemptContext context)
        throws IOException, InterruptedException {
    SequenceFileRecordReader<NullWritable, V> recordReader =
            new SequenceFileRecordReader<NullWritable, V>();
    recordReader.initialize(split, context);
    return recordReader;
}
 
开发者ID:Conductor,项目名称:kangaroo,代码行数:8,代码来源:WritableValueInputFormat.java

示例8: openForRead

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; //导入依赖的package包/类
/**
 * Opens the chunk file for reading over its entire length.
 */
private void openForRead(TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
  final long chunkLength = getFileSize(chunkFilePath, configuration);
  reader = new SequenceFileRecordReader<>();
  reader.initialize(new FileSplit(chunkFilePath, 0, chunkLength, null), taskAttemptContext);
}
 
开发者ID:HotelsDotCom,项目名称:circus-train,代码行数:6,代码来源:DynamicInputChunk.java

示例9: getReader

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; //导入依赖的package包/类
/**
 * Getter for the record-reader, opened to the chunk-file.
 *
 * @return Opened Sequence-file reader.
 * @throws IllegalStateException if the reader has not been opened yet
 */
public SequenceFileRecordReader<K,V> getReader() {
  // Explicit check instead of `assert`: assertions are disabled by
  // default at runtime, which would let a null reader escape to callers.
  if (reader == null) {
    throw new IllegalStateException("Reader un-initialized!");
  }
  return reader;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:9,代码来源:DynamicInputChunk.java

示例10: prepareToRead

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; //导入依赖的package包/类
/**
 * Captures the record reader handed over by Pig's input layer; it is
 * expected to be a SequenceFileRecordReader for this loader to work.
 */
@SuppressWarnings("unchecked")
@Override
public void prepareToRead(RecordReader reader, PigSplit split)
    throws IOException {
  SequenceFileRecordReader sequenceReader = (SequenceFileRecordReader) reader;
  this.reader = sequenceReader;
}
 
开发者ID:Hanmourang,项目名称:hiped2,代码行数:7,代码来源:SequenceFileStockLoader.java

示例11: VectorSequenceFileRecordReader

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; //导入依赖的package包/类
/**
 * Builds the delegate sequence-file reader and the reusable output pair.
 * (A stale commented-out alternative reader construction was removed.)
 */
public VectorSequenceFileRecordReader() {
		recordReader = new SequenceFileRecordReader<LongWritable, NumberListWritable<Double>>();
		pair = new PairOfByteBuffers();
	}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:6,代码来源:VectorSequenceFileRecordReader.java

示例12: getRecordReader

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; //导入依赖的package包/类
/**
 * Supplies an uninitialized sequence-file reader for the given split;
 * the framework initializes it before records are consumed.
 */
@Override
public RecordReader<RecordKey, Text> getRecordReader(InputSplit split, TaskAttemptContext context, Integer index) {
  SequenceFileRecordReader<RecordKey, Text> recordReader = new SequenceFileRecordReader<>();
  return recordReader;
}
 
开发者ID:ggear,项目名称:cloudera-framework,代码行数:5,代码来源:RecordSequenceInputFormatCsv.java

示例13: SequenceFileAsJSONRecordBatchReader

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; //导入依赖的package包/类
/**
 * Creates the wrapped sequence-file reader used to batch records as JSON.
 *
 * @throws IOException declared for API compatibility; not thrown here
 */
public SequenceFileAsJSONRecordBatchReader()
		throws IOException {
	sequenceFileRecordReader = new SequenceFileRecordReader<WritableComparable<?>, Writable>();
}
 
开发者ID:therelaxist,项目名称:spring-usc,代码行数:6,代码来源:SequenceFileAsJSONRecordBatchReader.java

示例14: prepareToRead

import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader; //导入依赖的package包/类
/**
 * Stores the reader supplied by Pig's input layer; it must be a
 * SequenceFileRecordReader, which the unchecked cast enforces at runtime.
 */
@SuppressWarnings("unchecked")
@Override
public void prepareToRead(RecordReader reader, PigSplit split)
      throws IOException {
  final SequenceFileRecordReader sfReader = (SequenceFileRecordReader) reader;
  this.reader = sfReader;
}
 
开发者ID:sigmoidanalytics,项目名称:spork-streaming,代码行数:7,代码来源:SequenceFileLoader.java


注:本文中的org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。