

Java FileSplit.getLength Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapreduce.lib.input.FileSplit.getLength. If you are wondering how FileSplit.getLength is used in practice, or are looking for concrete examples of it in real code, the curated method examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.mapreduce.lib.input.FileSplit.


The following shows 15 code examples of the FileSplit.getLength method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
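Before the collected examples, here is a minimal, self-contained sketch of the pattern that nearly all of them share: a record reader combines FileSplit.getStart() and FileSplit.getLength() to compute the byte range of its split, then seeks the input stream to the start of that range. The class name SplitRangeExample and the progress() helper are hypothetical, added only for illustration; only the FileSplit, Path, FileSystem, and FSDataInputStream calls are standard Hadoop API.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

public class SplitRangeExample {

    private long start;
    private long end;
    private FSDataInputStream in;

    /** Compute the byte range [start, end) of the split and position the stream at its beginning. */
    public void initialize(InputSplit genericSplit, TaskAttemptContext context) throws IOException {
        FileSplit split = (FileSplit) genericSplit;
        Configuration conf = context.getConfiguration();

        // getStart() is the byte offset of the split within the file;
        // getLength() is the number of bytes assigned to this split.
        start = split.getStart();
        end = start + split.getLength();

        Path file = split.getPath();
        FileSystem fs = file.getFileSystem(conf);
        in = fs.open(file);
        in.seek(start); // skip to the first byte owned by this split
    }

    /** Rough progress estimate: fraction of the split's bytes consumed so far. */
    public float progress(long currentPos) {
        if (end == start) {
            return 0.0f;
        }
        return Math.min(1.0f, (currentPos - start) / (float) (end - start));
    }
}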

Example 1: XMLRecordReader

import org.apache.hadoop.mapreduce.lib.input.FileSplit; // import the package/class this method depends on
/**
 * Initialize the input source and related parameters; this could also be done in the initialize() method
 * @param inputSplit
 * @param context
 * @throws IOException
 */
public XMLRecordReader(InputSplit inputSplit, Configuration context) throws IOException {
    /**
     * Get the start and end tags passed in
     */
    startTag = context.get(START_TAG_KEY).getBytes("UTF-8");
    endTag = context.get(END_TAG_KEY).getBytes("UTF-8");
    FileSplit fileSplit = (FileSplit) inputSplit;
    /**
     * Get the start and end positions of the split
     */
    start = fileSplit.getStart();
    end = start + fileSplit.getLength();
    Path file = fileSplit.getPath();
    FileSystem fs = file.getFileSystem(context);
    /**
     * Open an HDFS input stream for this split
     */
    fsin = fs.open(fileSplit.getPath());
    /**
     * Seek to the start position of the split
     */
    fsin.seek(start);
}
 
Developer ID: lzmhhh123, Project: Wikipedia-Index, Lines of code: 30, Source file: XmlInputFormat.java

Example 2: checkSplits

import org.apache.hadoop.mapreduce.lib.input.FileSplit; // import the package/class this method depends on
private void checkSplits(Path listFile, List<InputSplit> splits) throws IOException {
  long lastEnd = 0;

  //Verify if each split's start is matching with the previous end and
  //we are not missing anything
  for (InputSplit split : splits) {
    FileSplit fileSplit = (FileSplit) split;
    long start = fileSplit.getStart();
    Assert.assertEquals(lastEnd, start);
    lastEnd = start + fileSplit.getLength();
  }

  //Verify there is nothing more to read from the input file
  SequenceFile.Reader reader
          = new SequenceFile.Reader(cluster.getFileSystem().getConf(),
                  SequenceFile.Reader.file(listFile));

  try {
    reader.seek(lastEnd);
    CopyListingFileStatus srcFileStatus = new CopyListingFileStatus();
    Text srcRelPath = new Text();
    Assert.assertFalse(reader.next(srcRelPath, srcFileStatus));
  } finally {
    IOUtils.closeStream(reader);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 27, Source file: TestUniformSizeInputFormat.java

Example 3: SingleFastqRecordReader

import org.apache.hadoop.mapreduce.lib.input.FileSplit; // import the package/class this method depends on
public SingleFastqRecordReader(Configuration conf, FileSplit split) throws IOException {
    file = split.getPath();
    start = split.getStart();
    end = start + split.getLength();

    FileSystem fs = file.getFileSystem(conf);
    FSDataInputStream fileIn = fs.open(file);

    CompressionCodecFactory codecFactory = new CompressionCodecFactory(conf);
    CompressionCodec codec        = codecFactory.getCodec(file);

    if (codec == null) { // no codec.  Uncompressed file.
        positionAtFirstRecord(fileIn);
        inputStream = fileIn;
    } else {
        // compressed file
        if (start != 0) {
            throw new RuntimeException("Start position for compressed file is not 0! (found " + start + ")");
        }

        inputStream = codec.createInputStream(fileIn);
        end = Long.MAX_VALUE; // read until the end of the file
    }

    lineReader = new LineReader(inputStream);
}
 
Developer ID: PAA-NCIC, Project: SparkSeq, Lines of code: 27, Source file: SingleFastqInputFormat.java

Example 4: initialize

import org.apache.hadoop.mapreduce.lib.input.FileSplit; // import the package/class this method depends on
@Override
public void initialize(final InputSplit genericSplit, final TaskAttemptContext context) throws IOException {
    final FileSplit split = (FileSplit) genericSplit;
    final Configuration configuration = context.getConfiguration();
    if (configuration.get(Constants.GREMLIN_HADOOP_GRAPH_FILTER, null) != null)
        this.graphFilter = VertexProgramHelper.deserialize(ConfUtil.makeApacheConfiguration(configuration), Constants.GREMLIN_HADOOP_GRAPH_FILTER);
    KryoShimServiceLoader.applyConfiguration(ConfUtil.makeApacheConfiguration(configuration));
    this.gryoReader = HadoopPools.getGryoPool().takeReader();
    long start = split.getStart();
    final Path file = split.getPath();
    if (null != new CompressionCodecFactory(configuration).getCodec(file)) {
        throw new IllegalStateException("Compression is not supported for the (binary) Gryo format");
    }
    // open the file and seek to the start of the split
    this.inputStream = file.getFileSystem(configuration).open(split.getPath());
    this.splitLength = split.getLength();
    if (this.splitLength > 0) this.splitLength -= (seekToHeader(this.inputStream, start) - start);
}
 
Developer ID: PKUSilvester, Project: LiteGraph, Lines of code: 19, Source file: GryoRecordReader.java

Example 5: initialize

import org.apache.hadoop.mapreduce.lib.input.FileSplit; // import the package/class this method depends on
@Override
public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
    FileSplit fileSplit = (FileSplit) split;
    FSDataInputStream stream = FileSystem.get(context.getConfiguration()).open(fileSplit.getPath());
    if (fileSplit.getStart() != 0) {
        stream.seek(fileSplit.getStart());
    }

    remaining = fileSplit.getLength();

    JsonFactory factory = new JsonFactory().disable(JsonFactory.Feature.CANONICALIZE_FIELD_NAMES);
    parser = factory.createParser(new BufferedInputStream(stream));
    parser.setCodec(new ObjectMapper());
    parser.nextToken();
    if (parser.currentToken() == JsonToken.START_ARRAY) {
        parser.nextToken();
    }
}
 
Developer ID: vespa-engine, Project: vespa, Lines of code: 19, Source file: VespaSimpleJsonInputFormat.java

Example 6: initialize

import org.apache.hadoop.mapreduce.lib.input.FileSplit; // import the package/class this method depends on
@Override
public void initialize( final InputSplit inputSplit, final TaskAttemptContext context ) throws IOException, InterruptedException {
  FileSplit fileSplit = (FileSplit)inputSplit;
  Configuration config = context.getConfiguration();
  Path path = fileSplit.getPath();
  FileSystem fs = path.getFileSystem( config );
  long fileLength = fs.getLength( path );
  long start = fileSplit.getStart();
  long length = fileSplit.getLength();
  InputStream in = fs.open( path );
}
 
Developer ID: yahoojapan, Project: multiple-dimension-spread, Lines of code: 12, Source file: MDSSpreadReader.java

Example 7: initialize

import org.apache.hadoop.mapreduce.lib.input.FileSplit; // import the package/class this method depends on
@Override
public void initialize(InputSplit genericSplit, TaskAttemptContext context)
    throws IOException, InterruptedException {
  FileSplit split = (FileSplit) genericSplit;
  Configuration conf = context.getConfiguration();
  SeekableInput in = new FsInput(split.getPath(), conf);
  DatumReader<T> datumReader = new GenericDatumReader<T>();
  this.reader = DataFileReader.openReader(in, datumReader);
  reader.sync(split.getStart());                    // sync to start
  this.start = reader.tell();
  this.end = split.getStart() + split.getLength();
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines of code: 13, Source file: AvroRecordReader.java

Example 8: initialize

import org.apache.hadoop.mapreduce.lib.input.FileSplit; // import the package/class this method depends on
public void initialize(InputSplit genericSplit, TaskAttemptContext context)  {
    try {
        FileSplit split = (FileSplit)genericSplit;
        Configuration job = context.getConfiguration();
        this.maxLineLength = job.getInt("mapreduce.input.linerecordreader.line.maxlength", 2147483647);
        this.start = split.getStart();
        this.end = this.start + split.getLength();
        Path file = split.getPath();
        FileSystem fs = file.getFileSystem(job);
        this.fileIn = fs.open(file);
        CompressionCodec codec = (new CompressionCodecFactory(job)).getCodec(file);
        if(null != codec) {
            this.isCompressedInput = true;
            this.decompressor = CodecPool.getDecompressor(codec);
            if(codec instanceof SplittableCompressionCodec) {
                SplitCompressionInputStream cIn = ((SplittableCompressionCodec)codec).createInputStream(this.fileIn, this.decompressor, this.start, this.end, SplittableCompressionCodec.READ_MODE.BYBLOCK);
                this.in = new CompressedSplitLineReader(cIn, job, this.recordDelimiterBytes);
                this.start = cIn.getAdjustedStart();
                this.end = cIn.getAdjustedEnd();
                this.filePosition = cIn;
            } else {
                this.in = new SplitLineReader(codec.createInputStream(this.fileIn, this.decompressor), job, this.recordDelimiterBytes);
                this.filePosition = this.fileIn;
            }
        } else {
            this.fileIn.seek(this.start);
            this.in = new SplitLineReader(this.fileIn, job, this.recordDelimiterBytes);
            this.filePosition = this.fileIn;
        }

        if(this.start != 0L) {
            this.start += (long)this.in.readLine(new Text(), 0, this.maxBytesToConsume(this.start));
        }

        this.pos = this.start;
    }catch(Exception ex){
        LOG.warn("Exception occurred during initialization {}", ex, ex);
    }

}
 
Developer ID: Comcast, Project: spark-util, Lines of code: 41, Source file: ErrorHandlingLineRecordReader.java

Example 9: initialize

import org.apache.hadoop.mapreduce.lib.input.FileSplit; // import the package/class this method depends on
@Override
public void initialize(InputSplit is, TaskAttemptContext tac) throws IOException, InterruptedException {
	FileSplit fileSplit = (FileSplit) is;
	startTag = tac.getConfiguration().get(START_TAG_KEY).getBytes("utf-8");
	endTag = tac.getConfiguration().get(END_TAG_KEY).getBytes("utf-8");

	start = fileSplit.getStart();
	end = start + fileSplit.getLength();
	Path file = fileSplit.getPath();

	FileSystem fs = file.getFileSystem(tac.getConfiguration());
	fsin = fs.open(fileSplit.getPath());
	fsin.seek(start);

}
 
Developer ID: gatripat, Project: InsAdjustment, Lines of code: 16, Source file: XmlInputFormat.java

Example 10: nextKeyValue

import org.apache.hadoop.mapreduce.lib.input.FileSplit; // import the package/class this method depends on
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
    if (iterator.hasNext()) {
        FileSplit split = iterator.next();
        setFile(split.getPath());
        String uri = makeURIFromPath(file);
        if (setKey(uri, 0, 0, true)) {
            return true;
        } 
        value = new StreamLocator(file, CompressionCodec.NONE);
        bytesRead += split.getLength();
        return true;
    }
    return false;
}
 
Developer ID: marklogic, Project: marklogic-contentpump, Lines of code: 16, Source file: StreamingDocumentReader.java

Example 11: addSplit

import org.apache.hadoop.mapreduce.lib.input.FileSplit; // import the package/class this method depends on
public void addSplit(FileSplit split) 
throws IOException, InterruptedException {
    splits.add(split);
    length += split.getLength();
    for (String loc : split.getLocations()) {
        if (!locations.contains(loc)) {
            locations.add(loc);
        }
    }
}
 
Developer ID: marklogic, Project: marklogic-contentpump, Lines of code: 11, Source file: CombineDocumentSplit.java

Example 12: addIndexedSplits

import org.apache.hadoop.mapreduce.lib.input.FileSplit; // import the package/class this method depends on
private int addIndexedSplits(List<InputSplit> splits, int i,
		List<InputSplit> newSplits, Configuration cfg) throws IOException {
	Path file = ((FileSplit) splits.get(i)).getPath();

	SplittingBAMIndex idx = new SplittingBAMIndex(file.getFileSystem(cfg)
			.open(getIdxPath(file)));

	int splitsEnd = splits.size();
	for (int j = i; j < splitsEnd; j++) {
		if (!file.equals(((FileSplit) splits.get(j)).getPath()))
			splitsEnd = j;
	}
	for (int j = i; j < splitsEnd; j++) {
		FileSplit fileSplit = (FileSplit) splits.get(j);

		long start = fileSplit.getStart();
		long end = start + fileSplit.getLength();

		Long blockStart = idx.nextAlignment(start);

		Long blockEnd = Long.valueOf(j == splitsEnd - 1 ? idx
				.prevAlignment(end).longValue() | 0xFFFF : idx
				.nextAlignment(end).longValue());

		if (blockStart == null) {
			throw new RuntimeException(
					"Internal error or invalid index: no block start for "
							+ start);
		}
		if (blockEnd == null) {
			throw new RuntimeException(
					"Internal error or invalid index: no block end for "
							+ end);
		}
		newSplits.add(new FileVirtualSplit(file, blockStart.longValue(),
				blockEnd.longValue(), fileSplit.getLocations()));
	}
	return splitsEnd;
}
 
Developer ID: BGI-flexlab, Project: SOAPgaea, Lines of code: 40, Source file: GaeaBamInputFormat.java

Example 13: getLength

import org.apache.hadoop.mapreduce.lib.input.FileSplit; // import the package/class this method depends on
private long getLength(FileSplit fileSplit, Configuration conf,
		long fileLength) {
	boolean isSplitable = conf.getBoolean(CRAM_FILE_SPLITABLE, false);
	if (isSplitable)
		return fileSplit.getLength();
	return fileLength;
}
 
Developer ID: BGI-flexlab, Project: SOAPgaea, Lines of code: 8, Source file: GaeaCramRecordReader.java

Example 14: initialize

import org.apache.hadoop.mapreduce.lib.input.FileSplit; // import the package/class this method depends on
/***
 * Initializes readers
 * 
 * @param split Split to be used (assumed to be a file split)
 * @param context context of the job
 * @throws java.io.IOException in case of errors reading from the filestream provided by Hadoop
 * @throws java.lang.InterruptedException in case of thread interruption
 * 
 */

@Override
public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
    FileSplit fSplit = (FileSplit) split;
    // Initialize start and end of split
    start = fSplit.getStart();
    end = start + fSplit.getLength();
    final Path file = fSplit.getPath();
    codec = new CompressionCodecFactory(context.getConfiguration()).getCodec(file);
    final FileSystem fs = file.getFileSystem(context.getConfiguration());
    FSDataInputStream fileIn = fs.open(file);
    // open stream
    if (isCompressedInput()) { // decompress
        decompressor = CodecPool.getDecompressor(codec);
        if (codec instanceof SplittableCompressionCodec) {
            final SplitCompressionInputStream cIn = ((SplittableCompressionCodec) codec).createInputStream(fileIn, decompressor, start, end, SplittableCompressionCodec.READ_MODE.CONTINUOUS);
            ebr = new EthereumBlockReader(cIn, this.maxSizeEthereumBlock, this.bufferSize, this.useDirectBuffer);
            start = cIn.getAdjustedStart();
            end = cIn.getAdjustedEnd();
            filePosition = cIn; // take pos from compressed stream
        } else {
            ebr = new EthereumBlockReader(codec.createInputStream(fileIn, decompressor), this.maxSizeEthereumBlock, this.bufferSize, this.useDirectBuffer);
            filePosition = fileIn;
        }
    } else {
        fileIn.seek(start);
        ebr = new EthereumBlockReader(fileIn, this.maxSizeEthereumBlock, this.bufferSize, this.useDirectBuffer);
        filePosition = fileIn;
    }
}
 
Developer ID: ZuInnoTe, Project: hadoopcryptoledger, Lines of code: 41, Source file: AbstractEthereumRecordReader.java

Example 15: initialize

import org.apache.hadoop.mapreduce.lib.input.FileSplit; // import the package/class this method depends on
/**
* Initializes reader
* @param split Split to use (assumed to be a file split)
* @param context context of the job
*
*
* @throws java.io.IOException in case of errors reading from the filestream provided by Hadoop
* @throws java.lang.InterruptedException in case of thread interruption
*
*/
@Override
public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
    FileSplit fSplit = (FileSplit) split;
    // Initialize start and end of split
    start = fSplit.getStart();
    end = start + fSplit.getLength();
    final Path file = fSplit.getPath();
    codec = new CompressionCodecFactory(context.getConfiguration()).getCodec(file);
    final FileSystem fs = file.getFileSystem(context.getConfiguration());
    FSDataInputStream fileIn = fs.open(file);
    // open stream
    if (isCompressedInput()) { // decompress
        decompressor = CodecPool.getDecompressor(codec);
        if (codec instanceof SplittableCompressionCodec) {
            final SplitCompressionInputStream cIn = ((SplittableCompressionCodec) codec).createInputStream(fileIn, decompressor, start, end, SplittableCompressionCodec.READ_MODE.CONTINUOUS);
            bbr = new BitcoinBlockReader(cIn, this.maxSizeBitcoinBlock, this.bufferSize, this.specificMagicByteArray, this.useDirectBuffer, this.readAuxPOW);
            start = cIn.getAdjustedStart();
            end = cIn.getAdjustedEnd();
            filePosition = cIn; // take pos from compressed stream
        } else {
            bbr = new BitcoinBlockReader(codec.createInputStream(fileIn, decompressor), this.maxSizeBitcoinBlock, this.bufferSize, this.specificMagicByteArray, this.useDirectBuffer, readAuxPOW);
            filePosition = fileIn;
        }
    } else {
        fileIn.seek(start);
        bbr = new BitcoinBlockReader(fileIn, this.maxSizeBitcoinBlock, this.bufferSize, this.specificMagicByteArray, this.useDirectBuffer, readAuxPOW);
        filePosition = fileIn;
    }
    // seek to block start (for the case a block overlaps a split)
    try {
        bbr.seekBlockStart();
    } catch (BitcoinBlockReadException bbre) {
        LOG.error("Error reading Bitcoin blockchain data");
        LOG.error(bbre);
    }
}
 
Developer ID: ZuInnoTe, Project: hadoopcryptoledger, Lines of code: 48, Source file: AbstractBitcoinRecordReader.java


Note: The org.apache.hadoop.mapreduce.lib.input.FileSplit.getLength method examples in this article were compiled by 純淨天空 from open source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open source projects contributed by many developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the License of the corresponding project; do not repost without permission.