Java Decompressor.reset Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.io.compress.Decompressor.reset. If you are wondering how Decompressor.reset is used in practice, or are looking for concrete examples, the selected code samples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.io.compress.Decompressor.


A total of 7 code examples of the Decompressor.reset method are shown below, sorted by popularity by default.
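
Before the examples, here is a minimal end-to-end sketch of the borrow/reset/return lifecycle that the snippets below revolve around. It is an illustrative assumption rather than code from any of the listed projects: the codec choice (GzipCodec), the input file name data.gz, and the class name DecompressorResetExample are made up for the example.

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class DecompressorResetExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);

    // Borrow a (possibly previously used) decompressor from the shared pool.
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    try {
      if (decompressor != null) {
        // Clear any state left over from a previous borrower, exactly as the
        // examples below do before handing the decompressor to a stream.
        decompressor.reset();
      }
      try (InputStream raw = new FileInputStream("data.gz"); // hypothetical input file
           CompressionInputStream in = decompressor != null
               ? codec.createInputStream(raw, decompressor)
               : codec.createInputStream(raw)) {
        byte[] buf = new byte[4096];
        int n;
        while ((n = in.read(buf)) != -1) {
          System.out.write(buf, 0, n);
        }
        System.out.flush();
      }
    } finally {
      if (decompressor != null) {
        // Return the decompressor so other callers can reuse it.
        CodecPool.returnDecompressor(decompressor);
      }
    }
  }
}

The point shared by all of the examples below is the same: a pooled Decompressor must be reset() before reuse, otherwise state left over from a previous stream can corrupt the next read.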

Example 1: getDecompressor

import org.apache.hadoop.io.compress.Decompressor; // import the package/class this method depends on
public Decompressor getDecompressor() throws IOException {
  CompressionCodec codec = getCodec();
  if (codec != null) {
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    if (decompressor != null) {
      if (decompressor.finished()) {
        // Somebody returns the decompressor to CodecPool but is still using
        // it.
        LOG.warn("Deompressor obtained from CodecPool already finished()");
      } else {
        if(LOG.isDebugEnabled()) {
          LOG.debug("Got a decompressor: " + decompressor.hashCode());
        }
      }
      /**
       * Following statement is necessary to get around bugs in 0.18 where a
       * decompressor is referenced after returned back to the codec pool.
       */
      decompressor.reset();
    }
    return decompressor;
  }

  return null;
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 26, Source file: Compression.java

Example 2: getDecompressor

import org.apache.hadoop.io.compress.Decompressor; // import the package/class this method depends on
public Decompressor getDecompressor() {
  CompressionCodec codec = getCodec(conf);
  if (codec != null) {
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    if (LOG.isTraceEnabled()) LOG.trace("Retrieved decompressor " + decompressor
        + " from pool.");
    if (decompressor != null) {
      if (decompressor.finished()) {
        // Somebody returns the decompressor to CodecPool but is still using it.
        LOG.warn("Deompressor obtained from CodecPool is already finished()");
      }
      decompressor.reset();
    }
    return decompressor;
  }

  return null;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 19, Source file: Compression.java

Example 3: getDecompressor

import org.apache.hadoop.io.compress.Decompressor; // import the package/class this method depends on
public Decompressor getDecompressor() {
  CompressionCodec codec = getCodec(conf);
  if (codec != null) {
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    if (decompressor != null) {
      if (decompressor.finished()) {
        // Somebody returns the decompressor to CodecPool but is still using
        // it.
        LOG.warn("Decompressor obtained from CodecPool is already finished()");
        // throw new AssertionError(
        // "Decompressor obtained from CodecPool is already finished()");
      }
      decompressor.reset();
    }
    return decompressor;
  }

  return null;
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 21, Source file: Compression.java

Example 4: getDecompressor

import org.apache.hadoop.io.compress.Decompressor; // import the package/class this method depends on
public Decompressor getDecompressor() throws IOException {
  CompressionCodec codec = getCodec();
  if (codec != null) {
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    if (decompressor != null) {
      if (decompressor.finished()) {
        // Somebody returns the decompressor to CodecPool but is still using
        // it.
        LOG.warn("Deompressor obtained from CodecPool already finished()");
      } else {
        LOG.debug("Got a decompressor: " + decompressor.hashCode());
      }
      /**
       * Following statement is necessary to get around bugs in 0.18 where a
       * decompressor is referenced after returned back to the codec pool.
       */
      decompressor.reset();
    }
    return decompressor;
  }

  return null;
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 24, Source file: Compression.java

Example 5: getDecompressor

import org.apache.hadoop.io.compress.Decompressor; // import the package/class this method depends on
public Decompressor getDecompressor() {
  CompressionCodec codec = getCodec(conf);
  if (codec != null) {
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    if (LOG.isTraceEnabled()) LOG.trace("Retrieved decompressor " + decompressor + " from pool.");
    if (decompressor != null) {
      if (decompressor.finished()) {
        // Somebody returns the decompressor to CodecPool but is still using it.
        LOG.warn("Deompressor obtained from CodecPool is already finished()");
      }
      decompressor.reset();
    }
    return decompressor;
  }

  return null;
}
 
Developer ID: grokcoder, Project: pbase, Lines of code: 18, Source file: Compression.java

Example 6: closeAndRelease

import org.apache.hadoop.io.compress.Decompressor; // import the package/class this method depends on
@Override
public void closeAndRelease(CompressionInputStream cin) {

	IOUtils.closeQuietly(cin);

	if (hasDecompressors) {
		Decompressor dec = usedDecompressors.remove(cin);
		dec.reset();
		decompressorQueue.offer(dec);
		status.setCounter(DECOMPRESSOR_STR,
				decompressorsUsedCount.decrementAndGet());
	}

}
 
Developer ID: gerritjvv, Project: bigstreams, Lines of code: 15, Source file: CompressionPoolImpl.java
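
For context, a hypothetical acquire-side counterpart to closeAndRelease() could look like the sketch below. Only the field names hasDecompressors, decompressorQueue and usedDecompressors are taken from the snippet above; the method name, signature, and the omitted status/counter bookkeeping are assumptions, not the project's actual API.

// Hypothetical acquire-side sketch (not from bigstreams): reuse a pooled
// Decompressor when one is free, otherwise create a new one.
public CompressionInputStream openStream(CompressionCodec codec, InputStream in)
    throws IOException {
	if (!hasDecompressors) {
		// The codec has no reusable decompressors; let it manage its own.
		return codec.createInputStream(in);
	}
	Decompressor dec = decompressorQueue.poll();
	if (dec == null) {
		dec = codec.createDecompressor();
	}
	CompressionInputStream cin = codec.createInputStream(in, dec);
	// Remember which decompressor backs this stream so closeAndRelease()
	// can reset it and return it to the queue.
	usedDecompressors.put(cin, dec);
	return cin;
}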

Example 7: DataSegmentReader

import org.apache.hadoop.io.compress.Decompressor; // import the package/class this method depends on
/**
 * May throw EOFException if InputStream does not have a
 * complete data segment.
 *
 * NOTE: This class holds a reference to the Decompressor in
 * decompressorCache until the return value of
 * getInputStream() is closed.
 *
 * @param decompressorCache  cache of reusable Decompressors, keyed by codec name
 * @throws EmptyDataSegmentException  if there is nothing to read.
 * @throws EOFException  if the data segment is not complete.
 */
DataSegmentReader(DataInputStream in, Configuration conf,
    HashMap<Text, Decompressor> decompressorCache)
    throws EmptyDataSegmentException, EOFException,
    ClassNotFoundException, IOException {

  // Read from DataInputStream
  // 1. Read length
  int length = 0;
  try {
    length = in.readInt();
  } catch (EOFException e) {
    throw new EmptyDataSegmentException();
  }

  // 2. Read codec
  int codecNameUTF8Length = in.readShort();
  byte[] codecNameUTF8 = new byte[codecNameUTF8Length];
  in.readFully(codecNameUTF8);
  Text codecNameText = new Text(codecNameUTF8);
  // 3. read CRC32 (only present when uncompressed)
  boolean hasCrc32 = (codecNameUTF8Length == 0);
  long crc32Value = 0;
  if (hasCrc32) {
    crc32Value = in.readLong();
  }
  // 4. read data
  byte[] storedData
      = new byte[length - (hasCrc32 ? 8 : 0)/*crc32*/
                 - 2/*codec length*/ - codecNameUTF8Length];
  in.readFully(storedData);

  // Verify the checksum
  if (hasCrc32) {
    CRC32 crc32 = new CRC32();
    crc32.update(storedData);
    if (crc32.getValue() != crc32Value) {
      throw new CorruptedDataException("Corrupted data segment with length " + length
          + " crc32 expected " + crc32Value + " but got " + crc32.getValue());
    }
  }

  // Uncompress the data if needed
  if (codecNameUTF8Length == 0) {
    // no compression
    uncompressedData = new ByteArrayInputStream(storedData);
  } else {
    CompressionCodec codec = getCodecFromName(codecNameText, conf);
    Decompressor decompressor = null;
    if (decompressorCache != null) {
      // Create decompressor and add to cache if needed.
      decompressor = decompressorCache.get(codecNameText);
      if (decompressor == null) {
        decompressor = codec.createDecompressor();
      } else {
        decompressor.reset();
      }
    }
    if (decompressor == null) {
      uncompressedData = codec.createInputStream(new ByteArrayInputStream(storedData));
    } else {
      uncompressedData = codec.createInputStream(new ByteArrayInputStream(storedData),
          decompressor);
    }
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 78, Source file: DataSegmentReader.java
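
The reader above implies a simple segment layout: a 4-byte total length, a 2-byte codec-name length followed by the codec name in UTF-8 (empty when uncompressed), an 8-byte CRC32 that is present only for uncompressed data, and finally the payload. As a rough illustration, a writer for the uncompressed case might look like the sketch below; it is inferred purely from DataSegmentReader and is not the project's actual writer.

// Hypothetical writer for an *uncompressed* segment, mirroring the field
// order that DataSegmentReader expects (an assumption, not hadoop-EAR code).
static void writeUncompressedSegment(java.io.DataOutputStream out, byte[] data)
    throws java.io.IOException {
  java.util.zip.CRC32 crc32 = new java.util.zip.CRC32();
  crc32.update(data);
  // "length" covers everything after the length field itself:
  // 2 bytes codec-name length + 0 bytes codec name + 8 bytes CRC32 + payload.
  int length = 2 + 0 + 8 + data.length;
  out.writeInt(length);            // 1. segment length
  out.writeShort(0);               // 2. empty codec name => uncompressed
  out.writeLong(crc32.getValue()); // 3. CRC32, present only when uncompressed
  out.write(data);                 // 4. raw payload
}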


Note: The org.apache.hadoop.io.compress.Decompressor.reset examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and any distribution or use should follow each project's license. Do not reproduce without permission.