当前位置: 首页>>代码示例>>Java>>正文


Java CodecPool.returnCompressor方法代码示例

本文整理汇总了Java中org.apache.hadoop.io.compress.CodecPool.returnCompressor方法的典型用法代码示例。如果您正苦于以下问题:Java CodecPool.returnCompressor方法的具体用法?Java CodecPool.returnCompressor怎么用?Java CodecPool.returnCompressor使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.io.compress.CodecPool的用法示例。


在下文中一共展示了CodecPool.returnCompressor方法的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: close

import org.apache.hadoop.io.compress.CodecPool; //导入方法依赖的package包/类
/**
 * Close the file: close the key/value serializers, return the pooled
 * compressor, and close (or flush) the underlying stream.
 *
 * <p>The compressor is returned inside a {@code finally} so it goes back to
 * the {@link CodecPool} even when closing a serializer throws — leaking
 * pooled compressors exhausts the pool over time.
 *
 * @throws IOException if closing a serializer or the underlying stream fails
 */
@Override
public synchronized void close() throws IOException {
  try {
    keySerializer.close();
    uncompressedValSerializer.close();
    if (compressedValSerializer != null) {
      compressedValSerializer.close();
    }
  } finally {
    // Always hand the compressor back to the shared pool and drop our ref.
    CodecPool.returnCompressor(compressor);
    compressor = null;
  }

  if (out != null) {

    // Close the underlying stream iff we own it...
    if (ownOutputStream) {
      out.close();
    } else {
      out.flush();
    }
    out = null;
  }
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:24,代码来源:SequenceFile.java

示例2: close

import org.apache.hadoop.io.compress.CodecPool; //导入方法依赖的package包/类
/**
 * Closes this compressed HDFS stream: flushes the serializer, finishes the
 * compressed output exactly once, syncs the filesystem stream, and returns
 * the pooled compressor.
 *
 * @throws IOException if flushing, syncing, or closing a stream fails
 */
@Override
public void close() throws IOException {
  // Push any buffered events out through the serializer before teardown.
  serializer.flush();
  serializer.beforeClose();
  // finish() completes the compressed output; guarded so it runs only once
  // even if close() is invoked after an earlier finish.
  if (!isFinished) {
    cmpOut.finish();
    isFinished = true;
  }
  // Flush and durably sync the underlying HDFS stream before closing the
  // compression wrapper on top of it.
  fsOut.flush();
  hflushOrSync(fsOut);
  cmpOut.close();
  // Return the leased compressor to the shared CodecPool exactly once.
  if (compressor != null) {
    CodecPool.returnCompressor(compressor);
    compressor = null;
  }
  unregisterCurrentStream();
}
 
开发者ID:moueimei,项目名称:flume-release-1.7.0,代码行数:18,代码来源:HDFSCompressedDataStream.java

示例3: close

import org.apache.hadoop.io.compress.CodecPool; //导入方法依赖的package包/类
/**
 * Close the file: shut down the serializers, hand the compressor back to
 * the pool, then release the underlying output stream.
 *
 * @throws IOException if closing a serializer or the stream fails
 */
public synchronized void close() throws IOException {
  // Close all serializers first; the compressed one is optional.
  keySerializer.close();
  uncompressedValSerializer.close();
  if (compressedValSerializer != null) {
    compressedValSerializer.close();
  }

  // Give the compressor back to the shared CodecPool and drop our reference.
  CodecPool.returnCompressor(compressor);
  compressor = null;

  if (out == null) {
    return;
  }
  // Close the underlying stream only when we own it; otherwise just flush.
  if (ownOutputStream) {
    out.close();
  } else {
    out.flush();
  }
  out = null;
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:23,代码来源:SequenceFile.java

示例4: returnCompressor

import org.apache.hadoop.io.compress.CodecPool; //导入方法依赖的package包/类
/**
 * Returns a leased compressor to the shared {@link CodecPool}.
 * A {@code null} argument is a no-op.
 */
public void returnCompressor(Compressor compressor) {
  if (compressor == null) {
    return;
  }
  // Guard the debug call so hashCode()/concatenation are skipped when
  // debug logging is disabled.
  if (LOG.isDebugEnabled()) {
    LOG.debug("Return a compressor: " + compressor.hashCode());
  }
  CodecPool.returnCompressor(compressor);
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:9,代码来源:Compression.java

示例5: close

import org.apache.hadoop.io.compress.CodecPool; //导入方法依赖的package包/类
/**
 * Closes the wrapped writer; the pooled compressor is returned to the
 * {@link CodecPool} afterwards even when the close itself throws.
 *
 * @throws IOException if closing the writer fails
 */
@Override
public void close() throws IOException {
  try {
    writer.close();
  } finally {
    // The compressor must go back to the pool regardless of how close() ended.
    Compressor held = compressor;
    if (held != null) {
      CodecPool.returnCompressor(held);
    }
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:11,代码来源:DefaultOutputter.java

示例6: close

import org.apache.hadoop.io.compress.CodecPool; //导入方法依赖的package包/类
/**
 * Closes the IFile writer: closes the serializers (when key/value classes
 * are set), writes the end-of-file marker, flushes/finishes the output
 * chain, records compressed bytes written, returns the pooled compressor,
 * and bumps the written-records counter.
 *
 * <p>NOTE(review): the statement order here is load-bearing — markers must
 * be written before the flush, and the compressed stream must be finished
 * before the underlying/checksum stream is closed.
 *
 * @throws IOException if writing or closing the underlying streams fails
 */
public void close() throws IOException {

      // When IFile writer is created by BackupStore, we do not have
      // Key and Value classes set. So, check before closing the
      // serializers
      if (keyClass != null) {
        keySerializer.close();
        valueSerializer.close();
      }

      // Write EOF_MARKER for key/value length
      WritableUtils.writeVInt(out, EOF_MARKER);
      WritableUtils.writeVInt(out, EOF_MARKER);
      decompressedBytesWritten += 2 * WritableUtils.getVIntSize(EOF_MARKER);
      
      //Flush the stream
      out.flush();
  
      if (compressOutput) {
        // Flush the compressed stream and reset it so the codec can be reused.
        compressedOut.finish();
        compressedOut.resetState();
      }
      
      // Close the underlying stream iff we own it...
      if (ownOutputStream) {
        out.close();
      }
      else {
        // Write the checksum
        checksumOut.finish();
      }

      compressedBytesWritten = rawOut.getPos() - start;

      if (compressOutput) {
        // Return back the compressor to the shared CodecPool.
        CodecPool.returnCompressor(compressor);
        compressor = null;
      }

      out = null;
      if(writtenRecordsCounter != null) {
        writtenRecordsCounter.increment(numRecordsWritten);
      }
    }
 
开发者ID:naver,项目名称:hadoop,代码行数:47,代码来源:IFile.java

示例7: returnCompressor

import org.apache.hadoop.io.compress.CodecPool; //导入方法依赖的package包/类
/**
 * Hands a leased compressor back to the shared {@link CodecPool}.
 * Passing {@code null} is a no-op.
 */
public void returnCompressor(Compressor compressor) {
  if (compressor == null) {
    return;
  }
  if (LOG.isTraceEnabled()) {
    LOG.trace("Returning compressor " + compressor + " to pool.");
  }
  CodecPool.returnCompressor(compressor);
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:7,代码来源:Compression.java

示例8: returnCompressor

import org.apache.hadoop.io.compress.CodecPool; //导入方法依赖的package包/类
/**
 * Returns a compressor to the shared {@link CodecPool}; ignores {@code null}.
 */
public void returnCompressor(Compressor compressor) {
  if (compressor == null) {
    return;
  }
  CodecPool.returnCompressor(compressor);
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:6,代码来源:Compression.java

示例9: returnCompressor

import org.apache.hadoop.io.compress.CodecPool; //导入方法依赖的package包/类
/**
 * Returns a leased compressor to the shared {@link CodecPool}.
 * A {@code null} argument is a no-op.
 */
public void returnCompressor(Compressor compressor) {
  if (compressor != null) {
    // Guard the debug statement so hashCode() and the string concatenation
    // only run when debug logging is actually enabled (matches the other
    // Compression.returnCompressor variants in this codebase).
    if (LOG.isDebugEnabled()) {
      LOG.debug("Return a compressor: " + compressor.hashCode());
    }
    CodecPool.returnCompressor(compressor);
  }
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:7,代码来源:Compression.java

示例10: release

import org.apache.hadoop.io.compress.CodecPool; //导入方法依赖的package包/类
/** Returns the held compressor (if any) to the shared {@link CodecPool}. */
private void release() {
    if (compressor == null) {
        return;
    }
    CodecPool.returnCompressor(compressor);
}
 
开发者ID:grokcoder,项目名称:pbase,代码行数:6,代码来源:CodecFactory.java

示例11: buildCellBlock

import org.apache.hadoop.io.compress.CodecPool; //导入方法依赖的package包/类
/**
 * Puts CellScanner Cells into a cell block using passed in <code>codec</code> and/or
 * <code>compressor</code>.
 * @param codec encoder used to serialize the Cells; must be non-null
 * @param compressor optional compression applied on top of the encoding; may be null
 * @param cellScanner source of Cells; may be null, in which case null is returned
 * @return Null or byte buffer filled with a cellblock filled with passed-in Cells encoded using
 * passed in <code>codec</code> and/or <code>compressor</code>; the returned buffer has been
 * flipped and is ready for reading.  Use limit to find total size.
 * @throws IOException
 */
@SuppressWarnings("resource")
public ByteBuffer buildCellBlock(final Codec codec, final CompressionCodec compressor,
  final CellScanner cellScanner)
throws IOException {
  if (cellScanner == null) return null;
  if (codec == null) throw new CellScannerButNoCodecException();
  int bufferSize = this.cellBlockBuildingInitialBufferSize;
  if (cellScanner instanceof HeapSize) {
    long longSize = ((HeapSize)cellScanner).heapSize();
    // Just make sure we don't have a size bigger than an int.
    if (longSize > Integer.MAX_VALUE) {
      throw new IOException("Size " + longSize + " > " + Integer.MAX_VALUE);
    }
    bufferSize = ClassSize.align((int)longSize);
  } // TODO: Else, get estimate on size of buffer rather than have the buffer resize.
  // See TestIPCUtil main for experiment where we spin through the Cells getting estimate of
  // total size before creating the buffer.  It costs some small percentage.  If we are usually
  // within the estimated buffer size, then the cost is not worth it.  If we are often well
  // outside the guesstimated buffer size, the processing can be done in half the time if we
  // go w/ the estimated size rather than let the buffer resize.
  ByteBufferOutputStream baos = new ByteBufferOutputStream(bufferSize);
  OutputStream os = baos;
  Compressor poolCompressor = null;
  try {
    if (compressor != null) {
      // Lease a Compressor from the shared pool and wrap the stream with it.
      if (compressor instanceof Configurable) ((Configurable)compressor).setConf(this.conf);
      poolCompressor = CodecPool.getCompressor(compressor);
      os = compressor.createOutputStream(os, poolCompressor);
    }
    Codec.Encoder encoder = codec.getEncoder(os);
    int count = 0;
    while (cellScanner.advance()) {
      encoder.write(cellScanner.current());
      count++;
    }
    encoder.flush();
    // If no cells, don't mess around.  Just return null (could be a bunch of existence checking
    // gets or something -- stuff that does not return a cell).
    if (count == 0) return null;
  } finally {
    // Always close the (possibly compressing) stream and return the leased
    // compressor to the pool, even when encoding threw.
    os.close();
    if (poolCompressor != null) CodecPool.returnCompressor(poolCompressor);
  }
  if (LOG.isTraceEnabled()) {
    if (bufferSize < baos.size()) {
      LOG.trace("Buffer grew from initial bufferSize=" + bufferSize + " to " + baos.size() +
        "; up hbase.ipc.cellblock.building.initial.buffersize?");
    }
  }
  return baos.getByteBuffer();
}
 
开发者ID:grokcoder,项目名称:pbase,代码行数:63,代码来源:IPCUtil.java

示例12: release

import org.apache.hadoop.io.compress.CodecPool; //导入方法依赖的package包/类
/** Hands the held compressor (if any) back to the shared {@link CodecPool}. */
private void release() {
    if (compressor == null) {
        return;
    }
    CodecPool.returnCompressor(compressor);
}
 
开发者ID:y-lan,项目名称:presto,代码行数:7,代码来源:ParquetCodecFactory.java

示例13: buildCellBlock

import org.apache.hadoop.io.compress.CodecPool; //导入方法依赖的package包/类
/**
 * Puts CellScanner Cells into a cell block using passed in <code>codec</code> and/or
 * <code>compressor</code>.
 * @param codec encoder used to serialize the Cells; must be non-null
 * @param compressor optional compression applied on top of the encoding; may be null
 * @param cellScanner source of Cells; may be null, in which case null is returned
 * @return Null or byte buffer filled with a cellblock filled with passed-in Cells encoded using
 * passed in <code>codec</code> and/or <code>compressor</code>; the returned buffer has been
 * flipped and is ready for reading.  Use limit to find total size.
 * @throws IOException
 */
@SuppressWarnings("resource")
ByteBuffer buildCellBlock(final Codec codec, final CompressionCodec compressor,
  final CellScanner cellScanner)
throws IOException {
  if (cellScanner == null) return null;
  if (codec == null) throw new CellScannerButNoCodecException();
  int bufferSize = this.cellBlockBuildingInitialBufferSize;
  if (cellScanner instanceof HeapSize) {
    long longSize = ((HeapSize)cellScanner).heapSize();
    // Just make sure we don't have a size bigger than an int.
    if (longSize > Integer.MAX_VALUE) {
      throw new IOException("Size " + longSize + " > " + Integer.MAX_VALUE);
    }
    bufferSize = ClassSize.align((int)longSize);
  } // TODO: Else, get estimate on size of buffer rather than have the buffer resize.
  // See TestIPCUtil main for experiment where we spin through the Cells getting estimate of
  // total size before creating the buffer.  It costs some small percentage.  If we are usually
  // within the estimated buffer size, then the cost is not worth it.  If we are often well
  // outside the guesstimated buffer size, the processing can be done in half the time if we
  // go w/ the estimated size rather than let the buffer resize.
  ByteBufferOutputStream baos = new ByteBufferOutputStream(bufferSize);
  OutputStream os = baos;
  Compressor poolCompressor = null;
  try {
    if (compressor != null) {
      // Lease a Compressor from the shared pool and wrap the stream with it.
      if (compressor instanceof Configurable) ((Configurable)compressor).setConf(this.conf);
      poolCompressor = CodecPool.getCompressor(compressor);
      os = compressor.createOutputStream(os, poolCompressor);
    }
    Codec.Encoder encoder = codec.getEncoder(os);
    int count = 0;
    while (cellScanner.advance()) {
      encoder.write(cellScanner.current());
      count++;
    }
    encoder.flush();
    // If no cells, don't mess around.  Just return null (could be a bunch of existence checking
    // gets or something -- stuff that does not return a cell).
    if (count == 0) return null;
  } finally {
    // Always close the (possibly compressing) stream and return the leased
    // compressor to the pool, even when encoding threw.
    os.close();
    if (poolCompressor != null) CodecPool.returnCompressor(poolCompressor);
  }
  if (LOG.isTraceEnabled()) {
    if (bufferSize < baos.size()) {
      LOG.trace("Buffer grew from initial bufferSize=" + bufferSize + " to " + baos.size() +
        "; up hbase.ipc.cellblock.building.initial.buffersize?");
    }
  }
  return baos.getByteBuffer();
}
 
开发者ID:tenggyut,项目名称:HIndex,代码行数:63,代码来源:IPCUtil.java


注:本文中的org.apache.hadoop.io.compress.CodecPool.returnCompressor方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。