

Java Algorithm Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.io.file.tfile.Compression.Algorithm. If you are unsure what the Algorithm class does or how to use it, the curated code examples below should help.


The Algorithm class belongs to the org.apache.hadoop.io.file.tfile.Compression package. Six code examples of the class are shown below, sorted by popularity by default.

Example 1: WBlockState

import org.apache.hadoop.io.file.tfile.Compression.Algorithm; // import the required package/class
/**
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @throws IOException
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.getBytes());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressionAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 27, Source: BCFile.java
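
Example 1 shows the full compressor lifecycle: borrow a Compressor from the Algorithm, wrap the downstream output with createCompressionStream, and return the compressor to the pool if stream construction fails. The standalone sketch below follows the same pattern outside of BCFile; the GZ constant and the visibility of the Algorithm enum are assumptions (the enclosing Compression class is not public in every Hadoop release), and because the write completes in a single call the compressor is returned in a finally block, whereas WBlockState keeps it for the lifetime of the block. Treat it as illustrative rather than a drop-in utility.

import java.io.IOException;
import java.io.OutputStream;

import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.file.tfile.Compression.Algorithm;

// Minimal sketch of the compressor lifecycle shown in Example 1.
// Assumptions: the GZ constant and that Compression.Algorithm is visible to this code.
public class AlgorithmCompressionSketch {
  public static void writeCompressed(OutputStream rawOut, byte[] payload) throws IOException {
    Algorithm algo = Algorithm.GZ;                  // codec choice is an assumption
    Compressor compressor = algo.getCompressor();   // borrow a pooled compressor
    try {
      // 0 matches the examples above: no extra buffering of the downstream stream
      OutputStream out = algo.createCompressionStream(rawOut, compressor, 0);
      out.write(payload);
      out.flush();                                  // flush the compressed output, as WBlockState does
    } finally {
      algo.returnCompressor(compressor);            // always hand the compressor back to the pool
    }
  }
}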

Example 2: prepareMetaBlock

import org.apache.hadoop.io.file.tfile.Compression.Algorithm; // import the required package/class
private BlockAppender prepareMetaBlock(String name, Algorithm compressAlgo)
    throws IOException, MetaBlockAlreadyExists {
  if (blkInProgress == true) {
    throw new IllegalStateException(
        "Cannot create Meta Block until previous block is closed.");
  }

  if (metaIndex.getMetaByName(name) != null) {
    throw new MetaBlockAlreadyExists("name=" + name);
  }

  MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
  WBlockState wbs =
      new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
  BlockAppender ba = new BlockAppender(mbr, wbs);
  blkInProgress = true;
  metaBlkSeen = true;
  return ba;
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 20, Source: BCFile.java

Example 3: RBlockState

import org.apache.hadoop.io.file.tfile.Compression.Algorithm; // import the required package/class
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin,
    BlockRegion region, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.region = region;
  this.decompressor = compressionAlgo.getDecompressor();

  try {
    this.in =
        compressAlgo
            .createDecompressionStream(new BoundedRangeFileInputStream(
                fsin, this.region.getOffset(), this.region
                    .getCompressedSize()), decompressor, TFile
                .getFSInputBufferSize(conf));
  } catch (IOException e) {
    compressAlgo.returnDecompressor(decompressor);
    throw e;
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 19, Source: BCFile.java
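
Example 3 is the read-side mirror: borrow a Decompressor, wrap a bounded range of the input with createDecompressionStream, and return the decompressor only if stream construction fails; otherwise the reader keeps both until it is closed. Below is a hedged sketch of that ownership model with a plain InputStream in place of BoundedRangeFileInputStream; the class name, the 64 KB buffer, and the close() behavior are assumptions for illustration.

import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.file.tfile.Compression.Algorithm;

// Sketch of Example 3's ownership model: the reader holds the decompression
// stream and the pooled Decompressor together, returning the decompressor if
// construction fails or when the reader is closed.
public class BlockReaderSketch implements Closeable {
  private final Algorithm algo;
  private final Decompressor decompressor;
  private final InputStream in;

  public BlockReaderSketch(Algorithm algo, InputStream rawIn) throws IOException {
    this.algo = algo;
    this.decompressor = algo.getDecompressor();     // borrow a pooled decompressor
    try {
      this.in = algo.createDecompressionStream(rawIn, decompressor, 64 * 1024);
    } catch (IOException e) {
      algo.returnDecompressor(decompressor);        // don't leak the decompressor on failure
      throw e;
    }
  }

  public InputStream getInputStream() {
    return in;
  }

  @Override
  public void close() throws IOException {
    try {
      in.close();
    } finally {
      algo.returnDecompressor(decompressor);        // give it back once reading is done
    }
  }
}

Wrapping the reader in try-with-resources keeps the decompressor from leaking even if reads fail partway through.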

Example 4: WBlockState

import org.apache.hadoop.io.file.tfile.Compression.Algorithm; // import the required package/class
/**
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @throws IOException
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.get());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressionAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 27, Source: BCFile.java

Example 5: WBlockState

import org.apache.hadoop.io.file.tfile.Compression.Algorithm; // import the required package/class
/**
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @throws IOException
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(DTFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.getBytes());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressionAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
 
Developer ID: apache, Project: apex-malhar, Lines: 27, Source: DTBCFile.java

Example 6: RBlockState

import org.apache.hadoop.io.file.tfile.Compression.Algorithm; // import the required package/class
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin, BlockRegion region, Configuration conf, Reader r) throws IOException
{
  this.compressAlgo = compressionAlgo;
  Decompressor decompressor = compressionAlgo.getDecompressor();
  this.region = region;
  try {

    InputStream in = compressAlgo.createDecompressionStream(new BoundedRangeFileInputStream(fsin, region.getOffset(), region.getCompressedSize()), decompressor, DTFile.getFSInputBufferSize(conf));
    int l = 1;
    r.baos.reset();
    byte[] buf = new byte[DTFile.getFSInputBufferSize(conf)];
    while (l >= 0) {
      l = in.read(buf);
      if (l > 0) {
        r.baos.write(buf, 0, l);
      }
    }
    // keep the decompressed data in the cache
    byte[] blockData = r.baos.toByteArray();
    rbain = new ReusableByteArrayInputStream(blockData);
  } catch (IOException e) {
    compressAlgo.returnDecompressor(decompressor);
    throw e;
  }

}
 
Developer ID: apache, Project: apex-malhar, Lines: 27, Source: DTBCFile.java
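
Example 6 differs from Example 3 in that it decompresses the whole block eagerly into an in-memory byte array, so the data can be cached and re-read without holding a decompression stream open per reader. The rough sketch below reproduces that eager-read idea with a plain ByteArrayInputStream standing in for Malhar's ReusableByteArrayInputStream; the bufferSize parameter stands in for DTFile.getFSInputBufferSize(conf), and returning the decompressor in a finally block is a simplification of the original's error handling.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.file.tfile.Compression.Algorithm;

// Eager-read pattern from Example 6: decompress the block once, release the
// decompressor, and serve subsequent reads from memory.
public class EagerBlockReadSketch {
  public static ByteArrayInputStream readBlockIntoMemory(Algorithm algo, InputStream compressedBlock,
      int bufferSize) throws IOException {
    Decompressor decompressor = algo.getDecompressor();
    try {
      InputStream in = algo.createDecompressionStream(compressedBlock, decompressor, bufferSize);
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      byte[] buf = new byte[bufferSize];
      int n;
      while ((n = in.read(buf)) != -1) {
        baos.write(buf, 0, n);
      }
      // the decompressed bytes can now be cached and re-read cheaply
      return new ByteArrayInputStream(baos.toByteArray());
    } finally {
      // unlike Example 3, nothing needs the decompressor once the block is in memory
      algo.returnDecompressor(decompressor);
    }
  }
}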


Note: The org.apache.hadoop.io.file.tfile.Compression.Algorithm class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their authors; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not reproduce this article without permission.