

Java Compression.Algorithm Code Examples

This article collects typical usage examples of org.apache.hadoop.hbase.io.compress.Compression.Algorithm, the compression-codec enum nested in Compression. If you are wondering what Compression.Algorithm does, how to use it, or want to see concrete examples, the curated code snippets below should help. You can also explore further usage examples of the enclosing class org.apache.hadoop.hbase.io.compress.Compression.


Fifteen Compression.Algorithm code examples are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
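
Before diving into the collected examples, the following minimal, self-contained sketch (not taken from any of the projects below; the class name and codec choice are purely illustrative) shows the typical lifecycle of a Compression.Algorithm value: resolve it by name, borrow a Compressor from its pool, and return the Compressor when finished.

import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.io.compress.Compressor;

public class CompressionAlgorithmDemo {
  public static void main(String[] args) {
    // Resolve an algorithm from its name, e.g. a value read from table metadata.
    Compression.Algorithm algo = Compression.Algorithm.valueOf("GZ");

    // Borrow a Compressor from the algorithm's pool and always return it when done.
    Compressor compressor = algo.getCompressor();
    try {
      System.out.println("Using codec: " + algo.getName());
    } finally {
      algo.returnCompressor(compressor);
    }
  }
}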

Example 1: getMockColumnFamiliesForCompression

import org.apache.hadoop.hbase.io.compress.Compression; // import the package/class the method depends on
/**
 * @return a map from column family names to compression algorithms for
 *         testing column family compression. Column family names have special characters
 */
private Map<String, Compression.Algorithm>
    getMockColumnFamiliesForCompression (int numCfs) {
  Map<String, Compression.Algorithm> familyToCompression = new HashMap<String, Compression.Algorithm>();
  // use column family names having special characters
  if (numCfs-- > 0) {
    familyToCompression.put("[email protected]#[email protected]#&", Compression.Algorithm.LZO);
  }
  if (numCfs-- > 0) {
    familyToCompression.put("Family2=asdads&!AASD", Compression.Algorithm.SNAPPY);
  }
  if (numCfs-- > 0) {
    familyToCompression.put("Family2=asdads&!AASD", Compression.Algorithm.GZ);
  }
  if (numCfs-- > 0) {
    familyToCompression.put("Family3", Compression.Algorithm.NONE);
  }
  return familyToCompression;
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: TestHFileOutputFormat.java

Example 2: generateColumnDescriptors

import org.apache.hadoop.hbase.io.compress.Compression; // import the package/class the method depends on
/**
 * Create a set of column descriptors with the combination of compression,
 * encoding, bloom codecs available.
 * @param prefix family names prefix
 * @return the list of column descriptors
 */
public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
  List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
  long familyId = 0;
  for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
    for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
      for (BloomType bloomType: BloomType.values()) {
        String name = String.format("%[email protected]#&-%[email protected]#", prefix, familyId);
        HColumnDescriptor htd = new HColumnDescriptor(name);
        htd.setCompressionType(compressionType);
        htd.setDataBlockEncoding(encodingType);
        htd.setBloomFilterType(bloomType);
        htds.add(htd);
        familyId++;
      }
    }
  }
  return htds;
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: HBaseTestingUtility.java

Example 3: testNullMetaBlocks

import org.apache.hadoop.hbase.io.compress.Compression; // import the package/class the method depends on
public void testNullMetaBlocks() throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  for (Compression.Algorithm compressAlgo : 
      HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
    Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
    FSDataOutputStream fout = createFSOutput(mFile);
    HFileContext meta = new HFileContextBuilder().withCompression(compressAlgo)
                        .withBlockSize(minBlockSize).build();
    Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withOutputStream(fout)
        .withFileContext(meta)
        .create();
    KeyValue kv = new KeyValue("foo".getBytes(), "f1".getBytes(), null, "value".getBytes());
    writer.append(kv);
    writer.close();
    fout.close();
    Reader reader = HFile.createReader(fs, mFile, cacheConf, conf);
    reader.loadFileInfo();
    assertNull(reader.getMetaBlock("non-existant", false));
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: TestHFile.java

Example 4: testCompression

import org.apache.hadoop.hbase.io.compress.Compression; // import the package/class the method depends on
public static void testCompression(Compression.Algorithm algo)
    throws IOException {
  if (compressionTestResults[algo.ordinal()] != null) {
    if (compressionTestResults[algo.ordinal()]) {
      return ; // already passed test, dont do it again.
    } else {
      // failed.
      throw new IOException("Compression algorithm '" + algo.getName() + "'" +
      " previously failed test.");
    }
  }

  try {
    Compressor c = algo.getCompressor();
    algo.returnCompressor(c);
    compressionTestResults[algo.ordinal()] = true; // passes
  } catch (Throwable t) {
    compressionTestResults[algo.ordinal()] = false; // failure
    throw new IOException(t);
  }
}
 
Developer: tenggyut, Project: HIndex, Lines: 22, Source: CompressionTest.java
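
A hedged sketch of how the check above is typically invoked (the surrounding fragment is illustrative and not taken from the HIndex project): probe a codec once before relying on it, and fall back to no compression if the native library is unavailable.

// Illustrative caller: probe SNAPPY before configuring a column family with it.
Compression.Algorithm algo = Compression.Algorithm.SNAPPY;
try {
  CompressionTest.testCompression(algo);
  // The codec works on this node; keep SNAPPY.
} catch (IOException e) {
  // Native codec unavailable or previously failed; fall back to uncompressed storage.
  algo = Compression.Algorithm.NONE;
}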

Example 5: createFileContext

import org.apache.hadoop.hbase.io.compress.Compression; // import the package/class the method depends on
private HFileContext createFileContext(Compression.Algorithm compression,
                                       boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context cryptoContext) {
    if (compression == null) {
        compression = HFile.DEFAULT_COMPRESSION_ALGORITHM;
    }
    HFileContext hFileContext = new HFileContextBuilder()
            .withIncludesMvcc(includeMVCCReadpoint)
            .withIncludesTags(includesTag)
            .withCompression(compression)
            .withCompressTags(family.isCompressTags())
            .withChecksumType(checksumType)
            .withBytesPerCheckSum(bytesPerChecksum)
            .withBlockSize(blocksize)
            .withHBaseCheckSum(true)
            .withDataBlockEncoding(family.getDataBlockEncoding())
            .withEncryptionContext(cryptoContext)
            .build();
    return hFileContext;
}
 
Developer: grokcoder, Project: pbase, Lines: 20, Source: HStore.java

Example 6: getEncodingContext

import org.apache.hadoop.hbase.io.compress.Compression; // import the package/class the method depends on
private HFileBlockEncodingContext getEncodingContext(Compression.Algorithm algo,
    DataBlockEncoding encoding) {
  DataBlockEncoder encoder = encoding.getEncoder();
  HFileContext meta = new HFileContextBuilder()
                      .withHBaseCheckSum(false)
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(includesTags)
                      .withCompression(algo).build();
  if (encoder != null) {
    return encoder.newDataBlockEncodingContext(encoding,
        HConstants.HFILEBLOCK_DUMMY_HEADER, meta);
  } else {
    return new HFileBlockDefaultEncodingContext(encoding,
        HConstants.HFILEBLOCK_DUMMY_HEADER, meta);
  }
}
 
Developer: grokcoder, Project: pbase, Lines: 17, Source: TestDataBlockEncoders.java

Example 7: createIndexWriter

import org.apache.hadoop.hbase.io.compress.Compression; // import the package/class the method depends on
public static IndexWriter createIndexWriter(final FileSystem fs, final Path indexPath,
    final short replication, final int blocksize, final Compression.Algorithm algorithm,
    final IndexKVComparator c, final Configuration conf) throws IOException {
  return new IndexWriter(fs, indexPath, replication, blocksize,
      algorithm == null ? IndexFile.DEFAULT_COMPRESSION_ALGORITHM : algorithm, conf,
      c == null ? IndexKeyValue.COMPARATOR : c);
}
 
Developer: fengchen8086, Project: ditb, Lines: 8, Source: StoreFile.java

Example 8: getCompression

import org.apache.hadoop.hbase.io.compress.Compression; // import the package/class the method depends on
/** @return compression type being used for the column family */
public Compression.Algorithm getCompression() {
  String n = getValue(COMPRESSION);
  if (n == null) {
    return Compression.Algorithm.NONE;
  }
  return Compression.Algorithm.valueOf(n.toUpperCase());
}
 
Developer: fengchen8086, Project: ditb, Lines: 9, Source: HColumnDescriptor.java
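
The getter above pairs with HColumnDescriptor.setCompressionType, which sets the same COMPRESSION attribute that getCompression reads. A minimal round trip (the family name "cf" is chosen purely for illustration) might look like this:

HColumnDescriptor family = new HColumnDescriptor("cf");
family.setCompressionType(Compression.Algorithm.SNAPPY);
// Later, e.g. when creating store file writers for this family:
Compression.Algorithm algo = family.getCompression();  // returns SNAPPY
// A family with no COMPRESSION value falls back to Compression.Algorithm.NONE, as the null check above shows.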

Example 9: Writer

import org.apache.hadoop.hbase.io.compress.Compression; // import the package/class the method depends on
/**
 * Constructor that takes a Path.
 * @param fs
 * @param path
 * @param blocksize
 * @param compress
 * @param comparator
 * @throws IOException
 */
public Writer(FileSystem fs, Path path, short replication, int blocksize,
  Compression.Algorithm compress,
  final IndexKeyComparator comparator)
throws IOException {
  this(fs.create(path,replication), blocksize, compress, comparator);
  this.closeOutputStream = true;
  this.name = path.toString();
  this.path = path;
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: IndexFile.java

Example 10: createFileContext

import org.apache.hadoop.hbase.io.compress.Compression; // import the package/class the method depends on
private HFileContext createFileContext(Compression.Algorithm compression,
    boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context cryptoContext) {
  if (compression == null) {
    compression = HFile.DEFAULT_COMPRESSION_ALGORITHM;
  }
  HFileContext hFileContext = new HFileContextBuilder().withIncludesMvcc(includeMVCCReadpoint)
      .withIncludesTags(includesTag).withCompression(compression)
      .withCompressTags(family.isCompressTags()).withChecksumType(checksumType)
      .withBytesPerCheckSum(bytesPerChecksum).withBlockSize(blocksize).withHBaseCheckSum(true)
      .withDataBlockEncoding(family.getDataBlockEncoding()).withEncryptionContext(cryptoContext)
      .withCreateTime(EnvironmentEdgeManager.currentTime()).build();
  return hFileContext;
}
 
Developer: fengchen8086, Project: ditb, Lines: 14, Source: HStore.java

Example 11: HFileBlockDefaultEncodingContext

import org.apache.hadoop.hbase.io.compress.Compression; // import the package/class the method depends on
/**
 * @param encoding encoding used
 * @param headerBytes dummy header bytes
 * @param fileContext HFile meta data
 */
public HFileBlockDefaultEncodingContext(DataBlockEncoding encoding, byte[] headerBytes,
    HFileContext fileContext) {
  this.encodingAlgo = encoding;
  this.fileContext = fileContext;
  Compression.Algorithm compressionAlgorithm =
      fileContext.getCompression() == null ? NONE : fileContext.getCompression();
  if (compressionAlgorithm != NONE) {
    compressor = compressionAlgorithm.getCompressor();
    compressedByteStream = new ByteArrayOutputStream();
    try {
      compressionStream =
          compressionAlgorithm.createPlainCompressionStream(
              compressedByteStream, compressor);
    } catch (IOException e) {
      throw new RuntimeException(
          "Could not create compression stream for algorithm "
              + compressionAlgorithm, e);
    }
  }

  Encryption.Context cryptoContext = fileContext.getEncryptionContext();
  if (cryptoContext != Encryption.Context.NONE) {
    cryptoByteStream = new ByteArrayOutputStream();
    iv = new byte[cryptoContext.getCipher().getIvLength()];
    new SecureRandom().nextBytes(iv);
  }

  dummyHeader = Preconditions.checkNotNull(headerBytes,
    "Please pass HConstants.HFILEBLOCK_DUMMY_HEADER instead of null for param headerBytes");
}
 
Developer: fengchen8086, Project: ditb, Lines: 36, Source: HFileBlockDefaultEncodingContext.java

Example 12: setupMockColumnFamiliesForCompression

import org.apache.hadoop.hbase.io.compress.Compression; // import the package/class the method depends on
private void setupMockColumnFamiliesForCompression(Table table,
    Map<String, Compression.Algorithm> familyToCompression) throws IOException {
  HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
  for (Entry<String, Compression.Algorithm> entry : familyToCompression.entrySet()) {
    mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
        .setMaxVersions(1)
        .setCompressionType(entry.getValue())
        .setBlockCacheEnabled(false)
        .setTimeToLive(0));
  }
  Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
}
 
Developer: fengchen8086, Project: ditb, Lines: 13, Source: TestHFileOutputFormat2.java

Example 13: getCompactionCompression

import org.apache.hadoop.hbase.io.compress.Compression; // import the package/class the method depends on
/** @return compression type being used for the column family for major
    compaction */
public Compression.Algorithm getCompactionCompression() {
  String n = getValue(COMPRESSION_COMPACT);
  if (n == null) {
    return getCompression();
  }
  return Compression.Algorithm.valueOf(n.toUpperCase());
}
 
Developer: fengchen8086, Project: ditb, Lines: 10, Source: HColumnDescriptor.java

Example 14: bloomAndCompressionCombinations

import org.apache.hadoop.hbase.io.compress.Compression; // import the package/class the method depends on
/**
 * Create all combinations of Bloom filters and compression algorithms for
 * testing.
 */
private static List<Object[]> bloomAndCompressionCombinations() {
  List<Object[]> configurations = new ArrayList<Object[]>();
  for (Compression.Algorithm comprAlgo :
       HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
    for (BloomType bloomType : BloomType.values()) {
      configurations.add(new Object[] { comprAlgo, bloomType });
    }
  }
  return Collections.unmodifiableList(configurations);
}
 
Developer: fengchen8086, Project: ditb, Lines: 15, Source: HBaseTestingUtility.java
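
Lists built this way are usually fed to a JUnit parameterized runner. The sketch below shows that pattern; the test class, its fields, and the assumption that bloomAndCompressionCombinations() (or the list it builds) is reachable from the test are illustrative rather than taken from the ditb project.

@RunWith(Parameterized.class)
public class TestBloomAndCompression {
  private final Compression.Algorithm comprAlgo;
  private final BloomType bloomType;

  // Each Object[] from the combinations list becomes one constructor invocation.
  public TestBloomAndCompression(Compression.Algorithm comprAlgo, BloomType bloomType) {
    this.comprAlgo = comprAlgo;
    this.bloomType = bloomType;
  }

  @Parameterized.Parameters
  public static List<Object[]> parameters() {
    return bloomAndCompressionCombinations(); // the method shown above, assumed accessible here
  }
}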

Example 15: createTestBlockStr

import org.apache.hadoop.hbase.io.compress.Compression; // import the package/class the method depends on
private String createTestBlockStr(Compression.Algorithm algo,
    int correctLength) throws IOException {
  Writer hbw = createTestV2Block(algo);
  byte[] testV2Block = hbw.getHeaderAndData();
  int osOffset = HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM + 9;
  if (testV2Block.length == correctLength) {
    // Force-set the "OS" field of the gzip header to 3 (Unix) to avoid
    // variations across operating systems.
    // See http://www.gzip.org/zlib/rfc-gzip.html for gzip format.
    testV2Block[osOffset] = 3;
  }
  return Bytes.toStringBinary(testV2Block);
}
 
Developer: fengchen8086, Project: ditb, Lines: 14, Source: TestHFileBlockCompatibility.java


Note: The org.apache.hadoop.hbase.io.compress.Compression.Algorithm examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors; copyright remains with the original authors, and any distribution or use is subject to each project's license. Do not reproduce without permission.