

Java DataBlockEncoding Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.encoding.DataBlockEncoding. If you are unsure what DataBlockEncoding does or how to use it, the curated examples below should help.

The DataBlockEncoding class belongs to the org.apache.hadoop.hbase.io.encoding package. Fifteen code examples are shown below, sorted by popularity.
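
Before diving into the examples, here is a minimal quickstart sketch of the enum itself. It assumes the HBase 1.x client API that all of the examples below target; the table and family names are placeholders.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;

public class DataBlockEncodingQuickstart {
  public static void main(String[] args) {
    // List the encodings this HBase version supports
    // (NONE, PREFIX, DIFF, FAST_DIFF, ...), each with a stable two-byte id.
    for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
      System.out.println(encoding.name() + " -> id " + encoding.getId());
    }

    // Typical use: enable an encoding on a column family at table-creation time.
    HTableDescriptor table = new HTableDescriptor(TableName.valueOf("demo_table"));
    HColumnDescriptor family = new HColumnDescriptor("cf");
    family.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
    table.addFamily(family);
  }
}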

Example 1: createFromFileInfo

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the required package/class
public static HFileDataBlockEncoder createFromFileInfo(
    FileInfo fileInfo) throws IOException {
  DataBlockEncoding encoding = DataBlockEncoding.NONE;
  byte[] dataBlockEncodingType = fileInfo.get(DATA_BLOCK_ENCODING);
  if (dataBlockEncodingType != null) {
    String dataBlockEncodingStr = Bytes.toString(dataBlockEncodingType);
    try {
      encoding = DataBlockEncoding.valueOf(dataBlockEncodingStr);
    } catch (IllegalArgumentException ex) {
      throw new IOException("Invalid data block encoding type in file info: "
        + dataBlockEncodingStr, ex);
    }
  }

  if (encoding == DataBlockEncoding.NONE) {
    return NoOpDataBlockEncoder.INSTANCE;
  }
  return new HFileDataBlockEncoderImpl(encoding);
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: HFileDataBlockEncoderImpl.java
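
The string stored under the DATA_BLOCK_ENCODING file-info key is simply the enum constant's name, so the lookup above is the standard Java name()/valueOf() round-trip. A minimal standalone sketch of that round-trip:

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.util.Bytes;

// What a writer stores under the DATA_BLOCK_ENCODING key...
byte[] stored = Bytes.toBytes(DataBlockEncoding.FAST_DIFF.name()); // "FAST_DIFF"
// ...and how createFromFileInfo() decodes it back:
DataBlockEncoding decoded = DataBlockEncoding.valueOf(Bytes.toString(stored));
assert decoded == DataBlockEncoding.FAST_DIFF;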

Example 2: AbstractHFileWriter

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the required package/class
public AbstractHFileWriter(CacheConfig cacheConf,
    FSDataOutputStream outputStream, Path path, 
    KVComparator comparator, HFileContext fileContext) {
  this.outputStream = outputStream;
  this.path = path;
  this.name = path != null ? path.getName() : outputStream.toString();
  this.hFileContext = fileContext;
  DataBlockEncoding encoding = hFileContext.getDataBlockEncoding();
  if (encoding != DataBlockEncoding.NONE) {
    this.blockEncoder = new HFileDataBlockEncoderImpl(encoding);
  } else {
    this.blockEncoder = NoOpDataBlockEncoder.INSTANCE;
  }
  this.comparator = comparator != null ? comparator
      : KeyValue.COMPARATOR;

  closeOutputStream = path != null;
  this.cacheConf = cacheConf;
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: AbstractHFileWriter.java

Example 3: updateCurrentBlock

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the required package/class
/**
 * Updates the current block to be the given {@link HFileBlock}. Seeks to
 * the first key/value pair.
 *
 * @param newBlock the block to make current
 * @throws CorruptHFileException
 */
private void updateCurrentBlock(HFileBlock newBlock) throws CorruptHFileException {
  block = newBlock;

  // sanity checks
  if (block.getBlockType() != BlockType.ENCODED_DATA) {
    throw new IllegalStateException(
        "EncodedScanner works only on encoded data blocks");
  }
  short dataBlockEncoderId = block.getDataBlockEncodingId();
  if (!DataBlockEncoding.isCorrectEncoder(dataBlockEncoder, dataBlockEncoderId)) {
    String encoderCls = dataBlockEncoder.getClass().getName();
    throw new CorruptHFileException("Encoder " + encoderCls
      + " doesn't support data block encoding "
      + DataBlockEncoding.getNameFromId(dataBlockEncoderId));
  }

  seeker.setCurrentBuffer(getEncodedBuffer(newBlock));
  blockFetches.incrementAndGet();

  // Reset the next indexed key
  this.nextIndexedKey = null;
}
 
Developer: fengchen8086, Project: ditb, Lines: 30, Source: HFileReaderV2.java
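
Every ENCODED_DATA block records the two-byte id of the encoder that produced it, and each DataBlockEncoding constant carries a matching id; that is what isCorrectEncoder() compares. A small sketch of the id round-trip, assuming the same HBase 1.x API as above:

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;

short id = DataBlockEncoding.FAST_DIFF.getId();
// Resolve a human-readable name from a raw block-header id,
// as the CorruptHFileException message above does:
String name = DataBlockEncoding.getNameFromId(id); // "FAST_DIFF"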

Example 4: createPreSplitLoadTestTable

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the required package/class
/**
 * Creates a pre-split table for load testing. If the table already exists,
 * logs a warning and continues.
 * @return the number of regions the table was split into
 */
public static int createPreSplitLoadTestTable(Configuration conf,
    TableName tableName, byte[][] columnFamilies, Algorithm compression,
    DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication,
    Durability durability)
        throws IOException {
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.setDurability(durability);
  desc.setRegionReplication(regionReplication);
  HColumnDescriptor[] hcds = new HColumnDescriptor[columnFamilies.length];
  for (int i = 0; i < columnFamilies.length; i++) {
    HColumnDescriptor hcd = new HColumnDescriptor(columnFamilies[i]);
    hcd.setDataBlockEncoding(dataBlockEncoding);
    hcd.setCompressionType(compression);
    hcds[i] = hcd;
  }
  return createPreSplitLoadTestTable(conf, desc, hcds, numRegionsPerServer);
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: HBaseTestingUtility.java
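
A hypothetical invocation of this helper; the table name, families, sizing, SNAPPY, and SYNC_WAL below are illustrative placeholders, not values from the source:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.util.Bytes;

Configuration conf = HBaseConfiguration.create();
int regions = HBaseTestingUtility.createPreSplitLoadTestTable(
    conf,
    TableName.valueOf("load_test"),
    new byte[][] { Bytes.toBytes("cf1"), Bytes.toBytes("cf2") },
    Compression.Algorithm.SNAPPY,
    DataBlockEncoding.FAST_DIFF,
    5,   // regions per server
    1,   // region replication
    Durability.SYNC_WAL);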

Example 5: generateColumnDescriptors

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the required package/class
/**
 * Create a set of column descriptors with the combination of compression,
 * encoding, bloom codecs available.
 * @param prefix family names prefix
 * @return the list of column descriptors
 */
public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
  List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
  long familyId = 0;
  for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
    for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
      for (BloomType bloomType: BloomType.values()) {
        String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
        HColumnDescriptor htd = new HColumnDescriptor(name);
        htd.setCompressionType(compressionType);
        htd.setDataBlockEncoding(encodingType);
        htd.setBloomFilterType(bloomType);
        htds.add(htd);
        familyId++;
      }
    }
  }
  return htds;
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: HBaseTestingUtility.java
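
A hypothetical way to consume the generated descriptors, e.g. building one table that covers every compression/encoding/bloom combination:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("all_combinations"));
for (HColumnDescriptor hcd : HBaseTestingUtility.generateColumnDescriptors("test")) {
  desc.addFamily(hcd); // one family per compression/encoding/bloom combination
}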

Example 6: getMockColumnFamiliesForDataBlockEncoding

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the required package/class
/**
 * @return a map from column family names to data block encodings for
 *         testing column family encoding. Column family names contain special characters.
 */
private Map<String, DataBlockEncoding>
    getMockColumnFamiliesForDataBlockEncoding(int numCfs) {
  Map<String, DataBlockEncoding> familyToDataBlockEncoding =
      new HashMap<String, DataBlockEncoding>();
  // use column family names having special characters
  if (numCfs-- > 0) {
    familyToDataBlockEncoding.put("Family1!@#!@#&", DataBlockEncoding.DIFF);
  }
  if (numCfs-- > 0) {
    familyToDataBlockEncoding.put("Family2=asdads&!AASD",
        DataBlockEncoding.FAST_DIFF);
  }
  if (numCfs-- > 0) {
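    // Note: same family name as the previous entry, so this put overwrites FAST_DIFF with PREFIX.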
    familyToDataBlockEncoding.put("Family2=asdads&!AASD",
        DataBlockEncoding.PREFIX);
  }
  if (numCfs-- > 0) {
    familyToDataBlockEncoding.put("Family3", DataBlockEncoding.NONE);
  }
  return familyToDataBlockEncoding;
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: TestHFileOutputFormat2.java

Example 7: testSplitStoreFileWithDifferentEncoding

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the required package/class
private void testSplitStoreFileWithDifferentEncoding(DataBlockEncoding bulkloadEncoding,
    DataBlockEncoding cfEncoding) throws IOException {
  Path dir = util.getDataTestDirOnTestFS("testSplitHFileWithDifferentEncoding");
  FileSystem fs = util.getTestFileSystem();
  Path testIn = new Path(dir, "testhfile");
  HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);
  familyDesc.setDataBlockEncoding(cfEncoding);
  HFileTestUtil.createHFileWithDataBlockEncoding(
      util.getConfiguration(), fs, testIn, bulkloadEncoding,
      FAMILY, QUALIFIER, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 1000);

  Path bottomOut = new Path(dir, "bottom.out");
  Path topOut = new Path(dir, "top.out");

  LoadIncrementalHFiles.splitStoreFile(
      util.getConfiguration(), testIn,
      familyDesc, Bytes.toBytes("ggg"),
      bottomOut,
      topOut);

  int rowCount = verifyHFile(bottomOut);
  rowCount += verifyHFile(topOut);
  assertEquals(1000, rowCount);
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: TestLoadIncrementalHFiles.java

Example 8: parseColumnFamilyOptions

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the required package/class
private void parseColumnFamilyOptions(CommandLine cmd) {
  String dataBlockEncodingStr = cmd.getOptionValue(OPT_DATA_BLOCK_ENCODING);
  dataBlockEncodingAlgo = dataBlockEncodingStr == null ? null :
      DataBlockEncoding.valueOf(dataBlockEncodingStr);

  String compressStr = cmd.getOptionValue(OPT_COMPRESSION);
  compressAlgo = compressStr == null ? Compression.Algorithm.NONE :
      Compression.Algorithm.valueOf(compressStr);

  String bloomStr = cmd.getOptionValue(OPT_BLOOM);
  bloomType = bloomStr == null ? BloomType.ROW :
      BloomType.valueOf(bloomStr);

  inMemoryCF = cmd.hasOption(OPT_INMEMORY);
  if (cmd.hasOption(OPT_ENCRYPTION)) {
    cipher = Encryption.getCipher(conf, cmd.getOptionValue(OPT_ENCRYPTION));
  }

}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: LoadTestTool.java
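
DataBlockEncoding.valueOf() is a plain enum lookup: case-sensitive, throwing IllegalArgumentException for unknown names (Example 1 wraps exactly that failure in an IOException). A hedged sketch of a friendlier CLI parser — parseEncoding is a hypothetical helper, not part of HBase, and unlike the code above it defaults to NONE rather than null:

import java.util.Arrays;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;

static DataBlockEncoding parseEncoding(String value) {
  if (value == null) {
    return DataBlockEncoding.NONE; // default when the option is absent
  }
  try {
    return DataBlockEncoding.valueOf(value.toUpperCase());
  } catch (IllegalArgumentException e) {
    throw new IllegalArgumentException("Unknown data block encoding: " + value
        + "; valid values: " + Arrays.toString(DataBlockEncoding.values()), e);
  }
}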

Example 9: majorCompactionWithDataBlockEncoding

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the required package/class
public void majorCompactionWithDataBlockEncoding(boolean inCacheOnly)
    throws Exception {
  Map<Store, HFileDataBlockEncoder> replaceBlockCache =
      new HashMap<Store, HFileDataBlockEncoder>();
  for (Store store : r.getStores()) {
    HFileDataBlockEncoder blockEncoder = store.getDataBlockEncoder();
    replaceBlockCache.put(store, blockEncoder);
    final DataBlockEncoding inCache = DataBlockEncoding.PREFIX;
    final DataBlockEncoding onDisk = inCacheOnly ? DataBlockEncoding.NONE :
        inCache;
    ((HStore)store).setDataBlockEncoderInTest(new HFileDataBlockEncoderImpl(onDisk));
  }

  majorCompaction();

  // restore settings
  for (Entry<Store, HFileDataBlockEncoder> entry : replaceBlockCache.entrySet()) {
    ((HStore)entry.getKey()).setDataBlockEncoderInTest(entry.getValue());
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 21, Source: TestMajorCompaction.java

Example 10: initHRegion

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the required package/class
private static HRegion initHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
    String callingMethod, Configuration conf, byte[]... families)
    throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  for(byte [] family : families) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    hcd.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
    htd.addFamily(hcd);
  }
  HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
  Path path = new Path(DIR + callingMethod);
  FileSystem fs = FileSystem.get(conf);
  if (fs.exists(path)) {
    if (!fs.delete(path, true)) {
      throw new IOException("Failed delete of " + path);
    }
  }
  return HRegion.createHRegion(info, path, conf, htd);
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: TestJoinedScanners.java

Example 11: testCreateWriter

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the required package/class
/**
 * Verify that compression and data block encoding are respected by the
 * Store.createWriterInTmp() method, used on store flush.
 */
@Test
public void testCreateWriter() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(conf);

  HColumnDescriptor hcd = new HColumnDescriptor(family);
  hcd.setCompressionType(Compression.Algorithm.GZ);
  hcd.setDataBlockEncoding(DataBlockEncoding.DIFF);
  init(name.getMethodName(), conf, hcd);

  // Test createWriterInTmp()
  StoreFile.Writer writer = store.createWriterInTmp(4, hcd.getCompression(), false, true, false);
  Path path = writer.getPath();
  writer.append(new KeyValue(row, family, qf1, Bytes.toBytes(1)));
  writer.append(new KeyValue(row, family, qf2, Bytes.toBytes(2)));
  writer.append(new KeyValue(row2, family, qf1, Bytes.toBytes(3)));
  writer.append(new KeyValue(row2, family, qf2, Bytes.toBytes(4)));
  writer.close();

  // Verify that compression and encoding settings are respected
  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
  Assert.assertEquals(hcd.getCompressionType(), reader.getCompressionAlgorithm());
  Assert.assertEquals(hcd.getDataBlockEncoding(), reader.getDataBlockEncoding());
  reader.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 30, Source: TestStore.java

Example 12: readBlock

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the required package/class
@Override
public HFileBlock readBlock(long offset, long onDiskSize,
    boolean cacheBlock, boolean pread, boolean isCompaction,
    boolean updateCacheMetrics, BlockType expectedBlockType,
    DataBlockEncoding expectedDataBlockEncoding)
    throws IOException {
  if (offset == prevOffset && onDiskSize == prevOnDiskSize &&
      pread == prevPread) {
    hitCount += 1;
    return prevBlock;
  }

  missCount += 1;
  prevBlock = realReader.readBlockData(offset, onDiskSize,
      -1, pread);
  prevOffset = offset;
  prevOnDiskSize = onDiskSize;
  prevPread = pread;

  return prevBlock;
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: TestHFileBlockIndex.java

Example 13: assertBuffersEqual

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the required package/class
static void assertBuffersEqual(ByteBuffer expectedBuffer,
    ByteBuffer actualBuffer, Compression.Algorithm compression,
    DataBlockEncoding encoding, boolean pread) {
  if (!actualBuffer.equals(expectedBuffer)) {
    int prefix = 0;
    int minLimit = Math.min(expectedBuffer.limit(), actualBuffer.limit());
    while (prefix < minLimit &&
        expectedBuffer.get(prefix) == actualBuffer.get(prefix)) {
      prefix++;
    }

    fail(String.format(
        "Content mismatch for %s, commonPrefix %d, expected %s, got %s",
        buildMessageDetails(compression, encoding, pread), prefix,
        nextBytesToStr(expectedBuffer, prefix),
        nextBytesToStr(actualBuffer, prefix)));
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: TestHFileBlock.java

Example 14: testEncodingWithCacheInternals

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the required package/class
private void testEncodingWithCacheInternals(boolean useTag) throws IOException {
  List<KeyValue> kvs = generator.generateTestKeyValues(60, useTag);
  HFileBlock block = getSampleHFileBlock(kvs, useTag);
  HFileBlock cacheBlock = createBlockOnDisk(kvs, block, useTag);

  LruBlockCache blockCache =
      new LruBlockCache(8 * 1024 * 1024, 32 * 1024);
  BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
  blockCache.cacheBlock(cacheKey, cacheBlock);

  HeapSize heapSize = blockCache.getBlock(cacheKey, false, false, true);
  assertTrue(heapSize instanceof HFileBlock);

  HFileBlock returnedBlock = (HFileBlock) heapSize;

  if (blockEncoder.getDataBlockEncoding() ==
      DataBlockEncoding.NONE) {
    assertEquals(block.getBufferWithHeader(),
        returnedBlock.getBufferWithHeader());
  } else {
    if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: TestHFileDataBlockEncoder.java

Example 15: getAllConfigurations

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the required package/class
/**
 * @return All possible data block encoding configurations
 */
@Parameters
public static Collection<Object[]> getAllConfigurations() {
  List<Object[]> configurations =
      new ArrayList<Object[]>();

  for (DataBlockEncoding diskAlgo : DataBlockEncoding.values()) {
    for (boolean includesMemstoreTS : new boolean[] { false, true }) {
      HFileDataBlockEncoder dbe = (diskAlgo == DataBlockEncoding.NONE) ? 
          NoOpDataBlockEncoder.INSTANCE : new HFileDataBlockEncoderImpl(diskAlgo);
      configurations.add(new Object[] { dbe, Boolean.valueOf(includesMemstoreTS) });
    }
  }

  return configurations;
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: TestHFileDataBlockEncoder.java
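
For context, a minimal JUnit 4 skeleton showing how the @Parameters output feeds a parameterized test's constructor — a hypothetical sketch, not the actual TestHFileDataBlockEncoder class:

import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

@RunWith(Parameterized.class)
public class DataBlockEncoderSketchTest {
  private final HFileDataBlockEncoder blockEncoder;
  private final boolean includesMemstoreTS;

  // JUnit invokes this constructor once per Object[] returned by getAllConfigurations().
  public DataBlockEncoderSketchTest(HFileDataBlockEncoder blockEncoder,
      boolean includesMemstoreTS) {
    this.blockEncoder = blockEncoder;
    this.includesMemstoreTS = includesMemstoreTS;
  }
}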


Note: the org.apache.hadoop.hbase.io.encoding.DataBlockEncoding examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects and their copyright remains with the original authors; consult the corresponding project's License before use or redistribution, and do not repost without permission.