

Java HColumnDescriptor.setDataBlockEncoding Method Code Examples

This article collects typical usages of the Java method org.apache.hadoop.hbase.HColumnDescriptor.setDataBlockEncoding. If you have been wondering what HColumnDescriptor.setDataBlockEncoding does and how to use it in practice, the curated code examples below may help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hbase.HColumnDescriptor.


The sections below present 8 code examples of HColumnDescriptor.setDataBlockEncoding, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
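Before the examples, here is a minimal, self-contained sketch of the typical call pattern: set a data block encoding on a column family descriptor, then create the table through an Admin instance. The table name "demo_table", the family name "cf", and the admin parameter are illustrative placeholders, not taken from any example below; the APIs match the HBase 1.x versions used in these snippets.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;

void createEncodedTable(Admin admin) throws java.io.IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo_table")); // hypothetical table
  HColumnDescriptor hcd = new HColumnDescriptor("cf"); // hypothetical column family
  hcd.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF); // encode data blocks with FAST_DIFF
  htd.addFamily(hcd);
  admin.createTable(htd);
}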

Example 1: testSplitStoreFileWithDifferentEncoding

import org.apache.hadoop.hbase.HColumnDescriptor; // import the package/class this method depends on
private void testSplitStoreFileWithDifferentEncoding(DataBlockEncoding bulkloadEncoding,
    DataBlockEncoding cfEncoding) throws IOException {
  Path dir = util.getDataTestDirOnTestFS("testSplitHFileWithDifferentEncoding");
  FileSystem fs = util.getTestFileSystem();
  Path testIn = new Path(dir, "testhfile");
  HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);
  familyDesc.setDataBlockEncoding(cfEncoding);
  HFileTestUtil.createHFileWithDataBlockEncoding(
      util.getConfiguration(), fs, testIn, bulkloadEncoding,
      FAMILY, QUALIFIER, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 1000);

  Path bottomOut = new Path(dir, "bottom.out");
  Path topOut = new Path(dir, "top.out");

  LoadIncrementalHFiles.splitStoreFile(
      util.getConfiguration(), testIn,
      familyDesc, Bytes.toBytes("ggg"),
      bottomOut,
      topOut);

  int rowCount = verifyHFile(bottomOut);
  rowCount += verifyHFile(topOut);
  assertEquals(1000, rowCount);
}
 
Developer: fengchen8086 | Project: ditb | Lines: 25 | Source: TestLoadIncrementalHFiles.java

Example 2: initHRegion

import org.apache.hadoop.hbase.HColumnDescriptor; // import the package/class this method depends on
private static HRegion initHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
    String callingMethod, Configuration conf, byte[]... families)
    throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  for(byte [] family : families) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    hcd.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
    htd.addFamily(hcd);
  }
  HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
  Path path = new Path(DIR + callingMethod);
  FileSystem fs = FileSystem.get(conf);
  if (fs.exists(path)) {
    if (!fs.delete(path, true)) {
      throw new IOException("Failed delete of " + path);
    }
  }
  return HRegion.createHRegion(info, path, conf, htd);
}
 
Developer: fengchen8086 | Project: ditb | Lines: 20 | Source: TestJoinedScanners.java

Example 3: testCreateWriter

import org.apache.hadoop.hbase.HColumnDescriptor; // import the package/class this method depends on
/**
 * Verify that compression and data block encoding are respected by the
 * Store.createWriterInTmp() method, used on store flush.
 */
@Test
public void testCreateWriter() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(conf);

  HColumnDescriptor hcd = new HColumnDescriptor(family);
  hcd.setCompressionType(Compression.Algorithm.GZ);
  hcd.setDataBlockEncoding(DataBlockEncoding.DIFF);
  init(name.getMethodName(), conf, hcd);

  // Test createWriterInTmp()
  StoreFile.Writer writer = store.createWriterInTmp(4, hcd.getCompression(), false, true, false);
  Path path = writer.getPath();
  writer.append(new KeyValue(row, family, qf1, Bytes.toBytes(1)));
  writer.append(new KeyValue(row, family, qf2, Bytes.toBytes(2)));
  writer.append(new KeyValue(row2, family, qf1, Bytes.toBytes(3)));
  writer.append(new KeyValue(row2, family, qf2, Bytes.toBytes(4)));
  writer.close();

  // Verify that compression and encoding settings are respected
  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
  Assert.assertEquals(hcd.getCompressionType(), reader.getCompressionAlgorithm());
  Assert.assertEquals(hcd.getDataBlockEncoding(), reader.getDataBlockEncoding());
  reader.close();
}
 
Developer: fengchen8086 | Project: ditb | Lines: 30 | Source: TestStore.java

Example 4: perform

import org.apache.hadoop.hbase.HColumnDescriptor; // import the package/class this method depends on
@Override
public void perform() throws Exception {
  HTableDescriptor tableDescriptor = admin.getTableDescriptor(tableName);
  HColumnDescriptor[] columnDescriptors = tableDescriptor.getColumnFamilies();

  if (columnDescriptors == null || columnDescriptors.length == 0) {
    return;
  }

  LOG.debug("Performing action: Changing encodings on " + tableName);
  // possible DataBlockEncoding ids
  int[] possibleIds = {0, 2, 3, 4, 6};
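  // assumed id-to-encoding mapping for this HBase version: 0=NONE, 2=PREFIX, 3=DIFF, 4=FAST_DIFF, 6=PREFIX_TREE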
  for (HColumnDescriptor descriptor : columnDescriptors) {
    short id = (short) possibleIds[random.nextInt(possibleIds.length)];
    descriptor.setDataBlockEncoding(DataBlockEncoding.getEncodingById(id));
    LOG.debug("Set encoding of column family " + descriptor.getNameAsString()
      + " to: " + descriptor.getDataBlockEncoding());
  }

  // Don't try the modify if we're stopping
  if (context.isStopping()) {
    return;
  }
  admin.modifyTable(tableName, tableDescriptor);
}
 
Developer: fengchen8086 | Project: ditb | Lines: 26 | Source: ChangeEncodingAction.java
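A practical note on Example 4: changing DATA_BLOCK_ENCODING on a live table only affects HFiles written from that point on; existing store files keep their previous encoding until compaction rewrites them, which is why readers must be able to decode any encoding still present on disk.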

Example 5: createTableWithNonDefaultProperties

import org.apache.hadoop.hbase.HColumnDescriptor; // import the package/class this method depends on
private void createTableWithNonDefaultProperties() throws Exception {
  final long startTime = System.currentTimeMillis();
  final String sourceTableNameAsString = STRING_TABLE_NAME + startTime;
  originalTableName = TableName.valueOf(sourceTableNameAsString);

  // column families with non-default properties
  HColumnDescriptor maxVersionsColumn = new HColumnDescriptor(MAX_VERSIONS_FAM);
  HColumnDescriptor bloomFilterColumn = new HColumnDescriptor(BLOOMFILTER_FAM);
  HColumnDescriptor dataBlockColumn = new HColumnDescriptor(COMPRESSED_FAM);
  HColumnDescriptor blockSizeColumn = new HColumnDescriptor(BLOCKSIZE_FAM);

  maxVersionsColumn.setMaxVersions(MAX_VERSIONS);
  bloomFilterColumn.setBloomFilterType(BLOOM_TYPE);
  dataBlockColumn.setDataBlockEncoding(DATA_BLOCK_ENCODING_TYPE);
  blockSizeColumn.setBlocksize(BLOCK_SIZE);

  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(sourceTableNameAsString));
  htd.addFamily(maxVersionsColumn);
  htd.addFamily(bloomFilterColumn);
  htd.addFamily(dataBlockColumn);
  htd.addFamily(blockSizeColumn);
  htd.setValue(TEST_CUSTOM_VALUE, TEST_CUSTOM_VALUE);
  htd.setConfiguration(TEST_CONF_CUSTOM_VALUE, TEST_CONF_CUSTOM_VALUE);
  assertTrue(htd.getConfiguration().size() > 0);

  admin.createTable(htd);
  Table original = new HTable(UTIL.getConfiguration(), originalTableName);
  originalTableName = TableName.valueOf(sourceTableNameAsString);
  originalTableDescriptor = admin.getTableDescriptor(originalTableName);
  originalTableDescription = originalTableDescriptor.toStringCustomizedValues();

  original.close();
}
 
Developer: fengchen8086 | Project: ditb | Lines: 34 | Source: TestSnapshotMetadata.java

Example 6: getDefaultColumnDescriptor

import org.apache.hadoop.hbase.HColumnDescriptor; // import the package/class this method depends on
public static HColumnDescriptor getDefaultColumnDescriptor(byte[] family) {
  HColumnDescriptor colDesc = new HColumnDescriptor(family);
  //    colDesc.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
  colDesc.setDataBlockEncoding(DataBlockEncoding.NONE);
  colDesc.setCompressionType(Compression.Algorithm.NONE);
  return colDesc;
}
 
Developer: fengchen8086 | Project: ditb | Lines: 8 | Source: IndexTableRelation.java

Example 7: testBlockMultiLimits

import org.apache.hadoop.hbase.HColumnDescriptor; // import the package/class this method depends on
@Test
public void testBlockMultiLimits() throws Exception {
  final TableName name = TableName.valueOf("testBlockMultiLimits");
  HTableDescriptor desc = new HTableDescriptor(name);
  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
  hcd.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
  desc.addFamily(hcd);
  TEST_UTIL.getHBaseAdmin().createTable(desc);
  Table t = TEST_UTIL.getConnection().getTable(name);

  final HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0);
  RpcServerInterface rpcServer = regionServer.getRpcServer();
  BaseSource s = rpcServer.getMetrics().getMetricsSource();
  long startingExceptions = METRICS_ASSERT.getCounter("exceptions", s);
  long startingMultiExceptions = METRICS_ASSERT.getCounter("exceptions.multiResponseTooLarge", s);

  byte[] row = Bytes.toBytes("TEST");
  byte[][] cols = new byte[][]{
      Bytes.toBytes("0"), // Get this
      Bytes.toBytes("1"), // Buffer
      Bytes.toBytes("2"), // Buffer
      Bytes.toBytes("3"), // Get This
      Bytes.toBytes("4"), // Buffer
      Bytes.toBytes("5"), // Buffer
  };

  // Set the value size so that one result will be less than MAX_SIZE,
  // but the block being referenced will be larger than MAX_SIZE.
  // This should cause the regionserver to try and send a result immediately.
  byte[] value = new byte[MAX_SIZE - 100];
  ThreadLocalRandom.current().nextBytes(value);

  for (byte[] col:cols) {
    Put p = new Put(row);
    p.addImmutable(FAMILY, col, value);
    t.put(p);
  }

  // Make sure that a flush happens
  try (final Admin admin = TEST_UTIL.getHBaseAdmin()) {
    admin.flush(name);
    TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return regionServer.getOnlineRegions(name).get(0).getMaxFlushedSeqId() > 3;
      }
    });
  }

  List<Get> gets = new ArrayList<>(2);
  Get g0 = new Get(row);
  g0.addColumn(FAMILY, cols[0]);
  gets.add(g0);

  Get g2 = new Get(row);
  g2.addColumn(FAMILY, cols[3]);
  gets.add(g2);

  Result[] results = t.get(gets);
  assertEquals(2, results.length);
  METRICS_ASSERT.assertCounterGt("exceptions", startingExceptions, s);
  METRICS_ASSERT.assertCounterGt("exceptions.multiResponseTooLarge",
      startingMultiExceptions, s);
}
 
Developer: fengchen8086 | Project: ditb | Lines: 65 | Source: TestMultiRespectsLimits.java

Example 8: testJoinedScanners

import org.apache.hadoop.hbase.HColumnDescriptor; // import the package/class this method depends on
@Test
public void testJoinedScanners() throws Exception {
  String[] dataNodeHosts = new String[] { "host1", "host2", "host3" };
  int regionServersCount = 3;

  HBaseTestingUtility htu = new HBaseTestingUtility();

  final int DEFAULT_BLOCK_SIZE = 1024*1024;
  htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
  htu.getConfiguration().setInt("dfs.replication", 1);
  htu.getConfiguration().setLong("hbase.hregion.max.filesize", 322122547200L);
  MiniHBaseCluster cluster = null;

  try {
    cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
    byte [][] families = {cf_essential, cf_joined};

    TableName tableName = TableName.valueOf(this.getClass().getSimpleName());
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for(byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      hcd.setDataBlockEncoding(blockEncoding);
      desc.addFamily(hcd);
    }
    htu.getHBaseAdmin().createTable(desc);
    Table ht = new HTable(htu.getConfiguration(), tableName);

    long rows_to_insert = 1000;
    int insert_batch = 20;
    long time = System.nanoTime();
    Random rand = new Random(time);

    LOG.info("Make " + Long.toString(rows_to_insert) + " rows, total size = "
      + Float.toString(rows_to_insert * valueWidth / 1024 / 1024) + " MB");

    byte [] val_large = new byte[valueWidth];

    List<Put> puts = new ArrayList<Put>();

    for (long i = 0; i < rows_to_insert; i++) {
      Put put = new Put(Bytes.toBytes(Long.toString(i)));
      if (rand.nextInt(100) <= selectionRatio) {
        put.add(cf_essential, col_name, flag_yes);
      } else {
        put.add(cf_essential, col_name, flag_no);
      }
      put.add(cf_joined, col_name, val_large);
      puts.add(put);
      if (puts.size() >= insert_batch) {
        ht.put(puts);
        puts.clear();
      }
    }
    if (puts.size() > 0) {
      ht.put(puts);
      puts.clear();
    }

    LOG.info("Data generated in "
      + Double.toString((System.nanoTime() - time) / 1000000000.0) + " seconds");

    boolean slow = true;
    for (int i = 0; i < 10; ++i) {
      runScanner(ht, slow);
      slow = !slow;
    }

    ht.close();
  } finally {
    if (cluster != null) {
      htu.shutdownMiniCluster();
    }
  }
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:75,代碼來源:TestJoinedScanners.java


Note: The org.apache.hadoop.hbase.HColumnDescriptor.setDataBlockEncoding examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code; do not reproduce without permission.