

Java HTableDescriptor.setConfiguration Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HTableDescriptor.setConfiguration. If you are wondering how HTableDescriptor.setConfiguration is used in practice, or are looking for concrete examples of it, the curated code samples below should help. You can also read further about other usages of org.apache.hadoop.hbase.HTableDescriptor, the class this method belongs to.


The following shows 6 code examples of the HTableDescriptor.setConfiguration method, sorted by popularity by default.
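Before the examples, here is a minimal sketch (not taken from the projects below) of the basic pattern: setConfiguration(key, value) stores a per-table override of an HBase configuration key in the table descriptor, and getConfigurationValue(key) reads it back. The table name, family name, and configuration key used here are illustrative assumptions only.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class SetConfigurationSketch {
  public static void main(String[] args) {
    // Hypothetical table and column family names, chosen only for illustration.
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo_table"));
    htd.addFamily(new HColumnDescriptor("cf"));

    // Store a table-scoped override of a standard HBase key; as example 6 below
    // demonstrates, such table-level values take precedence over hbase-site.xml.
    htd.setConfiguration("hbase.hstore.compaction.min", "5");

    // Read the override back from the descriptor.
    System.out.println(htd.getConfigurationValue("hbase.hstore.compaction.min"));
  }
}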

Example 1: createTableWithNonDefaultProperties

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
private void createTableWithNonDefaultProperties() throws Exception {
  final long startTime = System.currentTimeMillis();
  final String sourceTableNameAsString = STRING_TABLE_NAME + startTime;
  originalTableName = TableName.valueOf(sourceTableNameAsString);

  // configure several column families with non-default properties
  HColumnDescriptor maxVersionsColumn = new HColumnDescriptor(MAX_VERSIONS_FAM);
  HColumnDescriptor bloomFilterColumn = new HColumnDescriptor(BLOOMFILTER_FAM);
  HColumnDescriptor dataBlockColumn = new HColumnDescriptor(COMPRESSED_FAM);
  HColumnDescriptor blockSizeColumn = new HColumnDescriptor(BLOCKSIZE_FAM);

  maxVersionsColumn.setMaxVersions(MAX_VERSIONS);
  bloomFilterColumn.setBloomFilterType(BLOOM_TYPE);
  dataBlockColumn.setDataBlockEncoding(DATA_BLOCK_ENCODING_TYPE);
  blockSizeColumn.setBlocksize(BLOCK_SIZE);

  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(sourceTableNameAsString));
  htd.addFamily(maxVersionsColumn);
  htd.addFamily(bloomFilterColumn);
  htd.addFamily(dataBlockColumn);
  htd.addFamily(blockSizeColumn);
  htd.setValue(TEST_CUSTOM_VALUE, TEST_CUSTOM_VALUE);
  htd.setConfiguration(TEST_CONF_CUSTOM_VALUE, TEST_CONF_CUSTOM_VALUE);
  assertTrue(htd.getConfiguration().size() > 0);

  admin.createTable(htd);
  Table original = new HTable(UTIL.getConfiguration(), originalTableName);
  originalTableName = TableName.valueOf(sourceTableNameAsString);
  originalTableDescriptor = admin.getTableDescriptor(originalTableName);
  originalTableDescription = originalTableDescriptor.toStringCustomizedValues();

  original.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 34, Source file: TestSnapshotMetadata.java

Example 2: prepareData

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
private Store prepareData() throws IOException {
  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY, 
    FIFOCompactionPolicy.class.getName());
  desc.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, 
    DisabledRegionSplitPolicy.class.getName());
  HColumnDescriptor colDesc = new HColumnDescriptor(family);
  colDesc.setTimeToLive(1); // 1 sec
  desc.addFamily(colDesc);

  admin.createTable(desc);
  Table table = TEST_UTIL.getConnection().getTable(tableName);
  Random rand = new Random();
  TimeOffsetEnvironmentEdge edge =
      (TimeOffsetEnvironmentEdge) EnvironmentEdgeManager.getDelegate();
  for (int i = 0; i < 10; i++) {
    for (int j = 0; j < 10; j++) {
      byte[] value = new byte[128 * 1024];
      rand.nextBytes(value);
      table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
    }
    admin.flush(tableName);
    edge.increment(1001);
  }
  return getStoreWithName(tableName);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 32, Source file: TestFIFOCompactionPolicy.java

Example 3: testSanityCheckTTL

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Test  
public void testSanityCheckTTL() throws Exception
{
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
  TEST_UTIL.startMiniCluster(1);

  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  String tableName = this.tableName.getNameAsString()+"-TTL";
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY, 
    FIFOCompactionPolicy.class.getName());
  desc.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, 
    DisabledRegionSplitPolicy.class.getName());
  HColumnDescriptor colDesc = new HColumnDescriptor(family);
  desc.addFamily(colDesc);
  try {
    admin.createTable(desc);
    Assert.fail();
  } catch (Exception e) {
    // expected: FIFO compaction requires a TTL to be set on the column family
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 29, Source file: TestFIFOCompactionPolicy.java

Example 4: testSanityCheckMinVersion

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Test  
public void testSanityCheckMinVersion() throws Exception
{
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
  TEST_UTIL.startMiniCluster(1);

  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  String tableName = this.tableName.getNameAsString()+"-MinVersion";
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY, 
    FIFOCompactionPolicy.class.getName());
  desc.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, 
    DisabledRegionSplitPolicy.class.getName());
  HColumnDescriptor colDesc = new HColumnDescriptor(family);
  colDesc.setTimeToLive(1); // 1 sec
  colDesc.setMinVersions(1);
  desc.addFamily(colDesc);
  try {
    admin.createTable(desc);
    Assert.fail();
  } catch (Exception e) {
    // expected: FIFO compaction requires MIN_VERSIONS to be 0 on the column family
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 31, Source file: TestFIFOCompactionPolicy.java

Example 5: testSanityCheckBlockingStoreFiles

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Test  
public void testSanityCheckBlockingStoreFiles() throws Exception
{
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10);
  TEST_UTIL.startMiniCluster(1);

  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  String tableName = this.tableName.getNameAsString()+"-MinVersion";
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY, 
    FIFOCompactionPolicy.class.getName());
  desc.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, 
    DisabledRegionSplitPolicy.class.getName());
  HColumnDescriptor colDesc = new HColumnDescriptor(family);
  colDesc.setTimeToLive(1); // 1 sec
  desc.addFamily(colDesc);
  try {
    admin.createTable(desc);
    Assert.fail();
  } catch (Exception e) {
    // expected: hbase.hstore.blockingStoreFiles is set too low for FIFO compaction
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 30, Source file: TestFIFOCompactionPolicy.java

Example 6: testStoreUsesConfigurationFromHcdAndHtd

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Test
public void testStoreUsesConfigurationFromHcdAndHtd() throws Exception {
  final String CONFIG_KEY = "hbase.regionserver.thread.compaction.throttle";
  long anyValue = 10;

  // We'll check that it uses correct config and propagates it appropriately by going thru
  // the simplest "real" path I can find - "throttleCompaction", which just checks whether
  // a number we pass in is higher than some config value, inside compactionPolicy.
  Configuration conf = HBaseConfiguration.create();
  conf.setLong(CONFIG_KEY, anyValue);
  init(name.getMethodName() + "-xml", conf);
  Assert.assertTrue(store.throttleCompaction(anyValue + 1));
  Assert.assertFalse(store.throttleCompaction(anyValue));

  // HTD overrides XML.
  --anyValue;
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
  HColumnDescriptor hcd = new HColumnDescriptor(family);
  htd.setConfiguration(CONFIG_KEY, Long.toString(anyValue));
  init(name.getMethodName() + "-htd", conf, htd, hcd);
  Assert.assertTrue(store.throttleCompaction(anyValue + 1));
  Assert.assertFalse(store.throttleCompaction(anyValue));

  // HCD overrides them both.
  --anyValue;
  hcd.setConfiguration(CONFIG_KEY, Long.toString(anyValue));
  init(name.getMethodName() + "-hcd", conf, htd, hcd);
  Assert.assertTrue(store.throttleCompaction(anyValue + 1));
  Assert.assertFalse(store.throttleCompaction(anyValue));
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 31, Source file: TestStore.java


Note: The code examples for the org.apache.hadoop.hbase.HTableDescriptor.setConfiguration method in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please refer to each project's license before distributing or using the code; do not reproduce without permission.