

Java HTableDescriptor.setConfiguration Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HTableDescriptor.setConfiguration. If you are wondering what HTableDescriptor.setConfiguration does, how it is called, or what real-world uses of it look like, the selected code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.HTableDescriptor.


The sections below present six code examples of the HTableDescriptor.setConfiguration method, sorted by popularity by default.
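
Before the examples, here is a minimal self-contained sketch of the basic call pattern. The class name, table name, and configuration key below are illustrative choices for this sketch, not taken from the examples:

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class SetConfigurationSketch {
  public static void main(String[] args) {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo_table"));
    // Store a key/value pair in the descriptor's per-table configuration map.
    htd.setConfiguration("hbase.hstore.blockingStoreFiles", "32");
    // Read the value back; returns null for keys that were never set.
    String value = htd.getConfigurationValue("hbase.hstore.blockingStoreFiles");
    System.out.println(value); // prints 32
    // Remove the pair again if it is no longer wanted.
    htd.removeConfiguration("hbase.hstore.blockingStoreFiles");
  }
}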

Example 1: createTableWithNonDefaultProperties

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
private void createTableWithNonDefaultProperties() throws Exception {
  final long startTime = System.currentTimeMillis();
  final String sourceTableNameAsString = STRING_TABLE_NAME + startTime;
  originalTableName = TableName.valueOf(sourceTableNameAsString);

  // enable replication on a column family
  HColumnDescriptor maxVersionsColumn = new HColumnDescriptor(MAX_VERSIONS_FAM);
  HColumnDescriptor bloomFilterColumn = new HColumnDescriptor(BLOOMFILTER_FAM);
  HColumnDescriptor dataBlockColumn = new HColumnDescriptor(COMPRESSED_FAM);
  HColumnDescriptor blockSizeColumn = new HColumnDescriptor(BLOCKSIZE_FAM);

  maxVersionsColumn.setMaxVersions(MAX_VERSIONS);
  bloomFilterColumn.setBloomFilterType(BLOOM_TYPE);
  dataBlockColumn.setDataBlockEncoding(DATA_BLOCK_ENCODING_TYPE);
  blockSizeColumn.setBlocksize(BLOCK_SIZE);

  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(sourceTableNameAsString));
  htd.addFamily(maxVersionsColumn);
  htd.addFamily(bloomFilterColumn);
  htd.addFamily(dataBlockColumn);
  htd.addFamily(blockSizeColumn);
  htd.setValue(TEST_CUSTOM_VALUE, TEST_CUSTOM_VALUE);
  htd.setConfiguration(TEST_CONF_CUSTOM_VALUE, TEST_CONF_CUSTOM_VALUE);
  assertTrue(htd.getConfiguration().size() > 0);

  admin.createTable(htd);
  Table original = new HTable(UTIL.getConfiguration(), originalTableName);
  originalTableName = TableName.valueOf(sourceTableNameAsString);
  originalTableDescriptor = admin.getTableDescriptor(originalTableName);
  originalTableDescription = originalTableDescriptor.toStringCustomizedValues();

  original.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 34, Source: TestSnapshotMetadata.java

Example 2: prepareData

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
private Store prepareData() throws IOException {
  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY, 
    FIFOCompactionPolicy.class.getName());
  desc.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, 
    DisabledRegionSplitPolicy.class.getName());
  HColumnDescriptor colDesc = new HColumnDescriptor(family);
  colDesc.setTimeToLive(1); // 1 sec
  desc.addFamily(colDesc);

  admin.createTable(desc);
  Table table = TEST_UTIL.getConnection().getTable(tableName);
  Random rand = new Random();
  TimeOffsetEnvironmentEdge edge =
      (TimeOffsetEnvironmentEdge) EnvironmentEdgeManager.getDelegate();
  for (int i = 0; i < 10; i++) {
    for (int j = 0; j < 10; j++) {
      byte[] value = new byte[128 * 1024];
      rand.nextBytes(value);
      table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
    }
    admin.flush(tableName);
    edge.increment(1001);
  }
  return getStoreWithName(tableName);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 32, Source: TestFIFOCompactionPolicy.java
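
A brief follow-up, not part of the original test: the pairs written with setConfiguration above are persisted in the table's metadata, so they can be read back from a descriptor fetched through the Admin API. A minimal sketch, assuming the admin and tableName objects from the example:

  HTableDescriptor stored = admin.getTableDescriptor(tableName);
  // Both values come back exactly as written by setConfiguration in prepareData():
  String compactionPolicy = stored.getConfigurationValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
  String splitPolicy = stored.getConfigurationValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY);
  // compactionPolicy equals FIFOCompactionPolicy.class.getName()
  // splitPolicy equals DisabledRegionSplitPolicy.class.getName()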

Example 3: testSanityCheckTTL

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test  
public void testSanityCheckTTL() throws Exception
{
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
  TEST_UTIL.startMiniCluster(1);

  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  String tableName = this.tableName.getNameAsString()+"-TTL";
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY, 
    FIFOCompactionPolicy.class.getName());
  desc.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, 
    DisabledRegionSplitPolicy.class.getName());
  HColumnDescriptor colDesc = new HColumnDescriptor(family);
  desc.addFamily(colDesc);
  try {
    admin.createTable(desc);
    Assert.fail();
  } catch (Exception e) {
    // expected: the sanity check rejects FIFO compaction when the column family has no TTL
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 29, Source: TestFIFOCompactionPolicy.java

Example 4: testSanityCheckMinVersion

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test  
public void testSanityCheckMinVersion() throws Exception
{
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
  TEST_UTIL.startMiniCluster(1);

  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  String tableName = this.tableName.getNameAsString()+"-MinVersion";
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY, 
    FIFOCompactionPolicy.class.getName());
  desc.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, 
    DisabledRegionSplitPolicy.class.getName());
  HColumnDescriptor colDesc = new HColumnDescriptor(family);
  colDesc.setTimeToLive(1); // 1 sec
  colDesc.setMinVersions(1);
  desc.addFamily(colDesc);
  try {
    admin.createTable(desc);
    Assert.fail();
  } catch (Exception e) {
    // expected: the sanity check rejects FIFO compaction when MIN_VERSIONS is non-zero
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 31, Source: TestFIFOCompactionPolicy.java

Example 5: testSanityCheckBlockingStoreFiles

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test  
public void testSanityCheckBlockingStoreFiles() throws Exception
{
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10);
  TEST_UTIL.startMiniCluster(1);

  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  String tableName = this.tableName.getNameAsString()+"-MinVersion";
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY, 
    FIFOCompactionPolicy.class.getName());
  desc.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, 
    DisabledRegionSplitPolicy.class.getName());
  HColumnDescriptor colDesc = new HColumnDescriptor(family);
  colDesc.setTimeToLive(1); // 1 sec
  desc.addFamily(colDesc);
  try {
    admin.createTable(desc);
    Assert.fail();
  } catch (Exception e) {
    // expected: the sanity check rejects FIFO compaction when hbase.hstore.blockingStoreFiles is too low
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 30, Source: TestFIFOCompactionPolicy.java
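
Examples 3 through 5 all expect createTable to fail: FIFO compaction is rejected when the column family has no TTL, when MIN_VERSIONS is non-zero, or when the blocking-store-files limit is too low. For contrast, here is a sketch of a setup intended to satisfy all three checks. It is not taken from the original tests; the table and family names are illustrative, admin is assumed to exist as in the tests above, and whether the per-table override of hbase.hstore.blockingStoreFiles is honored by the master's sanity check depends on the HBase version in use:

  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("fifo_ok"));
  desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
      FIFOCompactionPolicy.class.getName());
  desc.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
      DisabledRegionSplitPolicy.class.getName());
  // Raise the blocking-store-files limit for this table only, instead of via hbase-site.xml.
  desc.setConfiguration(HStore.BLOCKING_STOREFILES_KEY, "1000");
  HColumnDescriptor colDesc = new HColumnDescriptor("f");
  colDesc.setTimeToLive(24 * 60 * 60); // FIFO compaction needs a finite TTL (here: one day)
  // MIN_VERSIONS is left at its default of 0, as required by the sanity check.
  desc.addFamily(colDesc);
  admin.createTable(desc); // expected to pass the checks that examples 3-5 trigger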

Example 6: testStoreUsesConfigurationFromHcdAndHtd

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test
public void testStoreUsesConfigurationFromHcdAndHtd() throws Exception {
  final String CONFIG_KEY = "hbase.regionserver.thread.compaction.throttle";
  long anyValue = 10;

  // We'll check that it uses correct config and propagates it appropriately by going thru
  // the simplest "real" path I can find - "throttleCompaction", which just checks whether
  // a number we pass in is higher than some config value, inside compactionPolicy.
  Configuration conf = HBaseConfiguration.create();
  conf.setLong(CONFIG_KEY, anyValue);
  init(name.getMethodName() + "-xml", conf);
  Assert.assertTrue(store.throttleCompaction(anyValue + 1));
  Assert.assertFalse(store.throttleCompaction(anyValue));

  // HTD overrides XML.
  --anyValue;
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
  HColumnDescriptor hcd = new HColumnDescriptor(family);
  htd.setConfiguration(CONFIG_KEY, Long.toString(anyValue));
  init(name.getMethodName() + "-htd", conf, htd, hcd);
  Assert.assertTrue(store.throttleCompaction(anyValue + 1));
  Assert.assertFalse(store.throttleCompaction(anyValue));

  // HCD overrides them both.
  --anyValue;
  hcd.setConfiguration(CONFIG_KEY, Long.toString(anyValue));
  init(name.getMethodName() + "-hcd", conf, htd, hcd);
  Assert.assertTrue(store.throttleCompaction(anyValue + 1));
  Assert.assertFalse(store.throttleCompaction(anyValue));
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 31, Source: TestStore.java
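
Example 6 demonstrates the precedence order for store configuration: a value set on the HColumnDescriptor wins over one set on the HTableDescriptor, which in turn wins over the plain Configuration (hbase-site.xml). The helper below is a hypothetical illustration of that lookup order, not part of the HBase API:

  // Hypothetical helper mirroring the HCD > HTD > Configuration precedence from example 6.
  static String resolve(Configuration conf, HTableDescriptor htd,
      HColumnDescriptor hcd, String key) {
    String value = hcd.getConfigurationValue(key); // column-family level wins first
    if (value == null) {
      value = htd.getConfigurationValue(key);      // then the per-table value
    }
    return value != null ? value : conf.get(key);  // finally the XML/Configuration value
  }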


Note: The org.apache.hadoop.hbase.HTableDescriptor.setConfiguration examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. When distributing or using the code, please follow the corresponding project's license; do not repost without permission.