

Java HTableDescriptor.setMaxFileSize Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HTableDescriptor.setMaxFileSize. If you are unsure what setMaxFileSize does or how to call it, the curated examples below may help. You can also browse further usage examples for the enclosing class, org.apache.hadoop.hbase.HTableDescriptor.


The sections below present four code examples of HTableDescriptor.setMaxFileSize, ordered by popularity.
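Before the full examples, here is a minimal sketch of the method in isolation. setMaxFileSize(long) sets the table-level MAX_FILESIZE attribute, which the region split policy consults instead of the cluster-wide hbase.hregion.max.filesize default when deciding whether a region has grown large enough to split. The sketch is an illustration, not taken from the projects below: it assumes an HBase 1.x client on the classpath and a reachable cluster, and the table name "demo" and column family "cf" are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SetMaxFileSizeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // "demo" and "cf" are hypothetical names used for illustration only.
      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo"));
      htd.addFamily(new HColumnDescriptor("cf"));
      // Regions of this table may split once a store grows past 20 GB,
      // regardless of the cluster-wide hbase.hregion.max.filesize setting.
      htd.setMaxFileSize(20L * 1024 * 1024 * 1024);
      admin.createTable(htd);
    }
  }
}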

Example 1: testModifyTable

This test doubles the table's max file size via a ModifyTableProcedure and verifies the change is persisted; a second round toggles the read-only flag and doubles the memstore flush size.

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test(timeout=60000)
public void testModifyTable() throws Exception {
  final TableName tableName = TableName.valueOf("testModifyTable");
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();

  MasterProcedureTestingUtility.createTable(procExec, tableName, null, "cf");
  UTIL.getHBaseAdmin().disableTable(tableName);

  // Modify the table descriptor
  HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));

  // Test 1: Modify the max file size (region replication is bumped alongside it)
  long newMaxFileSize = htd.getMaxFileSize() * 2;
  htd.setMaxFileSize(newMaxFileSize);
  htd.setRegionReplication(3);

  long procId1 = ProcedureTestingUtility.submitAndWait(
      procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd));
  ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId1));

  HTableDescriptor currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
  assertEquals(newMaxFileSize, currentHtd.getMaxFileSize());

  // Test 2: Modify multiple properties
  boolean newReadOnlyOption = !htd.isReadOnly();
  long newMemStoreFlushSize = htd.getMemStoreFlushSize() * 2;
  htd.setReadOnly(newReadOnlyOption);
  htd.setMemStoreFlushSize(newMemStoreFlushSize);

  long procId2 = ProcedureTestingUtility.submitAndWait(
      procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd));
  ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId2));

  currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
  assertEquals(newReadOnlyOption, currentHtd.isReadOnly());
  assertEquals(newMemStoreFlushSize, currentHtd.getMemStoreFlushSize());
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 38, Source: TestModifyTableProcedure.java

Example 2: setupTables

This integration-test setup pins the max file size to Long.MAX_VALUE so that no region splits occur while the MTTR test runs.

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
private static void setupTables() throws IOException {
  // Get the table name.
  tableName = TableName.valueOf(util.getConfiguration()
      .get("hbase.IntegrationTestMTTR.tableName", "IntegrationTestMTTR"));

  loadTableName = TableName.valueOf(util.getConfiguration()
      .get("hbase.IntegrationTestMTTR.loadTableName", "IntegrationTestMTTRLoadTestTool"));

  if (util.getHBaseAdmin().tableExists(tableName)) {
    util.deleteTable(tableName);
  }

  if (util.getHBaseAdmin().tableExists(loadTableName)) {
    util.deleteTable(loadTableName);
  }

  // Create the table.  If this fails then fail everything.
  HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);

  // Make the max file size huge so that splits don't happen during the test.
  tableDescriptor.setMaxFileSize(Long.MAX_VALUE);

  HColumnDescriptor descriptor = new HColumnDescriptor(FAMILY);
  descriptor.setMaxVersions(1);
  tableDescriptor.addFamily(descriptor);
  util.getHBaseAdmin().createTable(tableDescriptor);

  // Setup the table for LoadTestTool
  int ret = loadTool.run(new String[]{"-tn", loadTableName.getNameAsString(), "-init_only"});
  assertEquals("Failed to initialize LoadTestTool", 0, ret);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 32, Source: IntegrationTestMTTR.java

Example 3: main

This load generator creates an optionally pre-split table with a 10 GB max file size under ConstantSizeRegionSplitPolicy, then writes random values of three sizes into three column families.

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
  int numRegions = Integer.parseInt(args[0]);
  long numRows = Long.parseLong(args[1]);

  HTableDescriptor htd = new HTableDescriptor(TABLENAME);
  htd.setMaxFileSize(10L * 1024 * 1024 * 1024);
  htd.setValue(HTableDescriptor.SPLIT_POLICY, ConstantSizeRegionSplitPolicy.class.getName());
  htd.addFamily(new HColumnDescriptor(FAMILY1));
  htd.addFamily(new HColumnDescriptor(FAMILY2));
  htd.addFamily(new HColumnDescriptor(FAMILY3));

  Configuration conf = HBaseConfiguration.create();
  Connection conn = ConnectionFactory.createConnection(conf);
  Admin admin = conn.getAdmin();
  if (admin.tableExists(TABLENAME)) {
    admin.disableTable(TABLENAME);
    admin.deleteTable(TABLENAME);
  }
  if (numRegions >= 3) {
    byte[] startKey = new byte[16];
    byte[] endKey = new byte[16];
    Arrays.fill(endKey, (byte) 0xFF);
    admin.createTable(htd, startKey, endKey, numRegions);
  } else {
    admin.createTable(htd);
  }
  admin.close();

  Table table = conn.getTable(TABLENAME);
  byte[] qf = Bytes.toBytes("qf");
  Random rand = new Random();
  byte[] value1 = new byte[16];
  byte[] value2 = new byte[256];
  byte[] value3 = new byte[4096];
  for (long i = 0; i < numRows; i++) {
    Put put = new Put(Hashing.md5().hashLong(i).asBytes());
    rand.setSeed(i);
    rand.nextBytes(value1);
    rand.nextBytes(value2);
    rand.nextBytes(value3);
    put.addColumn(FAMILY1, qf, value1);
    put.addColumn(FAMILY2, qf, value2);
    put.addColumn(FAMILY3, qf, value3);
    table.put(put);
    if (i % 10000 == 0) {
      LOG.info(i + " rows put");
    }
  }
  table.close();
  conn.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 52, Source: TestPerColumnFamilyFlush.java

Example 4: perform

This chaos action repeatedly shrinks a live table's max file size by roughly 10% per invocation (with some jitter, bounded near 1 GB) to provoke region splits.

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Override
public void perform() throws Exception {
  HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
  Admin admin = util.getHBaseAdmin();
  HTableDescriptor htd = admin.getTableDescriptor(tableName);

  // Try and get the current value.
  long currentValue = htd.getMaxFileSize();

  // If the current value is not set use the default for the cluster.
  // If configs are really weird this might not work.
  // That's ok. We're trying to cause chaos.
  if (currentValue <= 0) {
    currentValue =
        context.getHBaseCluster().getConf().getLong(HConstants.HREGION_MAX_FILESIZE,
            HConstants.DEFAULT_MAX_FILE_SIZE);
  }

  // Decrease by 10% at a time.
  long newValue = (long) (currentValue * 0.9);

  // We don't want to go too far below 1gb.
  // So go to about 1gb +/- 512 on each side.
  newValue = Math.max(minFileSize, newValue) - (512 - random.nextInt(1024));

  // Change the table descriptor.
  htd.setMaxFileSize(newValue);

  // Don't try the modify if we're stopping
  if (context.isStopping()) {
    return;
  }

  // modify the table.
  admin.modifyTable(tableName, htd);

  // Sleep some time.
  if (sleepTime > 0) {
    Thread.sleep(sleepTime);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 42, Source: DecreaseMaxHFileSizeAction.java


Note: The org.apache.hadoop.hbase.HTableDescriptor.setMaxFileSize examples in this article were compiled from open-source code and documentation platforms such as GitHub and MSDocs. The code fragments were selected from community open-source projects, and copyright remains with the original authors; consult each project's license before distributing or reusing the code. Do not reproduce this article without permission.