

Java HBaseTestingUtility.createTable Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HBaseTestingUtility.createTable. If you are wondering what HBaseTestingUtility.createTable does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples for the enclosing class, org.apache.hadoop.hbase.HBaseTestingUtility.


Six code examples of the HBaseTestingUtility.createTable method are shown below, ordered by popularity.
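Before the examples, here is a minimal, self-contained sketch of the typical pattern: start a mini-cluster, create a table with createTable, write a row, and shut everything down. It assumes an HBase 1.x-era client API (matching the examples below); the table name "demo", family "cf", and class name are placeholders, not taken from any example.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();                           // in-process HDFS + ZooKeeper + HBase
    try {
      byte[] family = Bytes.toBytes("cf");             // hypothetical column family
      Table table = util.createTable(TableName.valueOf("demo"), family);
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(family, Bytes.toBytes("q"), Bytes.toBytes("v"));
      table.put(put);                                  // write one cell to the new table
      table.close();
    } finally {
      util.shutdownMiniCluster();                      // always tear the cluster down
    }
  }
}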

Example 1: setUpBeforeClass

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf = HBaseConfiguration.create();
  conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      TestCoprocessor.class.getName());
  util = new HBaseTestingUtility(conf);
  util.startMiniCluster();

  Admin admin = util.getHBaseAdmin();
  if (admin.tableExists(tableName)) {
    if (admin.isTableEnabled(tableName)) {
      admin.disableTable(tableName);
    }
    admin.deleteTable(tableName);
  }
  util.createTable(tableName, new byte[][]{dummy, test});

  Table ht = new HTable(conf, tableName);
  Put p = new Put(row1);
  p.add(dummy, dummy, dummy); // seed one row: family, qualifier, and value all reuse the dummy bytes
  ht.put(p);
  ht.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source file: TestClientOperationInterrupt.java
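Example 1 starts a mini-cluster in @BeforeClass, but the matching teardown is cropped from the snippet. A typical companion method, assuming the same JUnit 4 setup and the static util field above, would be:

@AfterClass
public static void tearDownAfterClass() throws Exception {
  util.shutdownMiniCluster(); // stop the mini-cluster started in setUpBeforeClass
}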

Example 2: createTableAndSnapshot

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
public static void createTableAndSnapshot(HBaseTestingUtility util, TableName tableName,
    String snapshotName, int numRegions)
    throws Exception {
  try {
    util.deleteTable(tableName);
  } catch(Exception ex) {
    // ignore
  }

  if (numRegions > 1) {
    util.createTable(tableName, FAMILIES, 1, bbb, yyy, numRegions);
  } else {
    util.createTable(tableName, FAMILIES);
  }
  Admin admin = util.getHBaseAdmin();

  // put some stuff in the table
  HTable table = new HTable(util.getConfiguration(), tableName);
  util.loadTable(table, FAMILIES);

  Path rootDir = FSUtils.getRootDir(util.getConfiguration());
  FileSystem fs = rootDir.getFileSystem(util.getConfiguration());

  SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName,
      Arrays.asList(FAMILIES), null, snapshotName, rootDir, fs, true);

  // load different values
  byte[] value = Bytes.toBytes("after_snapshot_value");
  util.loadTable(table, FAMILIES, value);

  // cause flush to create new files in the region
  admin.flush(tableName);
  table.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 35, Source file: TestTableSnapshotScanner.java
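The snapshot created by this helper is typically consumed by a snapshot scanner. As a hedged sketch (not part of the original test), assuming the HBase 1.x org.apache.hadoop.hbase.client.TableSnapshotScanner API and the same util, snapshotName, bbb, and yyy as above:

// restoreDir is a scratch directory where snapshot references are restored
Path restoreDir = util.getDataTestDirOnTestFS("snapshot-restore");
Scan scan = new Scan(bbb, yyy); // same key range used to split the regions above
TableSnapshotScanner scanner =
    new TableSnapshotScanner(util.getConfiguration(), restoreDir, snapshotName, scan);
for (Result result = scanner.next(); result != null; result = scanner.next()) {
  // rows here should still hold pre-snapshot values, not "after_snapshot_value"
}
scanner.close();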

Example 3: createTableAndSnapshot (a variant of Example 2 that takes the split range as startRow/endRow parameters)

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
protected static void createTableAndSnapshot(HBaseTestingUtility util, TableName tableName,
  String snapshotName, byte[] startRow, byte[] endRow, int numRegions)
  throws Exception {
  try {
    util.deleteTable(tableName);
  } catch(Exception ex) {
    // ignore
  }

  if (numRegions > 1) {
    util.createTable(tableName, FAMILIES, 1, startRow, endRow, numRegions);
  } else {
    util.createTable(tableName, FAMILIES);
  }
  Admin admin = util.getHBaseAdmin();

  // put some stuff in the table
  HTable table = new HTable(util.getConfiguration(), tableName);
  util.loadTable(table, FAMILIES);

  Path rootDir = FSUtils.getRootDir(util.getConfiguration());
  FileSystem fs = rootDir.getFileSystem(util.getConfiguration());

  SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName,
    Arrays.asList(FAMILIES), null, snapshotName, rootDir, fs, true);

  // load different values
  byte[] value = Bytes.toBytes("after_snapshot_value");
  util.loadTable(table, FAMILIES, value);

  // cause flush to create new files in the region
  admin.flush(tableName);
  table.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 35, Source file: TableSnapshotInputFormatTestBase.java

Example 4: createTable

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
public static void createTable(final HBaseTestingUtility util, final TableName tableName,
    int regionReplication, final byte[]... families) throws IOException, InterruptedException {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.setRegionReplication(regionReplication);
  for (byte[] family : families) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    htd.addFamily(hcd);
  }
  byte[][] splitKeys = getSplitKeys();
  util.createTable(htd, splitKeys);
  assertEquals((splitKeys.length + 1) * regionReplication,
      util.getHBaseAdmin().getTableRegions(tableName).size());
}
 
Developer: fengchen8086, Project: ditb, Lines: 14, Source file: SnapshotTestingUtils.java
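A hedged usage sketch for this helper (table and family names are placeholders, not from the original test): create a table whose regions each have two replicas, then drop it.

TableName name = TableName.valueOf("replicated_demo"); // hypothetical name
createTable(util, name, 2, Bytes.toBytes("f1"), Bytes.toBytes("f2"));
// ... exercise the table ...
util.deleteTable(name);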

Example 5: setupBeforeClass

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@BeforeClass
public static void setupBeforeClass() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  CONF = TEST_UTIL.getConfiguration();
  CONF.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      RowCountEndpoint.class.getName());

  TEST_UTIL.startMiniCluster();
  TEST_UTIL.createTable(TEST_TABLE, new byte[][]{TEST_FAMILY});
}
 
Developer: fengchen8086, Project: ditb, Lines: 10, Source file: TestRowCountEndpoint.java
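After this setup, the test would invoke the RowCountEndpoint coprocessor; wiring up the protobuf service is beyond this snippet, but seeding the table so the endpoint has rows to count might look like this (a sketch, reusing the TEST_UTIL, TEST_TABLE, and TEST_FAMILY constants above):

Table table = TEST_UTIL.getConnection().getTable(TEST_TABLE);
table.put(new Put(Bytes.toBytes("row1"))
    .addColumn(TEST_FAMILY, Bytes.toBytes("q"), Bytes.toBytes("v")));
table.close();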

Example 6: testgetHDFSBlocksDistribution

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@Test
public void testgetHDFSBlocksDistribution() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  // Why do we set the block size in this test?  If we set it smaller than the kvs, then we'll
  // break up the file in to more pieces that can be distributed across the three nodes and we
  // won't be able to have the condition this test asserts; that at least one node has
  // a copy of all replicas -- if small block size, then blocks are spread evenly across
  // the three nodes.  hfilev3 with tags seems to put us over the block size.  St.Ack.
  // final int DEFAULT_BLOCK_SIZE = 1024;
  // htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
  htu.getConfiguration().setInt("dfs.replication", 2);

  // set up a cluster with 3 nodes
  MiniHBaseCluster cluster = null;
  String[] dataNodeHosts = new String[] { "host1", "host2", "host3" };
  int regionServersCount = 3;

  try {
    cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
    byte[][] families = { fam1, fam2 };
    Table ht = htu.createTable(Bytes.toBytes(this.getName()), families);

    // Setting up region
    byte[] row = Bytes.toBytes("row1");
    byte[] col = Bytes.toBytes("col1");

    Put put = new Put(row);
    put.add(fam1, col, 1, Bytes.toBytes("test1"));
    put.add(fam2, col, 1, Bytes.toBytes("test2"));
    ht.put(put);

    HRegion firstRegion = htu.getHBaseCluster().getRegions(TableName.valueOf(this.getName()))
        .get(0);
    firstRegion.flush(true);
    HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution();

    // Given the default replication factor is 2 and we have 2 HFiles,
    // we will have a total of 4 block replicas on 3 datanodes; thus there
    // must be at least one host that has replicas of both HFiles. That host's
    // weight will be equal to the unique block weight.
    long uniqueBlocksWeight1 = blocksDistribution1.getUniqueBlocksTotalWeight();
    StringBuilder sb = new StringBuilder();
    for (String host: blocksDistribution1.getTopHosts()) {
      if (sb.length() > 0) sb.append(", ");
      sb.append(host);
      sb.append("=");
      sb.append(blocksDistribution1.getWeight(host));
    }

    String topHost = blocksDistribution1.getTopHosts().get(0);
    long topHostWeight = blocksDistribution1.getWeight(topHost);
    String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight=" +
      topHostWeight + ", topHost=" + topHost + "; " + sb.toString();
    LOG.info(msg);
    assertTrue(msg, uniqueBlocksWeight1 == topHostWeight);

    // use the static method to compute the value, it should be the same.
    // static method is used by load balancer or other components
    HDFSBlocksDistribution blocksDistribution2 = HRegion.computeHDFSBlocksDistribution(
        htu.getConfiguration(), firstRegion.getTableDesc(), firstRegion.getRegionInfo());
    long uniqueBlocksWeight2 = blocksDistribution2.getUniqueBlocksTotalWeight();

    assertTrue(uniqueBlocksWeight1 == uniqueBlocksWeight2);

    ht.close();
  } finally {
    if (cluster != null) {
      htu.shutdownMiniCluster();
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 72, Source file: TestHRegion.java


Note: The org.apache.hadoop.hbase.HBaseTestingUtility.createTable examples in this article were curated from open-source code hosted on platforms such as GitHub. The snippets are drawn from community open-source projects, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code.