

Java MetaReader.getTableRegions Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.catalog.MetaReader.getTableRegions. If you are wondering what MetaReader.getTableRegions does, how to use it, or want to see concrete examples, the curated code samples below should help. You can also explore further usage examples of org.apache.hadoop.hbase.catalog.MetaReader.


The following 15 code examples of MetaReader.getTableRegions are shown below, ordered by popularity by default.
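
Before the examples, here is a minimal, self-contained sketch of a typical call. This is a hedged illustration rather than code from any of the projects below: the table name "myTable" and the class name are made up, and the TableName-based signature applies to HBase 0.96-era code (0.94-era code passes a byte[] table name instead, as several examples show).

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaReader;

public class MetaReaderGetTableRegionsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    CatalogTracker ct = new CatalogTracker(conf);
    ct.start();
    try {
      // The final boolean excludes offlined split-parent regions,
      // matching most of the examples below.
      List<HRegionInfo> regions =
          MetaReader.getTableRegions(ct, TableName.valueOf("myTable"), true);
      for (HRegionInfo hri : regions) {
        System.out.println(hri.getRegionNameAsString());
      }
    } finally {
      // stop() releases the tracker's ZooKeeper resources.
      ct.stop();
    }
  }
}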

Example 1: getReopenStatus

import org.apache.hadoop.hbase.catalog.MetaReader; // import the package/class the method depends on
/**
 * Used by the client to identify if all regions have the schema updates
 *
 * @param tableName the table being altered
 * @return Pair indicating the status of the alter command
 * @throws IOException
 */
public Pair<Integer, Integer> getReopenStatus(TableName tableName)
    throws IOException {
  List <HRegionInfo> hris =
    MetaReader.getTableRegions(this.server.getCatalogTracker(), tableName, true);
  Integer pending = 0;
  for (HRegionInfo hri : hris) {
    String name = hri.getEncodedName();
    // no lock concurrent access ok: sequential consistency respected.
    if (regionsToReopen.containsKey(name)
        || regionStates.isRegionInTransition(name)) {
      pending++;
    }
  }
  return new Pair<Integer, Integer>(pending, hris.size());
}
 
Developer: tenggyut, Project: HIndex, Lines: 23, Source: AssignmentManager.java
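
As a usage note, a client does not normally call getReopenStatus directly; in these HBase versions the admin API exposes it through HBaseAdmin.getAlterStatus, which returns the same Pair. A hedged sketch (the table name is illustrative; 0.94-era getAlterStatus takes a byte[] instead of a TableName):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Pair;

public class AlterStatusSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      // first = regions still pending reopen, second = total regions.
      Pair<Integer, Integer> status =
          admin.getAlterStatus(TableName.valueOf("myTable"));
      System.out.println(status.getFirst() + " of " + status.getSecond()
          + " regions still to be reopened");
    } finally {
      admin.close();
    }
  }
}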

Example 2: getTableRegions

import org.apache.hadoop.hbase.catalog.MetaReader; // import the package/class the method depends on
/**
 * Get the regions of a given table.
 * @param tableName the name of the table
 * @return Ordered list of {@link HRegionInfo}.
 * @throws IOException
 */
public List<HRegionInfo> getTableRegions(final byte[] tableName) throws IOException {
  CatalogTracker ct = getCatalogTracker();
  List<HRegionInfo> regions = null;
  try {
    regions = MetaReader.getTableRegions(ct, tableName, true);
  } finally {
    cleanupCatalogTracker(ct);
  }
  return regions;
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 17, Source: HBaseAdmin.java

Example 3: getReopenStatus

import org.apache.hadoop.hbase.catalog.MetaReader; // import the package/class the method depends on
/**
 * Used by the client to identify if all regions have the schema updates
 *
 * @param tableName the table being altered
 * @return Pair indicating the status of the alter command
 * @throws IOException
 */
public Pair<Integer, Integer> getReopenStatus(byte[] tableName)
throws IOException {
  List <HRegionInfo> hris =
    MetaReader.getTableRegions(this.master.getCatalogTracker(), tableName);
  Integer pending = 0;
  for(HRegionInfo hri : hris) {
    String name = hri.getEncodedName();
    if (regionsToReopen.containsKey(name) || regionsInTransition.containsKey(name)) {
      pending++;
    }
  }
  return new Pair<Integer, Integer>(pending, hris.size());
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 21, Source: AssignmentManager.java

Example 4: verifyRegions

import org.apache.hadoop.hbase.catalog.MetaReader; // import the package/class the method depends on
/**
 * Check that all the regions in the snapshot are valid, and accounted for.
 * @param snapshotDir snapshot directory to check
 * @throws IOException if we can't reach hbase:meta or read the files from the FS
 */
private void verifyRegions(Path snapshotDir) throws IOException {
  List<HRegionInfo> regions = MetaReader.getTableRegions(this.services.getCatalogTracker(),
      tableName);

  Set<String> snapshotRegions = SnapshotReferenceUtil.getSnapshotRegionNames(fs, snapshotDir);
  if (snapshotRegions == null) {
    String msg = "Snapshot " + ClientSnapshotDescriptionUtils.toString(snapshot) + " looks empty";
    LOG.error(msg);
    throw new CorruptedSnapshotException(msg);
  }

  String errorMsg = "";
  if (snapshotRegions.size() != regions.size()) {
    errorMsg = "Regions moved during the snapshot '" + 
                 ClientSnapshotDescriptionUtils.toString(snapshot) + "'. expected=" +
                 regions.size() + " snapshotted=" + snapshotRegions.size() + ".";
    LOG.error(errorMsg);
  }

  for (HRegionInfo region : regions) {
    if (!snapshotRegions.contains(region.getEncodedName())) {
      // could happen due to a move or split race.
      String mesg = " No snapshot region directory found for region:" + region;
      if (errorMsg.isEmpty()) errorMsg = mesg;
      LOG.error(mesg);
    }

    verifyRegion(fs, snapshotDir, region);
  }
  if (!errorMsg.isEmpty()) {
    throw new CorruptedSnapshotException(errorMsg);
  }
}
 
Developer: tenggyut, Project: HIndex, Lines: 39, Source: MasterSnapshotVerifier.java

Example 5: getTableRegions

import org.apache.hadoop.hbase.catalog.MetaReader; // import the package/class the method depends on
/**
 * Get the regions of a given table.
 *
 * @param tableName the name of the table
 * @return Ordered list of {@link HRegionInfo}.
 * @throws IOException
 */
public List<HRegionInfo> getTableRegions(final TableName tableName)
    throws IOException {
  CatalogTracker ct = getCatalogTracker();
  List<HRegionInfo> regions = null;
  try {
    regions = MetaReader.getTableRegions(ct, tableName, true);
  } finally {
    cleanupCatalogTracker(ct);
  }
  return regions;
}
 
Developer: tenggyut, Project: HIndex, Lines: 19, Source: HBaseAdmin.java

Example 6: getTableRegions

import org.apache.hadoop.hbase.catalog.MetaReader; // import the package/class the method depends on
/**
 * Get the regions of a given table.
 *
 * @param tableName the name of the table
 * @return Ordered list of {@link HRegionInfo}.
 * @throws IOException
 */
public List<HRegionInfo> getTableRegions(final byte[] tableName)
    throws IOException {
  CatalogTracker ct = getCatalogTracker();
  List<HRegionInfo> regions = null;
  try {
    regions = MetaReader.getTableRegions(ct, tableName, true);
  } finally {
    cleanupCatalogTracker(ct);
  }
  return regions;
}
 
Developer: wanhao, Project: IRIndex, Lines: 19, Source: HBaseAdmin.java

Example 7: getTableRegions

import org.apache.hadoop.hbase.catalog.MetaReader; // import the package/class the method depends on
/**
 * Get the regions of a given table.
 *
 * @param tableName the name of the table
 * @return Ordered list of {@link HRegionInfo}.
 * @throws IOException
 */
public List<HRegionInfo> getTableRegions(final byte[] tableName)
    throws IOException {
  CatalogTracker ct = getCatalogTracker();
  List<HRegionInfo> regions = null;
  try {
    regions = MetaReader.getTableRegions(ct, tableName, true);
  } finally {
    cleanupCatalogTracker(ct);
  }
  return regions;
}
 
Developer: lifeng5042, Project: RStore, Lines: 19, Source: HBaseAdmin.java

Example 8: handleEnableTable

import org.apache.hadoop.hbase.catalog.MetaReader; // import the package/class the method depends on
private void handleEnableTable() throws IOException, KeeperException {
  // I could check table is disabling and if so, not enable but require
  // that user first finish disabling but that might be obnoxious.

  // Set table enabling flag up in zk.
  this.assignmentManager.getZKTable().setEnablingTable(this.tableNameStr);
  boolean done = false;
  // Get the regions of this table. We're done when all listed
  // regions are onlined.
  List<HRegionInfo> regionsInMeta;
  regionsInMeta = MetaReader.getTableRegions(this.ct, tableName, true);
  int countOfRegionsInTable = regionsInMeta.size();
  List<HRegionInfo> regions = regionsToAssign(regionsInMeta);
  int regionsCount = regions.size();
  if (regionsCount == 0) {
    done = true;
  }
  LOG.info("Table has " + countOfRegionsInTable + " regions of which " +
    regionsCount + " are offline.");
  BulkEnabler bd = new BulkEnabler(this.server, regions,
    countOfRegionsInTable);
  try {
    if (bd.bulkAssign()) {
      done = true;
    }
  } catch (InterruptedException e) {
    LOG.warn("Enable was interrupted");
    // Preserve the interrupt.
    Thread.currentThread().interrupt();
  }
  // Flip the table to enabled.
  if (done) this.assignmentManager.getZKTable().setEnabledTable(
    this.tableNameStr);
  LOG.info("Enabled table is done=" + done);
}
 
Developer: lifeng5042, Project: RStore, Lines: 36, Source: EnableTableHandler.java

Example 9: getReopenStatus

import org.apache.hadoop.hbase.catalog.MetaReader; // import the package/class the method depends on
/**
 * Used by the client to identify if all regions have the schema updates
 *
 * @param tableName the table being altered
 * @return Pair indicating the status of the alter command
 * @throws IOException
 * @throws InterruptedException 
 */
public Pair<Integer, Integer> getReopenStatus(byte[] tableName)
throws IOException, InterruptedException {
  List <HRegionInfo> hris =
    MetaReader.getTableRegions(this.master.getCatalogTracker(), tableName);
  Integer pending = 0;
  for(HRegionInfo hri : hris) {
    if(regionsToReopen.get(hri.getEncodedName()) != null) {
      pending++;
    }
  }
  return new Pair<Integer, Integer>(pending, hris.size());
}
 
Developer: lifeng5042, Project: RStore, Lines: 21, Source: AssignmentManager.java

Example 10: testMergeTable

import org.apache.hadoop.hbase.catalog.MetaReader; // import the package/class the method depends on
/**
 * Test merge.
 * Hand-makes regions of a mergeable size and adds the hand-made regions to
 * hand-made meta.  The hand-made regions are created offline.  We then start
 * up a mini cluster, disable the hand-made table, and start in on merging.
 * @throws Exception
 */
@Test (timeout=300000) public void testMergeTable() throws Exception {
  // Table we are manually creating offline.
  HTableDescriptor desc = new HTableDescriptor(Bytes.toBytes("test"));
  desc.addFamily(new HColumnDescriptor(COLUMN_NAME));

  // Set maximum regionsize down.
  UTIL.getConfiguration().setLong(HConstants.HREGION_MAX_FILESIZE, 64L * 1024L * 1024L);
  // Make it so we don't split.
  UTIL.getConfiguration().setInt("hbase.regionserver.regionSplitLimit", 0);
  // Startup hdfs.  It's in here we'll be putting our manually made regions.
  UTIL.startMiniDFSCluster(1);
  // Create hdfs hbase rootdir.
  Path rootdir = UTIL.createRootDir();
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  if (fs.exists(rootdir)) {
    if (fs.delete(rootdir, true)) {
      LOG.info("Cleaned up existing " + rootdir);
    }
  }

  // Now create three data regions: The first is too large to merge since it
  // will be > 64 MB in size. The second two will be smaller and will be
  // selected for merging.

  // To ensure that the first region is larger than 64MB we need to write at
  // least 65536 rows. We will make certain by writing 70000
  byte [] row_70001 = Bytes.toBytes("row_70001");
  byte [] row_80001 = Bytes.toBytes("row_80001");

  // Create regions and populate them at same time.  Create the tabledir
  // for them first.
  FSTableDescriptors.createTableDescriptor(fs, rootdir, desc);
  HRegion [] regions = {
    createRegion(desc, null, row_70001, 1, 70000, rootdir),
    createRegion(desc, row_70001, row_80001, 70001, 10000, rootdir),
    createRegion(desc, row_80001, null, 80001, 11000, rootdir)
  };

  // Now create the root and meta regions and insert the data regions
  // created above into .META.
  setupROOTAndMeta(rootdir, regions);
  try {
    LOG.info("Starting mini zk cluster");
    UTIL.startMiniZKCluster();
    LOG.info("Starting mini hbase cluster");
    UTIL.startMiniHBaseCluster(1, 1);
    Configuration c = new Configuration(UTIL.getConfiguration());
    CatalogTracker ct = new CatalogTracker(c);
    ct.start();
    List<HRegionInfo> originalTableRegions =
      MetaReader.getTableRegions(ct, desc.getName());
    LOG.info("originalTableRegions size=" + originalTableRegions.size() +
      "; " + originalTableRegions);
    HBaseAdmin admin = new HBaseAdmin(new Configuration(c));
    admin.disableTable(desc.getName());
    HMerge.merge(c, FileSystem.get(c), desc.getName());
    List<HRegionInfo> postMergeTableRegions =
      MetaReader.getTableRegions(ct, desc.getName());
    LOG.info("postMergeTableRegions size=" + postMergeTableRegions.size() +
      "; " + postMergeTableRegions);
    assertTrue("originalTableRegions=" + originalTableRegions.size() +
      ", postMergeTableRegions=" + postMergeTableRegions.size(),
      postMergeTableRegions.size() < originalTableRegions.size());
    LOG.info("Done with merge");
  } finally {
    UTIL.shutdownMiniCluster();
    LOG.info("After cluster shutdown");
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 77, Source: TestMergeTable.java

Example 11: testMergeTable

import org.apache.hadoop.hbase.catalog.MetaReader; // import the package/class the method depends on
/**
 * Test merge.
 * Hand-makes regions of a mergeable size and adds the hand-made regions to
 * hand-made meta.  The hand-made regions are created offline.  We then start
 * up a mini cluster, disable the hand-made table, and start in on merging.
 * @throws Exception
 */
@Test (timeout=300000) public void testMergeTable() throws Exception {
  // Table we are manually creating offline.
  HTableDescriptor desc = new HTableDescriptor(org.apache.hadoop.hbase.TableName.valueOf(Bytes.toBytes("test")));
  desc.addFamily(new HColumnDescriptor(COLUMN_NAME));

  // Set maximum regionsize down.
  UTIL.getConfiguration().setLong(HConstants.HREGION_MAX_FILESIZE, 64L * 1024L * 1024L);
  // Make it so we don't split.
  UTIL.getConfiguration().setInt("hbase.regionserver.regionSplitLimit", 0);
  // Startup hdfs.  It's in here we'll be putting our manually made regions.
  UTIL.startMiniDFSCluster(1);
  // Create hdfs hbase rootdir.
  Path rootdir = UTIL.createRootDir();
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  if (fs.exists(rootdir)) {
    if (fs.delete(rootdir, true)) {
      LOG.info("Cleaned up existing " + rootdir);
    }
  }

  // Now create three data regions: The first is too large to merge since it
  // will be > 64 MB in size. The second two will be smaller and will be
  // selected for merging.

  // To ensure that the first region is larger than 64MB we need to write at
  // least 65536 rows. We will make certain by writing 70000
  byte [] row_70001 = Bytes.toBytes("row_70001");
  byte [] row_80001 = Bytes.toBytes("row_80001");

  // Create regions and populate them at same time.  Create the tabledir
  // for them first.
  new FSTableDescriptors(fs, rootdir).createTableDescriptor(desc);
  HRegion [] regions = {
    createRegion(desc, null, row_70001, 1, 70000, rootdir),
    createRegion(desc, row_70001, row_80001, 70001, 10000, rootdir),
    createRegion(desc, row_80001, null, 80001, 11000, rootdir)
  };

  // Now create the root and meta regions and insert the data regions
  // created above into hbase:meta
  setupMeta(rootdir, regions);
  try {
    LOG.info("Starting mini zk cluster");
    UTIL.startMiniZKCluster();
    LOG.info("Starting mini hbase cluster");
    UTIL.startMiniHBaseCluster(1, 1);
    Configuration c = new Configuration(UTIL.getConfiguration());
    CatalogTracker ct = new CatalogTracker(c);
    ct.start();
    List<HRegionInfo> originalTableRegions =
      MetaReader.getTableRegions(ct, desc.getTableName());
    LOG.info("originalTableRegions size=" + originalTableRegions.size() +
      "; " + originalTableRegions);
    HBaseAdmin admin = new HBaseAdmin(c);
    admin.disableTable(desc.getTableName());
    HMerge.merge(c, FileSystem.get(c), desc.getTableName());
    List<HRegionInfo> postMergeTableRegions =
      MetaReader.getTableRegions(ct, desc.getTableName());
    LOG.info("postMergeTableRegions size=" + postMergeTableRegions.size() +
      "; " + postMergeTableRegions);
    assertTrue("originalTableRegions=" + originalTableRegions.size() +
      ", postMergeTableRegions=" + postMergeTableRegions.size(),
      postMergeTableRegions.size() < originalTableRegions.size());
    LOG.info("Done with merge");
  } finally {
    UTIL.shutdownMiniCluster();
    LOG.info("After cluster shutdown");
  }
}
 
Developer: tenggyut, Project: HIndex, Lines: 77, Source: TestMergeTable.java

Example 12: testRegionMove

import org.apache.hadoop.hbase.catalog.MetaReader; // import the package/class the method depends on
@Test(timeout = 180000)
public void testRegionMove() throws Exception {
  MiniHBaseCluster cluster = UTIL.getHBaseCluster();
  HMaster master = cluster.getMaster();
  ZooKeeperWatcher zkw = UTIL.getZooKeeperWatcher(UTIL);
  TableName tableName = TableName.valueOf("testRegionMove");
  HTableDescriptor iHtd = new HTableDescriptor(tableName);
  HColumnDescriptor hcd = new HColumnDescriptor("col").setMaxVersions(Integer.MAX_VALUE);
  IndexSpecification iSpec = new IndexSpecification("ScanIndexf");
  iSpec.addIndexColumn(hcd, "q1", ValueType.String, 10);
  TableIndices tableIndices = new TableIndices();
  tableIndices.addIndex(iSpec);
  iHtd.addFamily(hcd);
  iHtd.setValue(Constants.INDEX_SPEC_KEY, tableIndices.toByteArray());
  char c = 'A';
  byte[][] split = new byte[4][];
  for (int i = 0; i < 4; i++) {
    byte[] b = { (byte) c };
    split[i] = b;
    c++;
  }
  admin.createTable(iHtd, split);
  List<HRegionInfo> tableRegions =
      MetaReader.getTableRegions(master.getCatalogTracker(),
        iHtd.getTableName());
  int numRegions = cluster.getRegionServerThreads().size();
  cluster.getRegionServer(1).getServerName();
  Random random = new Random();
  for (HRegionInfo hRegionInfo : tableRegions) {
    int regionNumber = random.nextInt(numRegions);
    ServerName serverName = cluster.getRegionServer(regionNumber).getServerName();
    admin.move(hRegionInfo.getEncodedNameAsBytes(), Bytes.toBytes(serverName.getServerName()));
  }
  ZKAssign.blockUntilNoRIT(zkw);
  AssignmentManager am = UTIL.getHBaseCluster().getMaster().getAssignmentManager();
  // Spin until the assignment manager reports no regions in transition.
  while (!am.waitUntilNoRegionsInTransition(1000));
  boolean isRegionColocated =
      TestUtils.checkForColocation(master, tableName.getNameAsString(),
        IndexUtils.getIndexTableName(tableName));
  assertTrue("User regions and index regions should colocate.", isRegionColocated);

}
 
Developer: tenggyut, Project: HIndex, Lines: 43, Source: TestSecIndexLoadBalancer.java

Example 13: getTableRegions

import org.apache.hadoop.hbase.catalog.MetaReader; // import the package/class the method depends on
public List<HRegionInfo> getTableRegions(final TableName tableName)
    throws IOException {
  return MetaReader.getTableRegions(catTracker, tableName, true);
}
 
Developer: jinyeluo, Project: smarthbasecompactor, Lines: 5, Source: HbaseBatchExecutor.java

Example 14: verifyRegions

import org.apache.hadoop.hbase.catalog.MetaReader; // import the package/class the method depends on
/**
 * Check that all the regions in the snapshot are valid, and accounted for.
 * @param manifest snapshot manifest to inspect
 * @throws IOException if we can't reach hbase:meta or read the files from the FS
 */
private void verifyRegions(final SnapshotManifest manifest) throws IOException {
  List<HRegionInfo> regions = MetaReader.getTableRegions(this.services.getCatalogTracker(),
      tableName);

  Map<String, SnapshotRegionManifest> regionManifests = manifest.getRegionManifestsMap();
  if (regionManifests == null) {
    String msg = "Snapshot " + ClientSnapshotDescriptionUtils.toString(snapshot) + " looks empty";
    LOG.error(msg);
    throw new CorruptedSnapshotException(msg);
  }

  String errorMsg = "";
  if (regionManifests.size() != regions.size()) {
    errorMsg = "Regions moved during the snapshot '" +
                 ClientSnapshotDescriptionUtils.toString(snapshot) + "'. expected=" +
                 regions.size() + " snapshotted=" + regionManifests.size() + ".";
    LOG.error(errorMsg);
  }

  // Verify HRegionInfo
  for (HRegionInfo region : regions) {
    SnapshotRegionManifest regionManifest = regionManifests.get(region.getEncodedName());
    if (regionManifest == null) {
      // could happen due to a move or split race.
      String mesg = " No snapshot region directory found for region:" + region;
      if (errorMsg.isEmpty()) errorMsg = mesg;
      LOG.error(mesg);
      continue;
    }

    verifyRegionInfo(region, regionManifest);
  }

  if (!errorMsg.isEmpty()) {
    throw new CorruptedSnapshotException(errorMsg);
  }

  // Verify Snapshot HFiles
  SnapshotReferenceUtil.verifySnapshot(services.getConfiguration(), fs, manifest);
}
 
Developer: shenli-uiuc, Project: PyroDB, Lines: 46, Source: MasterSnapshotVerifier.java

Example 15: testMergeTable

import org.apache.hadoop.hbase.catalog.MetaReader; // import the package/class the method depends on
/**
 * Test merge.
 * Hand-makes regions of a mergeable size and adds the hand-made regions to
 * hand-made meta.  The hand-made regions are created offline.  We then start
 * up a mini cluster, disable the hand-made table, and start in on merging.
 * @throws Exception
 */
@Test (timeout=300000) public void testMergeTable() throws Exception {
  // Table we are manually creating offline.
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(Bytes.toBytes("test")));
  desc.addFamily(new HColumnDescriptor(COLUMN_NAME));

  // Set maximum regionsize down.
  UTIL.getConfiguration().setLong(HConstants.HREGION_MAX_FILESIZE, 64L * 1024L * 1024L);
  // Make it so we don't split.
  UTIL.getConfiguration().setInt("hbase.regionserver.regionSplitLimit", 0);
  // Startup hdfs.  It's in here we'll be putting our manually made regions.
  UTIL.startMiniDFSCluster(1);
  // Create hdfs hbase rootdir.
  Path rootdir = UTIL.createRootDir();
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  if (fs.exists(rootdir)) {
    if (fs.delete(rootdir, true)) {
      LOG.info("Cleaned up existing " + rootdir);
    }
  }

  // Now create three data regions: The first is too large to merge since it
  // will be > 64 MB in size. The second two will be smaller and will be
  // selected for merging.

  // To ensure that the first region is larger than 64MB we need to write at
  // least 65536 rows. We will make certain by writing 70000
  byte [] row_70001 = Bytes.toBytes("row_70001");
  byte [] row_80001 = Bytes.toBytes("row_80001");

  // Create regions and populate them at same time.  Create the tabledir
  // for them first.
  new FSTableDescriptors(fs, rootdir).createTableDescriptor(desc);
  HRegion [] regions = {
    createRegion(desc, null, row_70001, 1, 70000, rootdir),
    createRegion(desc, row_70001, row_80001, 70001, 10000, rootdir),
    createRegion(desc, row_80001, null, 80001, 11000, rootdir)
  };

  // Now create the root and meta regions and insert the data regions
  // created above into hbase:meta
  setupMeta(rootdir, regions);
  try {
    LOG.info("Starting mini zk cluster");
    UTIL.startMiniZKCluster();
    LOG.info("Starting mini hbase cluster");
    UTIL.startMiniHBaseCluster(1, 1);
    Configuration c = new Configuration(UTIL.getConfiguration());
    CatalogTracker ct = new CatalogTracker(c);
    ct.start();
    List<HRegionInfo> originalTableRegions =
      MetaReader.getTableRegions(ct, desc.getTableName());
    LOG.info("originalTableRegions size=" + originalTableRegions.size() +
      "; " + originalTableRegions);
    HBaseAdmin admin = new HBaseAdmin(c);
    admin.disableTable(desc.getTableName());
    HMerge.merge(c, FileSystem.get(c), desc.getTableName());
    List<HRegionInfo> postMergeTableRegions =
      MetaReader.getTableRegions(ct, desc.getTableName());
    LOG.info("postMergeTableRegions size=" + postMergeTableRegions.size() +
      "; " + postMergeTableRegions);
    assertTrue("originalTableRegions=" + originalTableRegions.size() +
      ", postMergeTableRegions=" + postMergeTableRegions.size(),
      postMergeTableRegions.size() < originalTableRegions.size());
    LOG.info("Done with merge");
  } finally {
    UTIL.shutdownMiniCluster();
    LOG.info("After cluster shutdown");
  }
}
 
Developer: cloud-software-foundation, Project: c5, Lines: 77, Source: TestMergeTable.java


Note: The org.apache.hadoop.hbase.catalog.MetaReader.getTableRegions examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by the community; copyright of the source code remains with the original authors. Consult the corresponding project's License before distributing or using the code; do not reproduce without permission.