

Java MetaReader.getTableRegionsAndLocations Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.catalog.MetaReader.getTableRegionsAndLocations. If you are unsure how to call MetaReader.getTableRegionsAndLocations, or want to see how it is used in real code, the examples selected below should help. You can also explore further usage examples of org.apache.hadoop.hbase.catalog.MetaReader itself.


Below are 15 code examples of MetaReader.getTableRegionsAndLocations, ordered by popularity by default.
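Before working through the examples, here is a minimal sketch of a typical call. Only the MetaReader.getTableRegionsAndLocations call itself mirrors the examples below; the wrapper class, helper name, and the printing are illustrative assumptions.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.util.Pair;

public class MetaReaderUsageSketch {
  // Hypothetical helper: list every region of a table together with the
  // server hosting it, as recorded in the meta table.
  static void printRegionLocations(CatalogTracker ct, String table)
      throws IOException, InterruptedException {
    List<Pair<HRegionInfo, ServerName>> regions =
        MetaReader.getTableRegionsAndLocations(ct, TableName.valueOf(table));
    for (Pair<HRegionInfo, ServerName> pair : regions) {
      // getSecond() may be null when no server is currently assigned.
      System.out.println(pair.getFirst().getRegionNameAsString()
          + " -> " + pair.getSecond());
    }
  }
}

As the examples show, the CatalogTracker argument typically comes from HMaster.getCatalogTracker() in test code, or from HBaseAdmin's internal getCatalogTracker()/cleanupCatalogTracker() pair.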

Example 1: waitAndVerifyRegionNum

import org.apache.hadoop.hbase.catalog.MetaReader; // import the class this method depends on
private void waitAndVerifyRegionNum(HMaster master, TableName tablename,
    int expectedRegionNum) throws Exception {
  List<Pair<HRegionInfo, ServerName>> tableRegionsInMeta;
  List<HRegionInfo> tableRegionsInMaster;
  long timeout = System.currentTimeMillis() + waitTime;
  while (System.currentTimeMillis() < timeout) {
    tableRegionsInMeta = MetaReader.getTableRegionsAndLocations(
        master.getCatalogTracker(), tablename);
    tableRegionsInMaster = master.getAssignmentManager().getRegionStates()
        .getRegionsOfTable(tablename);
    if (tableRegionsInMeta.size() == expectedRegionNum
        && tableRegionsInMaster.size() == expectedRegionNum) {
      break;
    }
    Thread.sleep(250);
  }

  tableRegionsInMeta = MetaReader.getTableRegionsAndLocations(
      master.getCatalogTracker(), tablename);
  LOG.info("Regions after merge:" + Joiner.on(',').join(tableRegionsInMeta));
  assertEquals(expectedRegionNum, tableRegionsInMeta.size());
}
 
Developer: tenggyut, Project: HIndex, Lines: 23, Source: TestRegionMergeTransactionOnCluster.java

Example 2: getStartKeysAndLocations

import org.apache.hadoop.hbase.catalog.MetaReader; // import the class this method depends on
public static List<Pair<byte[], ServerName>> getStartKeysAndLocations(HMaster master,
    String tableName) throws IOException, InterruptedException {

  List<Pair<HRegionInfo, ServerName>> tableRegionsAndLocations =
      MetaReader.getTableRegionsAndLocations(master.getCatalogTracker(),
        TableName.valueOf(tableName));
  List<Pair<byte[], ServerName>> startKeyAndLocationPairs =
      new ArrayList<Pair<byte[], ServerName>>(tableRegionsAndLocations.size());
  Pair<byte[], ServerName> startKeyAndLocation = null;
  for (Pair<HRegionInfo, ServerName> regionAndLocation : tableRegionsAndLocations) {
    startKeyAndLocation =
        new Pair<byte[], ServerName>(regionAndLocation.getFirst().getStartKey(),
            regionAndLocation.getSecond());
    startKeyAndLocationPairs.add(startKeyAndLocation);
  }
  return startKeyAndLocationPairs;

}
 
Developer: tenggyut, Project: HIndex, Lines: 19, Source: TestUtils.java

Example 3: getStartKeysAndLocations

import org.apache.hadoop.hbase.catalog.MetaReader; // import the class this method depends on
private List<Pair<byte[], ServerName>> getStartKeysAndLocations(HMaster master, String tableName)
    throws IOException, InterruptedException {

  List<Pair<HRegionInfo, ServerName>> tableRegionsAndLocations =
      MetaReader.getTableRegionsAndLocations(master.getCatalogTracker(),
        TableName.valueOf(tableName));
  List<Pair<byte[], ServerName>> startKeyAndLocationPairs =
      new ArrayList<Pair<byte[], ServerName>>(tableRegionsAndLocations.size());
  Pair<byte[], ServerName> startKeyAndLocation = null;
  for (Pair<HRegionInfo, ServerName> regionAndLocation : tableRegionsAndLocations) {
    startKeyAndLocation =
        new Pair<byte[], ServerName>(regionAndLocation.getFirst().getStartKey(),
            regionAndLocation.getSecond());
    startKeyAndLocationPairs.add(startKeyAndLocation);
  }
  return startKeyAndLocationPairs;

}
 
Developer: tenggyut, Project: HIndex, Lines: 19, Source: TestSecIndexLoadBalancer.java

Example 4: flush

import org.apache.hadoop.hbase.catalog.MetaReader; // import the class this method depends on
/**
 * Flush a table or an individual region. Synchronous operation.
 * @param tableNameOrRegionName table or region to flush
 * @throws IOException if a remote or network exception occurs
 * @throws InterruptedException
 */
public void flush(final byte[] tableNameOrRegionName) throws IOException, InterruptedException {
  CatalogTracker ct = getCatalogTracker();
  try {
    Pair<HRegionInfo, ServerName> regionServerPair = getRegion(tableNameOrRegionName, ct);
    if (regionServerPair != null) {
      if (regionServerPair.getSecond() == null) {
        throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
      } else {
        flush(regionServerPair.getSecond(), regionServerPair.getFirst());
      }
    } else {
      final String tableName = tableNameString(tableNameOrRegionName, ct);
      List<Pair<HRegionInfo, ServerName>> pairs = MetaReader.getTableRegionsAndLocations(ct,
        tableName);
      for (Pair<HRegionInfo, ServerName> pair : pairs) {
        if (pair.getFirst().isOffline()) continue;
        if (pair.getSecond() == null) continue;
        try {
          flush(pair.getSecond(), pair.getFirst());
        } catch (NotServingRegionException e) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Trying to flush " + pair.getFirst() + ": "
                + StringUtils.stringifyException(e));
          }
        }
      }
    }
  } finally {
    cleanupCatalogTracker(ct);
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 38, Source: HBaseAdmin.java

Example 5: compact

import org.apache.hadoop.hbase.catalog.MetaReader; // import the class this method depends on
/**
 * Compact a table or an individual region. Asynchronous operation.
 * @param tableNameOrRegionName table or region to compact
 * @param columnFamily column family within a table or region
 * @param major True if we are to do a major compaction.
 * @throws IOException if a remote or network exception occurs
 * @throws InterruptedException
 */
private void compact(final byte[] tableNameOrRegionName, final byte[] columnFamily,
    final boolean major) throws IOException, InterruptedException {
  CatalogTracker ct = getCatalogTracker();
  try {
    Pair<HRegionInfo, ServerName> regionServerPair = getRegion(tableNameOrRegionName, ct);
    if (regionServerPair != null) {
      if (regionServerPair.getSecond() == null) {
        throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
      } else {
        compact(regionServerPair.getSecond(), regionServerPair.getFirst(), major, columnFamily);
      }
    } else {
      final String tableName = tableNameString(tableNameOrRegionName, ct);
      List<Pair<HRegionInfo, ServerName>> pairs = MetaReader.getTableRegionsAndLocations(ct,
        tableName);
      for (Pair<HRegionInfo, ServerName> pair : pairs) {
        if (pair.getFirst().isOffline()) continue;
        if (pair.getSecond() == null) continue;
        try {
          compact(pair.getSecond(), pair.getFirst(), major, columnFamily);
        } catch (NotServingRegionException e) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Trying to" + (major ? " major" : "") + " compact " + pair.getFirst()
                + ": " + StringUtils.stringifyException(e));
          }
        }
      }
    }
  } finally {
    cleanupCatalogTracker(ct);
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 41, Source: HBaseAdmin.java

Example 6: split

import org.apache.hadoop.hbase.catalog.MetaReader; // import the class this method depends on
/**
 * Split a table or an individual region. Asynchronous operation.
 * @param tableNameOrRegionName table or region to split
 * @param splitPoint the explicit position to split on
 * @throws IOException if a remote or network exception occurs
 * @throws InterruptedException interrupt exception occurred
 */
public void split(final byte[] tableNameOrRegionName, final byte[] splitPoint)
    throws IOException, InterruptedException {
  CatalogTracker ct = getCatalogTracker();
  try {
    Pair<HRegionInfo, ServerName> regionServerPair = getRegion(tableNameOrRegionName, ct);
    if (regionServerPair != null) {
      if (regionServerPair.getSecond() == null) {
        throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
      } else {
        split(regionServerPair.getSecond(), regionServerPair.getFirst(), splitPoint);
      }
    } else {
      final String tableName = tableNameString(tableNameOrRegionName, ct);
      List<Pair<HRegionInfo, ServerName>> pairs = MetaReader.getTableRegionsAndLocations(ct,
        tableName);
      for (Pair<HRegionInfo, ServerName> pair : pairs) {
        // May not be a server for a particular row
        if (pair.getSecond() == null) continue;
        HRegionInfo r = pair.getFirst();
        // check for parents
        if (r.isSplitParent()) continue;
        // if a split point given, only split that particular region
        if (splitPoint != null && !r.containsRow(splitPoint)) continue;
        // call out to region server to do split now
        split(pair.getSecond(), pair.getFirst(), splitPoint);
      }
    }
  } finally {
    cleanupCatalogTracker(ct);
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 39, Source: HBaseAdmin.java

Example 7: handleEnableTable

import org.apache.hadoop.hbase.catalog.MetaReader; // import the class this method depends on
private void handleEnableTable() throws IOException, KeeperException, InterruptedException {
  // I could check table is disabling and if so, not enable but require
  // that user first finish disabling but that might be obnoxious.

  // Set table enabling flag up in zk.
  this.assignmentManager.getZKTable().setEnablingTable(this.tableNameStr);
  boolean done = false;
  // Get the regions of this table. We're done when all listed
  // tables are onlined.
  List<Pair<HRegionInfo, ServerName>> tableRegionsAndLocations = MetaReader
      .getTableRegionsAndLocations(this.ct, tableName, true);
  int countOfRegionsInTable = tableRegionsAndLocations.size();
  List<HRegionInfo> regions = regionsToAssignWithServerName(tableRegionsAndLocations);
  int regionsCount = regions.size();
  if (regionsCount == 0) {
    done = true;
  }
  LOG.info("Table has " + countOfRegionsInTable + " regions of which " +
    regionsCount + " are offline.");
  BulkEnabler bd = new BulkEnabler(this.server, regions, countOfRegionsInTable, true);
  try {
    if (bd.bulkAssign()) {
      done = true;
    }
  } catch (InterruptedException e) {
    LOG.warn("Enable was interrupted");
    // Preserve the interrupt.
    Thread.currentThread().interrupt();
  }
  // Flip the table to enabled.
  if (done) this.assignmentManager.getZKTable().setEnabledTable(
    this.tableNameStr);
  LOG.info("Enabled table is done=" + done);
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 35, Source: EnableTableHandler.java
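Note that this handler, unlike the two-argument calls elsewhere in these examples, uses a three-argument overload. The sketch below shows that call in isolation, assuming the 0.94-era signature whose third parameter is a boolean named excludeOfflinedSplitParents; the parameter name is not visible in the snippet above, so treat it as an assumption.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.util.Pair;

public class EnableRegionsSketch {
  // Hypothetical helper mirroring the handler above: fetch the regions that
  // the enable-table flow should consider assigning.
  static List<Pair<HRegionInfo, ServerName>> regionsForEnable(
      CatalogTracker ct, byte[] tableName)
      throws IOException, InterruptedException {
    // Passing true asks MetaReader to leave out regions that are offlined
    // split parents, so the bulk assign only sees assignable regions.
    return MetaReader.getTableRegionsAndLocations(ct, tableName, true);
  }
}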

Example 8: requestMergeRegion

import org.apache.hadoop.hbase.catalog.MetaReader; // import the class this method depends on
private PairOfSameType<HRegionInfo> requestMergeRegion(
    HMaster master, TableName tablename,
    int regionAnum, int regionBnum) throws Exception {
  List<Pair<HRegionInfo, ServerName>> tableRegions = MetaReader
      .getTableRegionsAndLocations(master.getCatalogTracker(),
          tablename);
  HRegionInfo regionA = tableRegions.get(regionAnum).getFirst();
  HRegionInfo regionB = tableRegions.get(regionBnum).getFirst();
  TEST_UTIL.getHBaseAdmin().mergeRegions(
    regionA.getEncodedNameAsBytes(),
    regionB.getEncodedNameAsBytes(), false);
  return new PairOfSameType<HRegionInfo>(regionA, regionB);
}
 
Developer: tenggyut, Project: HIndex, Lines: 14, Source: TestRegionMergeTransactionOnCluster.java

Example 9: createTableAndLoadData

import org.apache.hadoop.hbase.catalog.MetaReader; // import the class this method depends on
private HTable createTableAndLoadData(HMaster master, TableName tablename,
    int numRegions) throws Exception {
  assertTrue("ROWSIZE must > numregions:" + numRegions, ROWSIZE > numRegions);
  byte[][] splitRows = new byte[numRegions - 1][];
  for (int i = 0; i < splitRows.length; i++) {
    splitRows[i] = ROWS[(i + 1) * ROWSIZE / numRegions];
  }

  HTable table = TEST_UTIL.createTable(tablename, FAMILYNAME, splitRows);
  loadData(table);
  verifyRowCount(table, ROWSIZE);

  // sleep here is an ugly hack to allow region transitions to finish
  long timeout = System.currentTimeMillis() + waitTime;
  List<Pair<HRegionInfo, ServerName>> tableRegions;
  while (System.currentTimeMillis() < timeout) {
    tableRegions = MetaReader.getTableRegionsAndLocations(
        master.getCatalogTracker(), tablename);
    if (tableRegions.size() == numRegions)
      break;
    Thread.sleep(250);
  }

  tableRegions = MetaReader.getTableRegionsAndLocations(
      master.getCatalogTracker(), tablename);
  LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions));
  assertEquals(numRegions, tableRegions.size());
  return table;
}
 
Developer: tenggyut, Project: HIndex, Lines: 30, Source: TestRegionMergeTransactionOnCluster.java

Example 10: flush

import org.apache.hadoop.hbase.catalog.MetaReader; // import the class this method depends on
/**
 * Flush a table or an individual region.
 * Synchronous operation.
 *
 * @param tableNameOrRegionName table or region to flush
 * @throws IOException if a remote or network exception occurs
 * @throws InterruptedException
 */
public void flush(final byte[] tableNameOrRegionName)
throws IOException, InterruptedException {
  CatalogTracker ct = getCatalogTracker();
  try {
    Pair<HRegionInfo, ServerName> regionServerPair
      = getRegion(tableNameOrRegionName, ct);
    if (regionServerPair != null) {
      if (regionServerPair.getSecond() == null) {
        throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
      } else {
        flush(regionServerPair.getSecond(), regionServerPair.getFirst());
      }
    } else {
      final TableName tableName = checkTableExists(
          TableName.valueOf(tableNameOrRegionName), ct);
      List<Pair<HRegionInfo, ServerName>> pairs =
        MetaReader.getTableRegionsAndLocations(ct,
            tableName);
      for (Pair<HRegionInfo, ServerName> pair: pairs) {
        if (pair.getFirst().isOffline()) continue;
        if (pair.getSecond() == null) continue;
        try {
          flush(pair.getSecond(), pair.getFirst());
        } catch (NotServingRegionException e) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Trying to flush " + pair.getFirst() + ": " +
              StringUtils.stringifyException(e));
          }
        }
      }
    }
  } finally {
    cleanupCatalogTracker(ct);
  }
}
 
Developer: tenggyut, Project: HIndex, Lines: 44, Source: HBaseAdmin.java

Example 11: compact

import org.apache.hadoop.hbase.catalog.MetaReader; // import the class this method depends on
/**
 * Compact a table or an individual region.
 * Asynchronous operation.
 *
 * @param tableNameOrRegionName table or region to compact
 * @param columnFamily column family within a table or region
 * @param major True if we are to do a major compaction.
 * @throws IOException if a remote or network exception occurs
 * @throws InterruptedException
 */
private void compact(final byte[] tableNameOrRegionName,
  final byte[] columnFamily, final boolean major)
throws IOException, InterruptedException {
  CatalogTracker ct = getCatalogTracker();
  try {
    Pair<HRegionInfo, ServerName> regionServerPair
      = getRegion(tableNameOrRegionName, ct);
    if (regionServerPair != null) {
      if (regionServerPair.getSecond() == null) {
        throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
      } else {
        compact(regionServerPair.getSecond(), regionServerPair.getFirst(), major, columnFamily);
      }
    } else {
      final TableName tableName =
          checkTableExists(TableName.valueOf(tableNameOrRegionName), ct);
      List<Pair<HRegionInfo, ServerName>> pairs =
        MetaReader.getTableRegionsAndLocations(ct,
            tableName);
      for (Pair<HRegionInfo, ServerName> pair: pairs) {
        if (pair.getFirst().isOffline()) continue;
        if (pair.getSecond() == null) continue;
        try {
          compact(pair.getSecond(), pair.getFirst(), major, columnFamily);
        } catch (NotServingRegionException e) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Trying to" + (major ? " major" : "") + " compact " +
              pair.getFirst() + ": " +
              StringUtils.stringifyException(e));
          }
        }
      }
    }
  } finally {
    cleanupCatalogTracker(ct);
  }
}
 
Developer: tenggyut, Project: HIndex, Lines: 48, Source: HBaseAdmin.java

Example 12: split

import org.apache.hadoop.hbase.catalog.MetaReader; // import the class this method depends on
/**
 * Split a table or an individual region.
 * Asynchronous operation.
 *
 * @param tableNameOrRegionName table or region to split
 * @param splitPoint the explicit position to split on
 * @throws IOException if a remote or network exception occurs
 * @throws InterruptedException interrupt exception occurred
 */
public void split(final byte[] tableNameOrRegionName,
    final byte [] splitPoint) throws IOException, InterruptedException {
  CatalogTracker ct = getCatalogTracker();
  try {
    Pair<HRegionInfo, ServerName> regionServerPair
      = getRegion(tableNameOrRegionName, ct);
    if (regionServerPair != null) {
      if (regionServerPair.getSecond() == null) {
        throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
      } else {
        split(regionServerPair.getSecond(), regionServerPair.getFirst(), splitPoint);
      }
    } else {
      final TableName tableName =
          checkTableExists(TableName.valueOf(tableNameOrRegionName), ct);
      List<Pair<HRegionInfo, ServerName>> pairs =
        MetaReader.getTableRegionsAndLocations(ct,
            tableName);
      for (Pair<HRegionInfo, ServerName> pair: pairs) {
        // May not be a server for a particular row
        if (pair.getSecond() == null) continue;
        HRegionInfo r = pair.getFirst();
        // check for parents
        if (r.isSplitParent()) continue;
        // if a split point given, only split that particular region
        if (splitPoint != null && !r.containsRow(splitPoint)) continue;
        // call out to region server to do split now
        split(pair.getSecond(), pair.getFirst(), splitPoint);
      }
    }
  } finally {
    cleanupCatalogTracker(ct);
  }
}
 
Developer: tenggyut, Project: HIndex, Lines: 44, Source: HBaseAdmin.java

Example 13: flush

import org.apache.hadoop.hbase.catalog.MetaReader; // import the class this method depends on
/**
 * Flush a table or an individual region.
 * Synchronous operation.
 *
 * @param tableNameOrRegionName table or region to flush
 * @throws IOException if a remote or network exception occurs
 * @throws InterruptedException
 */
public void flush(final byte [] tableNameOrRegionName)
throws IOException, InterruptedException {
  CatalogTracker ct = getCatalogTracker();
  try {
    Pair<HRegionInfo, ServerName> regionServerPair
      = getRegion(tableNameOrRegionName, ct);
    if (regionServerPair != null) {
      if (regionServerPair.getSecond() == null) {
        throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
      } else {
        flush(regionServerPair.getSecond(), regionServerPair.getFirst());
      }
    } else {
      final String tableName = tableNameString(tableNameOrRegionName, ct);
      List<Pair<HRegionInfo, ServerName>> pairs =
        MetaReader.getTableRegionsAndLocations(ct,
            tableName);
      for (Pair<HRegionInfo, ServerName> pair: pairs) {
        if (pair.getFirst().isOffline()) continue;
        if (pair.getSecond() == null) continue;
        try {
          flush(pair.getSecond(), pair.getFirst());
        } catch (NotServingRegionException e) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Trying to flush " + pair.getFirst() + ": " +
              StringUtils.stringifyException(e));
          }
        }
      }
    }
  } finally {
    cleanupCatalogTracker(ct);
  }
}
 
Developer: wanhao, Project: IRIndex, Lines: 43, Source: HBaseAdmin.java

Example 14: compact

import org.apache.hadoop.hbase.catalog.MetaReader; // import the class this method depends on
/**
 * Compact a table or an individual region.
 * Asynchronous operation.
 *
 * @param tableNameOrRegionName table or region to compact
 * @param columnFamily column family within a table or region
 * @param major True if we are to do a major compaction.
 * @throws IOException if a remote or network exception occurs
 * @throws InterruptedException
 */
private void compact(final byte [] tableNameOrRegionName,
  final byte[] columnFamily, final boolean major)
throws IOException, InterruptedException {
  CatalogTracker ct = getCatalogTracker();
  try {
    Pair<HRegionInfo, ServerName> regionServerPair
      = getRegion(tableNameOrRegionName, ct);
    if (regionServerPair != null) {
      if (regionServerPair.getSecond() == null) {
        throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
      } else {
        compact(regionServerPair.getSecond(), regionServerPair.getFirst(), major, columnFamily);
      }
    } else {
      final String tableName = tableNameString(tableNameOrRegionName, ct);
      List<Pair<HRegionInfo, ServerName>> pairs =
        MetaReader.getTableRegionsAndLocations(ct,
            tableName);
      for (Pair<HRegionInfo, ServerName> pair: pairs) {
        if (pair.getFirst().isOffline()) continue;
        if (pair.getSecond() == null) continue;
        try {
          compact(pair.getSecond(), pair.getFirst(), major, columnFamily);
        } catch (NotServingRegionException e) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Trying to" + (major ? " major" : "") + " compact " +
              pair.getFirst() + ": " +
              StringUtils.stringifyException(e));
          }
        }
      }
    }
  } finally {
    cleanupCatalogTracker(ct);
  }
}
 
Developer: wanhao, Project: IRIndex, Lines: 47, Source: HBaseAdmin.java

Example 15: split

import org.apache.hadoop.hbase.catalog.MetaReader; // import the class this method depends on
/**
 * Split a table or an individual region.
 * Asynchronous operation.
 *
 * @param tableNameOrRegionName table or region to split
 * @param splitPoint the explicit position to split on
 * @throws IOException if a remote or network exception occurs
 * @throws InterruptedException interrupt exception occurred
 */
public void split(final byte [] tableNameOrRegionName,
    final byte [] splitPoint) throws IOException, InterruptedException {
  CatalogTracker ct = getCatalogTracker();
  try {
    Pair<HRegionInfo, ServerName> regionServerPair
      = getRegion(tableNameOrRegionName, ct);
    if (regionServerPair != null) {
      if (regionServerPair.getSecond() == null) {
        throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
      } else {
        split(regionServerPair.getSecond(), regionServerPair.getFirst(), splitPoint);
      }
    } else {
      final String tableName = tableNameString(tableNameOrRegionName, ct);
      List<Pair<HRegionInfo, ServerName>> pairs =
        MetaReader.getTableRegionsAndLocations(ct,
            tableName);
      for (Pair<HRegionInfo, ServerName> pair: pairs) {
        // May not be a server for a particular row
        if (pair.getSecond() == null) continue;
        HRegionInfo r = pair.getFirst();
        // check for parents
        if (r.isSplitParent()) continue;
        // if a split point given, only split that particular region
        if (splitPoint != null && !r.containsRow(splitPoint)) continue;
        // call out to region server to do split now
        split(pair.getSecond(), pair.getFirst(), splitPoint);
      }
    }
  } finally {
    cleanupCatalogTracker(ct);
  }
}
 
Developer: wanhao, Project: IRIndex, Lines: 43, Source: HBaseAdmin.java


Note: The org.apache.hadoop.hbase.catalog.MetaReader.getTableRegionsAndLocations examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce without permission.