

Java HRegionInfo.getHRegionInfo Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HRegionInfo.getHRegionInfo. If you are wondering exactly how HRegionInfo.getHRegionInfo is used, or are looking for concrete examples of calling it, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.HRegionInfo.


Below are 10 code examples of the HRegionInfo.getHRegionInfo method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
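Before the examples, here is a minimal, self-contained sketch (not taken from any of the examples below) of the pattern they all share: scan the hbase:meta catalog table and decode each row's serialized HRegionInfo with HRegionInfo.getHRegionInfo(Result). The class name ListMetaRegions is hypothetical, and the constructor-based HTable usage mirrors the 1.x-era API these examples target (it is deprecated in later releases).

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

// Hypothetical helper, for illustration only: lists every region recorded in
// hbase:meta by decoding the serialized HRegionInfo from each catalog row.
public class ListMetaRegions {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    Table meta = new HTable(conf, TableName.META_TABLE_NAME); // 1.x-era constructor, as in the examples below
    try {
      ResultScanner scanner = meta.getScanner(new Scan());
      for (Result r : scanner) {
        HRegionInfo info = HRegionInfo.getHRegionInfo(r); // null if the row has no REGIONINFO cell
        if (info != null) {
          System.out.println(info.getRegionNameAsString());
        }
      }
      scanner.close();
    } finally {
      meta.close();
    }
  }
}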

Example 1: wipeOutMeta

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
protected void wipeOutMeta() throws IOException {
  // Mess it up by blowing up meta.
  Admin admin = TEST_UTIL.getHBaseAdmin();
  Scan s = new Scan();
  Table meta = new HTable(conf, TableName.META_TABLE_NAME);
  ResultScanner scanner = meta.getScanner(s);
  List<Delete> dels = new ArrayList<Delete>();
  for (Result r : scanner) {
    HRegionInfo info =
        HRegionInfo.getHRegionInfo(r);
    if(info != null && !info.getTable().getNamespaceAsString()
        .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) {
      Delete d = new Delete(r.getRow());
      dels.add(d);
      admin.unassign(r.getRow(), true);
    }
  }
  meta.delete(dels);
  scanner.close();
  meta.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 22, Source: OfflineMetaRebuildTestCore.java

Example 2: nextRegion

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
private HRegionInfo nextRegion() throws IOException {
  try {
    Result results = getMetaRow();
    if (results == null) {
      return null;
    }
    HRegionInfo region = HRegionInfo.getHRegionInfo(results);
    if (region == null) {
      throw new NoSuchElementException("meta region entry missing " +
          Bytes.toString(HConstants.CATALOG_FAMILY) + ":" +
          Bytes.toString(HConstants.REGIONINFO_QUALIFIER));
    }
    if (!region.getTable().equals(this.tableName)) {
      return null;
    }
    return region;
  } catch (IOException e) {
    e = RemoteExceptionHandler.checkIOException(e);
    LOG.error("meta scanner error", e);
    metaScanner.close();
    throw e;
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 24, Source: HMerge.java

Example 3: getMetaRow

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
private Result getMetaRow() throws IOException {
  Result currentRow = metaScanner.next();
  boolean foundResult = false;
  while (currentRow != null) {
    LOG.info("Row: <" + Bytes.toStringBinary(currentRow.getRow()) + ">");
    byte[] regionInfoValue = currentRow.getValue(HConstants.CATALOG_FAMILY,
        HConstants.REGIONINFO_QUALIFIER);
    if (regionInfoValue == null || regionInfoValue.length == 0) {
      currentRow = metaScanner.next();
      continue;
    }
    HRegionInfo region = HRegionInfo.getHRegionInfo(currentRow);
    if (!region.getTable().equals(this.tableName)) {
      currentRow = metaScanner.next();
      continue;
    }
    foundResult = true;
    break;
  }
  return foundResult ? currentRow : null;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 22, Source: HMerge.java

Example 4: getRegionInfo

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
@Override
public TRegionInfo getRegionInfo(ByteBuffer searchRow) throws IOError {
  try {
    byte[] row = getBytes(searchRow);
    Result startRowResult =
        getRowOrBefore(TableName.META_TABLE_NAME.getName(), row, HConstants.CATALOG_FAMILY);

    if (startRowResult == null) {
      throw new IOException("Cannot find row in "+ TableName.META_TABLE_NAME+", row="
                            + Bytes.toStringBinary(row));
    }

    // find region start and end keys
    HRegionInfo regionInfo = HRegionInfo.getHRegionInfo(startRowResult);
    if (regionInfo == null) {
      throw new IOException("HRegionInfo REGIONINFO was null or " +
                            " empty in Meta for row="
                            + Bytes.toStringBinary(row));
    }
    TRegionInfo region = new TRegionInfo();
    region.setStartKey(regionInfo.getStartKey());
    region.setEndKey(regionInfo.getEndKey());
    region.id = regionInfo.getRegionId();
    region.setName(regionInfo.getRegionName());
    region.version = regionInfo.getVersion();

    // find region assignment to server
    ServerName serverName = HRegionInfo.getServerName(startRowResult);
    if (serverName != null) {
      region.setServerName(Bytes.toBytes(serverName.getHostname()));
      region.port = serverName.getPort();
    }
    return region;
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 39, Source: ThriftServerRunner.java

Example 5: addToEachStartKey

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
private static int addToEachStartKey(final int expected) throws IOException {
  HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
  HTable meta = new HTable(TEST_UTIL.getConfiguration(),
      TableName.META_TABLE_NAME);
  int rows = 0;
  Scan scan = new Scan();
  scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
  ResultScanner s = meta.getScanner(scan);
  for (Result r = null; (r = s.next()) != null;) {
    HRegionInfo hri = HRegionInfo.getHRegionInfo(r);
    if (hri == null) break;
    if(!hri.getTable().equals(TABLENAME)) {
      continue;
    }
    // If start key, add 'aaa'.
    byte [] row = getStartKey(hri);
    Put p = new Put(row);
    p.setDurability(Durability.SKIP_WAL);
    p.add(getTestFamily(), getTestQualifier(), row);
    t.put(p);
    rows++;
  }
  s.close();
  Assert.assertEquals(expected, rows);
  t.close();
  meta.close();
  return rows;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 29, Source: TestZKBasedOpenCloseRegion.java

Example 6: addToEachStartKey

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
private static int addToEachStartKey(final int expected) throws IOException {
  Table t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
  Table meta = new HTable(TEST_UTIL.getConfiguration(),
      TableName.META_TABLE_NAME);
  int rows = 0;
  Scan scan = new Scan();
  scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
  ResultScanner s = meta.getScanner(scan);
  for (Result r = null; (r = s.next()) != null;) {
    HRegionInfo hri = HRegionInfo.getHRegionInfo(r);
    if (hri == null) break;
    if (!hri.getTable().equals(TABLENAME)) {
      continue;
    }

    // If start key, add 'aaa'.
    byte [] row = getStartKey(hri);
    Put p = new Put(row);
    p.setDurability(Durability.SKIP_WAL);
    p.add(getTestFamily(), getTestQualifier(), row);
    t.put(p);
    rows++;
  }
  s.close();
  Assert.assertEquals(expected, rows);
  t.close();
  meta.close();
  return rows;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 33, Source: TestMasterTransitions.java

Example 7: getRegion

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
/**
 * @param regionName Name of a region.
 * @return a pair of HRegionInfo and ServerName if <code>regionName</code> is
 *  a verified region name (we call {@link
 *  MetaTableAccessor#getRegion(HConnection, byte[])}), else null.
 * Throws IllegalArgumentException if <code>regionName</code> is null.
 * @throws IOException
 */
Pair<HRegionInfo, ServerName> getRegion(final byte[] regionName) throws IOException {
  if (regionName == null) {
    throw new IllegalArgumentException("Pass a table name or region name");
  }
  Pair<HRegionInfo, ServerName> pair =
    MetaTableAccessor.getRegion(connection, regionName);
  if (pair == null) {
    final AtomicReference<Pair<HRegionInfo, ServerName>> result =
      new AtomicReference<Pair<HRegionInfo, ServerName>>(null);
    final String encodedName = Bytes.toString(regionName);
    MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
      @Override
      public boolean processRow(Result data) throws IOException {
        HRegionInfo info = HRegionInfo.getHRegionInfo(data);
        if (info == null) {
          LOG.warn("No serialized HRegionInfo in " + data);
          return true;
        }
        RegionLocations rl = MetaTableAccessor.getRegionLocations(data);
        boolean matched = false;
        ServerName sn = null;
        for (HRegionLocation h : rl.getRegionLocations()) {
          if (h != null && encodedName.equals(h.getRegionInfo().getEncodedName())) {
            sn = h.getServerName();
            info = h.getRegionInfo();
            matched = true;
          }
        }
        if (!matched) return true;
        result.set(new Pair<HRegionInfo, ServerName>(info, sn));
        return false; // found the region, stop
      }
    };

    MetaScanner.metaScan(connection, visitor, null);
    pair = result.get();
  }
  return pair;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 49, Source: HBaseAdmin.java

Example 8: getMergedRegionsAndSplitParents

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
/**
 * Scans hbase:meta and returns the number of rows scanned, a map of merged
 * regions, and an ordered map of split parents. If the given table name is
 * null, merged regions and split parents of all tables are returned; otherwise
 * only those of the specified table.
 * @param tableName null represents all tables
 * @return triple of the scanned row count, the map of merged regions, and the
 *         map of split parent region infos
 * @throws IOException
 */
Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>> getMergedRegionsAndSplitParents(
    final TableName tableName) throws IOException {
  final boolean isTableSpecified = (tableName != null);
  // TODO: Only works with single hbase:meta region currently.  Fix.
  final AtomicInteger count = new AtomicInteger(0);
  // Keep Map of found split parents.  There are candidates for cleanup.
  // Use a comparator that has split parents come before its daughters.
  final Map<HRegionInfo, Result> splitParents =
    new TreeMap<HRegionInfo, Result>(new SplitParentFirstComparator());
  final Map<HRegionInfo, Result> mergedRegions = new TreeMap<HRegionInfo, Result>();
  // This visitor collects split parents and counts rows in the hbase:meta table

  MetaScannerVisitor visitor = new MetaScanner.MetaScannerVisitorBase() {
    @Override
    public boolean processRow(Result r) throws IOException {
      if (r == null || r.isEmpty()) return true;
      count.incrementAndGet();
      HRegionInfo info = HRegionInfo.getHRegionInfo(r);
      if (info == null) return true; // Keep scanning
      if (isTableSpecified
          && info.getTable().compareTo(tableName) > 0) {
        // Another table, stop scanning
        return false;
      }
      if (info.isSplitParent()) splitParents.put(info, r);
      if (r.getValue(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER) != null) {
        mergedRegions.put(info, r);
      }
      // Returning true means "keep scanning"
      return true;
    }
  };

  // Run full scan of hbase:meta catalog table passing in our custom visitor with
  // the start row
  MetaScanner.metaScan(this.connection, visitor, tableName);

  return new Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>>(
      count.get(), mergedRegions, splitParents);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 51, Source: CatalogJanitor.java

Example 9: testCleanMergeReference

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
@Test
public void testCleanMergeReference() throws Exception {
  LOG.info("Starting testCleanMergeReference");
  admin.enableCatalogJanitor(false);
  try {
    final TableName tableName =
        TableName.valueOf("testCleanMergeReference");
    // Create table and load data.
    Table table = createTableAndLoadData(master, tableName);
    // Merge 1st and 2nd region
    mergeRegionsAndVerifyRegionNum(master, tableName, 0, 1,
        INITIAL_REGION_NUM - 1);
    verifyRowCount(table, ROWSIZE);
    table.close();

    List<Pair<HRegionInfo, ServerName>> tableRegions = MetaTableAccessor
        .getTableRegionsAndLocations(master.getZooKeeper(), master.getConnection(), tableName);
    HRegionInfo mergedRegionInfo = tableRegions.get(0).getFirst();
    HTableDescriptor tableDescriptor = master.getTableDescriptors().get(
        tableName);
    Result mergedRegionResult = MetaTableAccessor.getRegionResult(
      master.getConnection(), mergedRegionInfo.getRegionName());

    // contains merge reference in META
    assertTrue(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY,
        HConstants.MERGEA_QUALIFIER) != null);
    assertTrue(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY,
        HConstants.MERGEB_QUALIFIER) != null);

    // the merging regions' directories still exist in the file system
    HRegionInfo regionA = HRegionInfo.getHRegionInfo(mergedRegionResult,
        HConstants.MERGEA_QUALIFIER);
    HRegionInfo regionB = HRegionInfo.getHRegionInfo(mergedRegionResult,
        HConstants.MERGEB_QUALIFIER);
    FileSystem fs = master.getMasterFileSystem().getFileSystem();
    Path rootDir = master.getMasterFileSystem().getRootDir();

    Path tabledir = FSUtils.getTableDir(rootDir, mergedRegionInfo.getTable());
    Path regionAdir = new Path(tabledir, regionA.getEncodedName());
    Path regionBdir = new Path(tabledir, regionB.getEncodedName());
    assertTrue(fs.exists(regionAdir));
    assertTrue(fs.exists(regionBdir));

    admin.compactRegion(mergedRegionInfo.getRegionName());
    // wait until merged region doesn't have reference file
    long timeout = System.currentTimeMillis() + waitTime;
    HRegionFileSystem hrfs = new HRegionFileSystem(
        TEST_UTIL.getConfiguration(), fs, tabledir, mergedRegionInfo);
    while (System.currentTimeMillis() < timeout) {
      if (!hrfs.hasReferences(tableDescriptor)) {
        break;
      }
      Thread.sleep(50);
    }
    assertFalse(hrfs.hasReferences(tableDescriptor));

    // run CatalogJanitor to clean merge references in hbase:meta and archive the
    // files of merging regions
    int cleaned = admin.runCatalogScan();
    assertTrue(cleaned > 0);
    assertFalse(fs.exists(regionAdir));
    assertFalse(fs.exists(regionBdir));

    mergedRegionResult = MetaTableAccessor.getRegionResult(
      master.getConnection(), mergedRegionInfo.getRegionName());
    assertFalse(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY,
        HConstants.MERGEA_QUALIFIER) != null);
    assertFalse(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY,
        HConstants.MERGEB_QUALIFIER) != null);

  } finally {
    admin.enableCatalogJanitor(true);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 75, Source: TestRegionMergeTransactionOnCluster.java

Example 10: getHRegionInfo

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
/**
 * Returns HRegionInfo object from the column
 * HConstants.CATALOG_FAMILY:HConstants.REGIONINFO_QUALIFIER of the catalog
 * table Result.
 * @param data a Result object from the catalog table scan
 * @return HRegionInfo or null
 * @deprecated Use {@link org.apache.hadoop.hbase.MetaTableAccessor#getRegionLocations(Result)}
 */
@Deprecated
public static HRegionInfo getHRegionInfo(Result data) {
  return HRegionInfo.getHRegionInfo(data);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 13, Source: MetaScanner.java
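The deprecation note in Example 10 points to MetaTableAccessor#getRegionLocations(Result) as the replacement. Here is a hedged sketch of that replacement path, assuming the HBase 1.x-era MetaTableAccessor/RegionLocations API these examples target (verify the signatures against your HBase version); the class name RegionInfoFromCatalogRow is hypothetical.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.client.Result;

// Sketch only: decodes the primary replica's HRegionInfo from a catalog row
// via MetaTableAccessor.getRegionLocations(Result) instead of the deprecated
// HRegionInfo.getHRegionInfo(Result).
public class RegionInfoFromCatalogRow {
  /** Returns the primary replica's HRegionInfo from a catalog table row, or null. */
  public static HRegionInfo primaryRegionInfo(Result catalogRow) {
    RegionLocations locations = MetaTableAccessor.getRegionLocations(catalogRow);
    if (locations == null) {
      return null; // row carried no serialized region info
    }
    HRegionLocation primary = locations.getRegionLocation();
    return primary == null ? null : primary.getRegionInfo();
  }
}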


Note: The org.apache.hadoop.hbase.HRegionInfo.getHRegionInfo method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.