

Java HRegionInfo.getEndKey Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HRegionInfo.getEndKey. If you are wondering what HRegionInfo.getEndKey does, how to use it, or want to see concrete examples, the curated code samples below may help. You can also explore other usage examples of the containing class, org.apache.hadoop.hbase.HRegionInfo.


The following presents 11 code examples of the HRegionInfo.getEndKey method, sorted by popularity by default.
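
Before the examples, here is a minimal, self-contained sketch of calling getEndKey() yourself. It is only an illustrative sketch: it assumes an HBase 1.x client API (matching the examples below) and a hypothetical table named "my_table". Note that getEndKey() never returns null; the last region of a table has an empty end key.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class GetEndKeyExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // List the regions of the (hypothetical) table and print each region's end key.
      List<HRegionInfo> regions = admin.getTableRegions(TableName.valueOf("my_table"));
      for (HRegionInfo region : regions) {
        byte[] endKey = region.getEndKey();
        // An empty end key marks the last region of the table.
        String printable = endKey.length == 0
            ? "(empty: last region)" : Bytes.toStringBinary(endKey);
        System.out.println(region.getRegionNameAsString() + " end key: " + printable);
      }
    }
  }
}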

Example 1: prepare

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on
/**
 * Does checks on split inputs.
 * @return <code>true</code> if the region is splittable else
 * <code>false</code> if it is not (e.g. it's already closed, etc.).
 */
public boolean prepare() throws IOException {
  if (!this.parent.isSplittable()) return false;
  // Split key can be null if this region is unsplittable; i.e. has refs.
  if (this.splitrow == null) return false;
  HRegionInfo hri = this.parent.getRegionInfo();
  parent.prepareToSplit();
  // Check splitrow.
  byte [] startKey = hri.getStartKey();
  byte [] endKey = hri.getEndKey();
  if (Bytes.equals(startKey, splitrow) ||
      !this.parent.getRegionInfo().containsRow(splitrow)) {
    LOG.info("Split row is not inside region key range or is equal to " +
        "startkey: " + Bytes.toStringBinary(this.splitrow));
    return false;
  }
  long rid = getDaughterRegionIdTimestamp(hri);
  this.hri_a = new HRegionInfo(hri.getTable(), startKey, this.splitrow, false, rid);
  this.hri_b = new HRegionInfo(hri.getTable(), this.splitrow, endKey, false, rid);

  transition(SplitTransactionPhase.PREPARED);

  return true;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 29, Source: SplitTransactionImpl.java

Example 2: verifyBounds

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on
private void verifyBounds(List<byte[]> expectedBounds, TableName tableName)
        throws Exception {
    // Get region boundaries from the cluster and verify their endpoints
    final Configuration conf = UTIL.getConfiguration();
    final int numRegions = expectedBounds.size()-1;
    final HTable hTable = new HTable(conf, tableName);
    final Map<HRegionInfo, ServerName> regionInfoMap = hTable.getRegionLocations();
    assertEquals(numRegions, regionInfoMap.size());
    for (Map.Entry<HRegionInfo, ServerName> entry: regionInfoMap.entrySet()) {
        final HRegionInfo regionInfo = entry.getKey();
        byte[] regionStart = regionInfo.getStartKey();
        byte[] regionEnd = regionInfo.getEndKey();

        // This region's start key should be one of the region boundaries
        int startBoundaryIndex = indexOfBytes(expectedBounds, regionStart);
        assertNotSame(-1, startBoundaryIndex);

        // This region's end key should be the region boundary that comes
        // after the starting boundary.
        byte[] expectedRegionEnd = expectedBounds.get(
                startBoundaryIndex+1);
        assertEquals(0, Bytes.compareTo(regionEnd, expectedRegionEnd));
    }
    hTable.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 26, Source: TestRegionSplitter.java

Example 3: verifyTableRegions

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on
void verifyTableRegions(Set<HRegionInfo> regions) {
  log("Verifying " + regions.size() + " regions: " + regions);

  byte[][] startKeys = new byte[regions.size()][];
  byte[][] endKeys = new byte[regions.size()][];

  int i=0;
  for (HRegionInfo region : regions) {
    startKeys[i] = region.getStartKey();
    endKeys[i] = region.getEndKey();
    i++;
  }

  Pair<byte[][], byte[][]> keys = new Pair<byte[][], byte[][]>(startKeys, endKeys);
  verifyStartEndKeys(keys);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 17, Source: TestEndToEndSplitTransaction.java

Example 4: cloneRegionInfo

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on
public static HRegionInfo cloneRegionInfo(TableName tableName, HRegionInfo snapshotRegionInfo) {
  HRegionInfo regionInfo = new HRegionInfo(tableName,
                    snapshotRegionInfo.getStartKey(), snapshotRegionInfo.getEndKey(),
                    snapshotRegionInfo.isSplit(), snapshotRegionInfo.getRegionId());
  regionInfo.setOffline(snapshotRegionInfo.isOffline());
  return regionInfo;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 8, Source: RestoreSnapshotHelper.java

Example 5: getMergedRegionInfo

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on
/**
 * Get merged region info through the specified two regions
 * @param a merging region A
 * @param b merging region B
 * @return the merged region info
 */
public static HRegionInfo getMergedRegionInfo(final HRegionInfo a,
    final HRegionInfo b) {
  long rid = EnvironmentEdgeManager.currentTime();
  // Regionid is timestamp. Merged region's id can't be less than that of
  // merging regions else will insert at wrong location in hbase:meta
  if (rid < a.getRegionId() || rid < b.getRegionId()) {
    LOG.warn("Clock skew; merging regions id are " + a.getRegionId()
        + " and " + b.getRegionId() + ", but current time here is " + rid);
    rid = Math.max(a.getRegionId(), b.getRegionId()) + 1;
  }

  byte[] startKey = null;
  byte[] endKey = null;
  // Choose the smaller as start key
  if (a.compareTo(b) <= 0) {
    startKey = a.getStartKey();
  } else {
    startKey = b.getStartKey();
  }
  // Choose the bigger as end key
  if (Bytes.equals(a.getEndKey(), HConstants.EMPTY_BYTE_ARRAY)
      || (!Bytes.equals(b.getEndKey(), HConstants.EMPTY_BYTE_ARRAY)
          && Bytes.compareTo(a.getEndKey(), b.getEndKey()) > 0)) {
    endKey = a.getEndKey();
  } else {
    endKey = b.getEndKey();
  }

  // Merged region is sorted between two merging regions in META
  HRegionInfo mergedRegionInfo = new HRegionInfo(a.getTable(), startKey,
      endKey, false, rid);
  return mergedRegionInfo;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 40, Source: RegionMergeTransactionImpl.java

Example 6: rowIsInRange

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on
public static boolean rowIsInRange(HRegionInfo info, final byte[] row, final int offset,
    final short length) {
  return ((info.getStartKey().length == 0) || (
      Bytes.compareTo(info.getStartKey(), 0, info.getStartKey().length, row, offset, length)
          <= 0)) && ((info.getEndKey().length == 0) || (
      Bytes.compareTo(info.getEndKey(), 0, info.getEndKey().length, row, offset, length) > 0));
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 8, Source: HRegion.java
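
One detail worth noting about this helper, and about getEndKey() in general: the end key is exclusive, so a row exactly equal to a region's end key belongs to the next region. A minimal sketch using the offset/length variant shown above (the table name and keys are hypothetical):

// Hypothetical region covering [ "a", "m" ): the end key "m" is exclusive.
HRegionInfo info = new HRegionInfo(TableName.valueOf("my_table"),
    Bytes.toBytes("a"), Bytes.toBytes("m"));
byte[] row = Bytes.toBytes("m");
// Expected to be false, because a row equal to the end key lies outside this region.
boolean inRange = HRegion.rowIsInRange(info, row, 0, (short) row.length);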

Example 7: compareRegionInfosWithoutReplicaId

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on
private static int compareRegionInfosWithoutReplicaId(HRegionInfo regionInfoA,
    HRegionInfo regionInfoB) {
  int result = regionInfoA.getTable().compareTo(regionInfoB.getTable());
  if (result != 0) {
    return result;
  }

  // Compare start keys.
  result = Bytes.compareTo(regionInfoA.getStartKey(), regionInfoB.getStartKey());
  if (result != 0) {
    return result;
  }

  // Compare end keys.
  result = Bytes.compareTo(regionInfoA.getEndKey(), regionInfoB.getEndKey());

  if (result != 0) {
    if (regionInfoA.getStartKey().length != 0
            && regionInfoA.getEndKey().length == 0) {
        return 1; // regionInfoA is the last region
    }
    if (regionInfoB.getStartKey().length != 0
            && regionInfoB.getEndKey().length == 0) {
        return -1; // regionInfoB is the last region
    }
    return result;
  }

  // regionId is usually milli timestamp -- this defines older stamps
  // to be "smaller" than newer stamps in sort order.
  if (regionInfoA.getRegionId() > regionInfoB.getRegionId()) {
    return 1;
  } else if (regionInfoA.getRegionId() < regionInfoB.getRegionId()) {
    return -1;
  }
  return 0;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 38, Source: RegionReplicaUtil.java

Example 8: getStartEndKeys

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on
@VisibleForTesting
Pair<byte[][], byte[][]> getStartEndKeys(List<RegionLocations> regions) {
  final byte[][] startKeyList = new byte[regions.size()][];
  final byte[][] endKeyList = new byte[regions.size()][];

  for (int i = 0; i < regions.size(); i++) {
    HRegionInfo region = regions.get(i).getRegionLocation().getRegionInfo();
    startKeyList[i] = region.getStartKey();
    endKeyList[i] = region.getEndKey();
  }

  return new Pair<>(startKeyList, endKeyList);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 14, Source: HRegionLocator.java

Example 9: doMetaScanResponse

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on
static ScanResponse doMetaScanResponse(final SortedMap<byte [], Pair<HRegionInfo, ServerName>> meta,
    final AtomicLong sequenceids, final ScanRequest request) {
  ScanResponse.Builder builder = ScanResponse.newBuilder();
  int max = request.getNumberOfRows();
  int count = 0;
  Map<byte [], Pair<HRegionInfo, ServerName>> tail =
    request.hasScan()? meta.tailMap(request.getScan().getStartRow().toByteArray()): meta;
  ClientProtos.Result.Builder resultBuilder = ClientProtos.Result.newBuilder();
  for (Map.Entry<byte [], Pair<HRegionInfo, ServerName>> e: tail.entrySet()) {
    // Can be 0 on open of a scanner -- i.e. rpc to setup scannerid only.
    if (max <= 0) break;
    if (++count > max) break;
    HRegionInfo hri = e.getValue().getFirst();
    ByteString row = ByteStringer.wrap(hri.getRegionName());
    resultBuilder.clear();
    resultBuilder.addCell(getRegionInfo(row, hri));
    resultBuilder.addCell(getServer(row, e.getValue().getSecond()));
    resultBuilder.addCell(getStartCode(row));
    builder.addResults(resultBuilder.build());
    // Set more to false if we are on the last region in table.
    if (hri.getEndKey().length <= 0) builder.setMoreResults(false);
    else builder.setMoreResults(true);
  }
  // If no scannerid, set one.
  builder.setScannerId(request.hasScannerId()?
    request.getScannerId(): sequenceids.incrementAndGet());
  return builder.build();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 29, Source: TestClientNoCluster.java

Example 10: checkTableInfo

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on
void checkTableInfo(TableInfoModel model) {
  assertEquals(model.getName(), TABLE.getNameAsString());
  Iterator<TableRegionModel> regions = model.getRegions().iterator();
  assertTrue(regions.hasNext());
  while (regions.hasNext()) {
    TableRegionModel region = regions.next();
    boolean found = false;
    for (HRegionLocation e: regionMap) {
      HRegionInfo hri = e.getRegionInfo();
      String hriRegionName = hri.getRegionNameAsString();
      String regionName = region.getName();
      if (hriRegionName.equals(regionName)) {
        found = true;
        byte[] startKey = hri.getStartKey();
        byte[] endKey = hri.getEndKey();
        ServerName serverName = e.getServerName();
        InetSocketAddress sa =
            new InetSocketAddress(serverName.getHostname(), serverName.getPort());
        String location = sa.getHostName() + ":" +
          Integer.valueOf(sa.getPort());
        assertEquals(hri.getRegionId(), region.getId());
        assertTrue(Bytes.equals(startKey, region.getStartKey()));
        assertTrue(Bytes.equals(endKey, region.getEndKey()));
        assertEquals(location, region.getLocation());
        break;
      }
    }
    assertTrue(found);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 31, Source: TestTableResource.java

Example 11: checkRegionBoundaries

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class the method depends on
public void checkRegionBoundaries() {
  try {
    ByteArrayComparator comparator = new ByteArrayComparator();
    List<HRegionInfo> regions = MetaScanner.listAllRegions(getConf(), connection, false);
    final RegionBoundariesInformation currentRegionBoundariesInformation =
        new RegionBoundariesInformation();
    Path hbaseRoot = FSUtils.getRootDir(getConf());
    for (HRegionInfo regionInfo : regions) {
      Path tableDir = FSUtils.getTableDir(hbaseRoot, regionInfo.getTable());
      currentRegionBoundariesInformation.regionName = regionInfo.getRegionName();
      // For each region, get the start and stop key from the META and compare them to the
      // same information from the Stores.
      Path path = new Path(tableDir, regionInfo.getEncodedName());
      FileSystem fs = path.getFileSystem(getConf());
      FileStatus[] files = fs.listStatus(path);
      // For all the column families in this region...
      byte[] storeFirstKey = null;
      byte[] storeLastKey = null;
      for (FileStatus file : files) {
        String fileName = file.getPath().toString();
        fileName = fileName.substring(fileName.lastIndexOf("/") + 1);
        if (!fileName.startsWith(".") && !fileName.endsWith("recovered.edits")) {
          FileStatus[] storeFiles = fs.listStatus(file.getPath());
          // For all the stores in this column family.
          for (FileStatus storeFile : storeFiles) {
            HFile.Reader reader = HFile.createReader(fs, storeFile.getPath(), new CacheConfig(
                getConf()), getConf());
            if ((reader.getFirstKey() != null)
                && ((storeFirstKey == null) || (comparator.compare(storeFirstKey,
                    reader.getFirstKey()) > 0))) {
              storeFirstKey = reader.getFirstKey();
            }
            if ((reader.getLastKey() != null)
                && ((storeLastKey == null) || (comparator.compare(storeLastKey,
                    reader.getLastKey())) < 0)) {
              storeLastKey = reader.getLastKey();
            }
            reader.close();
          }
        }
      }
      currentRegionBoundariesInformation.metaFirstKey = regionInfo.getStartKey();
      currentRegionBoundariesInformation.metaLastKey = regionInfo.getEndKey();
      currentRegionBoundariesInformation.storesFirstKey = keyOnly(storeFirstKey);
      currentRegionBoundariesInformation.storesLastKey = keyOnly(storeLastKey);
      if (currentRegionBoundariesInformation.metaFirstKey.length == 0)
        currentRegionBoundariesInformation.metaFirstKey = null;
      if (currentRegionBoundariesInformation.metaLastKey.length == 0)
        currentRegionBoundariesInformation.metaLastKey = null;

      // For a region to be correct, we need the META start key to be smaller or equal to the
      // smallest start key from all the stores, and the start key from the next META entry to
      // be bigger than the last key from all the current stores. First region start key is null;
      // Last region end key is null; some regions can be empty and not have any store.

      boolean valid = true;
      // Checking start key.
      if ((currentRegionBoundariesInformation.storesFirstKey != null)
          && (currentRegionBoundariesInformation.metaFirstKey != null)) {
        valid = valid
            && comparator.compare(currentRegionBoundariesInformation.storesFirstKey,
              currentRegionBoundariesInformation.metaFirstKey) >= 0;
      }
      // Checking stop key.
      if ((currentRegionBoundariesInformation.storesLastKey != null)
          && (currentRegionBoundariesInformation.metaLastKey != null)) {
        valid = valid
            && comparator.compare(currentRegionBoundariesInformation.storesLastKey,
              currentRegionBoundariesInformation.metaLastKey) < 0;
      }
      if (!valid) {
        errors.reportError(ERROR_CODE.BOUNDARIES_ERROR, "Found issues with regions boundaries",
          tablesInfo.get(regionInfo.getTable()));
        LOG.warn("Region's boundaries not alligned between stores and META for:");
        LOG.warn(currentRegionBoundariesInformation);
      }
    }
  } catch (IOException e) {
    LOG.error(e);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 82, Source: HBaseFsck.java


Note: The org.apache.hadoop.hbase.HRegionInfo.getEndKey method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's License; do not reproduce without permission.