

Java HRegionInfo.getTable Method Code Examples

This article collects typical usage examples of the HRegionInfo.getTable method from the Java class org.apache.hadoop.hbase.HRegionInfo. If you are wondering what HRegionInfo.getTable does, how to use it, or where to find examples of it, the curated code samples below should help. You can also explore further usage examples of org.apache.hadoop.hbase.HRegionInfo, the class this method belongs to.


The following presents 15 code examples of the HRegionInfo.getTable method, sorted by popularity by default.
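Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what the method does: an HRegionInfo describes one region of a table, and getTable() recovers the TableName that region belongs to. The table name "demo_table" and the key range are hypothetical, chosen purely for illustration.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class GetTableDemo {
  public static void main(String[] args) {
    // Describe a region of a hypothetical table "demo_table" covering the key range ["a", "z").
    HRegionInfo hri = new HRegionInfo(TableName.valueOf("demo_table"),
        Bytes.toBytes("a"), Bytes.toBytes("z"));
    // getTable() recovers the table name from the region's identity.
    TableName table = hri.getTable();
    System.out.println(table.getNameAsString()); // prints "demo_table"
  }
}

As the examples below show, the typical pattern is to call getTable() on a region obtained from somewhere else (region states, a WAL key, a meta scan) and then use the returned TableName to index per-table data structures or look up the table descriptor.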

Example 1: deleteRegion

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
/**
 * Remove a region from all state maps.
 */
@VisibleForTesting
public synchronized void deleteRegion(final HRegionInfo hri) {
  String encodedName = hri.getEncodedName();
  regionsInTransition.remove(encodedName);
  regionStates.remove(encodedName);
  TableName table = hri.getTable();
  Map<String, RegionState> indexMap = regionStatesTableIndex.get(table);
  indexMap.remove(encodedName);
  if (indexMap.size() == 0)
    regionStatesTableIndex.remove(table);
  lastAssignments.remove(encodedName);
  ServerName sn = regionAssignments.remove(hri);
  if (sn != null) {
    Set<HRegionInfo> regions = serverHoldings.get(sn);
    regions.remove(hri);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 21, Source: RegionStates.java

Example 2: replicaRegionsNotRecordedInMeta

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
/**
 * Get a list of replica regions that are not yet recorded in meta. We might not
 * have recorded the locations of the replicas since the replicas may not have
 * been online yet, the master restarted in the middle of assigning, ZK was
 * erased, etc.
 * @param regionsRecordedInMeta the list of regions we know are recorded in meta,
 * either as a default replica or as the location of a replica
 * @param master the master services, used to look up the table descriptors
 * @return the list of replica regions not recorded in meta
 * @throws IOException
 */
public static List<HRegionInfo> replicaRegionsNotRecordedInMeta(
    Set<HRegionInfo> regionsRecordedInMeta, MasterServices master) throws IOException {
  List<HRegionInfo> regionsNotRecordedInMeta = new ArrayList<HRegionInfo>();
  for (HRegionInfo hri : regionsRecordedInMeta) {
    TableName table = hri.getTable();
    HTableDescriptor htd = master.getTableDescriptors().get(table);
    // look at the HTD for the replica count. That's the source of truth
    int desiredRegionReplication = htd.getRegionReplication();
    for (int i = 0; i < desiredRegionReplication; i++) {
      HRegionInfo replica = RegionReplicaUtil.getRegionInfoForReplica(hri, i);
      if (regionsRecordedInMeta.contains(replica)) continue;
      regionsNotRecordedInMeta.add(replica);
    }
  }
  return regionsNotRecordedInMeta;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 28, Source: AssignmentManager.java

Example 3: prepare

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
/**
 * Does checks on split inputs.
 * @return <code>true</code> if the region is splittable else
 * <code>false</code> if it is not (e.g. its already closed, etc.).
 */
public boolean prepare() throws IOException {
  if (!this.parent.isSplittable()) return false;
  // Split key can be null if this region is unsplittable; i.e. has refs.
  if (this.splitrow == null) return false;
  HRegionInfo hri = this.parent.getRegionInfo();
  parent.prepareToSplit();
  // Check splitrow.
  byte [] startKey = hri.getStartKey();
  byte [] endKey = hri.getEndKey();
  if (Bytes.equals(startKey, splitrow) ||
      !this.parent.getRegionInfo().containsRow(splitrow)) {
    LOG.info("Split row is not inside region key range or is equal to " +
        "startkey: " + Bytes.toStringBinary(this.splitrow));
    return false;
  }
  long rid = getDaughterRegionIdTimestamp(hri);
  this.hri_a = new HRegionInfo(hri.getTable(), startKey, this.splitrow, false, rid);
  this.hri_b = new HRegionInfo(hri.getTable(), this.splitrow, endKey, false, rid);

  transition(SplitTransactionPhase.PREPARED);

  return true;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 29, Source: SplitTransactionImpl.java

Example 4: writeMarker

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
private static long writeMarker(final WAL wal, final HTableDescriptor htd, final HRegionInfo hri,
    final WALEdit edit, final MultiVersionConcurrencyControl mvcc, final boolean sync)
throws IOException {
  // TODO: Pass in current time to use?
  WALKey key =
    new HLogKey(hri.getEncodedNameAsBytes(), hri.getTable(), System.currentTimeMillis(), mvcc);
  // Add it to the log but the false specifies that we don't need to add it to the memstore
  long trx = MultiVersionConcurrencyControl.NONE;
  try {
    trx = wal.append(htd, hri, key, edit, false);
    if (sync) wal.sync(trx);
  } finally {
    // If you get hung here, is it a real WAL or a mocked WAL? If the latter, you need to
    // trip the latch that is inside in getWriteEntry up in your mock. See down in the append
    // called from onEvent in FSHLog.
    MultiVersionConcurrencyControl.WriteEntry we = key.getWriteEntry();
    if (mvcc != null && we != null) mvcc.complete(we);
  }
  return trx;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 21, Source: WALUtil.java

Example 5: hasFamilyQualifierPermission

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
/**
 * Returns <code>true</code> if the current user is allowed the given action
 * over at least one of the column qualifiers in the given column families.
 */
private boolean hasFamilyQualifierPermission(User user,
    Action perm,
    RegionCoprocessorEnvironment env,
    Map<byte[], ? extends Collection<byte[]>> familyMap)
  throws IOException {
  HRegionInfo hri = env.getRegion().getRegionInfo();
  TableName tableName = hri.getTable();

  if (user == null) {
    return false;
  }

  if (familyMap != null && familyMap.size() > 0) {
    // at least one family must be allowed
    for (Map.Entry<byte[], ? extends Collection<byte[]>> family :
        familyMap.entrySet()) {
      if (family.getValue() != null && !family.getValue().isEmpty()) {
        for (byte[] qualifier : family.getValue()) {
          if (authManager.matchPermission(user, tableName,
              family.getKey(), qualifier, perm)) {
            return true;
          }
        }
      } else {
        if (authManager.matchPermission(user, tableName, family.getKey(),
            perm)) {
          return true;
        }
      }
    }
  } else if (LOG.isDebugEnabled()) {
    LOG.debug("Empty family map passed for permission check");
  }

  return false;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 41, Source: AccessController.java

Example 6: getTableName

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
private TableName getTableName(Region region) {
  HRegionInfo regionInfo = region.getRegionInfo();
  if (regionInfo != null) {
    return regionInfo.getTable();
  }
  return null;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 8, Source: AccessController.java

Example 7: addRegion

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
private void addRegion(HRegionInfo regionInfo) {
  // Process the region name to region info map
  regionNameToRegionInfoMap.put(regionInfo.getRegionNameAsString(), regionInfo);

  // Process the table to region map
  TableName tableName = regionInfo.getTable();
  List<HRegionInfo> regionList = tableToRegionMap.get(tableName);
  if (regionList == null) {
    regionList = new ArrayList<HRegionInfo>();
  }
  // Add the current region info into the tableToRegionMap
  regionList.add(regionInfo);
  tableToRegionMap.put(tableName, regionList);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 15, Source: SnapshotOfRegionAssignmentFromMeta.java

Example 8: putRegionState

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
private RegionState putRegionState(RegionState regionState) {
  HRegionInfo hri = regionState.getRegion();
  String encodedName = hri.getEncodedName();
  TableName table = hri.getTable();
  RegionState oldState = regionStates.put(encodedName, regionState);
  Map<String, RegionState> map = regionStatesTableIndex.get(table);
  if (map == null) {
    map = new HashMap<String, RegionState>();
    regionStatesTableIndex.put(table, map);
  }
  map.put(encodedName, regionState);
  return oldState;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 14, Source: RegionStates.java

Example 9: assignAllUserRegions

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
/**
 * Assigns all user regions, if any exist.  Used during cluster startup.
 * <p>
 * This is a synchronous call and will return once every region has been
 * assigned.  If anything fails, an exception is thrown and the cluster
 * should be shut down.
 * @throws InterruptedException
 * @throws IOException
 */
private void assignAllUserRegions(Map<HRegionInfo, ServerName> allRegions)
    throws IOException, InterruptedException {
  if (allRegions == null || allRegions.isEmpty()) return;

  // Determine what type of assignment to do on startup
  boolean retainAssignment = server.getConfiguration().
    getBoolean("hbase.master.startup.retainassign", true);

  Set<HRegionInfo> regionsFromMetaScan = allRegions.keySet();
  if (retainAssignment) {
    assign(allRegions);
  } else {
    List<HRegionInfo> regions = new ArrayList<HRegionInfo>(regionsFromMetaScan);
    assign(regions);
  }

  for (HRegionInfo hri : regionsFromMetaScan) {
    TableName tableName = hri.getTable();
    if (!tableStateManager.isTableState(tableName,
        ZooKeeperProtos.Table.State.ENABLED)) {
      setEnabledTable(tableName);
    }
  }
  // assign all the replicas that were not recorded in the meta
  assign(replicaRegionsNotRecordedInMeta(regionsFromMetaScan, server));
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 36, Source: AssignmentManager.java

Example 10: balance

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
/**
 * @param plan Plan to execute.
 */
public void balance(final RegionPlan plan) {

  HRegionInfo hri = plan.getRegionInfo();
  TableName tableName = hri.getTable();
  if (tableStateManager.isTableState(tableName,
    ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
    LOG.info("Ignored moving region of disabling/disabled table "
      + tableName);
    return;
  }

  // Move the region only if it's assigned
  String encodedName = hri.getEncodedName();
  ReentrantLock lock = locker.acquireLock(encodedName);
  try {
    if (!regionStates.isRegionOnline(hri)) {
      RegionState state = regionStates.getRegionState(encodedName);
      LOG.info("Ignored moving region not assigned: " + hri + ", "
        + (state == null ? "not in region states" : state));
      return;
    }
    synchronized (this.regionPlans) {
      this.regionPlans.put(plan.getRegionName(), plan);
    }
    unassign(hri, false, plan.getDestination());
  } finally {
    lock.unlock();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 33, Source: AssignmentManager.java

Example 11: getMergedRegionInfo

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
/**
 * Get merged region info through the specified two regions
 * @param a merging region A
 * @param b merging region B
 * @return the merged region info
 */
public static HRegionInfo getMergedRegionInfo(final HRegionInfo a,
    final HRegionInfo b) {
  long rid = EnvironmentEdgeManager.currentTime();
  // The region id is a timestamp. The merged region's id can't be less than
  // those of the merging regions, else it will sort to the wrong location in hbase:meta.
  if (rid < a.getRegionId() || rid < b.getRegionId()) {
    LOG.warn("Clock skew; merging regions' ids are " + a.getRegionId()
        + " and " + b.getRegionId() + ", but current time here is " + rid);
    rid = Math.max(a.getRegionId(), b.getRegionId()) + 1;
  }

  byte[] startKey = null;
  byte[] endKey = null;
  // Choose the smaller as start key
  if (a.compareTo(b) <= 0) {
    startKey = a.getStartKey();
  } else {
    startKey = b.getStartKey();
  }
  // Choose the bigger as end key
  if (Bytes.equals(a.getEndKey(), HConstants.EMPTY_BYTE_ARRAY)
      || (!Bytes.equals(b.getEndKey(), HConstants.EMPTY_BYTE_ARRAY)
          && Bytes.compareTo(a.getEndKey(), b.getEndKey()) > 0)) {
    endKey = a.getEndKey();
  } else {
    endKey = b.getEndKey();
  }

  // Merged region is sorted between two merging regions in META
  HRegionInfo mergedRegionInfo = new HRegionInfo(a.getTable(), startKey,
      endKey, false, rid);
  return mergedRegionInfo;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 40, Source: RegionMergeTransactionImpl.java

Example 12: initialize

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
/**
 * Initialize the region assignment snapshot by scanning the hbase:meta table
 * @throws IOException
 */
public void initialize() throws IOException {
  LOG.info("Start to scan the hbase:meta for the current region assignment " +
    "snappshot");
  // TODO: at some point this code could live in the MetaTableAccessor
  Visitor v = new Visitor() {
    @Override
    public boolean visit(Result result) throws IOException {
      try {
        if (result ==  null || result.isEmpty()) return true;
        RegionLocations rl = MetaTableAccessor.getRegionLocations(result);
        if (rl == null) return true;
        HRegionInfo hri = rl.getRegionLocation(0).getRegionInfo();
        if (hri == null) return true;
        if (hri.getTable() == null) return true;
        if (disabledTables.contains(hri.getTable())) {
          return true;
        }
        // Skip offlined split parents if we are excluding them
        if (excludeOfflinedSplitParents && hri.isSplit()) return true;
        HRegionLocation[] hrls = rl.getRegionLocations();

        // Add the current assignment to the snapshot for all replicas
        for (int i = 0; i < hrls.length; i++) {
          if (hrls[i] == null) continue;
          hri = hrls[i].getRegionInfo();
          if (hri == null) continue;
          addAssignment(hri, hrls[i].getServerName());
          addRegion(hri);
        }

        // the code below is to handle favored nodes
        byte[] favoredNodes = result.getValue(HConstants.CATALOG_FAMILY,
            FavoredNodeAssignmentHelper.FAVOREDNODES_QUALIFIER);
        if (favoredNodes == null) return true;
        // Parse the favored nodes into a server list
        ServerName[] favoredServerList =
            FavoredNodeAssignmentHelper.getFavoredNodesList(favoredNodes);
        // Add the favored nodes into assignment plan
        existingAssignmentPlan.updateFavoredNodesMap(hri,
            Arrays.asList(favoredServerList));
        return true;
      } catch (RuntimeException e) {
        LOG.error("Catche remote exception " + e.getMessage() +
            " when processing" + result);
        throw e;
      }
    }
  };
  // Scan hbase:meta to pick up user regions
  MetaTableAccessor.fullScan(connection, v);
  //regionToRegionServerMap = regions;
  LOG.info("Finished to scan the hbase:meta for the current region assignment" +
    "snapshot");
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 59, Source: SnapshotOfRegionAssignmentFromMeta.java

Example 13: testNonLegacyWALKeysDoNotExplode

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
@Test
public void testNonLegacyWALKeysDoNotExplode() throws Exception {
  TableName tableName = TableName.valueOf(TEST_TABLE);
  final HTableDescriptor htd = createBasic3FamilyHTD(Bytes
      .toString(TEST_TABLE));
  final HRegionInfo hri = new HRegionInfo(tableName, null, null);
  MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();

  fs.mkdirs(new Path(FSUtils.getTableDir(hbaseRootDir, tableName), hri.getEncodedName()));

  final Configuration newConf = HBaseConfiguration.create(this.conf);

  final WAL wal = wals.getWAL(UNSPECIFIED_REGION);
  final SampleRegionWALObserver newApi = getCoprocessor(wal, SampleRegionWALObserver.class);
  newApi.setTestValues(TEST_TABLE, TEST_ROW, null, null, null, null, null, null);
  final SampleRegionWALObserver oldApi = getCoprocessor(wal,
      SampleRegionWALObserver.Legacy.class);
  oldApi.setTestValues(TEST_TABLE, TEST_ROW, null, null, null, null, null, null);

  LOG.debug("ensuring wal entries haven't happened before we start");
  assertFalse(newApi.isPreWALWriteCalled());
  assertFalse(newApi.isPostWALWriteCalled());
  assertFalse(newApi.isPreWALWriteDeprecatedCalled());
  assertFalse(newApi.isPostWALWriteDeprecatedCalled());
  assertFalse(oldApi.isPreWALWriteCalled());
  assertFalse(oldApi.isPostWALWriteCalled());
  assertFalse(oldApi.isPreWALWriteDeprecatedCalled());
  assertFalse(oldApi.isPostWALWriteDeprecatedCalled());

  LOG.debug("writing to WAL with non-legacy keys.");
  final int countPerFamily = 5;
  for (HColumnDescriptor hcd : htd.getFamilies()) {
    addWALEdits(tableName, hri, TEST_ROW, hcd.getName(), countPerFamily,
        EnvironmentEdgeManager.getDelegate(), wal, htd, mvcc);
  }

  LOG.debug("Verify that only the non-legacy CP saw edits.");
  assertTrue(newApi.isPreWALWriteCalled());
  assertTrue(newApi.isPostWALWriteCalled());
  assertFalse(newApi.isPreWALWriteDeprecatedCalled());
  assertFalse(newApi.isPostWALWriteDeprecatedCalled());
  // wish we could test that the log message happened :/
  assertFalse(oldApi.isPreWALWriteCalled());
  assertFalse(oldApi.isPostWALWriteCalled());
  assertFalse(oldApi.isPreWALWriteDeprecatedCalled());
  assertFalse(oldApi.isPostWALWriteDeprecatedCalled());

  LOG.debug("reseting cp state.");
  newApi.setTestValues(TEST_TABLE, TEST_ROW, null, null, null, null, null, null);
  oldApi.setTestValues(TEST_TABLE, TEST_ROW, null, null, null, null, null, null);

  LOG.debug("write a log edit that supports legacy cps.");
  final long now = EnvironmentEdgeManager.currentTime();
  final WALKey legacyKey = new HLogKey(hri.getEncodedNameAsBytes(), hri.getTable(), now);
  final WALEdit edit = new WALEdit();
  final byte[] nonce = Bytes.toBytes("1772");
  edit.add(new KeyValue(TEST_ROW, TEST_FAMILY[0], nonce, now, nonce));
  final long txid = wal.append(htd, hri, legacyKey, edit, true);
  wal.sync(txid);

  LOG.debug("Make sure legacy cps can see supported edits after having been skipped.");
  assertTrue("non-legacy WALObserver didn't see pre-write.", newApi.isPreWALWriteCalled());
  assertTrue("non-legacy WALObserver didn't see post-write.", newApi.isPostWALWriteCalled());
  assertFalse("non-legacy WALObserver shouldn't have seen legacy pre-write.",
      newApi.isPreWALWriteDeprecatedCalled());
  assertFalse("non-legacy WALObserver shouldn't have seen legacy post-write.",
      newApi.isPostWALWriteDeprecatedCalled());
  assertTrue("legacy WALObserver didn't see pre-write.", oldApi.isPreWALWriteCalled());
  assertTrue("legacy WALObserver didn't see post-write.", oldApi.isPostWALWriteCalled());
  assertTrue("legacy WALObserver didn't see legacy pre-write.",
      oldApi.isPreWALWriteDeprecatedCalled());
  assertTrue("legacy WALObserver didn't see legacy post-write.",
      oldApi.isPostWALWriteDeprecatedCalled());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 75, Source: TestWALObserver.java

Example 14: testReference

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
/**
 * Test that our mechanism of writing store files in one region to reference
 * store files in other regions works.
 * @throws IOException
 */
public void testReference() throws IOException {
  final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testReferenceTb"));
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
    conf, fs, new Path(this.testDir, hri.getTable().getNameAsString()), hri);

  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  // Make a store file and write data to it.
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
          .withFilePath(regionFs.createTempName())
          .withFileContext(meta)
          .build();
  writeStoreFile(writer);

  Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
  StoreFile hsf = new StoreFile(this.fs, hsfPath, conf, cacheConf,
    BloomType.NONE);
  StoreFile.Reader reader = hsf.createReader();
  // Split on a row, not in middle of row.  Midkey returned by reader
  // may be in middle of row.  Create new one with empty column and
  // timestamp.

  KeyValue kv = KeyValue.createKeyValueFromKey(reader.midkey());
  byte [] midRow = kv.getRow();
  kv = KeyValue.createKeyValueFromKey(reader.getLastKey());
  byte [] finalRow = kv.getRow();
  hsf.closeReader(true);

  // Make a reference
  HRegionInfo splitHri = new HRegionInfo(hri.getTable(), null, midRow);
  Path refPath = splitStoreFile(regionFs, splitHri, TEST_FAMILY, hsf, midRow, true);
  StoreFile refHsf = new StoreFile(this.fs, refPath, conf, cacheConf,
    BloomType.NONE);
  // Now confirm that I can read from the reference and that it only gets
  // keys from top half of the file.
  HFileScanner s = refHsf.createReader().getScanner(false, false);
  for(boolean first = true; (!s.isSeeked() && s.seekTo()) || s.next();) {
    ByteBuffer bb = s.getKey();
    kv = KeyValue.createKeyValueFromKey(bb);
    if (first) {
      assertTrue(Bytes.equals(kv.getRow(), midRow));
      first = false;
    }
  }
  assertTrue(Bytes.equals(kv.getRow(), finalRow));
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 51, Source: TestStoreFile.java

Example 15: testReferenceToHFileLink

import org.apache.hadoop.hbase.HRegionInfo; // import the package/class this method depends on
/**
 * This test creates an hfile and then the dir structures and files to verify that references
 * to hfilelinks (created by snapshot clones) can be properly interpreted.
 */
public void testReferenceToHFileLink() throws IOException {
  // force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
  Configuration testConf = new Configuration(this.conf);
  FSUtils.setRootDir(testConf, this.testDir);

  // adding legal table name chars to verify regex handles it.
  HRegionInfo hri = new HRegionInfo(TableName.valueOf("_original-evil-name"));
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
    testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTable()), hri);

  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  // Make a store file and write data to it. <root>/<tablename>/<rgn>/<cf>/<file>
  StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf, this.fs)
          .withFilePath(regionFs.createTempName())
          .withFileContext(meta)
          .build();
  writeStoreFile(writer);
  Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());

  // create link to store file. <root>/clone/region/<cf>/<hfile>-<region>-<table>
  HRegionInfo hriClone = new HRegionInfo(TableName.valueOf("clone"));
  HRegionFileSystem cloneRegionFs = HRegionFileSystem.createRegionOnFileSystem(
    testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTable()),
      hriClone);
  Path dstPath = cloneRegionFs.getStoreDir(TEST_FAMILY);
  HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
  Path linkFilePath = new Path(dstPath,
                HFileLink.createHFileLinkName(hri, storeFilePath.getName()));

  // create splits of the link.
  // <root>/clone/splitA/<cf>/<reftohfilelink>,
  // <root>/clone/splitB/<cf>/<reftohfilelink>
  HRegionInfo splitHriA = new HRegionInfo(hri.getTable(), null, SPLITKEY);
  HRegionInfo splitHriB = new HRegionInfo(hri.getTable(), SPLITKEY, null);
  StoreFile f = new StoreFile(fs, linkFilePath, testConf, cacheConf, BloomType.NONE);
  f.createReader();
  Path pathA = splitStoreFile(cloneRegionFs, splitHriA, TEST_FAMILY, f, SPLITKEY, true); // top
  Path pathB = splitStoreFile(cloneRegionFs, splitHriB, TEST_FAMILY, f, SPLITKEY, false);// bottom
  f.closeReader(true);
  // OK test the thing
  FSUtils.logFileSystemState(fs, this.testDir, LOG);

  // There is a case where a file with the hfilelink pattern is actually a daughter
  // reference to an hfile link. There is code in StoreFile that handles this case.

  // Try to open store file from link
  StoreFile hsfA = new StoreFile(this.fs, pathA, testConf, cacheConf,
    BloomType.NONE);

  // Now confirm that I can read from the ref to link
  int count = 1;
  HFileScanner s = hsfA.createReader().getScanner(false, false);
  s.seekTo();
  while (s.next()) {
    count++;
  }
  assertTrue(count > 0); // read some rows here

  // Try to open store file from link
  StoreFile hsfB = new StoreFile(this.fs, pathB, testConf, cacheConf,
    BloomType.NONE);

  // Now confirm that I can read from the ref to link
  HFileScanner sB = hsfB.createReader().getScanner(false, false);
  sB.seekTo();
  
  // count++ as seekTo() will advance the scanner
  count++;
  while (sB.next()) {
    count++;
  }

  // read the rest of the rows
  assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 80, Source: TestStoreFile.java


Note: The org.apache.hadoop.hbase.HRegionInfo.getTable method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by the programming community; copyright of the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.