

Java RestoreSnapshotHelper.copySnapshotForScanner Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper.copySnapshotForScanner. If you are unsure what RestoreSnapshotHelper.copySnapshotForScanner does or how to call it, the selected code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper.


The following presents 11 code examples of the RestoreSnapshotHelper.copySnapshotForScanner method, sorted by popularity by default.
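Before the individual examples, here is a minimal sketch of how copySnapshotForScanner is typically called, pieced together from the examples below. The snapshot name and the restore path are placeholders, and getRegionsToAdd() is shown returning HRegionInfo as in example 1 (newer HBase versions return RegionInfo, as in example 7); treat this as an illustration rather than a drop-in implementation.

import java.io.IOException;
import java.util.List;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
import org.apache.hadoop.hbase.util.FSUtils;

public static List<HRegionInfo> restoreForScan(String snapshotName, Path restoreParent)
    throws IOException {
  Configuration conf = HBaseConfiguration.create();
  Path rootDir = FSUtils.getRootDir(conf);        // HBase root directory (hbase.rootdir)
  FileSystem fs = rootDir.getFileSystem(conf);
  // The restore directory must be writable and must not live under rootDir;
  // a random subdirectory keeps repeated restores from colliding.
  Path restoreDir = new Path(restoreParent, UUID.randomUUID().toString());

  // Copy the snapshot's region references under restoreDir so the data can be
  // scanned without touching the live table.
  RestoreSnapshotHelper.RestoreMetaChanges meta =
      RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName);
  return meta.getRegionsToAdd();                  // metadata for the restored regions
}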

Example 1: init

import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; // import the package/class this method depends on
private void init() throws IOException {
  final RestoreSnapshotHelper.RestoreMetaChanges meta =
    RestoreSnapshotHelper.copySnapshotForScanner(
      conf, fs, rootDir, restoreDir, snapshotName);
  final List<HRegionInfo> restoredRegions = meta.getRegionsToAdd();

  htd = meta.getTableDescriptor();
  regions = new ArrayList<HRegionInfo>(restoredRegions.size());
  for (HRegionInfo hri: restoredRegions) {
    if (CellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(),
        hri.getStartKey(), hri.getEndKey())) {
      regions.add(hri);
    }
  }

  // sort regions by startKey.
  Collections.sort(regions);
  initScanMetrics(scan);
}
 
Author: fengchen8086, Project: ditb, Lines: 20, Source: TableSnapshotScanner.java

Example 2: setInput

import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; // import the package/class this method depends on
/**
 * Configures the job to use TableSnapshotInputFormat to read from a snapshot.
 * @param conf the job to configure
 * @param snapshotName the name of the snapshot to read from
 * @param restoreDir a temporary directory to restore the snapshot into. Current user should
 * have write permissions to this directory, and this should not be a subdirectory of rootdir.
 * After the job is finished, restoreDir can be deleted.
 * @param splitAlgo SplitAlgorithm to be used when generating InputSplits
 * @param numSplitsPerRegion how many input splits to generate per region
 * @throws IOException if an error occurs
 */
public static void setInput(Configuration conf, String snapshotName, Path restoreDir,
                            RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion)
        throws IOException {
  conf.set(SNAPSHOT_NAME_KEY, snapshotName);
  if (numSplitsPerRegion < 1) {
    throw new IllegalArgumentException("numSplits must be >= 1, " +
            "illegal numSplits : " + numSplitsPerRegion);
  }
  if (splitAlgo == null && numSplitsPerRegion > 1) {
    throw new IllegalArgumentException("Split algo can't be null when numSplits > 1");
  }
  if (splitAlgo != null) {
    conf.set(SPLIT_ALGO, splitAlgo.getClass().getName());
  }
  conf.setInt(NUM_SPLITS_PER_REGION, numSplitsPerRegion);
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);

  restoreDir = new Path(restoreDir, UUID.randomUUID().toString());

  // TODO: restore from record readers to parallelize.
  RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName);

  conf.set(RESTORE_DIR_KEY, restoreDir.toString());
}
 
Author: apache, Project: hbase, Lines: 37, Source: TableSnapshotInputFormatImpl.java
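For orientation, a hedged usage sketch of the setInput overload shown above: it assumes the hbase-mapreduce module is on the classpath, uses the built-in RegionSplitter.UniformSplit algorithm, and the snapshot name and restore path are placeholders.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl;
import org.apache.hadoop.hbase.util.RegionSplitter;

public static void configureSnapshotInput() throws IOException {
  Configuration conf = HBaseConfiguration.create();
  // Ask for two input splits per region, cut with the UniformSplit algorithm.
  TableSnapshotInputFormatImpl.setInput(conf, "my_snapshot",
      new Path("/tmp/snapshot-restore"),          // writable, outside hbase.rootdir
      new RegionSplitter.UniformSplit(), 2);
}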

Example 3: init

import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; // import the package/class this method depends on
private void init() throws IOException {
  Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
  SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
  SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc);

  // load table descriptor
  htd = manifest.getTableDescriptor();

  List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
  if (regionManifests == null) {
    throw new IllegalArgumentException("Snapshot seems empty");
  }

  regions = new ArrayList<HRegionInfo>(regionManifests.size());
  for (SnapshotRegionManifest regionManifest : regionManifests) {
    // load region descriptor
    HRegionInfo hri = HRegionInfo.convert(regionManifest.getRegionInfo());

    if (CellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(),
        hri.getStartKey(), hri.getEndKey())) {
      regions.add(hri);
    }
  }

  // sort regions by startKey.
  Collections.sort(regions);

  initScanMetrics(scan);

  RestoreSnapshotHelper.copySnapshotForScanner(conf, fs,
    rootDir, restoreDir, snapshotName);
}
 
Author: grokcoder, Project: pbase, Lines: 33, Source: TableSnapshotScanner.java

Example 4: setInput

import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; // import the package/class this method depends on
/**
 * Configures the job to use TableSnapshotInputFormat to read from a snapshot.
 * @param conf the job to configure
 * @param snapshotName the name of the snapshot to read from
 * @param restoreDir a temporary directory to restore the snapshot into. Current user should
 * have write permissions to this directory, and this should not be a subdirectory of rootdir.
 * After the job is finished, restoreDir can be deleted.
 * @throws IOException if an error occurs
 */
public static void setInput(Configuration conf, String snapshotName, Path restoreDir)
    throws IOException {
  conf.set(SNAPSHOT_NAME_KEY, snapshotName);

  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);

  restoreDir = new Path(restoreDir, UUID.randomUUID().toString());

  // TODO: restore from record readers to parallelize.
  RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName);

  conf.set(RESTORE_DIR_KEY, restoreDir.toString());
}
 
Author: grokcoder, Project: pbase, Lines: 24, Source: TableSnapshotInputFormatImpl.java

Example 5: init

import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; // import the package/class this method depends on
private void init() throws IOException {
  Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);

  //load table descriptor
  htd = FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir);

  Set<String> snapshotRegionNames
    = SnapshotReferenceUtil.getSnapshotRegionNames(fs, snapshotDir);
  if (snapshotRegionNames == null) {
    throw new IllegalArgumentException("Snapshot seems empty");
  }

  regions = new ArrayList<HRegionInfo>(snapshotRegionNames.size());
  for (String regionName : snapshotRegionNames) {
    // load region descriptor
    Path regionDir = new Path(snapshotDir, regionName);
    HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs,
        regionDir);

    if (CellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(),
        hri.getStartKey(), hri.getEndKey())) {
      regions.add(hri);
    }
  }

  // sort regions by startKey.
  Collections.sort(regions);

  initScanMetrics(scan);

  RestoreSnapshotHelper.copySnapshotForScanner(conf, fs,
    rootDir, restoreDir, snapshotName);
}
 
Author: tenggyut, Project: HIndex, Lines: 34, Source: TableSnapshotScanner.java

Example 6: setInput

import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; // import the package/class this method depends on
/**
 * Configures the job to use TableSnapshotInputFormat to read from a snapshot.
 * @param conf the job to configure
 * @param snapshotName the name of the snapshot to read from
 * @param restoreDir a temporary directory to restore the snapshot into. Current user should
 * have write permissions to this directory, and this should not be a subdirectory of rootdir.
 * After the job is finished, restoreDir can be deleted.
 * @throws IOException if an error occurs
 */
public static void setInput(Configuration conf, String snapshotName, Path restoreDir)
    throws IOException {
  conf.set(SNAPSHOT_NAME_KEY, snapshotName);

  Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
  FileSystem fs = rootDir.getFileSystem(conf);

  restoreDir = new Path(restoreDir, UUID.randomUUID().toString());

  // TODO: restore from record readers to parallelize.
  RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName);

  conf.set(TABLE_DIR_KEY, restoreDir.toString());
}
 
Author: tenggyut, Project: HIndex, Lines: 24, Source: TableSnapshotInputFormatImpl.java

Example 7: openWithRestoringSnapshot

import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; // import the package/class this method depends on
private void openWithRestoringSnapshot() throws IOException {
  final RestoreSnapshotHelper.RestoreMetaChanges meta =
      RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName);
  final List<RegionInfo> restoredRegions = meta.getRegionsToAdd();

  htd = meta.getTableDescriptor();
  regions = new ArrayList<>(restoredRegions.size());
  restoredRegions.stream().filter(this::isValidRegion).sorted().forEach(r -> regions.add(r));
}
 
Author: apache, Project: hbase, Lines: 10, Source: TableSnapshotScanner.java
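The isValidRegion filter referenced in example 7 is not shown on this page; the sketch below is only a guess at its shape, reusing the CellUtil.overlappingKeys check from examples 1, 3 and 5. The real method in TableSnapshotScanner may apply additional checks (for instance, skipping offline split parents), and the sketch assumes a surrounding class with a scan field.

// Assumes a surrounding scanner class with a 'scan' field, like TableSnapshotScanner.
private boolean isValidRegion(RegionInfo hri) {
  // Keep only regions whose key range overlaps the scan's start/stop rows.
  return CellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(),
      hri.getStartKey(), hri.getEndKey());
}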

Example 8: restoreSnapshotForPeerCluster

import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; // import the package/class this method depends on
private void restoreSnapshotForPeerCluster(Configuration conf, String peerQuorumAddress)
    throws IOException {
  Configuration peerConf =
      HBaseConfiguration.createClusterConf(conf, peerQuorumAddress, PEER_CONFIG_PREFIX);
  FileSystem.setDefaultUri(peerConf, peerFSAddress);
  FSUtils.setRootDir(peerConf, new Path(peerFSAddress, peerHBaseRootAddress));
  FileSystem fs = FileSystem.get(peerConf);
  RestoreSnapshotHelper.copySnapshotForScanner(peerConf, fs, FSUtils.getRootDir(peerConf),
    new Path(peerFSAddress, peerSnapshotTmpDir), peerSnapshotName);
}
 
Author: apache, Project: hbase, Lines: 11, Source: VerifyReplication.java

Example 9: setInput

import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; // import the package/class this method depends on
/**
 * Configures the job to use TableSnapshotInputFormat to read from a snapshot.
 * @param conf the job to configure
 * @param snapshotName the name of the snapshot to read from
 * @param restoreDir a temporary directory to restore the snapshot into. Current user should
 * have write permissions to this directory, and this should not be a subdirectory of rootdir.
 * After the job is finished, restoreDir can be deleted.
 * @throws IOException if an error occurs
 */
public static void setInput(Configuration conf, String snapshotName, Path restoreDir)
    throws IOException {
  conf.set(SNAPSHOT_NAME_KEY, snapshotName);

  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);

  restoreDir = new Path(restoreDir, UUID.randomUUID().toString());

  // TODO: restore from record readers to parallelize.
  RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName);

  conf.set(TABLE_DIR_KEY, restoreDir.toString());
}
 
Author: shenli-uiuc, Project: PyroDB, Lines: 24, Source: TableSnapshotInputFormatImpl.java

Example 10: restoreSnapshot

import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; // import the package/class this method depends on
void restoreSnapshot(Configuration conf, String snapshotName, Path rootDir, Path restoreDir,
    FileSystem fs) throws IOException {
  RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName);
}
 
Author: fengchen8086, Project: ditb, Lines: 5, Source: MultiTableSnapshotInputFormatImpl.java
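A hedged sketch of how the single-snapshot helper in example 10 could be driven for several snapshots; the snapshot-to-directory map and the restoreSnapshots method name are illustrative additions, not code from the project shown (requires java.util.Map, java.io.IOException and the Hadoop/HBase classes already used above).

void restoreSnapshots(Configuration conf, Map<String, Path> snapshotToRestoreDir, FileSystem fs)
    throws IOException {
  Path rootDir = FSUtils.getRootDir(conf);
  for (Map.Entry<String, Path> entry : snapshotToRestoreDir.entrySet()) {
    // Each snapshot gets its own restore directory so region references do not collide.
    restoreSnapshot(conf, entry.getKey(), rootDir, entry.getValue(), fs);
  }
}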

Example 11: testScannerWithRestoreScanner

import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; // import the package/class this method depends on
@Test
public void testScannerWithRestoreScanner() throws Exception {
  setupCluster();
  TableName tableName = TableName.valueOf("testScanner");
  String snapshotName = "testScannerWithRestoreScanner";
  try {
    createTableAndSnapshot(UTIL, tableName, snapshotName, 50);
    Path restoreDir = UTIL.getDataTestDirOnTestFS(snapshotName);
    Scan scan = new Scan(bbb, yyy); // limit the scan

    Configuration conf = UTIL.getConfiguration();
    Path rootDir = FSUtils.getRootDir(conf);

    TableSnapshotScanner scanner0 =
        new TableSnapshotScanner(conf, restoreDir, snapshotName, scan);
    verifyScanner(scanner0, bbb, yyy);
    scanner0.close();

    // restore snapshot.
    RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName);

    // scan the snapshot without restoring it
    TableSnapshotScanner scanner =
        new TableSnapshotScanner(conf, rootDir, restoreDir, snapshotName, scan, true);
    verifyScanner(scanner, bbb, yyy);
    scanner.close();

    // check whether the snapshot has been deleted by the close of scanner.
    scanner = new TableSnapshotScanner(conf, rootDir, restoreDir, snapshotName, scan, true);
    verifyScanner(scanner, bbb, yyy);
    scanner.close();

    // restore snapshot again.
    RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName);

    // check whether the snapshot has been deleted by the close of scanner.
    scanner = new TableSnapshotScanner(conf, rootDir, restoreDir, snapshotName, scan, true);
    verifyScanner(scanner, bbb, yyy);
    scanner.close();
  } finally {
    UTIL.getAdmin().deleteSnapshot(snapshotName);
    UTIL.deleteTable(tableName);
    tearDownCluster();
  }
}
 
Author: apache, Project: hbase, Lines: 46, Source: TestTableSnapshotScanner.java


Note: The org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper.copySnapshotForScanner method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult each project's License before distributing or using the code, and do not reproduce this article without permission.