This page collects typical usage examples of the Java class org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper. If you are unsure what the RestoreSnapshotHelper class does or how to use it, the selected code examples below may help.
The RestoreSnapshotHelper class belongs to the org.apache.hadoop.hbase.snapshot package. 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
Example 1: init

import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; // import the required package/class

private void init() throws IOException {
  final RestoreSnapshotHelper.RestoreMetaChanges meta =
      RestoreSnapshotHelper.copySnapshotForScanner(
          conf, fs, rootDir, restoreDir, snapshotName);
  final List<HRegionInfo> restoredRegions = meta.getRegionsToAdd();
  htd = meta.getTableDescriptor();
  regions = new ArrayList<HRegionInfo>(restoredRegions.size());
  for (HRegionInfo hri : restoredRegions) {
    if (CellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(),
        hri.getStartKey(), hri.getEndKey())) {
      regions.add(hri);
    }
  }
  // sort the regions by startKey
  Collections.sort(regions);
  initScanMetrics(scan);
}
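The same call can also be made standalone, outside a scanner class. A minimal sketch, assuming a reachable cluster configuration; the snapshot name "mySnapshot" and the scratch path are placeholders, and the HBase 1.x types (HTableDescriptor, HRegionInfo) are used to match the example above:

Configuration conf = HBaseConfiguration.create();
Path rootDir = FSUtils.getRootDir(conf);
FileSystem fs = rootDir.getFileSystem(conf);
// Restore into a unique scratch directory that is not under the HBase root.
Path restoreDir = new Path("/tmp/snapshot-restore", UUID.randomUUID().toString());
RestoreSnapshotHelper.RestoreMetaChanges meta =
    RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, "mySnapshot");
HTableDescriptor htd = meta.getTableDescriptor();    // descriptor of the snapshotted table
List<HRegionInfo> regions = meta.getRegionsToAdd();  // regions materialized under restoreDir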
Example 2: setInput

import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; // import the required package/class

/**
 * Configures the job to use TableSnapshotInputFormat to read from a snapshot.
 * @param conf the job to configure
 * @param snapshotName the name of the snapshot to read from
 * @param restoreDir a temporary directory to restore the snapshot into. Current user should
 *   have write permissions to this directory, and this should not be a subdirectory of rootdir.
 *   After the job is finished, restoreDir can be deleted.
 * @param splitAlgo SplitAlgorithm to be used when generating InputSplits
 * @param numSplitsPerRegion how many input splits to generate per region
 * @throws IOException if an error occurs
 */
public static void setInput(Configuration conf, String snapshotName, Path restoreDir,
    RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion)
    throws IOException {
  conf.set(SNAPSHOT_NAME_KEY, snapshotName);
  if (numSplitsPerRegion < 1) {
    throw new IllegalArgumentException("numSplits must be >= 1, " +
        "illegal numSplits : " + numSplitsPerRegion);
  }
  if (splitAlgo == null && numSplitsPerRegion > 1) {
    throw new IllegalArgumentException("Split algo can't be null when numSplits > 1");
  }
  if (splitAlgo != null) {
    conf.set(SPLIT_ALGO, splitAlgo.getClass().getName());
  }
  conf.setInt(NUM_SPLITS_PER_REGION, numSplitsPerRegion);
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);
  restoreDir = new Path(restoreDir, UUID.randomUUID().toString());
  // TODO: restore from record readers to parallelize.
  RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName);
  conf.set(RESTORE_DIR_KEY, restoreDir.toString());
}
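A hedged usage sketch for this variant, assuming a MapReduce driver with a Job instance in scope; the snapshot name, restore path, and split count are illustrative placeholders:

// Generate two input splits per region, cutting each region's key range
// uniformly with RegionSplitter.UniformSplit.
Configuration conf = job.getConfiguration();
setInput(conf, "mySnapshot", new Path("/tmp/snapshot-restore"),
    new RegionSplitter.UniformSplit(), 2);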
Example 3: init

import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; // import the required package/class

private void init() throws IOException {
  Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
  SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
  SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc);
  // load the table descriptor
  htd = manifest.getTableDescriptor();
  List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
  if (regionManifests == null) {
    throw new IllegalArgumentException("Snapshot seems empty");
  }
  regions = new ArrayList<HRegionInfo>(regionManifests.size());
  for (SnapshotRegionManifest regionManifest : regionManifests) {
    // load the region descriptor
    HRegionInfo hri = HRegionInfo.convert(regionManifest.getRegionInfo());
    if (CellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(),
        hri.getStartKey(), hri.getEndKey())) {
      regions.add(hri);
    }
  }
  // sort the regions by startKey
  Collections.sort(regions);
  initScanMetrics(scan);
  RestoreSnapshotHelper.copySnapshotForScanner(conf, fs,
      rootDir, restoreDir, snapshotName);
}
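The region filter in these init() variants relies on CellUtil.overlappingKeys, which treats an empty byte array as an open-ended bound. A small illustrative check, with made-up keys:

byte[] scanStart = Bytes.toBytes("bbb");
byte[] scanStop = Bytes.toBytes("ddd");
// Region [aaa, ccc) overlaps the scan range [bbb, ddd), so it would be kept.
boolean kept = CellUtil.overlappingKeys(scanStart, scanStop,
    Bytes.toBytes("aaa"), Bytes.toBytes("ccc")); // true
// Region [eee, +inf) does not overlap; HConstants.EMPTY_END_ROW marks an open end key.
boolean dropped = CellUtil.overlappingKeys(scanStart, scanStop,
    Bytes.toBytes("eee"), HConstants.EMPTY_END_ROW); // false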
Example 4: setInput

import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; // import the required package/class

/**
 * Configures the job to use TableSnapshotInputFormat to read from a snapshot.
 * @param conf the job to configure
 * @param snapshotName the name of the snapshot to read from
 * @param restoreDir a temporary directory to restore the snapshot into. Current user should
 *   have write permissions to this directory, and this should not be a subdirectory of rootdir.
 *   After the job is finished, restoreDir can be deleted.
 * @throws IOException if an error occurs
 */
public static void setInput(Configuration conf, String snapshotName, Path restoreDir)
    throws IOException {
  conf.set(SNAPSHOT_NAME_KEY, snapshotName);
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);
  restoreDir = new Path(restoreDir, UUID.randomUUID().toString());
  // TODO: restore from record readers to parallelize.
  RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName);
  conf.set(RESTORE_DIR_KEY, restoreDir.toString());
}
Example 5: init

import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; // import the required package/class

private void init() throws IOException {
  Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
  // load the table descriptor
  htd = FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir);
  Set<String> snapshotRegionNames =
      SnapshotReferenceUtil.getSnapshotRegionNames(fs, snapshotDir);
  if (snapshotRegionNames == null) {
    throw new IllegalArgumentException("Snapshot seems empty");
  }
  regions = new ArrayList<HRegionInfo>(snapshotRegionNames.size());
  for (String regionName : snapshotRegionNames) {
    // load the region descriptor
    Path regionDir = new Path(snapshotDir, regionName);
    HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
    if (CellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(),
        hri.getStartKey(), hri.getEndKey())) {
      regions.add(hri);
    }
  }
  // sort the regions by startKey
  Collections.sort(regions);
  initScanMetrics(scan);
  RestoreSnapshotHelper.copySnapshotForScanner(conf, fs,
      rootDir, restoreDir, snapshotName);
}
Example 6: setInput

import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; // import the required package/class

/**
 * Configures the job to use TableSnapshotInputFormat to read from a snapshot.
 * @param conf the job to configure
 * @param snapshotName the name of the snapshot to read from
 * @param restoreDir a temporary directory to restore the snapshot into. Current user should
 *   have write permissions to this directory, and this should not be a subdirectory of rootdir.
 *   After the job is finished, restoreDir can be deleted.
 * @throws IOException if an error occurs
 */
public static void setInput(Configuration conf, String snapshotName, Path restoreDir)
    throws IOException {
  conf.set(SNAPSHOT_NAME_KEY, snapshotName);
  Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
  FileSystem fs = rootDir.getFileSystem(conf);
  restoreDir = new Path(restoreDir, UUID.randomUUID().toString());
  // TODO: restore from record readers to parallelize.
  RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName);
  conf.set(TABLE_DIR_KEY, restoreDir.toString());
}
Example 7: openWithRestoringSnapshot

import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; // import the required package/class

private void openWithRestoringSnapshot() throws IOException {
  final RestoreSnapshotHelper.RestoreMetaChanges meta =
      RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName);
  final List<RegionInfo> restoredRegions = meta.getRegionsToAdd();
  htd = meta.getTableDescriptor();
  regions = new ArrayList<>(restoredRegions.size());
  restoredRegions.stream().filter(this::isValidRegion).sorted().forEach(r -> regions.add(r));
}
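The isValidRegion helper is not shown on this page; one plausible shape, assuming it applies the same scan-overlap filter as the earlier init() variants (a hypothetical reconstruction, not the verbatim source):

// Hypothetical: keep only regions whose key range intersects the scan.
// The real helper may additionally exclude offline split-parent regions.
private boolean isValidRegion(RegionInfo hri) {
  return CellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(),
      hri.getStartKey(), hri.getEndKey());
}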
Example 8: restoreSnapshot

import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; // import the required package/class

/**
 * Execute the on-disk restore.
 * @param env MasterProcedureEnv
 * @throws IOException
 */
private void restoreSnapshot(final MasterProcedureEnv env) throws IOException {
  MasterFileSystem fileSystemManager = env.getMasterServices().getMasterFileSystem();
  FileSystem fs = fileSystemManager.getFileSystem();
  Path rootDir = fileSystemManager.getRootDir();
  final ForeignExceptionDispatcher monitorException = new ForeignExceptionDispatcher();
  LOG.info("Starting restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot));
  try {
    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
    SnapshotManifest manifest = SnapshotManifest.open(
        env.getMasterServices().getConfiguration(), fs, snapshotDir, snapshot);
    RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(
        env.getMasterServices().getConfiguration(),
        fs,
        manifest,
        modifiedTableDescriptor,
        rootDir,
        monitorException,
        getMonitorStatus());
    RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions();
    regionsToRestore = metaChanges.getRegionsToRestore();
    regionsToRemove = metaChanges.getRegionsToRemove();
    regionsToAdd = metaChanges.getRegionsToAdd();
    parentsToChildrenPairMap = metaChanges.getParentToChildrenPairMap();
  } catch (IOException e) {
    String msg = "restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot)
        + " failed in on-disk restore. Try re-running the restore command.";
    LOG.error(msg, e);
    monitorException.receive(
        new ForeignException(env.getMasterServices().getServerName().toString(), e));
    throw new IOException(msg, e);
  }
}
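Outside a master procedure, the helper's two monitoring dependencies can be constructed directly. A sketch assuming conf, fs, manifest, tableDesc, and rootDir are already in scope; the status message is arbitrary:

ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher();
MonitoredTask status = TaskMonitor.get().createStatus(
    "Restoring snapshot to table " + tableDesc.getTableName());
RestoreSnapshotHelper helper = new RestoreSnapshotHelper(
    conf, fs, manifest, tableDesc, rootDir, monitor, status);
RestoreSnapshotHelper.RestoreMetaChanges changes = helper.restoreHdfsRegions();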
Example 9: restoreSnapshotAcl

import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; // import the required package/class

private void restoreSnapshotAcl(final MasterProcedureEnv env) throws IOException {
  if (restoreAcl && snapshot.hasUsersAndPermissions() && snapshot.getUsersAndPermissions() != null
      && SnapshotDescriptionUtils
          .isSecurityAvailable(env.getMasterServices().getConfiguration())) {
    // restore the snapshot's ACLs onto the table
    RestoreSnapshotHelper.restoreSnapshotAcl(snapshot, TableName.valueOf(snapshot.getTable()),
        env.getMasterServices().getConfiguration());
  }
}
Example 10: restoreSnapshotAcl

import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; // import the required package/class

private void restoreSnapshotAcl(MasterProcedureEnv env) throws IOException {
  Configuration conf = env.getMasterServices().getConfiguration();
  if (restoreAcl && snapshot.hasUsersAndPermissions() && snapshot.getUsersAndPermissions() != null
      && SnapshotDescriptionUtils.isSecurityAvailable(conf)) {
    RestoreSnapshotHelper.restoreSnapshotAcl(snapshot, tableDescriptor.getTableName(), conf);
  }
}
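For context, the restoreAcl flag checked above originates on the client side. A hedged sketch of how an HBase 2.x client might request it; verify the exact Admin signature against your client version, as the three-argument variant is an assumption here:

try (Connection connection = ConnectionFactory.createConnection(conf);
     Admin admin = connection.getAdmin()) {
  // takeFailSafeSnapshot = false, restoreAcl = true (assumed parameter order)
  admin.restoreSnapshot("mySnapshot", false, true);
}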
Example 11: addRegionsToMeta

import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; // import the required package/class

/**
 * Add regions to the hbase:meta table.
 * @param env MasterProcedureEnv
 * @throws IOException
 */
private void addRegionsToMeta(final MasterProcedureEnv env) throws IOException {
  newRegions = CreateTableProcedure.addTableToMeta(env, tableDescriptor, newRegions);
  // TODO: parentsToChildrenPairMap is always empty, which makes updateMetaParentRegions()
  // a no-op. This part seems unnecessary. Figure out. - Appy 12/21/17
  RestoreSnapshotHelper.RestoreMetaChanges metaChanges =
      new RestoreSnapshotHelper.RestoreMetaChanges(tableDescriptor, parentsToChildrenPairMap);
  metaChanges.updateMetaParentRegions(env.getMasterServices().getConnection(), newRegions);
}
Example 12: restoreSnapshotForPeerCluster

import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; // import the required package/class

private void restoreSnapshotForPeerCluster(Configuration conf, String peerQuorumAddress)
    throws IOException {
  Configuration peerConf =
      HBaseConfiguration.createClusterConf(conf, peerQuorumAddress, PEER_CONFIG_PREFIX);
  FileSystem.setDefaultUri(peerConf, peerFSAddress);
  FSUtils.setRootDir(peerConf, new Path(peerFSAddress, peerHBaseRootAddress));
  FileSystem fs = FileSystem.get(peerConf);
  RestoreSnapshotHelper.copySnapshotForScanner(peerConf, fs, FSUtils.getRootDir(peerConf),
      new Path(peerFSAddress, peerSnapshotTmpDir), peerSnapshotName);
}
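A hedged note on the inputs: peerQuorumAddress uses the standard HBase cluster-key format (zkQuorum:zkPort:zkParent), and createClusterConf overlays the peer's settings onto a copy of the local configuration. Illustrative placeholder values:

String peerQuorumAddress = "zk1.example.com,zk2.example.com,zk3.example.com:2181:/hbase";
String peerFSAddress = "hdfs://peer-nn.example.com:8020";
String peerHBaseRootAddress = "/hbase";
Configuration peerConf =
    HBaseConfiguration.createClusterConf(conf, peerQuorumAddress, PEER_CONFIG_PREFIX);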
Example 13: setInput

import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; // import the required package/class

/**
 * Configures the job to use TableSnapshotInputFormat to read from a snapshot.
 * @param conf the job to configure
 * @param snapshotName the name of the snapshot to read from
 * @param restoreDir a temporary directory to restore the snapshot into. Current user should
 *   have write permissions to this directory, and this should not be a subdirectory of rootdir.
 *   After the job is finished, restoreDir can be deleted.
 * @throws IOException if an error occurs
 */
public static void setInput(Configuration conf, String snapshotName, Path restoreDir)
    throws IOException {
  conf.set(SNAPSHOT_NAME_KEY, snapshotName);
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);
  restoreDir = new Path(restoreDir, UUID.randomUUID().toString());
  // TODO: restore from record readers to parallelize.
  RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName);
  conf.set(TABLE_DIR_KEY, restoreDir.toString());
}
Example 14: handleTableOperation

import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; // import the required package/class

/**
 * The restore table is executed in place.
 * - The on-disk data will be restored: reference files are put in place without moving data.
 *   [If something fails here, you need to delete the table and re-run the restore.]
 * - hbase:meta will be updated.
 *   [If something fails here, you need to run hbck to fix the hbase:meta entries.]
 * The passed-in list is modified by this method.
 */
@Override
protected void handleTableOperation(List<HRegionInfo> hris) throws IOException {
  MasterFileSystem fileSystemManager = masterServices.getMasterFileSystem();
  Connection conn = masterServices.getConnection();
  FileSystem fs = fileSystemManager.getFileSystem();
  Path rootDir = fileSystemManager.getRootDir();
  TableName tableName = hTableDescriptor.getTableName();
  try {
    // 1. Update the descriptor
    this.masterServices.getTableDescriptors().add(hTableDescriptor);

    // 2. Execute the on-disk restore
    LOG.debug("Starting restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot));
    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
    SnapshotManifest manifest = SnapshotManifest.open(masterServices.getConfiguration(), fs,
        snapshotDir, snapshot);
    RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(
        masterServices.getConfiguration(), fs, manifest,
        this.hTableDescriptor, rootDir, monitor, status);
    RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions();

    // 3. Force all the RegionStates to be offline
    //
    // The AssignmentManager keeps all the region states around,
    // with no possibility to remove them, until the master is restarted.
    // This means that a region marked as SPLIT before the restore would never be assigned again.
    // To avoid this, all the regions are switched to the OFFLINE state,
    // which is the same state the regions would have after a table delete.
    forceRegionsOffline(metaChanges);

    // 4. Apply the changes to hbase:meta
    status.setStatus("Preparing to restore each region");

    // 4.1 Remove the current set of regions from META
    //
    // By also removing the regions to restore (the ones present both in the snapshot
    // and in the current state) we ensure that no extra fields are left in META:
    // e.g. with a simple addRegionToMeta() the splitA and splitB attributes would not be
    // overwritten/removed, so you would end up with stale information that is no longer
    // correct after the restore.
    List<HRegionInfo> hrisToRemove = new LinkedList<HRegionInfo>();
    if (metaChanges.hasRegionsToRemove()) hrisToRemove.addAll(metaChanges.getRegionsToRemove());
    MetaTableAccessor.deleteRegions(conn, hrisToRemove);

    // 4.2 Add the new set of regions to META
    //
    // At this point the old regions are no longer present in META,
    // and the set of regions present in the snapshot will be written to META.
    // All the information in hbase:meta comes from the .regioninfo of each region present
    // in the snapshot folder.
    hris.clear();
    if (metaChanges.hasRegionsToAdd()) hris.addAll(metaChanges.getRegionsToAdd());
    MetaTableAccessor.addRegionsToMeta(conn, hris, hTableDescriptor.getRegionReplication());
    if (metaChanges.hasRegionsToRestore()) {
      MetaTableAccessor.overwriteRegions(conn, metaChanges.getRegionsToRestore(),
          hTableDescriptor.getRegionReplication());
    }
    metaChanges.updateMetaParentRegions(this.server.getConnection(), hris);

    // At this point the restore is complete. The next step is enabling the table.
    LOG.info("Restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot) +
        " on table=" + tableName + " completed!");
  } catch (IOException e) {
    String msg = "restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot)
        + " failed. Try re-running the restore command.";
    LOG.error(msg, e);
    monitor.receive(new ForeignException(masterServices.getServerName().toString(), e));
    throw new RestoreSnapshotException(msg, e);
  }
}
Example 15: forceRegionsOffline

import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; // import the required package/class

private void forceRegionsOffline(final RestoreSnapshotHelper.RestoreMetaChanges metaChanges) {
  forceRegionsOffline(metaChanges.getRegionsToAdd());
  forceRegionsOffline(metaChanges.getRegionsToRestore());
  forceRegionsOffline(metaChanges.getRegionsToRemove());
}