This article collects typical usage examples of the Java class org.apache.hadoop.hbase.backup.HFileArchiver. If you are unsure what HFileArchiver is for or how to use it, the examples selected here may help.
HFileArchiver belongs to the org.apache.hadoop.hbase.backup package. Fifteen code examples of the class are shown below, ordered roughly by popularity.
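Before the examples, here is a minimal, hedged sketch (not taken from any of them) of the two HFileArchiver entry points they all revolve around: archiving a whole region and archiving a single column family. The table name "demo_table" and family name "cf" are hypothetical, and the sketch assumes a reachable HBase root filesystem configured in hbase-site.xml; the method signatures themselves match the calls in the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;

public class HFileArchiverSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical table and region, used only for illustration.
    HRegionInfo region = new HRegionInfo(TableName.valueOf("demo_table"));

    // Move all of the region's store files into HBase's archive directory.
    HFileArchiver.archiveRegion(conf, fs, region);

    // Or archive only one column family of the region.
    Path tableDir = FSUtils.getTableDir(FSUtils.getRootDir(conf), region.getTable());
    HFileArchiver.archiveFamily(fs, conf, region, tableDir, Bytes.toBytes("cf"));
  }
}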
Example 1: deleteFamilyFromFS

import org.apache.hadoop.hbase.backup.HFileArchiver; // import of the required package/class

public void deleteFamilyFromFS(HRegionInfo region, byte[] familyName)
    throws IOException {
  // archive family store files
  Path tableDir = FSUtils.getTableDir(rootdir, region.getTable());
  HFileArchiver.archiveFamily(fs, conf, region, tableDir, familyName);
  // delete the family folder
  Path familyDir = new Path(tableDir,
      new Path(region.getEncodedName(), Bytes.toString(familyName)));
  if (fs.delete(familyDir, true) == false) {
    if (fs.exists(familyDir)) {
      throw new IOException("Could not delete family "
          + Bytes.toString(familyName) + " from FileSystem for region "
          + region.getRegionNameAsString() + "(" + region.getEncodedName()
          + ")");
    }
  }
}
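Note the defensive second check above: a failed fs.delete(...) is only escalated to an IOException when the family directory still exists, so the method stays harmless if the directory was already removed by someone else.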
Example 2: cleanMergeRegion

import org.apache.hadoop.hbase.backup.HFileArchiver; // import of the required package/class

/**
 * If the merged region no longer holds references to the regions it was
 * merged from, archive those merge regions on HDFS and delete the merge
 * references from hbase:meta.
 * @param mergedRegion the region produced by the merge
 * @param regionA the first region that was merged
 * @param regionB the second region that was merged
 * @return true if the references in the merged region were deleted from
 *         hbase:meta and the files were archived on the file system
 * @throws IOException
 */
boolean cleanMergeRegion(final HRegionInfo mergedRegion,
    final HRegionInfo regionA, final HRegionInfo regionB) throws IOException {
  FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
  Path rootdir = this.services.getMasterFileSystem().getRootDir();
  Path tabledir = FSUtils.getTableDir(rootdir, mergedRegion.getTable());
  HTableDescriptor htd = getTableDescriptor(mergedRegion.getTable());
  HRegionFileSystem regionFs = null;
  try {
    regionFs = HRegionFileSystem.openRegionFromFileSystem(
        this.services.getConfiguration(), fs, tabledir, mergedRegion, true);
  } catch (IOException e) {
    LOG.warn("Merged region does not exist: " + mergedRegion.getEncodedName());
  }
  if (regionFs == null || !regionFs.hasReferences(htd)) {
    LOG.debug("Deleting region " + regionA.getRegionNameAsString() + " and "
        + regionB.getRegionNameAsString()
        + " from fs because merged region no longer holds references");
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
    MetaTableAccessor.deleteMergeQualifiers(server.getConnection(),
        mergedRegion);
    return true;
  }
  return false;
}
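A point worth stressing about this example: HFileArchiver.archiveRegion(...) does not destroy the merged-away regions' data. It moves their store files into HBase's archive area (by default the archive directory under the HBase root), where a cleaner chore deletes them later once nothing, such as a snapshot, still references them.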
Example 3: checkTempDir

import org.apache.hadoop.hbase.backup.HFileArchiver; // import of the required package/class

/**
 * Make sure the hbase temp directory exists and is empty.
 * NOTE that this method is only executed once just after the master becomes the active one.
 */
private void checkTempDir(final Path tmpdir, final Configuration c, final FileSystem fs)
    throws IOException {
  // If the temp directory exists, clear the content (left over from the previous run)
  if (fs.exists(tmpdir)) {
    // Archive tables in temp, possibly left over from a failed deletion;
    // the cleaner will take care of the archived files.
    for (Path tabledir: FSUtils.getTableDirs(fs, tmpdir)) {
      for (Path regiondir: FSUtils.getRegionDirs(fs, tabledir)) {
        HFileArchiver.archiveRegion(fs, this.rootdir, tabledir, regiondir);
      }
    }
    if (!fs.delete(tmpdir, true)) {
      throw new IOException("Unable to clean the temp directory: " + tmpdir);
    }
  }
  // Create the temp directory
  if (!fs.mkdirs(tmpdir)) {
    throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
  }
}
Example 4: deleteRegionFromFileSystem

import org.apache.hadoop.hbase.backup.HFileArchiver; // import of the required package/class

/**
 * Remove the region from the table directory, archiving the region's hfiles.
 *
 * @param conf the {@link Configuration} to use
 * @param fs {@link FileSystem} from which to remove the region
 * @param tableDir {@link Path} to where the table is being stored
 * @param regionInfo {@link HRegionInfo} for the region to be deleted
 * @throws IOException if the request cannot be completed
 */
public static void deleteRegionFromFileSystem(final Configuration conf, final FileSystem fs,
    final Path tableDir, final HRegionInfo regionInfo) throws IOException {
  HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
  Path regionDir = regionFs.getRegionDir();
  if (!fs.exists(regionDir)) {
    LOG.warn("Trying to delete a region that does not exist on disk: " + regionDir);
    return;
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("DELETING region " + regionDir);
  }
  // Archive region
  Path rootDir = FSUtils.getRootDir(conf);
  HFileArchiver.archiveRegion(fs, rootDir, tableDir, regionDir);
  // Delete empty region dir
  if (!fs.delete(regionDir, true)) {
    LOG.warn("Failed delete of " + regionDir);
  }
}
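For context, a hedged invocation sketch follows. The surrounding code suggests this static helper is declared on org.apache.hadoop.hbase.regionserver.HRegionFileSystem (the class it also instantiates), but the page does not confirm the enclosing class, so treat that, along with the table name "demo_table", as an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.util.FSUtils;

public class DeleteRegionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    TableName table = TableName.valueOf("demo_table"); // hypothetical table
    Path tableDir = FSUtils.getTableDir(FSUtils.getRootDir(conf), table);
    HRegionInfo region = new HRegionInfo(table); // hypothetical region
    // Archives the region's hfiles, then removes the emptied region directory.
    HRegionFileSystem.deleteRegionFromFileSystem(conf, fs, tableDir, region);
  }
}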
Example 5: checkTempDir

import org.apache.hadoop.hbase.backup.HFileArchiver; // import of the required package/class

/**
 * Make sure the hbase temp directory exists and is empty.
 * NOTE that this method is only executed once just after the master becomes the active one.
 */
private void checkTempDir(final Path tmpdir, final Configuration c, final FileSystem fs)
    throws IOException {
  // If the temp directory exists, clear the content (left over from the previous run)
  if (fs.exists(tmpdir)) {
    // Archive tables in temp, possibly left over from a failed deletion;
    // the cleaner will take care of the archived files.
    for (Path tabledir: FSUtils.getTableDirs(fs, tmpdir)) {
      for (Path regiondir: FSUtils.getRegionDirs(fs, tabledir)) {
        HFileArchiver.archiveRegion(fs, this.rootdir, tabledir, regiondir);
      }
    }
    if (!HBaseFileSystem.deleteDirFromFileSystem(fs, tmpdir)) {
      throw new IOException("Unable to clean the temp directory: " + tmpdir);
    }
  }
  // Create the temp directory
  if (!HBaseFileSystem.makeDirOnFileSystem(fs, tmpdir)) {
    throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
  }
}
Example 6: deleteFamilyFromFS

import org.apache.hadoop.hbase.backup.HFileArchiver; // import of the required package/class

public void deleteFamilyFromFS(HRegionInfo region, byte[] familyName)
    throws IOException {
  // archive family store files
  Path tableDir = new Path(rootdir, region.getTableNameAsString());
  HFileArchiver.archiveFamily(fs, conf, region, tableDir, familyName);
  // delete the family folder
  Path familyDir = new Path(tableDir,
      new Path(region.getEncodedName(), Bytes.toString(familyName)));
  if (!HBaseFileSystem.deleteDirFromFileSystem(fs, familyDir)) {
    throw new IOException("Could not delete family "
        + Bytes.toString(familyName) + " from FileSystem for region "
        + region.getRegionNameAsString() + "(" + region.getEncodedName()
        + ")");
  }
}
Example 7: deleteFamilyFromFS

import org.apache.hadoop.hbase.backup.HFileArchiver; // import of the required package/class

public void deleteFamilyFromFS(HRegionInfo region, byte[] familyName)
    throws IOException {
  // archive family store files
  Path tableDir = FSUtils.getTableDir(rootdir, region.getTable());
  HFileArchiver.archiveFamily(fs, conf, region, tableDir, familyName);
  // delete the family folder
  Path familyDir = new Path(tableDir,
      new Path(region.getEncodedName(), Bytes.toString(familyName)));
  if (fs.delete(familyDir, true) == false) {
    throw new IOException("Could not delete family "
        + Bytes.toString(familyName) + " from FileSystem for region "
        + region.getRegionNameAsString() + "(" + region.getEncodedName()
        + ")");
  }
}
Example 8: deleteRegionFromFileSystem

import org.apache.hadoop.hbase.backup.HFileArchiver; // import of the required package/class

/**
 * Remove the region from the table directory, archiving the region's hfiles.
 * @param conf the {@link Configuration} to use
 * @param fs {@link FileSystem} from which to remove the region
 * @param tableDir {@link Path} to where the table is being stored
 * @param regionInfo {@link HRegionInfo} for the region to be deleted
 * @throws IOException if the request cannot be completed
 */
public static void deleteRegionFromFileSystem(final Configuration conf,
    final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
  HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
  Path regionDir = regionFs.getRegionDir();
  if (!fs.exists(regionDir)) {
    LOG.warn("Trying to delete a region that does not exist on disk: " + regionDir);
    return;
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("DELETING region " + regionDir);
  }
  // Archive region
  Path rootDir = FSUtils.getRootDir(conf);
  HFileArchiver.archiveRegion(fs, rootDir, tableDir, regionDir);
  // Delete empty region dir
  if (!fs.delete(regionDir, true)) {
    LOG.warn("Failed delete of " + regionDir);
  }
}
Example 9: cleanMergeRegion

import org.apache.hadoop.hbase.backup.HFileArchiver; // import of the required package/class

/**
 * If the merged region no longer holds references to the regions it was
 * merged from, archive those merge regions on HDFS and delete the merge
 * references from hbase:meta.
 * @param mergedRegion the region produced by the merge
 * @param regionA the first region that was merged
 * @param regionB the second region that was merged
 * @return true if the references in the merged region were deleted from
 *         hbase:meta and the files were archived on the file system
 * @throws IOException
 */
boolean cleanMergeRegion(final HRegionInfo mergedRegion,
    final HRegionInfo regionA, final HRegionInfo regionB) throws IOException {
  FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
  Path rootdir = this.services.getMasterFileSystem().getRootDir();
  Path tabledir = FSUtils.getTableDir(rootdir, mergedRegion.getTable());
  HTableDescriptor htd = getTableDescriptor(mergedRegion.getTable());
  HRegionFileSystem regionFs = null;
  try {
    regionFs = HRegionFileSystem.openRegionFromFileSystem(
        this.services.getConfiguration(), fs, tabledir, mergedRegion, true);
  } catch (IOException e) {
    LOG.warn("Merged region does not exist: " + mergedRegion.getEncodedName());
  }
  if (regionFs == null || !regionFs.hasReferences(htd)) {
    LOG.debug("Deleting region " + regionA.getRegionNameAsString() + " and "
        + regionB.getRegionNameAsString()
        + " from fs because merged region no longer holds references");
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
    MetaEditor.deleteMergeQualifiers(server.getCatalogTracker(), mergedRegion);
    return true;
  }
  return false;
}
Example 10: deleteFamilyFromFS

import org.apache.hadoop.hbase.backup.HFileArchiver; // import of the required package/class

public void deleteFamilyFromFS(Path rootDir, RegionInfo region, byte[] familyName)
    throws IOException {
  // archive family store files
  Path tableDir = FSUtils.getTableDir(rootDir, region.getTable());
  HFileArchiver.archiveFamily(fs, conf, region, tableDir, familyName);
  // delete the family folder
  Path familyDir = new Path(tableDir,
      new Path(region.getEncodedName(), Bytes.toString(familyName)));
  if (fs.delete(familyDir, true) == false) {
    if (fs.exists(familyDir)) {
      throw new IOException("Could not delete family "
          + Bytes.toString(familyName) + " from FileSystem for region "
          + region.getRegionNameAsString() + "(" + region.getEncodedName()
          + ")");
    }
  }
}
Example 11: deleteRegionFromFileSystem

import org.apache.hadoop.hbase.backup.HFileArchiver; // import of the required package/class

/**
 * Remove the region from the table directory, archiving the region's hfiles.
 * @param conf the {@link Configuration} to use
 * @param fs {@link FileSystem} from which to remove the region
 * @param tableDir {@link Path} to where the table is being stored
 * @param regionInfo {@link RegionInfo} for the region to be deleted
 * @throws IOException if the request cannot be completed
 */
public static void deleteRegionFromFileSystem(final Configuration conf,
    final FileSystem fs, final Path tableDir, final RegionInfo regionInfo) throws IOException {
  HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
  Path regionDir = regionFs.getRegionDir();
  if (!fs.exists(regionDir)) {
    LOG.warn("Trying to delete a region that does not exist on disk: " + regionDir);
    return;
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("DELETING region " + regionDir);
  }
  // Archive region
  Path rootDir = FSUtils.getRootDir(conf);
  HFileArchiver.archiveRegion(fs, rootDir, tableDir, regionDir);
  // Delete empty region dir
  if (!fs.delete(regionDir, true)) {
    LOG.warn("Failed delete of " + regionDir);
  }
}
Example 12: deleteFamilyFromFS

import org.apache.hadoop.hbase.backup.HFileArchiver; // import of the required package/class

public void deleteFamilyFromFS(HRegionInfo region, byte[] familyName)
    throws IOException {
  // archive family store files
  Path tableDir = new Path(rootdir, region.getTableNameAsString());
  HFileArchiver.archiveFamily(fs, conf, region, tableDir, familyName);
  // delete the family folder
  Path familyDir = new Path(tableDir,
      new Path(region.getEncodedName(), Bytes.toString(familyName)));
  if (fs.delete(familyDir, true) == false) {
    throw new IOException("Could not delete family "
        + Bytes.toString(familyName) + " from FileSystem for region "
        + region.getRegionNameAsString() + "(" + region.getEncodedName()
        + ")");
  }
}
Example 13: cleanParent

import org.apache.hadoop.hbase.backup.HFileArchiver; // import of the required package/class

/**
 * If the daughters no longer hold references to the parent, delete the parent.
 * @param parent HRegionInfo of the offlined split parent
 * @param rowContent Content of the <code>parent</code> row in
 *        <code>metaRegionName</code>
 * @return True if we removed <code>parent</code> from the meta table and from
 *         the filesystem.
 * @throws IOException
 */
boolean cleanParent(final HRegionInfo parent, Result rowContent)
    throws IOException {
  boolean result = false;
  // Check whether the parent is a merged region whose merge references have
  // not been cleaned up yet. It is not necessary to check MERGEB_QUALIFIER
  // because the two qualifiers are always inserted/deleted together.
  if (rowContent.getValue(HConstants.CATALOG_FAMILY,
      HConstants.MERGEA_QUALIFIER) != null) {
    // wait for the merge-region cleanup to run first
    return result;
  }
  // Run checks on each daughter split.
  PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(rowContent);
  Pair<Boolean, Boolean> a = checkDaughterInFs(parent, daughters.getFirst());
  Pair<Boolean, Boolean> b = checkDaughterInFs(parent, daughters.getSecond());
  if (hasNoReferences(a) && hasNoReferences(b)) {
    LOG.debug("Deleting region " + parent.getRegionNameAsString() +
        " because daughter splits no longer hold references");
    FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
    if (LOG.isTraceEnabled()) LOG.trace("Archiving parent region: " + parent);
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, parent);
    MetaTableAccessor.deleteRegion(this.connection, parent);
    result = true;
  }
  return result;
}
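In other words, a split parent is only archived once both daughters have been verified on the filesystem: checkDaughterInFs evidently reports, per daughter, whether the region exists and whether it still holds Reference files to the parent, and both hasNoReferences(a) and hasNoReferences(b) must be true before HFileArchiver.archiveRegion runs and the parent's row is removed from meta.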
Example 14: removeHdfsRegions

import org.apache.hadoop.hbase.backup.HFileArchiver; // import of the required package/class

/**
 * Remove the specified regions from the file system, using the archiver.
 */
private void removeHdfsRegions(final ThreadPoolExecutor exec, final List<HRegionInfo> regions)
    throws IOException {
  if (regions == null || regions.size() == 0) return;
  ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() {
    @Override
    public void editRegion(final HRegionInfo hri) throws IOException {
      HFileArchiver.archiveRegion(conf, fs, hri);
    }
  });
}
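Because each HFileArchiver.archiveRegion call is wrapped in a RegionEditTask and handed to ModifyRegionUtils.editRegions together with a ThreadPoolExecutor, the regions are presumably archived in parallel across the executor's threads rather than one at a time, which matters when dropping a table with many regions.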
Example 15: deleteFamily

import org.apache.hadoop.hbase.backup.HFileArchiver; // import of the required package/class

/**
 * Remove the region family from disk, archiving the store files.
 *
 * @param familyName Column Family Name
 * @throws IOException if an error occurs during the archiving
 */
public void deleteFamily(final String familyName) throws IOException {
  // archive family store files
  HFileArchiver.archiveFamily(fs, conf, regionInfoForFs, tableDir, Bytes.toBytes(familyName));
  // delete the family folder
  Path familyDir = getStoreDir(familyName);
  if (fs.exists(familyDir) && !deleteDir(familyDir)) {
    throw new IOException("Could not delete family " + familyName
        + " from FileSystem for region " + regionInfoForFs.getRegionNameAsString()
        + "(" + regionInfoForFs.getEncodedName() + ")");
  }
}