本文整理汇总了Java中org.apache.hadoop.hbase.HBaseFileSystem.deleteDirFromFileSystem方法的典型用法代码示例。如果您正苦于以下问题:Java HBaseFileSystem.deleteDirFromFileSystem方法的具体用法?Java HBaseFileSystem.deleteDirFromFileSystem怎么用?Java HBaseFileSystem.deleteDirFromFileSystem使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hbase.HBaseFileSystem的用法示例。
在下文中一共展示了HBaseFileSystem.deleteDirFromFileSystem方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: checkTempDir
import org.apache.hadoop.hbase.HBaseFileSystem; //导入方法依赖的package包/类
/**
 * Ensures the hbase temp directory exists and is empty.
 * NOTE that this method is only executed once just after the master becomes the active one.
 *
 * @param tmpdir the hbase temp directory to (re)initialize
 * @param c cluster configuration
 * @param fs filesystem hosting the temp directory
 * @throws IOException if the directory cannot be cleaned or created
 */
private void checkTempDir(final Path tmpdir, final Configuration c, final FileSystem fs)
throws IOException {
  if (fs.exists(tmpdir)) {
    // Archive any table/region data left over in temp (e.g. from a failed
    // deletion); whatever remains afterwards is the cleaner's job.
    for (Path tableDir : FSUtils.getTableDirs(fs, tmpdir)) {
      for (Path regionDir : FSUtils.getRegionDirs(fs, tableDir)) {
        HFileArchiver.archiveRegion(fs, this.rootdir, tableDir, regionDir);
      }
    }
    if (!HBaseFileSystem.deleteDirFromFileSystem(fs, tmpdir)) {
      throw new IOException("Unable to clean the temp directory: " + tmpdir);
    }
  }
  // Recreate an empty temp directory.
  if (!HBaseFileSystem.makeDirOnFileSystem(fs, tmpdir)) {
    throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
  }
}
示例2: deleteFamilyFromFS
import org.apache.hadoop.hbase.HBaseFileSystem; //导入方法依赖的package包/类
/**
 * Archives a column family's store files for the given region, then
 * removes the family directory from the filesystem.
 *
 * @param region region whose family directory is being removed
 * @param familyName name of the column family
 * @throws IOException if the family directory cannot be deleted
 */
public void deleteFamilyFromFS(HRegionInfo region, byte[] familyName)
throws IOException {
  final Path tableDir = new Path(rootdir, region.getTableNameAsString());
  // Move the family's store files into the archive before removing anything.
  HFileArchiver.archiveFamily(fs, conf, region, tableDir, familyName);
  // Now delete the (emptied) family directory itself.
  final Path familyDir =
      new Path(tableDir, new Path(region.getEncodedName(), Bytes.toString(familyName)));
  if (!HBaseFileSystem.deleteDirFromFileSystem(fs, familyDir)) {
    throw new IOException("Could not delete family "
        + Bytes.toString(familyName) + " from FileSystem for region "
        + region.getRegionNameAsString() + "(" + region.getEncodedName()
        + ")");
  }
}
示例3: finishSplitLogFile
import org.apache.hadoop.hbase.HBaseFileSystem; //导入方法依赖的package包/类
/**
 * Completes split-log processing for a single log file: archives it as
 * either processed or corrupted, then removes its split staging directory.
 *
 * @param rootdir hbase root directory
 * @param oldLogDir directory that receives archived logs
 * @param logfile path (as string) of the log file that was split
 * @param conf configuration used to resolve the filesystem
 * @throws IOException on filesystem operation failure
 */
public static void finishSplitLogFile(Path rootdir, Path oldLogDir,
String logfile, Configuration conf) throws IOException {
  final FileSystem fs = rootdir.getFileSystem(conf);
  final Path logPath = new Path(logfile);
  final List<Path> processedLogs = new ArrayList<Path>();
  final List<Path> corruptedLogs = new ArrayList<Path>();
  // Route the log into exactly one of the two buckets.
  if (ZKSplitLog.isCorrupted(rootdir, logPath.getName(), fs)) {
    corruptedLogs.add(logPath);
  } else {
    processedLogs.add(logPath);
  }
  archiveLogs(null, corruptedLogs, processedLogs, oldLogDir, fs, conf);
  // Remove the staging dir; the delete's return value is not checked here.
  final Path stagingDir = ZKSplitLog.getSplitLogDir(rootdir, logPath.getName());
  HBaseFileSystem.deleteDirFromFileSystem(fs, stagingDir);
}
示例4: deleteRegionWithoutArchiving
import org.apache.hadoop.hbase.HBaseFileSystem; //导入方法依赖的package包/类
/**
 * Without regard for backup, delete a region. Should be used with caution.
 * @param fs FileSystem from which to delete the region
 * @param regionDir {@link Path} to the region to be deleted.
 * @return <tt>true</tt> on successful deletion, <tt>false</tt> otherwise
 * @throws IOException on filesystem operation failure
 */
private static boolean deleteRegionWithoutArchiving(FileSystem fs, Path regionDir)
throws IOException {
  final boolean deleted = HBaseFileSystem.deleteDirFromFileSystem(fs, regionDir);
  if (deleted) {
    LOG.debug("Deleted all region files in: " + regionDir);
  } else {
    LOG.debug("Failed to delete region directory:" + regionDir);
  }
  return deleted;
}
示例5: remove
import org.apache.hadoop.hbase.HBaseFileSystem; //导入方法依赖的package包/类
/**
 * Removes the table's directory from the filesystem (unless read-only)
 * and evicts its descriptor from the cache.
 *
 * @param tablename name of the table to remove
 * @return the previously cached descriptor, or null if none was cached
 * @throws IOException if the table directory exists but cannot be deleted
 */
@Override
public HTableDescriptor remove(final String tablename)
throws IOException {
  // Only touch the filesystem when not running in read-only mode.
  if (!this.fsreadonly) {
    final Path tabledir = FSUtils.getTablePath(this.rootdir, tablename);
    if (this.fs.exists(tabledir)
        && !HBaseFileSystem.deleteDirFromFileSystem(fs, tabledir)) {
      throw new IOException("Failed delete of " + tabledir.toString());
    }
  }
  // Evict from the descriptor cache regardless of filesystem state.
  final TableDescriptorModtime tdm = this.cache.remove(tablename);
  return tdm != null ? tdm.getTableDescriptor() : null;
}
示例6: deleteReader
import org.apache.hadoop.hbase.HBaseFileSystem; //导入方法依赖的package包/类
/**
 * Deletes this store file (and its index file, if any) from the filesystem
 * after closing the reader.
 * @throws IOException on filesystem operation failure
 */
public void deleteReader() throws IOException {
closeReader(true);
HBaseFileSystem.deleteDirFromFileSystem(fs, getPath());
if (hasIndex && getIndexPath() != null) {
// When it's a reference file, the index path can be null.
HBaseFileSystem.deleteDirFromFileSystem(fs, getIndexPath());
}
// NOTE(review): this call presumably aborts at runtime to flag unfinished
// LCC-index cleanup (see commented-out block below) — confirm intended
// behavior before relying on this method.
WinterOptimizer.ThrowWhenCalled("winter consider deleteReader!");
// if (hasLCCIndex && mWinterGetLCCIndexPath() != null) {
// HBaseFileSystem.deleteDirFromFileSystem(fs, mWinterGetLCCIndexPath());
// }
}
示例7: createSplitDir
import org.apache.hadoop.hbase.HBaseFileSystem; //导入方法依赖的package包/类
/**
 * Creates a fresh, empty temporary split directory, first removing any
 * leftover directory from a previous attempt.
 * @param fs Filesystem to use
 * @param splitdir Directory to store temporary split data in
 * @throws IOException If <code>splitdir</code> already exists or we fail
 * to create it.
 * @see #cleanupSplitDir(FileSystem, Path)
 */
void createSplitDir(final FileSystem fs, final Path splitdir)
throws IOException {
  if (fs.exists(splitdir)) {
    // A stale split dir from an earlier attempt: wipe it so we start clean.
    LOG.info("The " + splitdir
        + " directory exists. Hence deleting it to recreate it");
    if (!HBaseFileSystem.deleteDirFromFileSystem(fs, splitdir)) {
      throw new IOException("Failed deletion of " + splitdir
          + " before creating them again.");
    }
  }
  if (!HBaseFileSystem.makeDirOnFileSystem(fs, splitdir)) {
    throw new IOException("Failed create of " + splitdir);
  }
}
示例8: deleteDir
import org.apache.hadoop.hbase.HBaseFileSystem; //导入方法依赖的package包/类
/**
 * Deletes a directory, optionally requiring that it exist beforehand.
 * @param fs Filesystem to use
 * @param dir Directory to delete
 * @param mustPreExist If true, we'll throw exception if <code>dir</code>
 * does not preexist, else we'll just pass.
 * @throws IOException Thrown if we fail to delete passed <code>dir</code>
 */
private static void deleteDir(final FileSystem fs, final Path dir,
    final boolean mustPreExist)
throws IOException {
  if (fs.exists(dir)) {
    if (!HBaseFileSystem.deleteDirFromFileSystem(fs, dir)) {
      throw new IOException("Failed delete of " + dir);
    }
  } else if (mustPreExist) {
    // Caller demanded the directory be present; a missing dir is an error.
    throw new IOException(dir.toString() + " does not exist!");
  }
}
示例9: deleteRegion
import org.apache.hadoop.hbase.HBaseFileSystem; //导入方法依赖的package包/类
/**
 * Removes a region directory from the filesystem; a failed delete is
 * logged as a warning rather than thrown.
 *
 * @param fs filesystem holding the region directory
 * @param regiondir region directory to delete
 * @throws IOException on filesystem operation failure
 */
private static void deleteRegion(FileSystem fs, Path regiondir) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("DELETING region " + regiondir.toString());
  }
  final boolean deleted = HBaseFileSystem.deleteDirFromFileSystem(fs, regiondir);
  if (!deleted) {
    LOG.warn("Failed delete of " + regiondir);
  }
}
示例10: deleteReader
import org.apache.hadoop.hbase.HBaseFileSystem; //导入方法依赖的package包/类
/**
 * Deletes this store file from the filesystem after closing the reader,
 * also removing the companion index file when one exists.
 * @throws IOException on filesystem operation failure
 */
public void deleteReader() throws IOException {
  closeReader(true);
  HBaseFileSystem.deleteDirFromFileSystem(fs, getPath());
  if (!hasIndex) {
    return;
  }
  // When this is a reference file the index path can be null.
  if (getIndexPath() != null) {
    HBaseFileSystem.deleteDirFromFileSystem(fs, getIndexPath());
  }
}
示例11: deleteRegion
import org.apache.hadoop.hbase.HBaseFileSystem; //导入方法依赖的package包/类
/**
 * Best-effort removal of a region directory; a failed delete is only
 * logged as a warning.
 *
 * @param fs filesystem holding the region directory
 * @param regiondir region directory to delete
 * @throws IOException on filesystem operation failure
 */
private static void deleteRegion(FileSystem fs, Path regiondir)
throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("DELETING region " + regiondir.toString());
  }
  if (HBaseFileSystem.deleteDirFromFileSystem(fs, regiondir)) {
    return;
  }
  LOG.warn("Failed delete of " + regiondir);
}
示例12: deleteTable
import org.apache.hadoop.hbase.HBaseFileSystem; //导入方法依赖的package包/类
/**
 * Deletes the given table's directory under the root dir.
 *
 * @param tableName name of the table whose directory should be removed
 * @throws IOException if the table directory could not be deleted
 */
public void deleteTable(byte[] tableName) throws IOException {
  Path tableDir = new Path(rootdir, Bytes.toString(tableName));
  // The delete result was previously discarded; surface failures to the
  // caller instead, matching how sibling deletion code in this codebase
  // (e.g. FSTableDescriptors.remove) treats a failed directory delete.
  if (!HBaseFileSystem.deleteDirFromFileSystem(fs, tableDir)) {
    throw new IOException("Failed delete of " + tableDir);
  }
}
示例13: handleTableOperation
import org.apache.hadoop.hbase.HBaseFileSystem; //导入方法依赖的package包/类
/**
 * Performs the table-delete operation: waits (bounded) for the table's
 * regions to leave the in-transition state, removes them from META, moves
 * the table directory into the temp area, archives each region's files,
 * deletes the temp table directory, and finally clears descriptor-cache
 * and ZK bookkeeping.
 *
 * @param regions regions belonging to the table being deleted
 * @throws IOException if a region stays in transition past the configured
 *         wait, or on filesystem/META failure
 * @throws KeeperException on ZooKeeper failure
 */
@Override
protected void handleTableOperation(List<HRegionInfo> regions)
throws IOException, KeeperException {
// 1. Wait (bounded by hbase.master.wait.on.region, default 5 min) for each
// region to clear the regions-in-transition set.
AssignmentManager am = this.masterServices.getAssignmentManager();
long waitTime = server.getConfiguration().
getLong("hbase.master.wait.on.region", 5 * 60 * 1000);
for (HRegionInfo region : regions) {
long done = System.currentTimeMillis() + waitTime;
while (System.currentTimeMillis() < done) {
AssignmentManager.RegionState rs = am.isRegionInTransition(region);
if (rs == null) break;
Threads.sleep(waitingTimeForEvents);
LOG.debug("Waiting on region to clear regions in transition; " + rs);
}
// Give up rather than delete a table whose regions are still moving.
if (am.isRegionInTransition(region) != null) {
throw new IOException("Waited hbase.master.wait.on.region (" +
waitTime + "ms) for region to leave region " +
region.getRegionNameAsString() + " in transitions");
}
}
// 2. Remove the table's regions from META.
LOG.debug("Deleting regions from META");
MetaEditor.deleteRegions(this.server.getCatalogTracker(), regions);
// 3. Move the table directory into /hbase/.tmp.
LOG.debug("Moving table directory to a temp directory");
MasterFileSystem mfs = this.masterServices.getMasterFileSystem();
Path tempTableDir = mfs.moveTableToTemp(tableName);
try {
// 4. Archive each region's files from the temp directory.
FileSystem fs = mfs.getFileSystem();
for (HRegionInfo hri: regions) {
LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
HFileArchiver.archiveRegion(fs, mfs.getRootDir(),
tempTableDir, new Path(tempTableDir, hri.getEncodedName()));
}
// 5. Delete the table directory from the temp area. A failure here is
// only logged so the cleanup in the finally block still runs.
if (!HBaseFileSystem.deleteDirFromFileSystem(fs, tempTableDir)) {
LOG.error("Couldn't delete " + tempTableDir);
}
} finally {
// 6. Evict the table descriptor from the cache.
this.masterServices.getTableDescriptors().remove(Bytes.toString(tableName));
// 7. If there is an entry for this table in ZK / AssignmentManager,
// mark it deleted.
am.getZKTable().setDeletedTable(Bytes.toString(tableName));
}
}
示例14: delete
import org.apache.hadoop.hbase.HBaseFileSystem; //导入方法依赖的package包/类
/**
 * Deletes the underlying file from the filesystem. The delete's return
 * value is not checked, so a failed delete is silently ignored here.
 * @throws IOException on filesystem operation failure
 */
@Override
public void delete() throws IOException {
HBaseFileSystem.deleteDirFromFileSystem(fs, file);
}
示例15: delete
import org.apache.hadoop.hbase.HBaseFileSystem; //导入方法依赖的package包/类
/**
 * Deletes the underlying file, failing loudly when the filesystem reports
 * that the delete did not happen.
 * @throws IOException if the file could not be deleted
 */
@Override
public void delete() throws IOException {
  if (!HBaseFileSystem.deleteDirFromFileSystem(fs, file)) {
    throw new IOException("Failed to delete:" + this.file);
  }
}