This article collects typical usage examples of the Java method org.apache.hadoop.hbase.util.FSUtils.isExists. If you are wondering what FSUtils.isExists does, how it is used, or where to find concrete examples, the selected snippets below may help. You can also read more about the enclosing class, org.apache.hadoop.hbase.util.FSUtils.

The following shows 2 code examples of the FSUtils.isExists method.
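Before the full examples, a minimal, hedged sketch of the call itself: FSUtils.isExists(fs, path) simply reports whether a path exists on the given FileSystem and throws IOException on filesystem errors. The configuration, path, and class name below are placeholders for illustration and are not taken from the examples that follow.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.FSUtils;

public class IsExistsSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Placeholder path; substitute a real location under your HBase root dir.
    Path path = new Path("/hbase/example/path");
    if (FSUtils.isExists(fs, path)) {
      // Recursive delete, mirroring the delete-then-recreate guard in Example 1 below.
      FSUtils.delete(fs, path, true);
    }
  }
}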
Example 1: writeRegionInfoOnFilesystem
import org.apache.hadoop.hbase.util.FSUtils; // import required by the method below
/**
 * Write out an info file under the region directory. Useful when recovering mangled regions.
 *
 * @param regionInfoContent serialized version of the {@link HRegionInfo}
 * @param useTempDir whether to use the region .tmp dir for a safer file creation
 */
private void writeRegionInfoOnFilesystem(final byte[] regionInfoContent, final boolean useTempDir)
    throws IOException {
  Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
  if (useTempDir) {
    // Create in tmpDir and then move into place in case we crash after
    // create but before close. If we don't successfully close the file,
    // subsequent region reopens will fail below because the create is
    // already registered in the NN.

    // And then create the file
    Path tmpPath = new Path(getTempDir(), REGION_INFO_FILE);

    // If a datanode crashes, or the RS goes down just before close() is
    // called on the regioninfo file in the .tmp directory, the next
    // creation attempt would fail with AlreadyCreatedException.
    // Hence delete the file first if it exists.
    if (FSUtils.isExists(fs, tmpPath)) {
      FSUtils.delete(fs, tmpPath, true);
    }

    // Write HRI to a file in case we need to recover hbase:meta
    writeRegionInfoFileContent(conf, fs, tmpPath, regionInfoContent);

    // Move the created file into its final place
    if (fs.exists(tmpPath) && !rename(tmpPath, regionInfoFile)) {
      throw new IOException("Unable to rename " + tmpPath + " to " + regionInfoFile);
    }
  } else {
    // Write HRI to a file in case we need to recover hbase:meta
    writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
  }
}
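The create-in-.tmp-then-rename idiom used above is a general HDFS pattern, not specific to region info files. The sketch below shows the same guard-write-rename sequence in isolation; the method name writeAtomically is illustrative, not part of HRegionFileSystem, and the usual org.apache.hadoop.fs and org.apache.hadoop.hbase.util imports are assumed.

// Generic sketch of the guard-then-write-then-rename pattern; not HBase-internal code.
void writeAtomically(FileSystem fs, Path tmpPath, Path finalPath, byte[] content)
    throws IOException {
  if (FSUtils.isExists(fs, tmpPath)) {
    FSUtils.delete(fs, tmpPath, true);       // clear a stale file left by a crashed attempt
  }
  try (FSDataOutputStream out = fs.create(tmpPath)) {
    out.write(content);                      // write the payload to the temp location
  }
  if (!fs.rename(tmpPath, finalPath)) {      // move into place only once fully written
    throw new IOException("Unable to rename " + tmpPath + " to " + finalPath);
  }
}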
Example 2: checkDaughterInFs
import org.apache.hadoop.hbase.util.FSUtils; // import required by the method below
/**
 * Checks if a daughter region -- either splitA or splitB -- still holds
 * references to the parent.
 * @param parent Parent region
 * @param daughter Daughter region
 * @return A pair where the first boolean says whether the daughter region
 * directory exists in the filesystem, and the second boolean says whether
 * the daughter has references to the parent.
 * @throws IOException
 */
Pair<Boolean, Boolean> checkDaughterInFs(final HRegionInfo parent, final HRegionInfo daughter)
    throws IOException {
  if (daughter == null) {
    return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
  }

  FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
  Path rootdir = this.services.getMasterFileSystem().getRootDir();
  Path tabledir = FSUtils.getTableDir(rootdir, daughter.getTable());
  Path daughterRegionDir = new Path(tabledir, daughter.getEncodedName());

  HRegionFileSystem regionFs = null;

  try {
    if (!FSUtils.isExists(fs, daughterRegionDir)) {
      return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
    }
  } catch (IOException ioe) {
    LOG.warn("Error trying to determine if daughter region exists, " +
        "assuming exists and has references", ioe);
    return new Pair<Boolean, Boolean>(Boolean.TRUE, Boolean.TRUE);
  }

  try {
    regionFs = HRegionFileSystem.openRegionFromFileSystem(
        this.services.getConfiguration(), fs, tabledir, daughter, true);
  } catch (IOException e) {
    LOG.warn("Error trying to determine referenced files from : " + daughter.getEncodedName()
        + ", to: " + parent.getEncodedName() + " assuming has references", e);
    return new Pair<Boolean, Boolean>(Boolean.TRUE, Boolean.TRUE);
  }

  boolean references = false;
  HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTable());
  for (HColumnDescriptor family : parentDescriptor.getFamilies()) {
    if ((references = regionFs.hasReferences(family.getNameAsString()))) {
      break;
    }
  }
  return new Pair<Boolean, Boolean>(Boolean.TRUE, Boolean.valueOf(references));
}
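As a follow-up, a hedged sketch of how the returned Pair might be consumed when deciding whether a split parent can be cleaned up; the call site and the variable names parent, splitA, and splitB are assumptions for illustration, not part of the original class.

// Hypothetical call site; parent, splitA and splitB are HRegionInfo instances.
Pair<Boolean, Boolean> daughterA = checkDaughterInFs(parent, splitA);
Pair<Boolean, Boolean> daughterB = checkDaughterInFs(parent, splitB);
// First element: does the daughter region directory exist on the filesystem?
boolean daughterAExists = daughterA.getFirst();
// Second element: does the daughter still hold reference files to the parent?
boolean parentStillReferenced = daughterA.getSecond() || daughterB.getSecond();
// Only when neither daughter references the parent can the parent region be archived.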