This page collects typical usage examples of the Java method org.apache.hadoop.hbase.HBaseFileSystem.renameDirForFileSystem. If you have been wondering what HBaseFileSystem.renameDirForFileSystem does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.HBaseFileSystem.
The following 15 code examples of HBaseFileSystem.renameDirForFileSystem are ordered by popularity by default. You can upvote the examples you like or find useful; your ratings help the site recommend better Java code examples.
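Before the individual examples, here is a minimal sketch of the pattern almost all of them share: check that the source exists, call renameDirForFileSystem, and treat a false return as failure, since the method reports failure through its boolean result rather than by throwing. The paths and class name below are hypothetical placeholders, not taken from any example on this page.

import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseFileSystem;

public class RenameDirSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path src = new Path("/hbase/demo/src"); // hypothetical source directory
    Path dst = new Path("/hbase/demo/dst"); // hypothetical destination
    if (!fs.exists(src)) {
      throw new FileNotFoundException(src.toString());
    }
    // renameDirForFileSystem returns false on failure rather than throwing,
    // which is why every example below checks its boolean result.
    if (!HBaseFileSystem.renameDirForFileSystem(fs, src, dst)) {
      throw new IOException("Failed rename of " + src + " to " + dst);
    }
    System.out.println("Renamed " + src + " to " + dst);
  }
}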
Example 1: getLogDirs
import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class the method depends on
private List<Path> getLogDirs(final List<ServerName> serverNames) throws IOException {
  List<Path> logDirs = new ArrayList<Path>();
  for (ServerName serverName : serverNames) {
    Path logDir = new Path(this.rootdir,
        HLog.getHLogDirectoryName(serverName.toString()));
    Path splitDir = logDir.suffix(HLog.SPLITTING_EXT);
    // rename the directory so a rogue RS doesn't create more HLogs
    if (fs.exists(logDir)) {
      if (!HBaseFileSystem.renameDirForFileSystem(fs, logDir, splitDir)) {
        throw new IOException("Failed fs.rename for log split: " + logDir);
      }
      logDir = splitDir;
      LOG.debug("Renamed region directory: " + splitDir);
    } else if (!fs.exists(splitDir)) {
      LOG.info("Log dir for server " + serverName + " does not exist");
      continue;
    }
    logDirs.add(splitDir);
  }
  return logDirs;
}
Example 2: restoreLCCHFiles
import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class the method depends on
public static synchronized void restoreLCCHFiles() throws IOException {
  List<Path> listPath =
      listLCCPath(LCCIndexConstant.INDEX_DIR_NAME_DEBUG, LCCIndexConstant.INDEX_DIR_NAME);
  Path lccPath;
  for (Path p : listPath) {
    // p: /hbase/lcc/**/f/.debuglcc/name
    lccPath = new Path(p.getParent().getParent(), LCCIndexConstant.INDEX_DIR_NAME);
    if (!hdfs.exists(lccPath)) {
      hdfs.mkdirs(lccPath);
    }
    if (hdfs.exists(p)) {
      Path newPath = new Path(lccPath, p.getName());
      if (!HBaseFileSystem.renameDirForFileSystem(hdfs, p, newPath)) {
        System.out.println("winter in LCCIndexDebugger, restore file error: " + p);
      }
    } else {
      System.out.println("winter in LCCIndexDebugger, lcc debug hfile should exists but not: "
          + p);
    }
  }
}
Example 3: moveToTemp
import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class the method depends on
/**
 * Move the specified file/directory to the hbase temp directory.
 * @param path The path of the file/directory to move
 * @return The temp location of the file/directory moved
 * @throws IOException in case of file-system failure
 */
public Path moveToTemp(final Path path) throws IOException {
  Path tempPath = new Path(this.tempdir, path.getName());
  // Ensure temp exists
  if (!fs.exists(tempdir) && !HBaseFileSystem.makeDirOnFileSystem(fs, tempdir)) {
    throw new IOException("HBase temp directory '" + tempdir + "' creation failure.");
  }
  if (!HBaseFileSystem.renameDirForFileSystem(fs, path, tempPath)) {
    throw new IOException("Unable to move '" + path + "' to temp '" + tempPath + "'");
  }
  return tempPath;
}
Example 4: rename
import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class the method depends on
/**
 * Utility to help with rename.
 * @param fs
 * @param src
 * @param tgt
 * @return True if succeeded.
 * @throws IOException
 */
public static Path rename(final FileSystem fs, final Path src, final Path tgt)
    throws IOException {
  if (!fs.exists(src)) {
    throw new FileNotFoundException(src.toString());
  }
  if (!HBaseFileSystem.renameDirForFileSystem(fs, src, tgt)) {
    throw new IOException("Failed rename of " + src + " to " + tgt);
  }
  return tgt;
}
Example 5: moveAsideBadEditsFile
import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class the method depends on
/**
 * Move aside a bad edits file.
 * @param fs
 * @param edits Edits file to move aside.
 * @return The name of the moved aside file.
 * @throws IOException
 */
public static Path moveAsideBadEditsFile(final FileSystem fs, final Path edits)
    throws IOException {
  Path moveAsideName = new Path(edits.getParent(), edits.getName() + "." +
      System.currentTimeMillis());
  if (!HBaseFileSystem.renameDirForFileSystem(fs, edits, moveAsideName)) {
    LOG.warn("Rename failed from " + edits + " to " + moveAsideName);
  }
  return moveAsideName;
}
Example 6: moveInitialFilesIntoPlace
import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class the method depends on
static void moveInitialFilesIntoPlace(final FileSystem fs, final Path initialFiles,
    final Path regiondir) throws IOException {
  if (initialFiles != null && fs.exists(initialFiles)) {
    if (!HBaseFileSystem.renameDirForFileSystem(fs, initialFiles, regiondir)) {
      LOG.warn("Unable to rename " + initialFiles + " to " + regiondir);
    }
  }
}
Example 7: mWinterLCCCompleteCompactionHDFS
import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class the method depends on
private void mWinterLCCCompleteCompactionHDFS(final Collection<StoreFile> compactedFiles,
    final StoreFile.Writer compactedFile) throws IOException {
  // lccIndexOrigPath = /hbase/lcc/xxx/.tmp/aaa.lccindex
  Path lccIndexDirPath = mWinterGetLCCIndexFilePathFromHFilePathInTmp(compactedFile.getPath());
  FileStatus[] fileStatusArray = fs.listStatus(lccIndexDirPath);
  if (fileStatusArray == null || fileStatusArray.length == 0) {
    return;
  }
  for (FileStatus fileStatus : fileStatusArray) {
    // fileStatus = /hbase/lcc/xxx/.tmp/aaa.lccindex/qualifier
    // System.out.println("winter checking lccIndexRawPath: " + fileStatus.getPath());
    // lccIndexDir = /hbase/lcc/AAA/.lccindex + Q1-Q4 + BBB
    Path lccIndexDstPath =
        new Path(new Path(lccIndexDir, fileStatus.getPath().getName()), compactedFile.getPath()
            .getName());
    // System.out.println("winter checking lccIndexDstPath: " + lccIndexDstPath);
    if (!fs.exists(lccIndexDstPath.getParent())) {
      // System.out.println("winter lccindex dir path not exists, create first: "
      // + lccIndexDstPath.getParent());
      HBaseFileSystem.makeDirOnFileSystem(fs, lccIndexDstPath.getParent());
    }
    // System.out.println("winter renaming compacted lcc index file at " + fileStatus.getPath()
    // + " to " + lccIndexDstPath);
    LOG.info("Renaming compacted index file at " + fileStatus.getPath() + " to "
        + lccIndexDstPath);
    if (!HBaseFileSystem.renameDirForFileSystem(fs, fileStatus.getPath(), lccIndexDstPath)) {
      LOG.error("Failed move of compacted index file " + fileStatus.getPath() + " to "
          + lccIndexDstPath);
      WinterOptimizer.NeedImplementation("Failed move of compacted index file "
          + fileStatus.getPath() + " to " + lccIndexDstPath);
    }
  }
  fileStatusArray = fs.listStatus(lccIndexDirPath);
  if (fileStatusArray != null && fileStatusArray.length == 0) {
    HFileArchiver.mWinterArchiveFile(conf, fs, this.region, family.getName(), lccIndexDirPath);
  } else {
    WinterOptimizer.ThrowWhenCalled("winter completeCompaction lcc dir should be empty but not: "
        + lccIndexDirPath);
  }
}
Example 8: updateAllPossibleLCCHFiles
import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class the method depends on
public static synchronized int updateAllPossibleLCCHFiles() throws IOException {
  List<Path> listPath =
      listLCCPath(LCCIndexConstant.INDEX_DIR_NAME, LCCIndexConstant.INDEX_DIR_NAME_DEBUG);
  Path debugPath;
  int changes = 0;
  for (Path p : listPath) {
    // p: /hbase/lcc/**/f/.lccindex/name
    debugPath = new Path(p.getParent().getParent(), LCCIndexConstant.INDEX_DIR_NAME_DEBUG);
    if (!hdfs.exists(debugPath)) {
      hdfs.mkdirs(debugPath);
    }
    if (lccIndexHFileMap.containsKey(p)) {
      // todo nothing now
    } else { // not inside, put it in
      if (hdfs.exists(p)) {
        Path newPath = new Path(debugPath, p.getName());
        if (!HBaseFileSystem.renameDirForFileSystem(hdfs, p, newPath)) {
          System.out.println("winter in LCCIndexDebugger, rename file error: " + p);
        } else {
          lccIndexHFileMap.put(p, newPath);
        }
      } else {
        System.out.println("winter in LCCIndexDebugger, lcc hfile should exists but not: " + p);
      }
    }
  }
  return changes;
}
Example 9: rename
import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class the method depends on
/**
 * Utility to help with rename.
 * @param fs
 * @param src
 * @param tgt
 * @return True if succeeded.
 * @throws IOException
 */
public static Path rename(final FileSystem fs,
    final Path src,
    final Path tgt)
    throws IOException {
  if (!fs.exists(src)) {
    throw new FileNotFoundException(src.toString());
  }
  if (!HBaseFileSystem.renameDirForFileSystem(fs, src, tgt)) {
    throw new IOException("Failed rename of " + src + " to " + tgt);
  }
  return tgt;
}
Example 10: moveInitialFilesIntoPlace
import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class the method depends on
static void moveInitialFilesIntoPlace(final FileSystem fs,
    final Path initialFiles, final Path regiondir)
    throws IOException {
  if (initialFiles != null && fs.exists(initialFiles)) {
    if (!HBaseFileSystem.renameDirForFileSystem(fs, initialFiles, regiondir)) {
      LOG.warn("Unable to rename " + initialFiles + " to " + regiondir);
    }
  }
}
Example 11: writeTableDescriptor
import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class the method depends on
/**
 * @param fs
 * @param hTableDescriptor
 * @param tableDir
 * @param status
 * @return Descriptor file or null if we failed write.
 * @throws IOException
 */
private static Path writeTableDescriptor(final FileSystem fs,
    final HTableDescriptor hTableDescriptor, final Path tableDir,
    final FileStatus status)
    throws IOException {
  // Get temporary dir into which we'll first write a file to avoid
  // half-written file phenomenon.
  Path tmpTableDir = new Path(tableDir, ".tmp");
  // What is the current sequenceid? We read it from the current file. After
  // we read it, another thread could come in and compete with us writing out
  // the next version of the file. The retries below should help some, but
  // it's hard to give guarantees in the face of concurrent schema edits.
  int currentSequenceid =
      status == null ? 0 : getTableInfoSequenceid(status.getPath());
  int sequenceid = currentSequenceid;
  // Put arbitrary upperbound on how often we retry
  int retries = 10;
  int retrymax = currentSequenceid + retries;
  Path tableInfoPath = null;
  do {
    sequenceid += 1;
    Path p = getTableInfoFileName(tmpTableDir, sequenceid);
    if (fs.exists(p)) {
      LOG.debug(p + " exists; retrying up to " + retries + " times");
      continue;
    }
    try {
      writeHTD(fs, p, hTableDescriptor);
      tableInfoPath = getTableInfoFileName(tableDir, sequenceid);
      if (!HBaseFileSystem.renameDirForFileSystem(fs, p, tableInfoPath)) {
        throw new IOException("Failed rename of " + p + " to " + tableInfoPath);
      }
    } catch (IOException ioe) {
      // Presume clash of names or something; go around again.
      LOG.debug("Failed write and/or rename; retrying", ioe);
      if (!FSUtils.deleteDirectory(fs, p)) {
        LOG.warn("Failed cleanup of " + p);
      }
      tableInfoPath = null;
      continue;
    }
    // Cleanup old schema file.
    if (status != null) {
      if (!FSUtils.deleteDirectory(fs, status.getPath())) {
        LOG.warn("Failed delete of " + status.getPath() + "; continuing");
      }
    }
    break;
  } while (sequenceid < retrymax);
  return tableInfoPath;
}
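The retry loop above is the interesting part of this example: each attempt bumps the sequence id, writes the new file under .tmp, and commits it by renaming it into the table directory, going around again on a name clash with a concurrent writer. Below is a rough, self-contained sketch of just that pattern; the "v" + sequenceid name scheme, the raw byte payload, and the class name are simplifications of mine, not the real .tableinfo format or the writeHTD helper.

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseFileSystem;

public class SequencedWriteSketch {
  /** Writes content as the next numbered version under dir, retrying on clashes. */
  public static Path writeNextVersion(FileSystem fs, Path dir, byte[] content,
      int currentSequenceid) throws IOException {
    Path tmpDir = new Path(dir, ".tmp");
    int retrymax = currentSequenceid + 10; // arbitrary upper bound on retries
    for (int sequenceid = currentSequenceid + 1; sequenceid < retrymax; sequenceid++) {
      Path tmp = new Path(tmpDir, "v" + sequenceid); // simplified name scheme
      if (fs.exists(tmp)) {
        continue; // another writer claimed this sequence id; try the next one
      }
      FSDataOutputStream out = fs.create(tmp);
      try {
        out.write(content);
      } finally {
        out.close();
      }
      Path dst = new Path(dir, "v" + sequenceid);
      // The rename is the commit point; false means we lost the race.
      if (HBaseFileSystem.renameDirForFileSystem(fs, tmp, dst)) {
        return dst;
      }
      fs.delete(tmp, true); // clean up the temp file and retry
    }
    return null; // retries exhausted
  }
}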
Example 12: getRegionSplitEditsPath
import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class the method depends on
/**
 * Path to a file under RECOVERED_EDITS_DIR directory of the region found in
 * <code>logEntry</code> named for the sequenceid in the passed
 * <code>logEntry</code>: e.g. /hbase/some_table/2323432434/recovered.edits/2332.
 * This method also ensures existence of RECOVERED_EDITS_DIR under the region,
 * creating it if necessary.
 * @param fs
 * @param logEntry
 * @param rootDir HBase root dir.
 * @return Path to file into which to dump split log edits.
 * @throws IOException
 */
static Path getRegionSplitEditsPath(final FileSystem fs,
    final Entry logEntry, final Path rootDir, boolean isCreate)
    throws IOException {
  Path tableDir = HTableDescriptor.getTableDir(rootDir, logEntry.getKey()
      .getTablename());
  String encodedRegionName = Bytes.toString(logEntry.getKey().getEncodedRegionName());
  Path regiondir = HRegion.getRegionDir(tableDir, encodedRegionName);
  Path dir = HLog.getRegionDirRecoveredEditsDir(regiondir);
  if (!fs.exists(regiondir)) {
    LOG.info("This region's directory doesn't exist: "
        + regiondir.toString() + ". It is very likely that it was" +
        " already split so it's safe to discard those edits.");
    return null;
  }
  if (fs.exists(dir) && fs.isFile(dir)) {
    Path tmp = new Path("/tmp");
    if (!fs.exists(tmp)) {
      fs.mkdirs(tmp);
    }
    tmp = new Path(tmp,
        HLog.RECOVERED_EDITS_DIR + "_" + encodedRegionName);
    LOG.warn("Found existing old file: " + dir + ". It could be some "
        + "leftover of an old installation. It should be a folder instead. "
        + "So moving it to " + tmp);
    if (!HBaseFileSystem.renameDirForFileSystem(fs, dir, tmp)) {
      LOG.warn("Failed to sideline old file " + dir);
    }
  }
  if (isCreate && !fs.exists(dir) &&
      !HBaseFileSystem.makeDirOnFileSystem(fs, dir)) {
    LOG.warn("mkdir failed on " + dir);
  }
  // Append file name ends with RECOVERED_LOG_TMPFILE_SUFFIX to ensure
  // region's replayRecoveredEdits will not delete it
  String fileName = formatRecoveredEditsFileName(logEntry.getKey()
      .getLogSeqNum());
  fileName = getTmpRecoveredEditsFileName(fileName);
  return new Path(dir, fileName);
}
Example 13: closeStreams
import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class the method depends on
/**
 * Close all of the output streams.
 * @return the list of paths written.
 */
private List<Path> closeStreams() throws IOException {
  Preconditions.checkState(!closeAndCleanCompleted);
  List<Path> paths = new ArrayList<Path>();
  List<IOException> thrown = Lists.newArrayList();
  closeLogWriters(thrown);
  for (Map.Entry<byte[], WriterAndPath> logWritersEntry : logWriters
      .entrySet()) {
    WriterAndPath wap = logWritersEntry.getValue();
    Path dst = getCompletedRecoveredEditsFilePath(wap.p,
        regionMaximumEditLogSeqNum.get(logWritersEntry.getKey()));
    try {
      if (!dst.equals(wap.p) && fs.exists(dst)) {
        LOG.warn("Found existing old edits file. It could be the "
            + "result of a previous failed split attempt. Deleting " + dst
            + ", length=" + fs.getFileStatus(dst).getLen());
        if (!HBaseFileSystem.deleteFileFromFileSystem(fs, dst)) {
          LOG.warn("Failed deleting of old " + dst);
          throw new IOException("Failed deleting of old " + dst);
        }
      }
      // Skip the unit tests which create a splitter that reads and writes
      // the data without touching disk. TestHLogSplit#testThreading is an
      // example.
      if (fs.exists(wap.p)) {
        if (!HBaseFileSystem.renameDirForFileSystem(fs, wap.p, dst)) {
          throw new IOException("Failed renaming " + wap.p + " to " + dst);
        }
        LOG.debug("Rename " + wap.p + " to " + dst);
      }
    } catch (IOException ioe) {
      LOG.error("Couldn't rename " + wap.p + " to " + dst, ioe);
      thrown.add(ioe);
      continue;
    }
    paths.add(dst);
  }
  if (!thrown.isEmpty()) {
    throw MultipleIOException.createIOException(thrown);
  }
  closeAndCleanCompleted = true;
  return paths;
}
Example 14: writeRegioninfoOnFilesystem
import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class the method depends on
/**
 * Write out an info file under the region directory. Useful recovering mangled regions. If the
 * regioninfo already exists on disk and there is information in the file, then we fast exit.
 * @param regionInfo information about the region
 * @param regiondir directory under which to write out the region info
 * @param fs {@link FileSystem} on which to write the region info
 * @param conf {@link Configuration} from which to extract specific file locations
 * @throws IOException on unexpected error.
 */
public static void writeRegioninfoOnFilesystem(HRegionInfo regionInfo, Path regiondir,
    FileSystem fs, Configuration conf) throws IOException {
  Path regioninfoPath = new Path(regiondir, REGIONINFO_FILE);
  if (fs.exists(regioninfoPath)) {
    if (fs.getFileStatus(regioninfoPath).getLen() > 0) {
      return;
    }
    LOG.info("Rewriting .regioninfo file at: " + regioninfoPath);
    if (!fs.delete(regioninfoPath, false)) {
      throw new IOException("Unable to remove existing " + regioninfoPath);
    }
  }
  // Create in tmpdir and then move into place in case we crash after
  // create but before close. If we don't successfully close the file,
  // subsequent region reopens will fail the below because create is
  // registered in NN.
  // first check to get the permissions
  FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
  // and then create the file
  Path tmpPath = new Path(getTmpDir(regiondir), REGIONINFO_FILE);
  // if datanode crashes or if the RS goes down just before the close is called while trying to
  // close the created regioninfo file in the .tmp directory then on next
  // creation we will be getting AlreadyCreatedException.
  // Hence delete and create the file if exists.
  if (FSUtils.isExists(fs, tmpPath)) {
    FSUtils.delete(fs, tmpPath, true);
  }
  FSDataOutputStream out = FSUtils.create(fs, tmpPath, perms);
  try {
    regionInfo.write(out);
    out.write('\n');
    out.write('\n');
    out.write(Bytes.toBytes(regionInfo.toString()));
  } finally {
    out.close();
  }
  if (!HBaseFileSystem.renameDirForFileSystem(fs, tmpPath, regioninfoPath)) {
    throw new IOException("Unable to rename " + tmpPath + " to " + regioninfoPath);
  }
}
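The comments in this example spell out a crash-safety pattern that several examples on this page rely on: fully write and close the file under a temp path, then rename it to its final name, so readers never observe a half-written file. Here is that idea reduced to a self-contained sketch; the class name, paths, and raw byte payload are hypothetical, and the permission handling of the real method is omitted.

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseFileSystem;

public class AtomicPublishSketch {
  /** Writes payload under tmpPath, closes it, then renames it to finalPath. */
  public static void publish(FileSystem fs, Path tmpPath, Path finalPath, byte[] payload)
      throws IOException {
    // A leftover temp file from a crashed writer would break the create below,
    // so remove it first, mirroring the isExists/delete step in the real method.
    if (fs.exists(tmpPath)) {
      fs.delete(tmpPath, true);
    }
    FSDataOutputStream out = fs.create(tmpPath);
    try {
      out.write(payload);
    } finally {
      out.close(); // only a fully written, closed file gets renamed into place
    }
    if (!HBaseFileSystem.renameDirForFileSystem(fs, tmpPath, finalPath)) {
      throw new IOException("Unable to rename " + tmpPath + " to " + finalPath);
    }
  }
}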
Example 15: mWinterCommitLCCHDFS
import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class the method depends on
private void mWinterCommitLCCHDFS(final Path path, MonitoredTask status) throws IOException {
  // target must be f/.lccindex/Q1-Q4/BBB // path = /hbase/lcc/AAA/.tmp/BBB
  Path lccIndexPath = new Path(path.toString() + LCCIndexConstant.INDEX_DIR_NAME);
  FileStatus[] lccIndexTempFileStatus = fs.listStatus(lccIndexPath);
  if (lccIndexTempFileStatus != null && lccIndexTempFileStatus.length > 0) {
    for (FileStatus fileStatus : lccIndexTempFileStatus) {
      // fileStatus = tempFileLocation = /hbase/lcc/AAA/.tmp/BBB.lccindex/Q1-Q4
      // lccIndexDir = /hbase/lcc/AAA/f/.lccindex
      // indexQualifierPath = /hbase/lcc/AAA/f/.lccindex/Q1-Q4
      Path indexQualifierPath = new Path(lccIndexDir, fileStatus.getPath().getName());
      if (!fs.exists(indexQualifierPath)) {
        HBaseFileSystem.makeDirOnFileSystem(fs, indexQualifierPath);
        LOG.debug("winter lccindex qualifier dir path does not exist, create first: "
            + indexQualifierPath);
      } else {
        LOG.debug("winter lccindex qualifier dir exists: " + indexQualifierPath);
      }
      // lccIndexDestPath = /hbase/lcc/AAA/f/.lccindex/Q1-Q4/BBB
      Path lccIndexDestPath = new Path(indexQualifierPath, path.getName());
      String msg =
          "Renaming flushed lccindex file at " + fileStatus.getPath() + " to " + lccIndexDestPath;
      LOG.debug(msg);
      status.setStatus("Flushing " + this + ": " + msg);
      if (!HBaseFileSystem.renameDirForFileSystem(fs, new Path(fileStatus.getPath().getParent(),
          fileStatus.getPath().getName() + LCCIndexConstant.LC_STAT_FILE_SUFFIX), new Path(
          indexQualifierPath, path.getName() + LCCIndexConstant.LC_STAT_FILE_SUFFIX))) {
        LOG.warn("Unable to rename lccindex stat file " + fileStatus.getPath() + " to "
            + lccIndexDestPath);
      }
      if (!HBaseFileSystem.renameDirForFileSystem(fs, fileStatus.getPath(), lccIndexDestPath)) {
        LOG.warn("Unable to rename lccindex file " + fileStatus.getPath() + " to "
            + lccIndexDestPath);
      }
      LOG.info("winter finish renaming lccindex file " + fileStatus.getPath() + " to "
          + lccIndexDestPath);
    }
    lccIndexTempFileStatus = fs.listStatus(lccIndexPath);
    if (lccIndexTempFileStatus != null && lccIndexTempFileStatus.length == 0) {
      HFileArchiver.mWinterArchiveFile(conf, fs, this.region, family.getName(), lccIndexPath);
    } else {
      WinterOptimizer.ThrowWhenCalled("winter commit lcc dir should be empty but not: "
          + lccIndexPath);
    }
  } else { // no lccindex exists
    System.out.println("winter lccIndexPath dir not exist: " + lccIndexPath);
  }
}