This article collects typical usage examples of the Java class org.apache.hadoop.hbase.HBaseFileSystem. If you have been wondering what HBaseFileSystem is, what it is used for, or how to use it, the curated examples below may help.
The HBaseFileSystem class belongs to the org.apache.hadoop.hbase package. Fourteen code examples are shown below, sorted by popularity by default.
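Before diving in, here is a minimal, self-contained sketch of the pattern the examples share: instead of calling fs.mkdirs() or fs.delete() directly, callers go through HBaseFileSystem's static wrappers (makeDirOnFileSystem, renameDirForFileSystem, deleteDirFromFileSystem, and so on), which report failure through a boolean return value. The class name and path below are illustrative; the sketch assumes an HBase 0.94-era classpath and a reachable filesystem.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseFileSystem;

public class HBaseFileSystemDemo {
  public static void main(String[] args) throws IOException {
    // Assumes fs.defaultFS in the Configuration points at the target cluster.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path dir = new Path("/tmp/hbasefs-demo"); // illustrative path
    // Failure is reported via the boolean return value, not an exception,
    // so callers decide for themselves whether a failure is fatal.
    if (!HBaseFileSystem.makeDirOnFileSystem(fs, dir)) {
      throw new IOException("Could not create " + dir);
    }
    if (!HBaseFileSystem.deleteDirFromFileSystem(fs, dir)) {
      throw new IOException("Could not delete " + dir);
    }
  }
}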
Example 1: createInitialFileSystemLayout

import org.apache.hadoop.hbase.HBaseFileSystem; // import the required package/class

/**
 * Create the initial layout in the filesystem.
 * <ol>
 * <li>Check that the root region exists and is readable; create it if not.
 * Create hbase.version and the -ROOT- directory if they do not already exist.
 * </li>
 * <li>Create a log archive directory for region servers to put archived logs in</li>
 * </ol>
 * Idempotent.
 */
private Path createInitialFileSystemLayout() throws IOException {
  // check if the root directory exists
  checkRootDir(this.rootdir, conf, this.fs);
  // check if the temp directory exists and clean it
  checkTempDir(this.tempdir, conf, this.fs);
  Path oldLogDir = new Path(this.rootdir, HConstants.HREGION_OLDLOGDIR_NAME);
  // Make sure the region servers can archive their old logs
  if (!this.fs.exists(oldLogDir)) {
    HBaseFileSystem.makeDirOnFileSystem(fs, oldLogDir);
  }
  return oldLogDir;
}
Example 2: getLogDirs

import org.apache.hadoop.hbase.HBaseFileSystem; // import the required package/class

private List<Path> getLogDirs(final List<ServerName> serverNames) throws IOException {
  List<Path> logDirs = new ArrayList<Path>();
  for (ServerName serverName : serverNames) {
    Path logDir = new Path(this.rootdir,
        HLog.getHLogDirectoryName(serverName.toString()));
    Path splitDir = logDir.suffix(HLog.SPLITTING_EXT);
    // rename the directory so a rogue RS doesn't create more HLogs
    if (fs.exists(logDir)) {
      if (!HBaseFileSystem.renameDirForFileSystem(fs, logDir, splitDir)) {
        throw new IOException("Failed fs.rename for log split: " + logDir);
      }
      logDir = splitDir;
      LOG.debug("Renamed region directory: " + splitDir);
    } else if (!fs.exists(splitDir)) {
      // neither the live dir nor a leftover split dir exists; nothing to split
      LOG.info("Log dir for server " + serverName + " does not exist");
      continue;
    }
    logDirs.add(splitDir);
  }
  return logDirs;
}
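The rename-before-split step above is a fencing trick: an HDFS rename is atomic, so once the log directory carries the splitting suffix, a not-quite-dead region server that keeps writing will only recreate an empty directory under the old name instead of corrupting the logs being split. A tiny sketch of the suffix mechanics follows; the server name is illustrative, and the literal "-splitting" is an assumption about what HLog.SPLITTING_EXT holds.

import org.apache.hadoop.fs.Path;

public class SuffixDemo {
  public static void main(String[] args) {
    // Path.suffix() appends to the last path component, so the renamed
    // directory stays next to the original one.
    Path logDir = new Path("/hbase/.logs/host,60020,1"); // illustrative server log dir
    Path splitDir = logDir.suffix("-splitting"); // assumed value of HLog.SPLITTING_EXT
    System.out.println(splitDir); // prints /hbase/.logs/host,60020,1-splitting
  }
}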
Example 3: checkTempDir

import org.apache.hadoop.hbase.HBaseFileSystem; // import the required package/class

/**
 * Make sure the hbase temp directory exists and is empty.
 * NOTE that this method is only executed once, just after the master becomes the active one.
 */
private void checkTempDir(final Path tmpdir, final Configuration c, final FileSystem fs)
    throws IOException {
  // If the temp directory exists, clear the content left over from the previous run
  if (fs.exists(tmpdir)) {
    // Archive tables in temp, possibly left over from a failed deletion;
    // if not, the cleaner will take care of them.
    for (Path tabledir : FSUtils.getTableDirs(fs, tmpdir)) {
      for (Path regiondir : FSUtils.getRegionDirs(fs, tabledir)) {
        HFileArchiver.archiveRegion(fs, this.rootdir, tabledir, regiondir);
      }
    }
    if (!HBaseFileSystem.deleteDirFromFileSystem(fs, tmpdir)) {
      throw new IOException("Unable to clean the temp directory: " + tmpdir);
    }
  }
  // Create the temp directory
  if (!HBaseFileSystem.makeDirOnFileSystem(fs, tmpdir)) {
    throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
  }
}
Example 4: deleteFamilyFromFS

import org.apache.hadoop.hbase.HBaseFileSystem; // import the required package/class

public void deleteFamilyFromFS(HRegionInfo region, byte[] familyName)
    throws IOException {
  // archive the family's store files
  Path tableDir = new Path(rootdir, region.getTableNameAsString());
  HFileArchiver.archiveFamily(fs, conf, region, tableDir, familyName);
  // delete the family folder
  Path familyDir = new Path(tableDir,
      new Path(region.getEncodedName(), Bytes.toString(familyName)));
  if (!HBaseFileSystem.deleteDirFromFileSystem(fs, familyDir)) {
    throw new IOException("Could not delete family "
        + Bytes.toString(familyName) + " from FileSystem for region "
        + region.getRegionNameAsString() + "(" + region.getEncodedName()
        + ")");
  }
}
Example 5: archiveStoreFile

import org.apache.hadoop.hbase.HBaseFileSystem; // import the required package/class

/**
 * Archive the store file.
 * @param fs the filesystem where the store files live
 * @param regionInfo region hosting the store files
 * @param conf {@link Configuration} to examine to determine the archive directory
 * @param tableDir {@link Path} to where the table is being stored (for building the archive path)
 * @param family the family hosting the store files
 * @param storeFile file to be archived
 * @throws IOException if the files could not be correctly disposed of.
 */
public static void archiveStoreFile(FileSystem fs, HRegionInfo regionInfo, Configuration conf,
    Path tableDir, byte[] family, Path storeFile) throws IOException {
  Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family);
  // make sure we don't archive if we can't, and that the archive dir exists
  if (!HBaseFileSystem.makeDirOnFileSystem(fs, storeArchiveDir)) {
    throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:"
        + Bytes.toString(family) + ", deleting compacted files instead.");
  }
  // do the actual archiving
  long start = EnvironmentEdgeManager.currentTimeMillis();
  File file = new FileablePath(fs, storeFile);
  if (!resolveAndArchiveFile(storeArchiveDir, file, Long.toString(start))) {
    throw new IOException("Failed to archive/delete the file for region:"
        + regionInfo.getRegionNameAsString() + ", family:" + Bytes.toString(family) + " into "
        + storeArchiveDir + ". Something is probably awry on the filesystem.");
  }
}
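As a usage note, a caller typically derives tableDir from the root directory and the region's table name, the same way Example 4 does. A hedged sketch of such a caller follows; the method name is hypothetical, and it assumes archiveStoreFile lives on HFileArchiver alongside the archiveRegion and archiveFamily calls seen earlier.

// Hypothetical helper showing how archiveStoreFile might be invoked after a compaction.
static void archiveCompactedFile(FileSystem fs, HRegionInfo region, Configuration conf,
    Path rootDir, byte[] family, Path compactedFile) throws IOException {
  // Build the table directory the same way Example 4 does.
  Path tableDir = new Path(rootDir, region.getTableNameAsString());
  HFileArchiver.archiveStoreFile(fs, region, conf, tableDir, family, compactedFile);
}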
Example 6: finishSplitLogFile

import org.apache.hadoop.hbase.HBaseFileSystem; // import the required package/class

public static void finishSplitLogFile(Path rootdir, Path oldLogDir,
    String logfile, Configuration conf) throws IOException {
  List<Path> processedLogs = new ArrayList<Path>();
  List<Path> corruptedLogs = new ArrayList<Path>();
  FileSystem fs = rootdir.getFileSystem(conf);
  Path logPath = new Path(logfile);
  // sort the log into the corrupted or the cleanly-processed bucket
  if (ZKSplitLog.isCorrupted(rootdir, logPath.getName(), fs)) {
    corruptedLogs.add(logPath);
  } else {
    processedLogs.add(logPath);
  }
  archiveLogs(null, corruptedLogs, processedLogs, oldLogDir, fs, conf);
  // the staging directory is no longer needed once the log has been archived
  Path stagingDir = ZKSplitLog.getSplitLogDir(rootdir, logPath.getName());
  HBaseFileSystem.deleteDirFromFileSystem(fs, stagingDir);
}
Example 7: createWAP

import org.apache.hadoop.hbase.HBaseFileSystem; // import the required package/class

private WriterAndPath createWAP(byte[] region, Entry entry, Path rootdir,
    FileSystem fs, Configuration conf)
    throws IOException {
  Path regionedits = getRegionSplitEditsPath(fs, entry, rootdir, true);
  if (regionedits == null) {
    return null;
  }
  if (fs.exists(regionedits)) {
    LOG.warn("Found existing old edits file. It could be the "
        + "result of a previous failed split attempt. Deleting "
        + regionedits + ", length="
        + fs.getFileStatus(regionedits).getLen());
    if (!HBaseFileSystem.deleteFileFromFileSystem(fs, regionedits)) {
      LOG.warn("Failed delete of old " + regionedits);
    }
  }
  Writer w = createWriter(fs, regionedits, conf);
  LOG.debug("Creating writer path=" + regionedits + " region="
      + Bytes.toStringBinary(region));
  return new WriterAndPath(regionedits, w);
}
Example 8: restoreLCCHFiles

import org.apache.hadoop.hbase.HBaseFileSystem; // import the required package/class

public static synchronized void restoreLCCHFiles() throws IOException {
  List<Path> listPath =
      listLCCPath(LCCIndexConstant.INDEX_DIR_NAME_DEBUG, LCCIndexConstant.INDEX_DIR_NAME);
  Path lccPath;
  for (Path p : listPath) {
    // p: /hbase/lcc/**/f/.debuglcc/name
    lccPath = new Path(p.getParent().getParent(), LCCIndexConstant.INDEX_DIR_NAME);
    if (!hdfs.exists(lccPath)) {
      hdfs.mkdirs(lccPath);
    }
    if (hdfs.exists(p)) {
      Path newPath = new Path(lccPath, p.getName());
      if (!HBaseFileSystem.renameDirForFileSystem(hdfs, p, newPath)) {
        System.out.println("winter in LCCIndexDebugger, restore file error: " + p);
      }
    } else {
      System.out.println("winter in LCCIndexDebugger, lcc debug hfile should exist but does not: "
          + p);
    }
  }
}
Example 9: markCorrupted

import org.apache.hadoop.hbase.HBaseFileSystem; // import the required package/class

public static void markCorrupted(Path rootdir, String logFileName,
    FileSystem fs) {
  // drop a "corrupt" marker file into the log's split directory
  Path file = new Path(getSplitLogDir(rootdir, logFileName), "corrupt");
  try {
    HBaseFileSystem.createNewFileOnFileSystem(fs, file);
  } catch (IOException e) {
    LOG.warn("Could not flag a log file as corrupted. Failed to create " +
        file, e);
  }
}
Example 10: moveToTemp

import org.apache.hadoop.hbase.HBaseFileSystem; // import the required package/class

/**
 * Move the specified file/directory to the hbase temp directory.
 * @param path The path of the file/directory to move
 * @return The temp location of the file/directory moved
 * @throws IOException in case of file-system failure
 */
public Path moveToTemp(final Path path) throws IOException {
  Path tempPath = new Path(this.tempdir, path.getName());
  // Ensure the temp directory exists
  if (!fs.exists(tempdir) && !HBaseFileSystem.makeDirOnFileSystem(fs, tempdir)) {
    throw new IOException("HBase temp directory '" + tempdir + "' creation failure.");
  }
  if (!HBaseFileSystem.renameDirForFileSystem(fs, path, tempPath)) {
    throw new IOException("Unable to move '" + path + "' to temp '" + tempPath + "'");
  }
  return tempPath;
}
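One likely use of moveToTemp is a two-phase delete: rename the directory into the temp area first (fast and atomic), then remove it off the live path. A hedged sketch follows; all names are illustrative, "this" stands for whatever object exposes moveToTemp and the fs/rootdir fields above, and FSUtils.getTablePath is the helper used in Example 13 below.

// Hypothetical two-phase table delete built on moveToTemp.
Path tableDir = FSUtils.getTablePath(this.rootdir, "mytable");
Path tmp = this.moveToTemp(tableDir); // fast, atomic rename into the temp area
if (!HBaseFileSystem.deleteDirFromFileSystem(fs, tmp)) {
  // the slow recursive delete now runs safely off the live path
  throw new IOException("Failed to delete " + tmp);
}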
Example 11: deleteRegionWithoutArchiving

import org.apache.hadoop.hbase.HBaseFileSystem; // import the required package/class

/**
 * Without regard for backup, delete a region. Should be used with caution.
 * @param regionDir {@link Path} to the region to be deleted
 * @param fs FileSystem from which to delete the region
 * @return <tt>true</tt> on successful deletion, <tt>false</tt> otherwise
 * @throws IOException on filesystem operation failure
 */
private static boolean deleteRegionWithoutArchiving(FileSystem fs, Path regionDir)
    throws IOException {
  if (HBaseFileSystem.deleteDirFromFileSystem(fs, regionDir)) {
    LOG.debug("Deleted all region files in: " + regionDir);
    return true;
  }
  LOG.debug("Failed to delete region directory: " + regionDir);
  return false;
}
Example 12: delete

import org.apache.hadoop.hbase.HBaseFileSystem; // import the required package/class

@Override
public void delete() throws IOException {
  if (!compactedIndexFiles.isEmpty()) {
    for (Path indexfile : compactedIndexFiles) {
      if (!HBaseFileSystem.deleteFileFromFileSystem(fs, indexfile)) {
        throw new IOException("Failed to delete: " + indexfile);
      }
    }
  }
}
Example 13: remove

import org.apache.hadoop.hbase.HBaseFileSystem; // import the required package/class

@Override
public HTableDescriptor remove(final String tablename)
    throws IOException {
  if (!this.fsreadonly) {
    Path tabledir = FSUtils.getTablePath(this.rootdir, tablename);
    if (this.fs.exists(tabledir)) {
      if (!HBaseFileSystem.deleteDirFromFileSystem(fs, tabledir)) {
        throw new IOException("Failed delete of " + tabledir.toString());
      }
    }
  }
  // drop the cached descriptor regardless of whether the on-disk copy existed
  TableDescriptorModtime tdm = this.cache.remove(tablename);
  return tdm == null ? null : tdm.getTableDescriptor();
}
Example 14: writeHTD

import org.apache.hadoop.hbase.HBaseFileSystem; // import the required package/class

private static void writeHTD(final FileSystem fs, final Path p,
    final HTableDescriptor htd)
    throws IOException {
  FSDataOutputStream out = HBaseFileSystem.createPathOnFileSystem(fs, p, false);
  try {
    htd.write(out);
    // append a human-readable dump after the serialized bytes, for debugging
    out.write('\n');
    out.write('\n');
    out.write(Bytes.toBytes(htd.toString()));
  } finally {
    out.close();
  }
}
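For completeness, here is a hedged sketch of the matching read path: the Writable bytes come first in the file, so readFields() consumes them and can ignore the trailing human-readable dump. The method name is hypothetical, and the no-arg HTableDescriptor constructor plus readFields() are assumptions about the 0.94-era Writable API.

// Hypothetical counterpart to writeHTD above.
private static HTableDescriptor readHTD(final FileSystem fs, final Path p)
    throws IOException {
  FSDataInputStream in = fs.open(p);
  try {
    HTableDescriptor htd = new HTableDescriptor();
    htd.readFields(in); // stops after the serialized bytes; the text dump is ignored
    return htd;
  } finally {
    in.close();
  }
}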