This article collects typical usage examples of the Java method org.apache.hadoop.hbase.util.FSUtils.listStatus. If you are wondering what FSUtils.listStatus does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also read further about the enclosing class, org.apache.hadoop.hbase.util.FSUtils.
The following 15 code examples of FSUtils.listStatus are shown, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
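Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the path is illustrative) showing how FSUtils.listStatus is typically called. Unlike FileSystem.listStatus, which throws for a missing directory, FSUtils.listStatus returns null when the directory is missing or empty, which is why every example below null-checks the result.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.FSUtils;

public class ListStatusDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Illustrative path; point this at any directory on your cluster.
    Path dir = new Path("/hbase/WALs");
    // FSUtils.listStatus returns null when the directory is missing or empty,
    // so callers always null-check the result, as every example below does.
    FileStatus[] entries = FSUtils.listStatus(fs, dir);
    if (entries == null) {
      System.out.println("Nothing under " + dir);
      return;
    }
    for (FileStatus entry : entries) {
      System.out.println(entry.getPath() + " (directory=" + entry.isDirectory() + ")");
    }
  }
}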
Example 1: doOfflineLogSplitting
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class that the method depends on
/**
 * Performs log splitting for all regionserver directories.
 * @throws Exception
 */
private void doOfflineLogSplitting() throws Exception {
  LOG.info("Starting Log splitting");
  final Path rootDir = FSUtils.getRootDir(getConf());
  final Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
  // since this is the singleton, we needn't close it.
  final WALFactory factory = WALFactory.getInstance(getConf());
  FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
  Path logDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
  FileStatus[] regionServerLogDirs = FSUtils.listStatus(fs, logDir);
  if (regionServerLogDirs == null || regionServerLogDirs.length == 0) {
    LOG.info("No log directories to split, returning");
    return;
  }
  try {
    for (FileStatus regionServerLogDir : regionServerLogDirs) {
      // split its log dir, if exists
      WALSplitter.split(rootDir, regionServerLogDir.getPath(), oldLogDir, fs, getConf(), factory);
    }
    LOG.info("Successfully completed Log splitting");
  } catch (Exception e) {
    LOG.error("Got exception while doing Log splitting ", e);
    throw e;
  }
}
Example 2: getAllFiles
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class that the method depends on
/**
 * Get all the files (non-directory entries) in the file system under the passed directory
 * @param dir directory to investigate
 * @return all files under the directory
 */
private List<Path> getAllFiles(FileSystem fs, Path dir) throws IOException {
  FileStatus[] files = FSUtils.listStatus(fs, dir, null);
  if (files == null) {
    LOG.warn("No files under:" + dir);
    return null;
  }
  List<Path> allFiles = new ArrayList<Path>();
  for (FileStatus file : files) {
    if (file.isDirectory()) {
      List<Path> subFiles = getAllFiles(fs, file.getPath());
      if (subFiles != null) allFiles.addAll(subFiles);
      continue;
    }
    allFiles.add(file.getPath());
  }
  return allFiles;
}
Example 3: getFileList
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class that the method depends on
/**
 * Get a list of paths that need to be split given a set of server-specific directories and
 * optionally a filter.
 *
 * See {@link DefaultWALProvider#getServerNameFromWALDirectoryName} for more info on directory
 * layout.
 *
 * Should be package-private, but is needed by
 * {@link org.apache.hadoop.hbase.wal.WALSplitter#split(Path, Path, Path, FileSystem,
 * Configuration, WALFactory)} for tests.
 */
@VisibleForTesting
public static FileStatus[] getFileList(final Configuration conf, final List<Path> logDirs,
    final PathFilter filter) throws IOException {
  List<FileStatus> fileStatus = new ArrayList<FileStatus>();
  for (Path logDir : logDirs) {
    final FileSystem fs = logDir.getFileSystem(conf);
    if (!fs.exists(logDir)) {
      LOG.warn(logDir + " doesn't exist. Nothing to do!");
      continue;
    }
    FileStatus[] logfiles = FSUtils.listStatus(fs, logDir, filter);
    if (logfiles == null || logfiles.length == 0) {
      LOG.info(logDir + " is empty dir, no logs to split");
    } else {
      Collections.addAll(fileStatus, logfiles);
    }
  }
  FileStatus[] a = new FileStatus[fileStatus.size()];
  return fileStatus.toArray(a);
}
Example 4: isFileDeletable
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class that the method depends on
@Override
public boolean isFileDeletable(FileStatus fStat) {
  try {
    // if it's a directory, then it can be deleted
    if (fStat.isDirectory()) return true;
    Path file = fStat.getPath();
    // check to see if the file still exists
    FileStatus[] deleteStatus = FSUtils.listStatus(this.fs, file, null);
    // if the file doesn't exist, then it can be deleted (but this should never
    // happen since deleted files shouldn't get passed in)
    if (deleteStatus == null) return true;
    // otherwise, we need to check the file's table and see whether it is being archived
    Path family = file.getParent();
    Path region = family.getParent();
    Path table = region.getParent();
    String tableName = table.getName();
    boolean ret = !archiveTracker.keepHFiles(tableName);
    LOG.debug("Archiver says to [" + (ret ? "delete" : "keep") + "] files for table:" + tableName);
    return ret;
  } catch (IOException e) {
    LOG.error("Failed to lookup status of:" + fStat.getPath() + ", keeping it just in case.", e);
    return false;
  }
}
Example 5: archiveFamily
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class that the method depends on
/**
 * Remove from the specified region the store files of the specified column family,
 * either by archiving them or outright deletion
 * @param fs the filesystem where the store files live
 * @param conf {@link Configuration} to examine to determine the archive directory
 * @param parent Parent region hosting the store files
 * @param tableDir {@link Path} to where the table is being stored (for building the archive path)
 * @param family the family hosting the store files
 * @throws IOException if the files could not be correctly disposed.
 */
public static void archiveFamily(FileSystem fs, Configuration conf,
    HRegionInfo parent, Path tableDir, byte[] family) throws IOException {
  Path familyDir = new Path(tableDir, new Path(parent.getEncodedName(), Bytes.toString(family)));
  FileStatus[] storeFiles = FSUtils.listStatus(fs, familyDir);
  if (storeFiles == null) {
    LOG.debug("No store files to dispose for region=" + parent.getRegionNameAsString() +
        ", family=" + Bytes.toString(family));
    return;
  }
  FileStatusConverter getAsFile = new FileStatusConverter(fs);
  Collection<File> toArchive = Lists.transform(Arrays.asList(storeFiles), getAsFile);
  Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, parent, tableDir, family);
  // do the actual archive
  if (!resolveAndArchive(fs, storeArchiveDir, toArchive)) {
    throw new IOException("Failed to archive/delete all the files for region:"
        + Bytes.toString(parent.getRegionName()) + ", family:" + Bytes.toString(family)
        + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.");
  }
}
Example 6: getStoreDirHosts
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class that the method depends on
/**
 * return the top hosts of the store files, used by the Split
 */
private static String[] getStoreDirHosts(final FileSystem fs, final Path path)
    throws IOException {
  FileStatus[] files = FSUtils.listStatus(fs, path);
  if (files == null) {
    return new String[] {};
  }
  HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
  for (FileStatus hfileStatus : files) {
    HDFSBlocksDistribution storeFileBlocksDistribution =
        FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0, hfileStatus.getLen());
    hdfsBlocksDistribution.add(storeFileBlocksDistribution);
  }
  List<String> hosts = hdfsBlocksDistribution.getTopHosts();
  return hosts.toArray(new String[hosts.size()]);
}
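A brief, hypothetical note on consuming the result above: getTopHosts() should return hosts ordered by how much of the listed data each one holds, so the first entry is the most data-local host. A caller might use it roughly like this (storeDir is an illustrative variable, not from the source):

String[] hosts = getStoreDirHosts(fs, storeDir);            // hosts holding the store-file blocks
String preferredHost = hosts.length > 0 ? hosts[0] : null;  // most data-local host, if any
System.out.println("Preferred host for this store: " + preferredHost);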
Example 7: cleanupAnySplitDetritus
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class that the method depends on
/**
 * Clean up any split detritus that may have been left around from previous split attempts. Call
 * this method on initial region deploy.
 *
 * @throws IOException
 */
void cleanupAnySplitDetritus() throws IOException {
  Path splitdir = this.getSplitsDir();
  if (!fs.exists(splitdir)) return;
  // Look at the splitdir. It could have the encoded names of the daughter
  // regions we tried to make. See if the daughter regions actually got made
  // out under the tabledir. If here under splitdir still, then the split did
  // not complete. Try and do cleanup. This code WILL NOT catch the case
  // where we successfully created daughter a but regionserver crashed during
  // the creation of region b. In this case, there'll be an orphan daughter
  // dir in the filesystem. TODO: Fix.
  FileStatus[] daughters = FSUtils.listStatus(fs, splitdir, new FSUtils.DirFilter(fs));
  if (daughters != null) {
    for (FileStatus daughter : daughters) {
      Path daughterDir = new Path(getTableDir(), daughter.getPath().getName());
      if (fs.exists(daughterDir) && !deleteDir(daughterDir)) {
        throw new IOException("Failed delete of " + daughterDir);
      }
    }
  }
  cleanupSplitsDir();
  LOG.info("Cleaned up old failed split transaction detritus: " + splitdir);
}
Example 8: removeTableInfoInPre96Format
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class that the method depends on
/**
 * Removes .tableinfo files that are laid out in the pre-0.96 format (i.e., the .tableinfo files
 * live directly under the table directory).
 * @param tableName table whose legacy .tableinfo files should be removed
 * @throws IOException
 */
private void removeTableInfoInPre96Format(TableName tableName) throws IOException {
  Path tableDir = FSUtils.getTableDir(rootDir, tableName);
  FileStatus[] status = FSUtils.listStatus(fs, tableDir, TABLEINFO_PATHFILTER);
  if (status == null) return;
  for (FileStatus fStatus : status) {
    FSUtils.delete(fs, fStatus.getPath(), false);
  }
}
Example 9: getCurrentTableInfoStatus
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class that the method depends on
/**
 * @return the most current .tableinfo file found under the passed directory, or null if none
 */
static FileStatus getCurrentTableInfoStatus(FileSystem fs, Path dir) throws IOException {
  FileStatus[] status = FSUtils.listStatus(fs, dir, TABLEINFO_PATHFILTER);
  if (status == null || status.length < 1) return null;
  FileStatus mostCurrent = null;
  for (FileStatus file : status) {
    if (mostCurrent == null || TABLEINFO_FILESTATUS_COMPARATOR.compare(file, mostCurrent) < 0) {
      mostCurrent = file;
    }
  }
  return mostCurrent;
}
Example 10: getSnapshotsInProgress
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class that the method depends on
@VisibleForTesting
List<String> getSnapshotsInProgress() throws IOException {
  List<String> snapshotInProgress = Lists.newArrayList();
  // only add those files to the cache, but not to the known snapshots
  Path snapshotTmpDir = new Path(snapshotDir, SnapshotDescriptionUtils.SNAPSHOT_TMP_DIR_NAME);
  FileStatus[] running = FSUtils.listStatus(fs, snapshotTmpDir);
  if (running != null) {
    for (FileStatus run : running) {
      snapshotInProgress.addAll(fileInspector.filesUnderSnapshot(run.getPath()));
    }
  }
  return snapshotInProgress;
}
Example 11: getTableRegionFamilyFiles
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class that the method depends on
/**
 * @return The set of files in the specified family directory.
 */
private Set<String> getTableRegionFamilyFiles(final Path familyDir) throws IOException {
  FileStatus[] hfiles = FSUtils.listStatus(fs, familyDir);
  if (hfiles == null) return Collections.emptySet();
  Set<String> familyFiles = new HashSet<String>(hfiles.length);
  for (int i = 0; i < hfiles.length; ++i) {
    String hfileName = hfiles[i].getPath().getName();
    familyFiles.add(hfileName);
  }
  return familyFiles;
}
Example 12: getTableRegions
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class that the method depends on
/**
 * @return the set of the regions contained in the table
 */
private List<HRegionInfo> getTableRegions() throws IOException {
  LOG.debug("get table regions: " + tableDir);
  FileStatus[] regionDirs = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs));
  if (regionDirs == null) return null;
  List<HRegionInfo> regions = new ArrayList<HRegionInfo>(regionDirs.length);
  for (int i = 0; i < regionDirs.length; ++i) {
    HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDirs[i].getPath());
    regions.add(hri);
  }
  LOG.debug("found " + regions.size() + " regions for table=" +
      tableDesc.getTableName().getNameAsString());
  return regions;
}
Example 13: getAllFileNames
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class that the method depends on
/**
 * Get the names of all the files below the given directory
 * @param fs the filesystem to inspect
 * @param archiveDir directory to start the recursive listing from
 * @return the names of all the files found below the directory
 * @throws IOException
 */
private List<String> getAllFileNames(final FileSystem fs, Path archiveDir) throws IOException {
  FileStatus[] files = FSUtils.listStatus(fs, archiveDir, new PathFilter() {
    @Override
    public boolean accept(Path p) {
      if (p.getName().contains(HConstants.RECOVERED_EDITS_DIR)) {
        return false;
      }
      return true;
    }
  });
  return recurseOnFiles(fs, files, new ArrayList<String>());
}
Example 14: getFamilies
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class that the method depends on
/**
 * @return the set of families present on disk
 * @throws IOException
 */
public Collection<String> getFamilies() throws IOException {
  FileStatus[] fds = FSUtils.listStatus(fs, getRegionDir(), new FSUtils.FamilyDirFilter(fs));
  if (fds == null) return null;
  ArrayList<String> families = new ArrayList<String>(fds.length);
  for (FileStatus status : fds) {
    families.add(status.getPath().getName());
  }
  return families;
}
Example 15: getSplitEditFilesSorted
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class that the method depends on
/**
 * Returns sorted set of edit files made by splitter, excluding files
 * with '.temp' suffix.
 *
 * @param fs the filesystem to read from
 * @param regiondir region directory whose recovered-edits files should be listed
 * @return Files in passed <code>regiondir</code> as a sorted set.
 * @throws IOException
 */
public static NavigableSet<Path> getSplitEditFilesSorted(final FileSystem fs,
    final Path regiondir) throws IOException {
  NavigableSet<Path> filesSorted = new TreeSet<Path>();
  Path editsdir = getRegionDirRecoveredEditsDir(regiondir);
  if (!fs.exists(editsdir)) return filesSorted;
  FileStatus[] files = FSUtils.listStatus(fs, editsdir, new PathFilter() {
    @Override
    public boolean accept(Path p) {
      boolean result = false;
      try {
        // Return files and only files that match the editfile names pattern.
        // There can be other files in this directory other than edit files.
        // In particular, on error, we'll move aside the bad edit file giving
        // it a timestamp suffix. See moveAsideBadEditsFile.
        Matcher m = EDITFILES_NAME_PATTERN.matcher(p.getName());
        result = fs.isFile(p) && m.matches();
        // Skip files whose name ends with RECOVERED_LOG_TMPFILE_SUFFIX,
        // because it means a splitwal thread is still writing that file.
        if (p.getName().endsWith(RECOVERED_LOG_TMPFILE_SUFFIX)) {
          result = false;
        }
        // Skip SeqId files
        if (isSequenceIdFile(p)) {
          result = false;
        }
      } catch (IOException e) {
        LOG.warn("Failed isFile check on " + p);
      }
      return result;
    }
  });
  if (files == null) {
    return filesSorted;
  }
  for (FileStatus status : files) {
    filesSorted.add(status.getPath());
  }
  return filesSorted;
}