本文整理汇总了Java中org.apache.hadoop.hbase.util.FSVisitor类的典型用法代码示例。如果您正苦于以下问题:Java FSVisitor类的具体用法?Java FSVisitor怎么用?Java FSVisitor使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
FSVisitor类属于org.apache.hadoop.hbase.util包,在下文中一共展示了FSVisitor类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: verifySnapshot
import org.apache.hadoop.hbase.util.FSVisitor; //导入依赖的package包/类
/**
 * Verify the validity of the snapshot
 *
 * @param conf The current {@link Configuration} instance.
 * @param fs {@link FileSystem}
 * @param snapshotDir {@link Path} to the Snapshot directory of the snapshot to verify
 * @param snapshotDesc the {@link SnapshotDescription} of the snapshot to verify
 * @throws CorruptedSnapshotException if the snapshot is corrupted
 * @throws IOException if an error occurred while scanning the directory
 */
public static void verifySnapshot(final Configuration conf, final FileSystem fs,
    final Path snapshotDir, final SnapshotDescription snapshotDesc) throws IOException {
  final String table = snapshotDesc.getTable();
  visitTableStoreFiles(fs, snapshotDir, new FSVisitor.StoreFileVisitor() {
    @Override
    public void storeFile(final String region, final String family, final String hfile)
        throws IOException {
      // Resolve the link for every referenced store file; a missing target
      // surfaces as FileNotFoundException, which marks the snapshot corrupted.
      HFileLink link = HFileLink.create(conf, table, region, family, hfile);
      try {
        link.getFileStatus(fs);
      } catch (FileNotFoundException e) {
        throw new CorruptedSnapshotException("Corrupted snapshot '" + snapshotDesc + "'", e);
      }
    }
  });
}
示例2: getRegionHFileReferences
import org.apache.hadoop.hbase.util.FSVisitor; //导入依赖的package包/类
/**
 * Get the list of hfiles for the specified snapshot region.
 * NOTE: The current implementation keeps one empty file per HFile in the region.
 * The file name matches the one in the original table, and by reconstructing
 * the path you can quickly jump to the referenced file.
 *
 * @param fs {@link FileSystem}
 * @param snapshotRegionDir {@link Path} to the Snapshot region directory
 * @return Map of hfiles per family, the key is the family name and values are hfile names
 * @throws IOException if an error occurred while scanning the directory
 */
public static Map<String, List<String>> getRegionHFileReferences(final FileSystem fs,
    final Path snapshotRegionDir) throws IOException {
  // TreeMap keeps the returned families sorted by name.
  final Map<String, List<String>> familyFiles = new TreeMap<String, List<String>>();
  visitRegionStoreFiles(fs, snapshotRegionDir,
    new FSVisitor.StoreFileVisitor() {
      @Override
      public void storeFile(final String region, final String family, final String hfile)
          throws IOException {
        // computeIfAbsent replaces the manual get/null-check/put sequence.
        familyFiles.computeIfAbsent(family, k -> new LinkedList<String>()).add(hfile);
      }
    });
  return familyFiles;
}
示例3: getHFileNames
import org.apache.hadoop.hbase.util.FSVisitor; //导入依赖的package包/类
/**
 * Returns the store file names in the snapshot.
 *
 * @param fs {@link FileSystem}
 * @param snapshotDir {@link Path} to the Snapshot directory
 * @throws IOException if an error occurred while scanning the directory
 * @return the names of hfiles in the specified snapshot
 */
public static Set<String> getHFileNames(final FileSystem fs, final Path snapshotDir)
    throws IOException {
  final Set<String> names = new HashSet<String>();
  visitTableStoreFiles(fs, snapshotDir, new FSVisitor.StoreFileVisitor() {
    @Override
    public void storeFile(final String region, final String family, final String hfile)
        throws IOException {
      // For link files record the referenced hfile name; otherwise the name as-is.
      if (HFileLink.isHFileLink(hfile)) {
        names.add(HFileLink.getReferencedHFileName(hfile));
      } else {
        names.add(hfile);
      }
    }
  });
  return names;
}
示例4: corruptSnapshot
import org.apache.hadoop.hbase.util.FSVisitor; //导入依赖的package包/类
/**
 * Corrupt the specified snapshot by deleting some files.
 *
 * @param util {@link HBaseTestingUtility}
 * @param snapshotName name of the snapshot to corrupt
 * @return list of the corrupted HFile names
 * @throws IOException on unexpected error reading the FS
 */
public static ArrayList<String> corruptSnapshot(final HBaseTestingUtility util,
    final String snapshotName) throws IOException {
  final MasterFileSystem mfs = util.getHBaseCluster().getMaster().getMasterFileSystem();
  final FileSystem fs = mfs.getFileSystem();
  Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName,
      mfs.getRootDir());
  SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
  final String table = snapshotDesc.getTable();
  // Parameterized list instead of the raw type.
  final ArrayList<String> corruptedFiles = new ArrayList<String>();
  SnapshotReferenceUtil.visitTableStoreFiles(fs, snapshotDir, new FSVisitor.StoreFileVisitor() {
    @Override
    public void storeFile(final String region, final String family, final String hfile)
        throws IOException {
      HFileLink link = HFileLink.create(util.getConfiguration(), table, region, family, hfile);
      // Delete every other referenced hfile so the snapshot becomes corrupted.
      if (corruptedFiles.size() % 2 == 0) {
        // delete(Path) is deprecated; delete(Path, true) is its documented equivalent.
        fs.delete(link.getAvailablePath(fs), true);
        corruptedFiles.add(hfile);
      }
    }
  });
  assertTrue(corruptedFiles.size() > 0);
  return corruptedFiles;
}
示例5: verifySnapshot
import org.apache.hadoop.hbase.util.FSVisitor; //导入依赖的package包/类
/**
 * Verify the validity of the snapshot
 *
 * @param conf The current {@link Configuration} instance.
 * @param fs {@link FileSystem}
 * @param snapshotDir {@link Path} to the Snapshot directory of the snapshot to verify
 * @param snapshotDesc the {@link SnapshotDescription} of the snapshot to verify
 * @throws CorruptedSnapshotException if the snapshot is corrupted
 * @throws IOException if an error occurred while scanning the directory
 */
public static void verifySnapshot(final Configuration conf, final FileSystem fs,
    final Path snapshotDir, final SnapshotDescription snapshotDesc) throws IOException {
  final TableName table = TableName.valueOf(snapshotDesc.getTable());
  visitTableStoreFiles(fs, snapshotDir, new FSVisitor.StoreFileVisitor() {
    @Override
    public void storeFile(final String region, final String family, final String hfile)
        throws IOException {
      // Resolve the link for every referenced store file; a missing target
      // surfaces as FileNotFoundException, which marks the snapshot corrupted.
      HFileLink link = HFileLink.create(conf, table, region, family, hfile);
      try {
        link.getFileStatus(fs);
      } catch (FileNotFoundException e) {
        throw new CorruptedSnapshotException("Corrupted snapshot '" + snapshotDesc + "'", e);
      }
    }
  });
}
示例6: getWALNames
import org.apache.hadoop.hbase.util.FSVisitor; //导入依赖的package包/类
/**
 * Collects the names of all log files referenced by the snapshot.
 *
 * @param fs {@link FileSystem}
 * @param snapshotDir {@link Path} to the Snapshot directory
 * @throws IOException if an error occurred while scanning the directory
 * @return the names of wals in the specified snapshot
 */
public static Set<String> getWALNames(final FileSystem fs, final Path snapshotDir)
    throws IOException {
  final Set<String> walNames = new HashSet<String>();
  // Visitor simply records each log file name; the server name is not needed.
  FSVisitor.LogFileVisitor collector = new FSVisitor.LogFileVisitor() {
    @Override
    public void logFile(final String server, final String logfile) throws IOException {
      walNames.add(logfile);
    }
  };
  visitLogFiles(fs, snapshotDir, collector);
  return walNames;
}
示例7: listHFileNames
import org.apache.hadoop.hbase.util.FSVisitor; //导入依赖的package包/类
/**
 * List all the HFile names in the given table, sorted alphabetically.
 *
 * @param fs the FileSystem where the table lives
 * @param tableDir directory of the table
 * @return sorted list of the current HFile names in the table (may be empty)
 * @throws IOException on unexpected error reading the FS
 */
public static ArrayList<String> listHFileNames(final FileSystem fs, final Path tableDir)
    throws IOException {
  final ArrayList<String> hfileNames = new ArrayList<String>();
  // Collect every store file name; region and family are not needed here.
  FSVisitor.StoreFileVisitor collector = new FSVisitor.StoreFileVisitor() {
    @Override
    public void storeFile(final String region, final String family, final String hfileName)
        throws IOException {
      hfileNames.add(hfileName);
    }
  };
  FSVisitor.visitTableStoreFiles(fs, tableDir, collector);
  Collections.sort(hfileNames);
  return hfileNames;
}
示例8: setupTable
import org.apache.hadoop.hbase.util.FSVisitor; //导入依赖的package包/类
/**
 * Loads {@code NUM_ROWS} rows into a fresh table and records the paths of the
 * store files it produced into {@code storeFiles}.
 *
 * <p>Flushes every {@code ROW_PER_FILE} rows so several store files are
 * created; the flush cadence relies on the post-increment of {@code rowCount}
 * below, so the statement order must not be changed.
 *
 * @param tableName name of the table to create and populate
 * @throws IOException on unexpected error talking to the cluster or FS
 */
private void setupTable(final TableName tableName) throws IOException {
  // load the table
  Table table = UTIL.createTable(tableName, FAMILY_NAME);
  try {
    rowCount = 0;
    byte[] value = new byte[1024];
    byte[] q = Bytes.toBytes("q");
    while (rowCount < NUM_ROWS) {
      // Zero-padded row key keeps rows in insertion order.
      Put put = new Put(Bytes.toBytes(String.format("%010d", rowCount)));
      put.setDurability(Durability.SKIP_WAL);
      put.add(FAMILY_NAME, q, value);
      table.put(put);
      // Post-increment: flush fires at rowCount 0, ROW_PER_FILE, 2*ROW_PER_FILE, ...
      if ((rowCount++ % ROW_PER_FILE) == 0) {
        // flush it
        ((HTable)table).flushCommits();
        UTIL.getHBaseAdmin().flush(tableName);
      }
    }
  } finally {
    // Final flush ensures the last batch of puts is persisted before we scan for files.
    UTIL.getHBaseAdmin().flush(tableName);
    table.close();
  }
  assertEquals(NUM_ROWS, rowCount);
  // get the store file paths
  storeFiles.clear();
  tableDir = FSUtils.getTableDir(getRootDir(), tableName);
  FSVisitor.visitTableStoreFiles(getFileSystem(), tableDir, new FSVisitor.StoreFileVisitor() {
    @Override
    public void storeFile(final String region, final String family, final String hfile)
        throws IOException {
      // Record the origin path (the real file location) of each store file.
      HFileLink link = HFileLink.build(UTIL.getConfiguration(), tableName, region, family, hfile);
      storeFiles.add(link.getOriginPath());
    }
  });
  assertTrue("Expected at least " + NUM_FILES + " store files", storeFiles.size() >= NUM_FILES);
  LOG.info("Store files: " + storeFiles);
}
示例9: verifyRegion
import org.apache.hadoop.hbase.util.FSVisitor; //导入依赖的package包/类
/**
 * Verify that the region (regioninfo, hfiles) are valid
 *
 * @param fs the FileSystem instance
 * @param snapshotDir snapshot directory to check
 * @param region the region to check
 * @throws CorruptedSnapshotException if the region info is missing or mismatched
 * @throws IOException if an error occurred while reading the region data
 */
private void verifyRegion(final FileSystem fs, final Path snapshotDir, final HRegionInfo region)
    throws IOException {
  // make sure we have region in the snapshot
  Path regionDir = new Path(snapshotDir, region.getEncodedName());
  // make sure we have the region info in the snapshot
  Path regionInfo = new Path(regionDir, HRegion.REGIONINFO_FILE);
  // make sure the file exists
  if (!fs.exists(regionInfo)) {
    throw new CorruptedSnapshotException("No region info found for region:" + region, snapshot);
  }
  HRegionInfo found = new HRegionInfo();
  // try-with-resources guarantees the stream is closed even when verification throws
  try (FSDataInputStream in = fs.open(regionInfo)) {
    found.readFields(in);
    if (!region.equals(found)) {
      throw new CorruptedSnapshotException("Found region info (" + found
        + ") doesn't match expected region:" + region, snapshot);
    }
  }
  // make sure we have the expected recovered edits files
  TakeSnapshotUtils.verifyRecoveredEdits(fs, snapshotDir, found, snapshot);
  // make sure we have all the expected store files
  SnapshotReferenceUtil.visitRegionStoreFiles(fs, regionDir, new FSVisitor.StoreFileVisitor() {
    @Override
    public void storeFile(final String regionNameSuffix, final String family,
        final String hfileName) throws IOException {
      verifyStoreFile(snapshotDir, region, family, hfileName);
    }
  });
}
示例10: getHLogNames
import org.apache.hadoop.hbase.util.FSVisitor; //导入依赖的package包/类
/**
 * Returns the log file names available in the snapshot.
 *
 * @param fs {@link FileSystem}
 * @param snapshotDir {@link Path} to the Snapshot directory
 * @throws IOException if an error occurred while scanning the directory
 * @return the names of hlogs in the specified snapshot
 */
public static Set<String> getHLogNames(final FileSystem fs, final Path snapshotDir)
    throws IOException {
  final Set<String> names = new HashSet<String>();
  visitLogFiles(fs, snapshotDir, new FSVisitor.LogFileVisitor() {
    @Override
    public void logFile(final String server, final String logfile) throws IOException {
      // Only the log file name is needed; the server name is ignored.
      names.add(logfile);
    }
  });
  return names;
}
示例11: listHFiles
import org.apache.hadoop.hbase.util.FSVisitor; //导入依赖的package包/类
/**
 * List all the HFiles in the given table
 *
 * @param fs the FileSystem where the table lives
 * @param tableDir directory of the table
 * @return array of the current HFiles in the table (could be a zero-length array)
 * @throws IOException on unexpected error reading the FS
 */
public static Path[] listHFiles(final FileSystem fs, final Path tableDir)
    throws IOException {
  final ArrayList<Path> hfiles = new ArrayList<Path>();
  FSVisitor.visitTableStoreFiles(fs, tableDir, new FSVisitor.StoreFileVisitor() {
    @Override
    public void storeFile(final String region, final String family, final String hfileName)
        throws IOException {
      // Reconstruct the full path: tableDir/region/family/hfileName
      hfiles.add(new Path(tableDir, new Path(region, new Path(family, hfileName))));
    }
  });
  return hfiles.toArray(new Path[hfiles.size()]);
}
示例12: listHFiles
import org.apache.hadoop.hbase.util.FSVisitor; //导入依赖的package包/类
/**
 * Enumerates every HFile currently stored under the given table directory.
 *
 * @param fs the FileSystem where the table lives
 * @param tableDir directory of the table
 * @return array of the current HFiles in the table (could be a zero-length array)
 * @throws IOException on unexpected error reading the FS
 */
public static Path[] listHFiles(final FileSystem fs, final Path tableDir)
    throws IOException {
  final ArrayList<Path> collected = new ArrayList<Path>();
  FSVisitor.visitTableStoreFiles(fs, tableDir, new FSVisitor.StoreFileVisitor() {
    @Override
    public void storeFile(final String region, final String family, final String hfileName)
        throws IOException {
      // Build tableDir/region/family/hfileName step by step.
      Path regionPath = new Path(tableDir, region);
      Path familyPath = new Path(regionPath, family);
      collected.add(new Path(familyPath, hfileName));
    }
  });
  return collected.toArray(new Path[collected.size()]);
}
示例13: verifyRegion
import org.apache.hadoop.hbase.util.FSVisitor; //导入依赖的package包/类
/**
 * Verify that the region (regioninfo, hfiles) are valid
 *
 * @param fs the FileSystem instance
 * @param snapshotDir snapshot directory to check
 * @param region the region to check
 * @throws CorruptedSnapshotException if the region info is missing or mismatched
 * @throws IOException if an error occurred while reading the region data
 */
private void verifyRegion(final FileSystem fs, final Path snapshotDir, final HRegionInfo region)
    throws IOException {
  // make sure we have region in the snapshot
  Path regionDir = new Path(snapshotDir, region.getEncodedName());
  // make sure we have the region info in the snapshot
  Path regionInfo = new Path(regionDir, HRegionFileSystem.REGION_INFO_FILE);
  // make sure the file exists
  if (!fs.exists(regionInfo)) {
    throw new CorruptedSnapshotException("No region info found for region:" + region, snapshot);
  }
  HRegionInfo found = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
  if (!region.equals(found)) {
    throw new CorruptedSnapshotException("Found region info (" + found
      + ") doesn't match expected region:" + region, snapshot);
  }
  // make sure we have the expected recovered edits files
  TakeSnapshotUtils.verifyRecoveredEdits(fs, snapshotDir, found, snapshot);
  // make sure we have all the expected store files
  SnapshotReferenceUtil.visitRegionStoreFiles(fs, regionDir, new FSVisitor.StoreFileVisitor() {
    @Override
    public void storeFile(final String regionNameSuffix, final String family,
        final String hfileName) throws IOException {
      verifyStoreFile(snapshotDir, region, family, hfileName);
    }
  });
}
示例14: corruptSnapshot
import org.apache.hadoop.hbase.util.FSVisitor; //导入依赖的package包/类
/**
 * Corrupt the specified snapshot by deleting some files.
 *
 * @param util {@link HBaseTestingUtility}
 * @param snapshotName name of the snapshot to corrupt
 * @return list of the corrupted HFile names
 * @throws IOException on unexpected error reading the FS
 */
public static ArrayList<String> corruptSnapshot(final HBaseTestingUtility util,
    final String snapshotName) throws IOException {
  final MasterFileSystem mfs = util.getHBaseCluster().getMaster().getMasterFileSystem();
  final FileSystem fs = mfs.getFileSystem();
  Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName,
      mfs.getRootDir());
  SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
  final TableName table = TableName.valueOf(snapshotDesc.getTable());
  // Parameterized list instead of the raw type.
  final ArrayList<String> corruptedFiles = new ArrayList<String>();
  SnapshotReferenceUtil.visitTableStoreFiles(fs, snapshotDir, new FSVisitor.StoreFileVisitor() {
    @Override
    public void storeFile(final String region, final String family, final String hfile)
        throws IOException {
      HFileLink link = HFileLink.create(util.getConfiguration(), table, region, family, hfile);
      // Delete every other referenced hfile so the snapshot becomes corrupted.
      if (corruptedFiles.size() % 2 == 0) {
        // delete(Path) is deprecated; delete(Path, true) is its documented equivalent.
        fs.delete(link.getAvailablePath(fs), true);
        corruptedFiles.add(hfile);
      }
    }
  });
  assertTrue(corruptedFiles.size() > 0);
  return corruptedFiles;
}
示例15: run
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.hbase.util.FSVisitor; //导入依赖的package包/类
/**
 * Tool entry point: with two args delegates to {@code run(dirPath, tableName)};
 * with three args scans the region directories under {@code dirPath} and groups
 * the discovered hfiles by column family before loading them.
 *
 * @param args {@code <dirPath> <tableName> [...]}
 * @return 0 on success, -1 on usage error or empty load result
 */
@Override
public int run(String[] args) throws Exception {
  if (args.length != 2 && args.length != 3) {
    usage();
    return -1;
  }
  String dirPath = args[0];
  TableName tableName = TableName.valueOf(args[1]);
  if (args.length == 2) {
    return !run(dirPath, tableName).isEmpty() ? 0 : -1;
  } else {
    // BUG FIX: byte[] has identity-based equals/hashCode, so a HashMap keyed on
    // byte[] never matches an existing family entry and each hfile got its own
    // map entry. A TreeMap with a content-based comparator groups correctly.
    Map<byte[], List<Path>> family2Files = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    FileSystem fs = FileSystem.get(getConf());
    for (FileStatus regionDir : fs.listStatus(new Path(dirPath))) {
      FSVisitor.visitRegionStoreFiles(fs, regionDir.getPath(), (region, family, hfileName) -> {
        Path path = new Path(regionDir.getPath(), new Path(family, hfileName));
        family2Files.computeIfAbsent(Bytes.toBytes(family), k -> new ArrayList<>()).add(path);
      });
    }
    return !run(family2Files, tableName).isEmpty() ? 0 : -1;
  }
}