This article rounds up typical usage examples of the Java method org.apache.hadoop.hbase.util.FSUtils.logFileSystemState. If you are wondering what FSUtils.logFileSystemState does, or how to use it in practice, the curated examples below should help; you can also read up on the enclosing class, org.apache.hadoop.hbase.util.FSUtils, for more
context.
Eleven code examples of FSUtils.logFileSystemState are shown below, sorted by popularity by default.
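Before diving into the examples: logFileSystemState recursively logs every file and directory under the given path, which is why the examples below use it to inspect snapshot, restore, and archiving state on disk. As a quick orientation, here is a minimal, hypothetical standalone caller; the class name LogFsStateDemo and the main-method setup are illustrative assumptions, not taken from any of the examples:
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.FSUtils;

public class LogFsStateDemo {
  private static final Log LOG = LogFactory.getLog(LogFsStateDemo.class);

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    // Recursively log every file and directory under the HBase root dir.
    Path rootDir = FSUtils.getRootDir(conf);
    FSUtils.logFileSystemState(fs, rootDir, LOG);
  }
}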
Example 1: testSnapshots
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
@Test (timeout=300000)
public void testSnapshots() throws IOException, InterruptedException {
  String[][] snapshots = {snapshot1Keys, snapshot2Keys};
  for (int i = 1; i <= snapshots.length; i++) {
    for (TableName table : tables) {
      TEST_UTIL.getHBaseAdmin().cloneSnapshot(table + "_snapshot" + i,
          TableName.valueOf(table + "_clone" + i));
      FSUtils.logFileSystemState(FileSystem.get(TEST_UTIL.getConfiguration()),
          FSUtils.getRootDir(TEST_UTIL.getConfiguration()), LOG);
      int count = 0;
      for (Result res : new HTable(TEST_UTIL.getConfiguration(), table + "_clone" + i)
          .getScanner(new Scan())) {
        assertEquals(snapshots[i - 1][count++], Bytes.toString(res.getRow()));
      }
      Assert.assertEquals(table + "_snapshot" + i, snapshots[i - 1].length, count);
    }
  }
}
Example 2: copySnapshotForScanner
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
/**
 * Copy the snapshot files for a snapshot scanner; discards meta changes.
 * @param conf the current configuration
 * @param fs filesystem holding the HBase root directory and the snapshot
 * @param rootDir HBase root directory
 * @param restoreDir directory to restore the snapshot into; must be on the same
 *   filesystem as rootDir but not a subdirectory of it
 * @param snapshotName name of the snapshot to copy
 * @throws IOException if the snapshot files cannot be read or restored
 */
public static RestoreMetaChanges copySnapshotForScanner(Configuration conf, FileSystem fs,
    Path rootDir, Path restoreDir, String snapshotName) throws IOException {
  // ensure the restore dir is on the same filesystem as, but not under, the root dir
  if (!restoreDir.getFileSystem(conf).getUri().equals(rootDir.getFileSystem(conf).getUri())) {
    throw new IllegalArgumentException("Filesystems for restore directory and HBase root "
        + "directory should be the same");
  }
  if (restoreDir.toUri().getPath().startsWith(rootDir.toUri().getPath())) {
    throw new IllegalArgumentException("Restore directory cannot be a sub directory of HBase "
        + "root directory. RootDir: " + rootDir + ", restoreDir: " + restoreDir);
  }

  Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
  SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
  SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc);

  MonitoredTask status = TaskMonitor.get().createStatus(
      "Restoring snapshot '" + snapshotName + "' to directory " + restoreDir);
  ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher();

  // we send createBackRefs=false so that restored hfiles do not create back reference links
  // in the base hbase root dir.
  RestoreSnapshotHelper helper = new RestoreSnapshotHelper(conf, fs,
      manifest, manifest.getTableDescriptor(), restoreDir, monitor, status, false);
  RestoreMetaChanges metaChanges = helper.restoreHdfsRegions(); // TODO: parallelize.

  if (LOG.isDebugEnabled()) {
    LOG.debug("Restored table dir: " + restoreDir);
    FSUtils.logFileSystemState(fs, restoreDir, LOG);
  }
  return metaChanges;
}
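As a usage note for Example 2: in HBase this method belongs to RestoreSnapshotHelper, and a caller might invoke it roughly as sketched below. This is a minimal sketch, assuming that class location; the snapshot name and restore path are made-up placeholders.
// Minimal caller sketch for copySnapshotForScanner (names below are placeholders).
Configuration conf = HBaseConfiguration.create();
Path rootDir = FSUtils.getRootDir(conf);
FileSystem fs = rootDir.getFileSystem(conf);
// Must be on the same filesystem as rootDir but not under it, as enforced above.
Path restoreDir = new Path("/tmp/restore-for-scanner");
RestoreMetaChanges changes =
    RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, "my_snapshot");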
Example 3: testArchivingOnSingleTable
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
@Test (timeout=300000)
public void testArchivingOnSingleTable() throws Exception {
  createArchiveDirectory();
  FileSystem fs = UTIL.getTestFileSystem();
  Path archiveDir = getArchiveDir();
  Path tableDir = getTableDir(STRING_TABLE_NAME);
  toCleanup.add(archiveDir);
  toCleanup.add(tableDir);

  Configuration conf = UTIL.getConfiguration();
  // setup the delegate
  Stoppable stop = new StoppableImplementation();
  HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
  List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
  final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);

  // create the region
  HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM);
  Region region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
  loadFlushAndCompact(region, TEST_FAM);

  // get the current hfiles in the archive directory
  List<Path> files = getAllFiles(fs, archiveDir);
  if (files == null) {
    FSUtils.logFileSystemState(fs, UTIL.getDataTestDir(), LOG);
    throw new RuntimeException("Didn't archive any files!");
  }
  CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size());

  runCleaner(cleaner, finished, stop);

  // now that we know the cleaner ran, check all the files again to make sure they are still there
  List<Path> archivedFiles = getAllFiles(fs, archiveDir);
  assertEquals("Archived files changed after running archive cleaner.", files, archivedFiles);

  // but we still have the archive directory
  assertTrue(fs.exists(HFileArchiveUtil.getArchivePath(UTIL.getConfiguration())));
}
Example 4: testRestore
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
/**
 * Execute the restore operation
 * @param snapshotDir The snapshot directory to use as "restore source"
 * @param sd The snapshot descriptor
 * @param htdClone The HTableDescriptor of the table to restore/clone.
 */
public void testRestore(final Path snapshotDir, final SnapshotDescription sd,
    final HTableDescriptor htdClone) throws IOException {
  LOG.debug("pre-restore table=" + htdClone.getTableName() + " snapshot=" + snapshotDir);
  FSUtils.logFileSystemState(fs, rootDir, LOG);

  new FSTableDescriptors(conf).createTableDescriptor(htdClone);
  RestoreSnapshotHelper helper = getRestoreHelper(rootDir, snapshotDir, sd, htdClone);
  helper.restoreHdfsRegions();

  LOG.debug("post-restore table=" + htdClone.getTableName() + " snapshot=" + snapshotDir);
  FSUtils.logFileSystemState(fs, rootDir, LOG);
}
Example 5: logFileSystemState
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
public void logFileSystemState(Log log) throws IOException {
  FSUtils.logFileSystemState(fs, rootdir, log);
}
Example 6: testOfflineTableSnapshot
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
/**
 * Test snapshotting a table that is offline
 * @throws Exception
 */
@Test (timeout=300000)
public void testOfflineTableSnapshot() throws Exception {
  Admin admin = UTIL.getHBaseAdmin();
  // make sure we don't fail on listing snapshots
  SnapshotTestingUtils.assertNoSnapshots(admin);

  // put some stuff in the table
  HTable table = new HTable(UTIL.getConfiguration(), TABLE_NAME);
  UTIL.loadTable(table, TEST_FAM, false);

  LOG.debug("FS state before disable:");
  FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);

  // XXX if this is flakey, might want to consider using the async version and looping as
  // disableTable can succeed and still timeout.
  admin.disableTable(TABLE_NAME);

  LOG.debug("FS state before snapshot:");
  FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);

  // take a snapshot of the disabled table
  final String SNAPSHOT_NAME = "offlineTableSnapshot";
  byte[] snapshot = Bytes.toBytes(SNAPSHOT_NAME);

  SnapshotDescription desc = SnapshotDescription.newBuilder()
      .setType(SnapshotDescription.Type.DISABLED)
      .setTable(STRING_TABLE_NAME)
      .setName(SNAPSHOT_NAME)
      .setVersion(SnapshotManifestV1.DESCRIPTOR_VERSION)
      .build();
  admin.snapshot(desc);
  LOG.debug("Snapshot completed.");

  // make sure we have the snapshot
  List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
      snapshot, TABLE_NAME);

  // make sure it's a valid snapshot
  FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
  Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();

  LOG.debug("FS state after snapshot:");
  FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);

  SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
      admin, fs);

  admin.deleteSnapshot(snapshot);
  snapshots = admin.listSnapshots();
  SnapshotTestingUtils.assertNoSnapshots(admin);
}
Example 7: testOfflineTableSnapshotWithEmptyRegions
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
@Test (timeout=300000)
public void testOfflineTableSnapshotWithEmptyRegions() throws Exception {
  // test with an empty table with one region
  Admin admin = UTIL.getHBaseAdmin();
  // make sure we don't fail on listing snapshots
  SnapshotTestingUtils.assertNoSnapshots(admin);

  LOG.debug("FS state before disable:");
  FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
  admin.disableTable(TABLE_NAME);

  LOG.debug("FS state before snapshot:");
  FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);

  // take a snapshot of the disabled table
  byte[] snapshot = Bytes.toBytes("testOfflineTableSnapshotWithEmptyRegions");
  admin.snapshot(snapshot, TABLE_NAME);
  LOG.debug("Snapshot completed.");

  // make sure we have the snapshot
  List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
      snapshot, TABLE_NAME);

  // make sure it's a valid snapshot
  FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
  Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();

  LOG.debug("FS state after snapshot:");
  FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);

  List<byte[]> emptyCfs = Lists.newArrayList(TEST_FAM); // no file in the region
  List<byte[]> nonEmptyCfs = Lists.newArrayList();
  SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, nonEmptyCfs, emptyCfs,
      rootDir, admin, fs);

  admin.deleteSnapshot(snapshot);
  snapshots = admin.listSnapshots();
  SnapshotTestingUtils.assertNoSnapshots(admin);
}
Example 8: validateTableCreation
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
public static void validateTableCreation(final HMaster master, final TableName tableName,
    final HRegionInfo[] regions, boolean hasFamilyDirs, String... family) throws IOException {
  // check filesystem
  final FileSystem fs = master.getMasterFileSystem().getFileSystem();
  final Path tableDir = FSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName);
  assertTrue(fs.exists(tableDir));
  FSUtils.logFileSystemState(fs, tableDir, LOG);

  List<Path> allRegionDirs = FSUtils.getRegionDirs(fs, tableDir);
  for (int i = 0; i < regions.length; ++i) {
    Path regionDir = new Path(tableDir, regions[i].getEncodedName());
    assertTrue(regions[i] + " region dir does not exist", fs.exists(regionDir));
    assertTrue(allRegionDirs.remove(regionDir));

    List<Path> allFamilyDirs = FSUtils.getFamilyDirs(fs, regionDir);
    for (int j = 0; j < family.length; ++j) {
      final Path familyDir = new Path(regionDir, family[j]);
      if (hasFamilyDirs) {
        assertTrue(family[j] + " family dir does not exist", fs.exists(familyDir));
        assertTrue(allFamilyDirs.remove(familyDir));
      } else {
        // TODO: WARN: Modify Table/Families does not create a family dir
        if (!fs.exists(familyDir)) {
          LOG.warn(family[j] + " family dir does not exist");
        }
        allFamilyDirs.remove(familyDir);
      }
    }
    assertTrue("found extraneous families: " + allFamilyDirs, allFamilyDirs.isEmpty());
  }
  assertTrue("found extraneous regions: " + allRegionDirs, allRegionDirs.isEmpty());

  // check meta
  assertTrue(MetaTableAccessor.tableExists(master.getConnection(), tableName));
  assertEquals(regions.length, countMetaRegions(master, tableName));

  // check htd
  HTableDescriptor htd = master.getTableDescriptors().get(tableName);
  assertTrue("table descriptor not found", htd != null);
  for (int i = 0; i < family.length; ++i) {
    assertTrue("family not found " + family[i], htd.getFamily(Bytes.toBytes(family[i])) != null);
  }
  assertEquals(family.length, htd.getFamilies().size());
}
Example 9: testWeNeverCacheTmpDirAndLoadIt
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
@Test
public void testWeNeverCacheTmpDirAndLoadIt() throws Exception {
  final AtomicInteger count = new AtomicInteger(0);
  // don't refresh the cache unless we tell it to
  long period = Long.MAX_VALUE;
  SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
      "test-snapshot-file-cache-refresh", new SnapshotFiles()) {
    @Override
    List<String> getSnapshotsInProgress() throws IOException {
      List<String> result = super.getSnapshotsInProgress();
      count.incrementAndGet();
      return result;
    }

    @Override
    public void triggerCacheRefreshForTesting() {
      super.triggerCacheRefreshForTesting();
    }
  };

  SnapshotMock.SnapshotBuilder complete =
      createAndTestSnapshotV1(cache, "snapshot", false, false);

  SnapshotMock.SnapshotBuilder inProgress =
      createAndTestSnapshotV1(cache, "snapshotInProgress", true, false);

  int countBeforeCheck = count.get();

  FSUtils.logFileSystemState(fs, rootDir, LOG);

  List<FileStatus> allStoreFiles = getStoreFilesForSnapshot(complete);
  Iterable<FileStatus> deletableFiles = cache.getUnreferencedFiles(allStoreFiles);
  assertTrue(Iterables.isEmpty(deletableFiles));
  // no need for tmp dir check as all files are accounted for.
  assertEquals(0, count.get() - countBeforeCheck);

  // add a random file to make sure we refresh
  FileStatus randomFile = mockStoreFile(UUID.randomUUID().toString());
  allStoreFiles.add(randomFile);
  deletableFiles = cache.getUnreferencedFiles(allStoreFiles);
  assertEquals(randomFile, Iterables.getOnlyElement(deletableFiles));
  assertEquals(1, count.get() - countBeforeCheck); // we check the tmp directory
}
Example 10: testReferenceToHFileLink
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
/**
 * This test creates an hfile and then the dir structures and files to verify that references
 * to hfilelinks (created by snapshot clones) can be properly interpreted.
 */
public void testReferenceToHFileLink() throws IOException {
  // force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
  Configuration testConf = new Configuration(this.conf);
  FSUtils.setRootDir(testConf, this.testDir);

  // adding legal table name chars to verify regex handles it.
  HRegionInfo hri = new HRegionInfo(TableName.valueOf("_original-evil-name"));
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
      testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTable()), hri);

  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  // Make a store file and write data to it. <root>/<tablename>/<rgn>/<cf>/<file>
  StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf, this.fs)
      .withFilePath(regionFs.createTempName())
      .withFileContext(meta)
      .build();
  writeStoreFile(writer);
  Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());

  // create link to store file. <root>/clone/region/<cf>/<hfile>-<region>-<table>
  HRegionInfo hriClone = new HRegionInfo(TableName.valueOf("clone"));
  HRegionFileSystem cloneRegionFs = HRegionFileSystem.createRegionOnFileSystem(
      testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTable()), hriClone);
  Path dstPath = cloneRegionFs.getStoreDir(TEST_FAMILY);
  HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
  Path linkFilePath = new Path(dstPath,
      HFileLink.createHFileLinkName(hri, storeFilePath.getName()));

  // create splits of the link.
  // <root>/clone/splitA/<cf>/<reftohfilelink>,
  // <root>/clone/splitB/<cf>/<reftohfilelink>
  HRegionInfo splitHriA = new HRegionInfo(hri.getTable(), null, SPLITKEY);
  HRegionInfo splitHriB = new HRegionInfo(hri.getTable(), SPLITKEY, null);
  StoreFile f = new StoreFile(fs, linkFilePath, testConf, cacheConf, BloomType.NONE);
  f.createReader();
  Path pathA = splitStoreFile(cloneRegionFs, splitHriA, TEST_FAMILY, f, SPLITKEY, true); // top
  Path pathB = splitStoreFile(cloneRegionFs, splitHriB, TEST_FAMILY, f, SPLITKEY, false); // bottom
  f.closeReader(true);

  // OK test the thing
  FSUtils.logFileSystemState(fs, this.testDir, LOG);

  // There is a case where a file with the hfilelink pattern is actually a daughter
  // reference to an hfile link; the code in StoreFile handles this case.
  // Try to open store file from link
  StoreFile hsfA = new StoreFile(this.fs, pathA, testConf, cacheConf, BloomType.NONE);

  // Now confirm that I can read from the ref to link
  int count = 1;
  HFileScanner s = hsfA.createReader().getScanner(false, false);
  s.seekTo();
  while (s.next()) {
    count++;
  }
  assertTrue(count > 0); // read some rows here

  // Try to open store file from link
  StoreFile hsfB = new StoreFile(this.fs, pathB, testConf, cacheConf, BloomType.NONE);

  // Now confirm that I can read from the ref to link
  HFileScanner sB = hsfB.createReader().getScanner(false, false);
  sB.seekTo();
  // count++ as seekTo() will advance the scanner
  count++;
  while (sB.next()) {
    count++;
  }

  // read the rest of the rows
  assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
}
Example 11: logFileSystemState
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
/**
 * Log the current state of the region
 *
 * @param LOG log to output information
 * @throws IOException if an unexpected exception occurs
 */
void logFileSystemState(final Log LOG) throws IOException {
  FSUtils.logFileSystemState(fs, this.getRegionDir(), LOG);
}