This article collects typical usage examples of the Java method org.apache.hadoop.hbase.io.HFileLink.getBackReferencesDir. If you are unsure what HFileLink.getBackReferencesDir does, how to use it, or are looking for working examples, the curated snippets below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.io.HFileLink.
The sections below show 8 code examples of HFileLink.getBackReferencesDir, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
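Before diving into the examples, here is a minimal usage sketch of the method itself: HFileLink.getBackReferencesDir(parent, fileName) resolves the directory that holds back-references to a store file (in the HBase versions these examples target, a ".links-<fileName>" subdirectory of the store directory). The snippet below is a sketch, not production code: the paths and class name are hypothetical, and it assumes an HBase client on the classpath and a FileSystem reachable through the default Configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.HFileLink;

public class BackRefDirSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical store directory and store file name, for illustration only.
    Path storeDir = new Path("/hbase/archive/data/default/test-table/region/cf");
    String fileName = "1234567890";
    // Resolve the back-references directory for this store file.
    Path backRefDir = HFileLink.getBackReferencesDir(storeDir, fileName);
    // A missing or empty directory means no snapshot/clone still links to the file.
    FileStatus[] refs = fs.exists(backRefDir) ? fs.listStatus(backRefDir) : null;
    if (refs == null || refs.length == 0) {
      System.out.println("No back references: " + fileName + " is not linked elsewhere");
    } else {
      System.out.println(refs.length + " back reference(s) under " + backRefDir);
    }
  }
}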
Example 1: isArchivedFileStillReferenced
import org.apache.hadoop.hbase.io.HFileLink; // import the package/class the method depends on
/**
 * Check whether, for a given file in the archive, other snapshots/tables still
 * reference it.
 * @param filePath file path in the archive
 * @param snapshotFilesMap a map from store files in snapshots to the number of
 *          snapshots referring to each file
 * @return true if the file is still referenced, false otherwise
 */
private boolean isArchivedFileStillReferenced(final Path filePath,
    final Map<Path, Integer> snapshotFilesMap) {
  Integer c = snapshotFilesMap.get(filePath);
  // Check whether other snapshots, or a table created by clone_snapshot()
  // (via back-reference), still reference it.
  if ((c != null) && (c == 1)) {
    Path parentDir = filePath.getParent();
    Path backRefDir = HFileLink.getBackReferencesDir(parentDir, filePath.getName());
    try {
      if (FSUtils.listStatus(fs, backRefDir) == null) {
        return false;
      }
    } catch (IOException e) {
      // For the purposes of this method, an IOException is ignored and the file
      // is treated as still referenced.
    }
  }
  return true;
}
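Note the design choice in the catch block above: if listing the back-reference directory fails with an IOException, the method conservatively reports the file as still referenced. Wrongly deleting a store file that a snapshot or cloned table still links to would corrupt data, while keeping an unreferenced file around merely delays cleanup until the next pass.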
Example 2: testRestoreSnapshotDoesNotCreateBackRefLinks
import org.apache.hadoop.hbase.io.HFileLink; // import the package/class the method depends on
@Test
public void testRestoreSnapshotDoesNotCreateBackRefLinks() throws Exception {
  setupCluster();
  TableName tableName = TableName.valueOf("testRestoreSnapshotDoesNotCreateBackRefLinks");
  String snapshotName = "foo";
  try {
    createTableAndSnapshot(UTIL, tableName, snapshotName, getStartRow(), getEndRow(), 1);
    Path tmpTableDir = UTIL.getRandomDir();
    testRestoreSnapshotDoesNotCreateBackRefLinksInit(tableName, snapshotName, tmpTableDir);
    Path rootDir = FSUtils.getRootDir(UTIL.getConfiguration());
    for (Path regionDir : FSUtils.getRegionDirs(fs, FSUtils.getTableDir(rootDir, tableName))) {
      for (Path storeDir : FSUtils.getFamilyDirs(fs, regionDir)) {
        for (FileStatus status : fs.listStatus(storeDir)) {
          System.out.println(status.getPath());
          if (StoreFileInfo.isValid(status)) {
            Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(UTIL.getConfiguration(),
              tableName, regionDir.getName(), storeDir.getName());
            Path path = HFileLink.getBackReferencesDir(storeDir, status.getPath().getName());
            // assert that the back-references directory does not exist
            assertFalse("There is a back reference in " + path, fs.exists(path));
            path = HFileLink.getBackReferencesDir(archiveStoreDir, status.getPath().getName());
            // assert that the back-references directory does not exist
            assertFalse("There is a back reference in " + path, fs.exists(path));
          }
        }
      }
    }
  } finally {
    UTIL.getHBaseAdmin().deleteSnapshot(snapshotName);
    UTIL.deleteTable(tableName);
    tearDownCluster();
  }
}
Example 3: testRestoreSnapshotDoesNotCreateBackRefLinks
import org.apache.hadoop.hbase.io.HFileLink; // import the package/class the method depends on
@Test
public void testRestoreSnapshotDoesNotCreateBackRefLinks() throws Exception {
  setupCluster();
  TableName tableName = TableName.valueOf("testRestoreSnapshotDoesNotCreateBackRefLinks");
  String snapshotName = "foo";
  try {
    createTableAndSnapshot(UTIL, tableName, snapshotName, getStartRow(), getEndRow(), 1);
    Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName);
    testRestoreSnapshotDoesNotCreateBackRefLinksInit(tableName, snapshotName, tmpTableDir);
    Path rootDir = FSUtils.getRootDir(UTIL.getConfiguration());
    for (Path regionDir : FSUtils.getRegionDirs(fs, FSUtils.getTableDir(rootDir, tableName))) {
      for (Path storeDir : FSUtils.getFamilyDirs(fs, regionDir)) {
        for (FileStatus status : fs.listStatus(storeDir)) {
          System.out.println(status.getPath());
          if (StoreFileInfo.isValid(status)) {
            Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(UTIL.getConfiguration(),
              tableName, regionDir.getName(), storeDir.getName());
            Path path = HFileLink.getBackReferencesDir(storeDir, status.getPath().getName());
            // assert that the back-references directory does not exist
            assertFalse("There is a back reference in " + path, fs.exists(path));
            path = HFileLink.getBackReferencesDir(archiveStoreDir, status.getPath().getName());
            // assert that the back-references directory does not exist
            assertFalse("There is a back reference in " + path, fs.exists(path));
          }
        }
      }
    }
  } finally {
    UTIL.getAdmin().deleteSnapshot(snapshotName);
    UTIL.deleteTable(tableName);
    tearDownCluster();
  }
}
Example 4: testHFileLinkCleaning
import org.apache.hadoop.hbase.io.HFileLink; // import the package/class the method depends on
@Test
public void testHFileLinkCleaning() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
  conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, HFileLinkCleaner.class.getName());
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = FileSystem.get(conf);
  final TableName tableName = TableName.valueOf("test-table");
  final TableName tableLinkName = TableName.valueOf("test-link");
  final String hfileName = "1234567890";
  final String familyName = "cf";
  HRegionInfo hri = new HRegionInfo(tableName);
  HRegionInfo hriLink = new HRegionInfo(tableLinkName);
  Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
  Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
    tableName, hri.getEncodedName(), familyName);
  Path archiveLinkStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
    tableLinkName, hriLink.getEncodedName(), familyName);
  // Create an hfile in the archive: archiveDir/tableName/region/cf/hfileName
  Path familyPath = getFamilyDirPath(archiveDir, tableName, hri.getEncodedName(), familyName);
  fs.mkdirs(familyPath);
  Path hfilePath = new Path(familyPath, hfileName);
  fs.createNewFile(hfilePath);
  // Create a link to the hfile
  Path familyLinkPath = getFamilyDirPath(rootDir, tableLinkName,
    hriLink.getEncodedName(), familyName);
  fs.mkdirs(familyLinkPath);
  HFileLink.create(conf, fs, familyLinkPath, hri, hfileName);
  Path linkBackRefDir = HFileLink.getBackReferencesDir(archiveStoreDir, hfileName);
  assertTrue(fs.exists(linkBackRefDir));
  FileStatus[] backRefs = fs.listStatus(linkBackRefDir);
  assertEquals(1, backRefs.length);
  Path linkBackRef = backRefs[0].getPath();
  // Initialize the cleaner
  final long ttl = 1000;
  conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
  Server server = new DummyServer();
  HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archiveDir);
  // Link back-reference cannot be removed while the link exists
  cleaner.chore();
  assertTrue(fs.exists(linkBackRef));
  assertTrue(fs.exists(hfilePath));
  // Link back-reference can be removed once the link table is archived
  fs.rename(FSUtils.getTableDir(rootDir, tableLinkName),
    FSUtils.getTableDir(archiveDir, tableLinkName));
  cleaner.chore();
  assertFalse("Link should be deleted", fs.exists(linkBackRef));
  // HFile can be removed after the TTL expires
  Thread.sleep(ttl * 2);
  cleaner.chore();
  assertFalse("HFile should be deleted", fs.exists(hfilePath));
  // Remove everything
  for (int i = 0; i < 4; ++i) {
    Thread.sleep(ttl * 2);
    cleaner.chore();
  }
  assertFalse("HFile should be deleted", fs.exists(FSUtils.getTableDir(archiveDir, tableName)));
  assertFalse("Link should be deleted",
    fs.exists(FSUtils.getTableDir(archiveDir, tableLinkName)));
}
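The remaining examples are variants of this test from different HBase versions, and they all exercise the same behavior: HFileLinkCleaner keeps an archived hfile alive while its back-reference directory (obtained via HFileLink.getBackReferencesDir) is non-empty, and keeps a back-reference alive while the corresponding link file still exists, so each cleaner.chore() call can only advance deletion one step at a time.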
Example 5: testHFileLinkCleaning
import org.apache.hadoop.hbase.io.HFileLink; // import the package/class the method depends on
@Test
public void testHFileLinkCleaning() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(HConstants.HBASE_DIR, TEST_UTIL.getDataTestDir().toString());
  conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, HFileLinkCleaner.class.getName());
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = FileSystem.get(conf);
  final String tableName = "test-table";
  final String tableLinkName = "test-link";
  final String hfileName = "1234567890";
  final String familyName = "cf";
  HRegionInfo hri = new HRegionInfo(Bytes.toBytes(tableName));
  HRegionInfo hriLink = new HRegionInfo(Bytes.toBytes(tableLinkName));
  Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
  Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
    tableName, hri.getEncodedName(), familyName);
  Path archiveLinkStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
    tableLinkName, hriLink.getEncodedName(), familyName);
  // Create an hfile in the archive: archiveDir/tableName/region/cf/hfileName
  Path familyPath = getFamilyDirPath(archiveDir, tableName, hri.getEncodedName(), familyName);
  fs.mkdirs(familyPath);
  Path hfilePath = new Path(familyPath, hfileName);
  fs.createNewFile(hfilePath);
  // Create a link to the hfile
  Path familyLinkPath = getFamilyDirPath(rootDir, tableLinkName,
    hriLink.getEncodedName(), familyName);
  fs.mkdirs(familyLinkPath);
  HFileLink.create(conf, fs, familyLinkPath, hri, hfileName);
  Path linkBackRefDir = HFileLink.getBackReferencesDir(archiveStoreDir, hfileName);
  assertTrue(fs.exists(linkBackRefDir));
  FileStatus[] backRefs = fs.listStatus(linkBackRefDir);
  assertEquals(1, backRefs.length);
  Path linkBackRef = backRefs[0].getPath();
  // Initialize the cleaner
  final long ttl = 1000;
  conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
  Server server = new DummyServer();
  HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archiveDir);
  // Link back-reference cannot be removed while the link exists
  cleaner.chore();
  assertTrue(fs.exists(linkBackRef));
  assertTrue(fs.exists(hfilePath));
  // Link back-reference can be removed once the link table is archived
  fs.rename(new Path(rootDir, tableLinkName), new Path(archiveDir, tableLinkName));
  cleaner.chore();
  assertFalse("Link should be deleted", fs.exists(linkBackRef));
  // HFile can be removed after the TTL expires
  Thread.sleep(ttl * 2);
  cleaner.chore();
  assertFalse("HFile should be deleted", fs.exists(hfilePath));
  // Remove everything
  for (int i = 0; i < 4; ++i) {
    Thread.sleep(ttl * 2);
    cleaner.chore();
  }
  assertFalse("HFile should be deleted", fs.exists(new Path(archiveDir, tableName)));
  assertFalse("Link should be deleted", fs.exists(new Path(archiveDir, tableLinkName)));
  cleaner.interrupt();
}
Example 6: testHFileLinkCleaning
import org.apache.hadoop.hbase.io.HFileLink; // import the package/class the method depends on
@Test
public void testHFileLinkCleaning() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
  conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, HFileLinkCleaner.class.getName());
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = FileSystem.get(conf);
  final TableName tableName = TableName.valueOf("test-table");
  final TableName tableLinkName = TableName.valueOf("test-link");
  final String hfileName = "1234567890";
  final String familyName = "cf";
  HRegionInfo hri = new HRegionInfo(tableName);
  HRegionInfo hriLink = new HRegionInfo(tableLinkName);
  Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
  Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
    tableName, hri.getEncodedName(), familyName);
  Path archiveLinkStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
    tableLinkName, hriLink.getEncodedName(), familyName);
  // Create an hfile in the archive: archiveDir/tableName/region/cf/hfileName
  Path familyPath = getFamilyDirPath(archiveDir, tableName, hri.getEncodedName(), familyName);
  fs.mkdirs(familyPath);
  Path hfilePath = new Path(familyPath, hfileName);
  fs.createNewFile(hfilePath);
  // Create a link to the hfile
  Path familyLinkPath = getFamilyDirPath(rootDir, tableLinkName,
    hriLink.getEncodedName(), familyName);
  fs.mkdirs(familyLinkPath);
  HFileLink.create(conf, fs, familyLinkPath, hri, hfileName);
  Path linkBackRefDir = HFileLink.getBackReferencesDir(archiveStoreDir, hfileName);
  assertTrue(fs.exists(linkBackRefDir));
  FileStatus[] backRefs = fs.listStatus(linkBackRefDir);
  assertEquals(1, backRefs.length);
  Path linkBackRef = backRefs[0].getPath();
  // Initialize the cleaner
  final long ttl = 1000;
  conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
  Server server = new DummyServer();
  HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archiveDir);
  // Link back-reference cannot be removed while the link exists
  cleaner.chore();
  assertTrue(fs.exists(linkBackRef));
  assertTrue(fs.exists(hfilePath));
  // Link back-reference can be removed once the link table is archived
  fs.rename(FSUtils.getTableDir(rootDir, tableLinkName),
    FSUtils.getTableDir(archiveDir, tableLinkName));
  cleaner.chore();
  assertFalse("Link should be deleted", fs.exists(linkBackRef));
  // HFile can be removed after the TTL expires
  Thread.sleep(ttl * 2);
  cleaner.chore();
  assertFalse("HFile should be deleted", fs.exists(hfilePath));
  // Remove everything
  for (int i = 0; i < 4; ++i) {
    Thread.sleep(ttl * 2);
    cleaner.chore();
  }
  assertFalse("HFile should be deleted", fs.exists(FSUtils.getTableDir(archiveDir, tableName)));
  assertFalse("Link should be deleted",
    fs.exists(FSUtils.getTableDir(archiveDir, tableLinkName)));
  cleaner.interrupt();
}
Example 7: testHFileLinkCleaning
import org.apache.hadoop.hbase.io.HFileLink; // import the package/class the method depends on
@Test
public void testHFileLinkCleaning() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
  conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, HFileLinkCleaner.class.getName());
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = FileSystem.get(conf);
  final TableName tableName = TableName.valueOf(name.getMethodName());
  final TableName tableLinkName = TableName.valueOf(name.getMethodName() + "-link");
  final String hfileName = "1234567890";
  final String familyName = "cf";
  HRegionInfo hri = new HRegionInfo(tableName);
  HRegionInfo hriLink = new HRegionInfo(tableLinkName);
  Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
  Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
    tableName, hri.getEncodedName(), familyName);
  Path archiveLinkStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
    tableLinkName, hriLink.getEncodedName(), familyName);
  // Create an hfile in the archive: archiveDir/tableName/region/cf/hfileName
  Path familyPath = getFamilyDirPath(archiveDir, tableName, hri.getEncodedName(), familyName);
  fs.mkdirs(familyPath);
  Path hfilePath = new Path(familyPath, hfileName);
  fs.createNewFile(hfilePath);
  // Create a link to the hfile
  Path familyLinkPath = getFamilyDirPath(rootDir, tableLinkName,
    hriLink.getEncodedName(), familyName);
  fs.mkdirs(familyLinkPath);
  HFileLink.create(conf, fs, familyLinkPath, hri, hfileName);
  Path linkBackRefDir = HFileLink.getBackReferencesDir(archiveStoreDir, hfileName);
  assertTrue(fs.exists(linkBackRefDir));
  FileStatus[] backRefs = fs.listStatus(linkBackRefDir);
  assertEquals(1, backRefs.length);
  Path linkBackRef = backRefs[0].getPath();
  // Initialize the cleaner
  final long ttl = 1000;
  conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
  Server server = new DummyServer();
  HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archiveDir);
  // Link back-reference cannot be removed while the link exists
  cleaner.chore();
  assertTrue(fs.exists(linkBackRef));
  assertTrue(fs.exists(hfilePath));
  // Link back-reference can be removed once the link table is archived
  fs.rename(FSUtils.getTableDir(rootDir, tableLinkName),
    FSUtils.getTableDir(archiveDir, tableLinkName));
  cleaner.chore();
  assertFalse("Link should be deleted", fs.exists(linkBackRef));
  // HFile can be removed after the TTL expires
  Thread.sleep(ttl * 2);
  cleaner.chore();
  assertFalse("HFile should be deleted", fs.exists(hfilePath));
  // Remove everything
  for (int i = 0; i < 4; ++i) {
    Thread.sleep(ttl * 2);
    cleaner.chore();
  }
  assertFalse("HFile should be deleted", fs.exists(FSUtils.getTableDir(archiveDir, tableName)));
  assertFalse("Link should be deleted",
    fs.exists(FSUtils.getTableDir(archiveDir, tableLinkName)));
}
Example 8: testHFileLinkCleaning
import org.apache.hadoop.hbase.io.HFileLink; // import the package/class the method depends on
@Test
public void testHFileLinkCleaning() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(HConstants.HBASE_DIR, TEST_UTIL.getDataTestDir().toString());
  conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
    "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner," +
    "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner");
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = FileSystem.get(conf);
  final String tableName = "test-table";
  final String tableLinkName = "test-link";
  final String hfileName = "1234567890";
  final String familyName = "cf";
  HRegionInfo hri = new HRegionInfo(Bytes.toBytes(tableName));
  HRegionInfo hriLink = new HRegionInfo(Bytes.toBytes(tableLinkName));
  Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
  Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
    tableName, hri.getEncodedName(), familyName);
  Path archiveLinkStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
    tableLinkName, hriLink.getEncodedName(), familyName);
  // Create an hfile in the archive: archiveDir/tableName/region/cf/hfileName
  Path familyPath = getFamilyDirPath(archiveDir, tableName, hri.getEncodedName(), familyName);
  fs.mkdirs(familyPath);
  Path hfilePath = new Path(familyPath, hfileName);
  fs.createNewFile(hfilePath);
  // Create a link to the hfile
  Path familyLinkPath = getFamilyDirPath(rootDir, tableLinkName,
    hriLink.getEncodedName(), familyName);
  fs.mkdirs(familyLinkPath);
  HFileLink.create(conf, fs, familyLinkPath, hri, hfileName);
  Path linkBackRefDir = HFileLink.getBackReferencesDir(archiveStoreDir, hfileName);
  assertTrue(fs.exists(linkBackRefDir));
  FileStatus[] backRefs = fs.listStatus(linkBackRefDir);
  assertEquals(1, backRefs.length);
  Path linkBackRef = backRefs[0].getPath();
  // Initialize the cleaner
  final long ttl = 1000;
  conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
  Server server = new DummyServer();
  HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archiveDir);
  // Link back-reference cannot be removed while the link exists
  Thread.sleep(ttl * 2);
  cleaner.chore();
  assertTrue(fs.exists(linkBackRef));
  assertTrue(fs.exists(hfilePath));
  // Link back-reference can be removed once the link table is archived
  fs.rename(new Path(rootDir, tableLinkName), new Path(archiveDir, tableLinkName));
  Thread.sleep(ttl * 2);
  cleaner.chore();
  assertFalse("Link should be deleted", fs.exists(linkBackRef));
  // HFile can be removed after the TTL expires
  Thread.sleep(ttl * 2);
  cleaner.chore();
  assertFalse("HFile should be deleted", fs.exists(hfilePath));
  // Remove everything
  for (int i = 0; i < 4; ++i) {
    Thread.sleep(ttl * 2);
    cleaner.chore();
  }
  assertFalse("HFile should be deleted", fs.exists(new Path(archiveDir, tableName)));
  assertFalse("Link should be deleted", fs.exists(new Path(archiveDir, tableLinkName)));
  cleaner.interrupt();
}