This article collects typical usage examples of the Java class org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate. If you are wondering what BaseHFileCleanerDelegate is for and how to use it, the curated class examples below may help.
The BaseHFileCleanerDelegate class belongs to the org.apache.hadoop.hbase.master.cleaner package. Eight code examples of the class are shown below, sorted by popularity by default.
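Before the examples, a quick orientation: BaseHFileCleanerDelegate is the abstract base class that HFileCleaner plugins extend, and each delegate gets a veto over whether an archived hfile may be deleted. The sketch below is a minimal custom delegate, assuming a recent HBase where the hook is isFileDeletable(FileStatus) (Example 4 below shows an older API that passed a Path); the class name and the "keep-" naming rule are purely illustrative, not from any of the projects quoted below. Delegates are registered with the master through the hbase.master.hfilecleaner.plugins configuration property.

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;

// Hypothetical delegate: refuses to delete any archived hfile whose name starts with "keep-".
public class PrefixKeepingHFileCleaner extends BaseHFileCleanerDelegate {
  @Override
  public boolean isFileDeletable(FileStatus fStat) {
    // Returning false vetoes deletion; the HFileCleaner removes a file only
    // when every configured delegate agrees it is deletable.
    return !fStat.getPath().getName().startsWith("keep-");
  }
}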
Example 1: turnOnArchiving
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate; // import the required package/class
/**
 * Start archiving the given table for the given hfile cleaner
 * @param tableName table to archive
 * @param cleaner cleaner to check to make sure the change propagated
 * @return the cleaner's delegate list; its first element is the
 *         {@link LongTermArchivingHFileCleaner} that is managing archiving
 * @throws IOException on failure
 * @throws KeeperException on failure
 */
private List<BaseHFileCleanerDelegate> turnOnArchiving(String tableName, HFileCleaner cleaner)
    throws IOException, KeeperException {
  // turn on hfile retention
  LOG.debug("----Starting archiving for table:" + tableName);
  archivingClient.enableHFileBackupAsync(Bytes.toBytes(tableName));
  assertTrue("Archiving didn't get turned on", archivingClient.getArchivingEnabled(tableName));
  // wait for the archiver to get the notification
  List<BaseHFileCleanerDelegate> cleaners = cleaner.getDelegatesForTesting();
  LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
  while (!delegate.archiveTracker.keepHFiles(STRING_TABLE_NAME)) {
    // spin until propagation - should be fast
  }
  return cleaners;
}
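One caveat in this helper: the propagation wait is an unbounded busy-spin, so a missed notification would hang the test until a suite-level timeout fires. A bounded, non-spinning variant is a small change; this is a sketch, and the 30-second deadline, the sleep interval, and the AssertionError are assumptions rather than part of the original test (the enclosing method would also need to declare InterruptedException):

long deadline = System.currentTimeMillis() + 30_000;
while (!delegate.archiveTracker.keepHFiles(STRING_TABLE_NAME)) {
  if (System.currentTimeMillis() > deadline) {
    throw new AssertionError("Archiving was not propagated to the cleaner in time");
  }
  Thread.sleep(10); // yield instead of burning a core while we wait
}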
Example 2: setupCleanerWatching
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate; // import the required package/class
/**
 * Spy on the {@link LongTermArchivingHFileCleaner} to ensure we can catch when the cleaner has
 * seen all the files
 * @return a {@link CountDownLatch} to wait on that releases when the cleaner has been called at
 *         least the expected number of times.
 */
private CountDownLatch setupCleanerWatching(LongTermArchivingHFileCleaner cleaner,
    List<BaseHFileCleanerDelegate> cleaners, final int expected) {
  // replace the cleaner with one that we can check
  BaseHFileCleanerDelegate delegateSpy = Mockito.spy(cleaner);
  final int[] counter = new int[] { 0 };
  final CountDownLatch finished = new CountDownLatch(1);
  Mockito.doAnswer(new Answer<Iterable<FileStatus>>() {
    @Override
    public Iterable<FileStatus> answer(InvocationOnMock invocation) throws Throwable {
      counter[0]++;
      LOG.debug(counter[0] + "/ " + expected + ") Wrapping call to getDeletableFiles for files: "
          + invocation.getArguments()[0]);
      @SuppressWarnings("unchecked")
      Iterable<FileStatus> ret = (Iterable<FileStatus>) invocation.callRealMethod();
      if (counter[0] >= expected) finished.countDown();
      return ret;
    }
  }).when(delegateSpy).getDeletableFiles(Mockito.anyListOf(FileStatus.class));
  cleaners.set(0, delegateSpy);
  return finished;
}
Example 3: setupCleanerWatching
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate; // import the required package/class
/**
 * Spy on the {@link LongTermArchivingHFileCleaner} to ensure we can catch when the cleaner has
 * seen all the files
 * @return a {@link CountDownLatch} to wait on that releases when the cleaner has been called at
 *         least the expected number of times.
 */
private CountDownLatch setupCleanerWatching(LongTermArchivingHFileCleaner cleaner,
    List<BaseHFileCleanerDelegate> cleaners, final int expected) {
  // replace the cleaner with one that we can check
  BaseHFileCleanerDelegate delegateSpy = Mockito.spy(cleaner);
  final int[] counter = new int[] { 0 };
  final CountDownLatch finished = new CountDownLatch(1);
  Mockito.doAnswer(new Answer<Iterable<FileStatus>>() {
    @Override
    public Iterable<FileStatus> answer(InvocationOnMock invocation) throws Throwable {
      counter[0]++;
      LOG.debug(counter[0] + "/ " + expected + ") Wrapping call to getDeletableFiles for files: "
          + invocation.getArgument(0));
      @SuppressWarnings("unchecked")
      Iterable<FileStatus> ret = (Iterable<FileStatus>) invocation.callRealMethod();
      if (counter[0] >= expected) finished.countDown();
      return ret;
    }
  }).when(delegateSpy).getDeletableFiles(Mockito.anyListOf(FileStatus.class));
  cleaners.set(0, delegateSpy);
  return finished;
}
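Examples 2 and 3 differ only in the argument accessor: getArguments()[0] is the Mockito 1.x idiom, while getArgument(0) arrived with Mockito 2. On Mockito 2+ with Java 8, the anonymous Answer can also collapse into a lambda, and the deprecated anyListOf(Class) matcher becomes anyList(); the following is a sketch, not taken from either project:

Mockito.doAnswer(invocation -> {
  counter[0]++;
  LOG.debug(counter[0] + "/ " + expected + ") Wrapping call to getDeletableFiles for files: "
      + invocation.getArgument(0));
  @SuppressWarnings("unchecked")
  Iterable<FileStatus> ret = (Iterable<FileStatus>) invocation.callRealMethod();
  if (counter[0] >= expected) finished.countDown();
  return ret;
}).when(delegateSpy).getDeletableFiles(Mockito.anyList());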
Example 4: setupCleanerWatching
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate; // import the required package/class
/**
 * Spy on the {@link LongTermArchivingHFileCleaner} to ensure we can catch when the cleaner has
 * seen all the files
 * @return a {@link CountDownLatch} to wait on that releases when the cleaner has been called at
 *         least the expected number of times.
 */
private CountDownLatch setupCleanerWatching(LongTermArchivingHFileCleaner cleaner,
    List<BaseHFileCleanerDelegate> cleaners, final int expected) {
  // replace the cleaner with one that we can check
  BaseHFileCleanerDelegate delegateSpy = Mockito.spy(cleaner);
  final int[] counter = new int[] { 0 };
  final CountDownLatch finished = new CountDownLatch(1);
  Mockito.doAnswer(new Answer<Boolean>() {
    @Override
    public Boolean answer(InvocationOnMock invocation) throws Throwable {
      counter[0]++;
      LOG.debug(counter[0] + "/ " + expected + ") Wrapping call to isFileDeletable for file: "
          + invocation.getArguments()[0]);
      Boolean ret = (Boolean) invocation.callRealMethod();
      if (counter[0] >= expected) finished.countDown();
      return ret;
    }
  }).when(delegateSpy).isFileDeletable(Mockito.any(Path.class));
  cleaners.set(0, delegateSpy);
  return finished;
}
Example 5: testArchivingOnSingleTable
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate; // import the required package/class
@Test(timeout = 300000)
public void testArchivingOnSingleTable() throws Exception {
  createArchiveDirectory();
  FileSystem fs = UTIL.getTestFileSystem();
  Path archiveDir = getArchiveDir();
  Path tableDir = getTableDir(STRING_TABLE_NAME);
  toCleanup.add(archiveDir);
  toCleanup.add(tableDir);
  Configuration conf = UTIL.getConfiguration();
  // setup the delegate
  Stoppable stop = new StoppableImplementation();
  HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
  List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
  final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
  // create the region
  HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM);
  Region region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
  loadFlushAndCompact(region, TEST_FAM);
  // get the current hfiles in the archive directory
  List<Path> files = getAllFiles(fs, archiveDir);
  if (files == null) {
    FSUtils.logFileSystemState(fs, UTIL.getDataTestDir(), LOG);
    throw new RuntimeException("Didn't archive any files!");
  }
  CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size());
  runCleaner(cleaner, finished, stop);
  // know the cleaner ran, so now check all the files again to make sure they are still there
  List<Path> archivedFiles = getAllFiles(fs, archiveDir);
  assertEquals("Archived files changed after running archive cleaner.", files, archivedFiles);
  // but we still have the archive directory
  assertTrue(fs.exists(HFileArchiveUtil.getArchivePath(UTIL.getConfiguration())));
}
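The setupAndCreateCleaner helper is not shown in these snippets. A plausible sketch, assuming the standard five-argument HFileCleaner constructor (the 1000 ms period is an arbitrary choice), registers LongTermArchivingHFileCleaner as the only cleaner plugin and builds the chore directly:

private HFileCleaner setupAndCreateCleaner(Configuration conf, FileSystem fs, Path archiveDir,
    Stoppable stop) {
  // Only our archiving delegate should run, so overwrite the plugin list.
  conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
      LongTermArchivingHFileCleaner.class.getCanonicalName());
  // period, stopper, configuration, filesystem, directory to scan
  return new HFileCleaner(1000, stop, conf, fs, archiveDir);
}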
Example 6: testArchivingOnSingleTable
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate; // import the required package/class
@Test(timeout = 300000)
public void testArchivingOnSingleTable() throws Exception {
  createArchiveDirectory();
  FileSystem fs = UTIL.getTestFileSystem();
  Path archiveDir = getArchiveDir();
  Path tableDir = getTableDir(STRING_TABLE_NAME);
  toCleanup.add(archiveDir);
  toCleanup.add(tableDir);
  Configuration conf = UTIL.getConfiguration();
  // setup the delegate
  Stoppable stop = new StoppableImplementation();
  HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
  List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
  final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
  // create the region
  HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM);
  HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
  loadFlushAndCompact(region, TEST_FAM);
  // get the current hfiles in the archive directory
  List<Path> files = getAllFiles(fs, archiveDir);
  if (files == null) {
    FSUtils.logFileSystemState(fs, UTIL.getDataTestDir(), LOG);
    throw new RuntimeException("Didn't archive any files!");
  }
  CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size());
  runCleaner(cleaner, finished, stop);
  // know the cleaner ran, so now check all the files again to make sure they are still there
  List<Path> archivedFiles = getAllFiles(fs, archiveDir);
  assertEquals("Archived files changed after running archive cleaner.", files, archivedFiles);
  // but we still have the archive directory
  assertTrue(fs.exists(HFileArchiveUtil.getArchivePath(UTIL.getConfiguration())));
}
Example 7: testArchivingOnSingleTable
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate; // import the required package/class
@Test
public void testArchivingOnSingleTable() throws Exception {
  createArchiveDirectory();
  FileSystem fs = UTIL.getTestFileSystem();
  Path archiveDir = getArchiveDir();
  Path tableDir = getTableDir(STRING_TABLE_NAME);
  toCleanup.add(archiveDir);
  toCleanup.add(tableDir);
  Configuration conf = UTIL.getConfiguration();
  // setup the delegate
  Stoppable stop = new StoppableImplementation();
  HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
  List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
  final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
  // create the region
  HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM);
  HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
  loadFlushAndCompact(region, TEST_FAM);
  // get the current hfiles in the archive directory
  List<Path> files = getAllFiles(fs, archiveDir);
  if (files == null) {
    FSUtils.logFileSystemState(fs, UTIL.getDataTestDir(), LOG);
    throw new RuntimeException("Didn't archive any files!");
  }
  CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size());
  runCleaner(cleaner, finished, stop);
  // know the cleaner ran, so now check all the files again to make sure they are still there
  List<Path> archivedFiles = getAllFiles(fs, archiveDir);
  assertEquals("Archived files changed after running archive cleaner.", files, archivedFiles);
  // but we still have the archive directory
  assertTrue(fs.exists(HFileArchiveUtil.getArchivePath(UTIL.getConfiguration())));
}
Example 8: testArchivingOnSingleTable
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate; // import the required package/class
@Test(timeout = 300000)
public void testArchivingOnSingleTable() throws Exception {
  createArchiveDirectory();
  FileSystem fs = UTIL.getTestFileSystem();
  Path archiveDir = getArchiveDir();
  Path tableDir = getTableDir(STRING_TABLE_NAME);
  toCleanup.add(archiveDir);
  toCleanup.add(tableDir);
  Configuration conf = UTIL.getConfiguration();
  // setup the delegate
  Stoppable stop = new StoppableImplementation();
  HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
  List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
  final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
  // create the region
  ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.of(TEST_FAM);
  HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
  List<HRegion> regions = new ArrayList<>();
  regions.add(region);
  Mockito.doReturn(regions).when(rss).getRegions();
  final CompactedHFilesDischarger compactionCleaner =
      new CompactedHFilesDischarger(100, stop, rss, false);
  loadFlushAndCompact(region, TEST_FAM);
  compactionCleaner.chore();
  // get the current hfiles in the archive directory
  List<Path> files = getAllFiles(fs, archiveDir);
  if (files == null) {
    FSUtils.logFileSystemState(fs, UTIL.getDataTestDir(), LOG);
    throw new RuntimeException("Didn't archive any files!");
  }
  CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size());
  runCleaner(cleaner, finished, stop);
  // know the cleaner ran, so now check all the files again to make sure they are still there
  List<Path> archivedFiles = getAllFiles(fs, archiveDir);
  assertEquals("Archived files changed after running archive cleaner.", files, archivedFiles);
  // but we still have the archive directory
  assertTrue(fs.exists(HFileArchiveUtil.getArchivePath(UTIL.getConfiguration())));
}
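Example 8 targets a newer HBase where compacted-away store files are no longer archived inline with the compaction: the CompactedHFilesDischarger chore moves them into the archive, which is why the test runs compactionCleaner.chore() before listing the archive directory, and passing useExecutor=false makes the chore process regions synchronously on the calling thread. The rss field the example relies on is not shown in the snippet; a plausible setup is a plain Mockito mock (hypothetical, mirroring the doReturn call in the example):

// Hypothetical setup for the rss field used in Example 8:
RegionServerServices rss = Mockito.mock(RegionServerServices.class);
// The test then stubs getRegions() so the discharger can find the
// compacted-away store files of the test region:
// Mockito.doReturn(regions).when(rss).getRegions();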