This page collects typical usage examples of the Java class org.apache.hadoop.hbase.util.StoppableImplementation. If you are wondering what StoppableImplementation is for or how to use it, the curated class examples below may help.
StoppableImplementation belongs to the org.apache.hadoop.hbase.util package. Fourteen code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java examples.
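For orientation before the examples: StoppableImplementation is a bare-bones implementation of HBase's Stoppable interface, used in tests as a kill switch for chores and background threads. A minimal sketch of what it amounts to (an illustration consistent with the examples below, not necessarily the verbatim HBase source):

import org.apache.hadoop.hbase.Stoppable;

// A stand-in Stoppable: stop() flips a volatile flag, and anything
// polling isStopped() (for example a cleaner chore) shuts itself down.
public class StoppableImplementation implements Stoppable {
  private volatile boolean stopped = false;

  @Override
  public void stop(String why) {
    this.stopped = true;
  }

  @Override
  public boolean isStopped() {
    return this.stopped;
  }
}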
Example 1: testSavesFilesOnRequest
import org.apache.hadoop.hbase.util.StoppableImplementation; // import the required package/class
@Test
public void testSavesFilesOnRequest() throws Exception {
  Stoppable stop = new StoppableImplementation();
  Configuration conf = UTIL.getConfiguration();
  Path testDir = UTIL.getDataTestDir();
  FileSystem fs = UTIL.getTestFileSystem();
  String confKey = "hbase.test.cleaner.delegates";
  conf.set(confKey, NeverDelete.class.getName());
  AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
  // create the directory layout in the directory to clean
  Path parent = new Path(testDir, "parent");
  Path file = new Path(parent, "someFile");
  fs.mkdirs(parent);
  // touch a new file
  fs.create(file).close();
  assertTrue("Test file didn't get created.", fs.exists(file));
  // run the chore
  chore.chore();
  // verify the files were preserved, since the delegate vetoes every delete
  assertTrue("File got deleted although the delegate said to keep it", fs.exists(file));
  assertTrue("Directory got deleted although the delegate said to keep it", fs.exists(parent));
}
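The delegates NeverDelete and AlwaysDelete and the AllValidPaths chore used throughout Examples 1-5 are private test helpers whose definitions are not included on this page. Assuming the standard CleanerChore/BaseHFileCleanerDelegate API, they plausibly reduce to the following sketch (a reconstruction, not the verbatim test source):

// Delegate that vetoes every delete: files should survive the chore.
static class NeverDelete extends BaseHFileCleanerDelegate {
  @Override
  public boolean isFileDeletable(FileStatus fStat) {
    return false;
  }
}

// Delegate that approves every delete: files should be removed by the chore.
static class AlwaysDelete extends BaseHFileCleanerDelegate {
  @Override
  public boolean isFileDeletable(FileStatus fStat) {
    return true;
  }
}

// Chore that considers every path valid, so the outcome is decided
// purely by the configured delegates.
static class AllValidPaths extends CleanerChore<BaseHFileCleanerDelegate> {
  public AllValidPaths(String name, Stoppable s, Configuration conf, FileSystem fs,
      Path oldFileDir, String confKey) {
    super(name, Integer.MAX_VALUE, s, conf, fs, oldFileDir, confKey);
  }

  @Override
  protected boolean validate(Path file) {
    return true;
  }
}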
Example 2: testStoppedCleanerDoesNotDeleteFiles
import org.apache.hadoop.hbase.util.StoppableImplementation; // import the required package/class
@Test
public void testStoppedCleanerDoesNotDeleteFiles() throws Exception {
  Stoppable stop = new StoppableImplementation();
  Configuration conf = UTIL.getConfiguration();
  Path testDir = UTIL.getDataTestDir();
  FileSystem fs = UTIL.getTestFileSystem();
  String confKey = "hbase.test.cleaner.delegates";
  conf.set(confKey, AlwaysDelete.class.getName());
  AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
  // create a file in the top level directory
  Path topFile = new Path(testDir, "topFile");
  fs.create(topFile).close();
  assertTrue("Test file didn't get created.", fs.exists(topFile));
  // stop the chore
  stop.stop("testing stop");
  // run the chore
  chore.chore();
  // test that the file still exists
  assertTrue("File got deleted while chore was stopped", fs.exists(topFile));
}
Example 3: testMinimumNumberOfThreads
import org.apache.hadoop.hbase.util.StoppableImplementation; // import the required package/class
@Test
public void testMinimumNumberOfThreads() throws Exception {
  Stoppable stop = new StoppableImplementation();
  Configuration conf = UTIL.getConfiguration();
  Path testDir = UTIL.getDataTestDir();
  FileSystem fs = UTIL.getTestFileSystem();
  String confKey = "hbase.test.cleaner.delegates";
  conf.set(confKey, AlwaysDelete.class.getName());
  conf.set(CleanerChore.CHORE_POOL_SIZE, "2");
  AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
  int numProcs = Runtime.getRuntime().availableProcessors();
  // Sanity
  assertEquals(numProcs, chore.calculatePoolSize(Integer.toString(numProcs)));
  // The implementation does not allow us to set more threads than we have processors
  assertEquals(numProcs, chore.calculatePoolSize(Integer.toString(numProcs + 2)));
  // Force us into the branch that is multiplying 0.0 against the number of processors
  assertEquals(1, chore.calculatePoolSize("0.0"));
}
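Example 3 exercises CleanerChore.calculatePoolSize, which accepts either an absolute thread count or a decimal multiplier of the available processors. The assertions above imply behavior along these lines (an inference from the test, not the verbatim HBase implementation):

// Inferred contract: integer strings are capped at the processor count;
// decimal strings are scaled by the processor count, floored at one thread.
static int calculatePoolSize(String poolSize) {
  int procs = Runtime.getRuntime().availableProcessors();
  if (poolSize.matches("[1-9][0-9]*")) {
    return Math.min(Integer.parseInt(poolSize), procs);
  }
  double multiplier = Double.parseDouble(poolSize);
  return Math.max(1, (int) (procs * multiplier));
}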
Example 4: testDeletesEmptyDirectories
import org.apache.hadoop.hbase.util.StoppableImplementation; // import the required package/class
@Test
public void testDeletesEmptyDirectories() throws Exception {
  Stoppable stop = new StoppableImplementation();
  Configuration conf = UTIL.getConfiguration();
  Path testDir = UTIL.getDataTestDir();
  FileSystem fs = UTIL.getTestFileSystem();
  String confKey = "hbase.test.cleaner.delegates";
  conf.set(confKey, AlwaysDelete.class.getName());
  AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
  // create the directory layout in the directory to clean
  Path parent = new Path(testDir, "parent");
  Path child = new Path(parent, "child");
  Path emptyChild = new Path(parent, "emptyChild");
  Path file = new Path(child, "someFile");
  fs.mkdirs(child);
  fs.mkdirs(emptyChild);
  // touch a new file
  fs.create(file).close();
  // also create a file in the top level directory
  Path topFile = new Path(testDir, "topFile");
  fs.create(topFile).close();
  assertTrue("Test file didn't get created.", fs.exists(file));
  assertTrue("Test file didn't get created.", fs.exists(topFile));
  // run the chore
  chore.chore();
  // verify all the files got deleted
  assertFalse("File didn't get deleted", fs.exists(topFile));
  assertFalse("File didn't get deleted", fs.exists(file));
  assertFalse("Empty directory didn't get deleted", fs.exists(child));
  assertFalse("Empty directory didn't get deleted", fs.exists(parent));
}
Example 5: testDoesNotCheckDirectories
import org.apache.hadoop.hbase.util.StoppableImplementation; // import the required package/class
/**
 * Test to make sure that we don't attempt to ask the delegate whether or not we should preserve
 * a directory.
 * @throws Exception on failure
 */
@Test
public void testDoesNotCheckDirectories() throws Exception {
  Stoppable stop = new StoppableImplementation();
  Configuration conf = UTIL.getConfiguration();
  Path testDir = UTIL.getDataTestDir();
  FileSystem fs = UTIL.getTestFileSystem();
  String confKey = "hbase.test.cleaner.delegates";
  conf.set(confKey, AlwaysDelete.class.getName());
  AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
  // spy on the delegate to ensure that we don't check for directories
  AlwaysDelete delegate = (AlwaysDelete) chore.cleanersChain.get(0);
  AlwaysDelete spy = Mockito.spy(delegate);
  chore.cleanersChain.set(0, spy);
  // create the directory layout in the directory to clean
  Path parent = new Path(testDir, "parent");
  Path file = new Path(parent, "someFile");
  fs.mkdirs(parent);
  assertTrue("Test parent didn't get created.", fs.exists(parent));
  // touch a new file
  fs.create(file).close();
  assertTrue("Test file didn't get created.", fs.exists(file));
  FileStatus fStat = fs.getFileStatus(parent);
  chore.chore();
  // make sure we never checked the directory
  Mockito.verify(spy, Mockito.never()).isFileDeletable(fStat);
  Mockito.reset(spy);
}
Example 6: testArchivingOnSingleTable
import org.apache.hadoop.hbase.util.StoppableImplementation; // import the required package/class
@Test(timeout = 300000)
public void testArchivingOnSingleTable() throws Exception {
  createArchiveDirectory();
  FileSystem fs = UTIL.getTestFileSystem();
  Path archiveDir = getArchiveDir();
  Path tableDir = getTableDir(STRING_TABLE_NAME);
  toCleanup.add(archiveDir);
  toCleanup.add(tableDir);
  Configuration conf = UTIL.getConfiguration();
  // setup the delegate
  Stoppable stop = new StoppableImplementation();
  HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
  List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
  final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
  // create the region
  HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM);
  Region region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
  loadFlushAndCompact(region, TEST_FAM);
  // get the current hfiles in the archive directory
  List<Path> files = getAllFiles(fs, archiveDir);
  if (files == null) {
    FSUtils.logFileSystemState(fs, UTIL.getDataTestDir(), LOG);
    throw new RuntimeException("Didn't archive any files!");
  }
  CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size());
  runCleaner(cleaner, finished, stop);
  // we know the cleaner ran, so now check all the files again to make sure they are still there
  List<Path> archivedFiles = getAllFiles(fs, archiveDir);
  assertEquals("Archived files changed after running archive cleaner.", files, archivedFiles);
  // but we still have the archive directory
  assertTrue(fs.exists(HFileArchiveUtil.getArchivePath(UTIL.getConfiguration())));
}
Example 7: testFromClientSideWhileSplitting
import org.apache.hadoop.hbase.util.StoppableImplementation; // import the required package/class
/**
 * Tests that the client sees meta table changes as atomic during splits
 */
@Test
public void testFromClientSideWhileSplitting() throws Throwable {
  LOG.info("Starting testFromClientSideWhileSplitting");
  final TableName TABLENAME = TableName.valueOf("testFromClientSideWhileSplitting");
  final byte[] FAMILY = Bytes.toBytes("family");
  // SplitTransaction will update the meta table by offlining the parent region, and adding info
  // for daughters.
  Table table = TEST_UTIL.createTable(TABLENAME, FAMILY);
  Stoppable stopper = new StoppableImplementation();
  RegionSplitter regionSplitter = new RegionSplitter(table);
  RegionChecker regionChecker = new RegionChecker(CONF, stopper, TABLENAME);
  final ChoreService choreService = new ChoreService("TEST_SERVER");
  choreService.scheduleChore(regionChecker);
  regionSplitter.start();
  // wait until the splitter is finished
  regionSplitter.join();
  stopper.stop(null);
  if (regionChecker.ex != null) {
    throw regionChecker.ex;
  }
  if (regionSplitter.ex != null) {
    throw regionSplitter.ex;
  }
  // one final check
  regionChecker.verify();
}
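In Example 7 the RegionChecker is a ScheduledChore driven by a ChoreService, with the shared StoppableImplementation acting as its shutdown signal; RegionChecker itself is not defined on this page. For orientation, a minimal ScheduledChore wired to a Stoppable might look like this (a hypothetical sketch with an invented HeartbeatChore, not the test's actual RegionChecker):

// Hypothetical chore: the ChoreService calls chore() once per period and
// cancels the chore when the stopper reports stopped.
static class HeartbeatChore extends ScheduledChore {
  private final Stoppable stopper;

  HeartbeatChore(Stoppable stopper) {
    super("heartbeat-chore", stopper, 1000 /* period in ms */);
    this.stopper = stopper;
  }

  @Override
  protected void chore() {
    System.out.println("still running; stopped=" + stopper.isStopped());
  }
}

// Usage: new ChoreService("demo").scheduleChore(new HeartbeatChore(stop));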
Example 8: testFromClientSideWhileSplitting
import org.apache.hadoop.hbase.util.StoppableImplementation; // import the required package/class
/**
 * Tests that the client sees meta table changes as atomic during splits
 */
@Test
public void testFromClientSideWhileSplitting() throws Throwable {
  LOG.info("Starting testFromClientSideWhileSplitting");
  final byte[] TABLENAME = Bytes.toBytes("testFromClientSideWhileSplitting");
  final byte[] FAMILY = Bytes.toBytes("family");
  // SplitTransaction will update the meta table by offlining the parent region, and adding info
  // for daughters.
  HTable table = TEST_UTIL.createTable(TABLENAME, FAMILY);
  Stoppable stopper = new StoppableImplementation();
  RegionSplitter regionSplitter = new RegionSplitter(table);
  RegionChecker regionChecker = new RegionChecker(conf, stopper, TABLENAME);
  regionChecker.start();
  regionSplitter.start();
  // wait until the splitter is finished
  regionSplitter.join();
  stopper.stop(null);
  if (regionChecker.ex != null) {
    throw regionChecker.ex;
  }
  if (regionSplitter.ex != null) {
    throw regionSplitter.ex;
  }
  // one final check
  regionChecker.verify();
}
Example 9: testArchivingOnSingleTable
import org.apache.hadoop.hbase.util.StoppableImplementation; // import the required package/class
@Test(timeout = 300000)
public void testArchivingOnSingleTable() throws Exception {
  createArchiveDirectory();
  FileSystem fs = UTIL.getTestFileSystem();
  Path archiveDir = getArchiveDir();
  Path tableDir = getTableDir(STRING_TABLE_NAME);
  toCleanup.add(archiveDir);
  toCleanup.add(tableDir);
  Configuration conf = UTIL.getConfiguration();
  // setup the delegate
  Stoppable stop = new StoppableImplementation();
  HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
  List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
  final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
  // create the region
  HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM);
  HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
  loadFlushAndCompact(region, TEST_FAM);
  // get the current hfiles in the archive directory
  List<Path> files = getAllFiles(fs, archiveDir);
  if (files == null) {
    FSUtils.logFileSystemState(fs, UTIL.getDataTestDir(), LOG);
    throw new RuntimeException("Didn't archive any files!");
  }
  CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size());
  runCleaner(cleaner, finished, stop);
  // we know the cleaner ran, so now check all the files again to make sure they are still there
  List<Path> archivedFiles = getAllFiles(fs, archiveDir);
  assertEquals("Archived files changed after running archive cleaner.", files, archivedFiles);
  // but we still have the archive directory
  assertTrue(fs.exists(HFileArchiveUtil.getArchivePath(UTIL.getConfiguration())));
}
Example 10: testFromClientSideWhileSplitting
import org.apache.hadoop.hbase.util.StoppableImplementation; // import the required package/class
/**
 * Tests that the client sees meta table changes as atomic during splits
 */
@Test
public void testFromClientSideWhileSplitting() throws Throwable {
  LOG.info("Starting testFromClientSideWhileSplitting");
  final TableName TABLENAME = TableName.valueOf("testFromClientSideWhileSplitting");
  final byte[] FAMILY = Bytes.toBytes("family");
  // SplitTransaction will update the meta table by offlining the parent region, and adding info
  // for daughters.
  Table table = TEST_UTIL.createTable(TABLENAME, FAMILY);
  Stoppable stopper = new StoppableImplementation();
  RegionSplitter regionSplitter = new RegionSplitter(table);
  RegionChecker regionChecker = new RegionChecker(conf, stopper, TABLENAME);
  regionChecker.start();
  regionSplitter.start();
  // wait until the splitter is finished
  regionSplitter.join();
  stopper.stop(null);
  if (regionChecker.ex != null) {
    throw regionChecker.ex;
  }
  if (regionSplitter.ex != null) {
    throw regionSplitter.ex;
  }
  // one final check
  regionChecker.verify();
}
Example 11: testFromClientSideWhileSplitting
import org.apache.hadoop.hbase.util.StoppableImplementation; // import the required package/class
/**
 * Tests that the client sees meta table changes as atomic during splits
 */
@Test
public void testFromClientSideWhileSplitting() throws Throwable {
  LOG.info("Starting testFromClientSideWhileSplitting");
  final TableName TABLENAME = TableName.valueOf("testFromClientSideWhileSplitting");
  final byte[] FAMILY = Bytes.toBytes("family");
  // SplitTransaction will update the meta table by offlining the parent region, and adding info
  // for daughters.
  HTable table = TEST_UTIL.createTable(TABLENAME, FAMILY);
  Stoppable stopper = new StoppableImplementation();
  RegionSplitter regionSplitter = new RegionSplitter(table);
  RegionChecker regionChecker = new RegionChecker(conf, stopper, TABLENAME);
  regionChecker.start();
  regionSplitter.start();
  // wait until the splitter is finished
  regionSplitter.join();
  stopper.stop(null);
  if (regionChecker.ex != null) {
    throw regionChecker.ex;
  }
  if (regionSplitter.ex != null) {
    throw regionSplitter.ex;
  }
  // one final check
  regionChecker.verify();
}
Example 12: testDeleteFileWithCleanerEnabled
import org.apache.hadoop.hbase.util.StoppableImplementation; // import the required package/class
@Test
public void testDeleteFileWithCleanerEnabled() throws Exception {
  Stoppable stop = new StoppableImplementation();
  Configuration conf = UTIL.getConfiguration();
  Path testDir = UTIL.getDataTestDir();
  FileSystem fs = UTIL.getTestFileSystem();
  String confKey = "hbase.test.cleaner.delegates";
  conf.set(confKey, AlwaysDelete.class.getName());
  AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
  // Enable cleaner
  chore.setEnabled(true);
  // create the directory layout in the directory to clean
  Path parent = new Path(testDir, "parent");
  Path child = new Path(parent, "child");
  Path file = new Path(child, "someFile");
  fs.mkdirs(child);
  // touch a new file
  fs.create(file).close();
  assertTrue("Test file didn't get created.", fs.exists(file));
  // run the chore
  chore.chore();
  // verify all the files got deleted
  assertFalse("File didn't get deleted", fs.exists(file));
  assertFalse("Empty directory didn't get deleted", fs.exists(child));
  assertFalse("Empty directory didn't get deleted", fs.exists(parent));
}
Example 13: testDeleteFileWithCleanerDisabled
import org.apache.hadoop.hbase.util.StoppableImplementation; // import the required package/class
@Test
public void testDeleteFileWithCleanerDisabled() throws Exception {
  Stoppable stop = new StoppableImplementation();
  Configuration conf = UTIL.getConfiguration();
  Path testDir = UTIL.getDataTestDir();
  FileSystem fs = UTIL.getTestFileSystem();
  String confKey = "hbase.test.cleaner.delegates";
  conf.set(confKey, AlwaysDelete.class.getName());
  AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
  // Disable cleaner
  chore.setEnabled(false);
  // create the directory layout in the directory to clean
  Path parent = new Path(testDir, "parent");
  Path child = new Path(parent, "child");
  Path file = new Path(child, "someFile");
  fs.mkdirs(child);
  // touch a new file
  fs.create(file).close();
  assertTrue("Test file didn't get created.", fs.exists(file));
  // run the chore
  chore.chore();
  // verify all the files exist
  assertTrue("File got deleted with cleaner disabled", fs.exists(file));
  assertTrue("Directory got deleted", fs.exists(child));
  assertTrue("Directory got deleted", fs.exists(parent));
}
Example 14: testFromClientSideWhileSplitting
import org.apache.hadoop.hbase.util.StoppableImplementation; // import the required package/class
/**
 * Tests that the client sees meta table changes as atomic during splits
 */
@Test
public void testFromClientSideWhileSplitting() throws Throwable {
  LOG.info("Starting testFromClientSideWhileSplitting");
  final TableName tableName = TableName.valueOf(name.getMethodName());
  final byte[] FAMILY = Bytes.toBytes("family");
  // SplitTransaction will update the meta table by offlining the parent region, and adding info
  // for daughters.
  Table table = TEST_UTIL.createTable(tableName, FAMILY);
  Stoppable stopper = new StoppableImplementation();
  RegionSplitter regionSplitter = new RegionSplitter(table);
  RegionChecker regionChecker = new RegionChecker(CONF, stopper, tableName);
  final ChoreService choreService = new ChoreService("TEST_SERVER");
  choreService.scheduleChore(regionChecker);
  regionSplitter.start();
  // wait until the splitter is finished
  regionSplitter.join();
  stopper.stop(null);
  if (regionChecker.ex != null) {
    throw new AssertionError("regionChecker", regionChecker.ex);
  }
  if (regionSplitter.ex != null) {
    throw new AssertionError("regionSplitter", regionSplitter.ex);
  }
  // one final check
  regionChecker.verify();
}