This article collects typical usage examples of the Java method org.apache.hadoop.hbase.Stoppable.stop. If you have been wondering what Stoppable.stop does, how to use it, or where to find examples of it, the curated method samples below may help. You can also explore the enclosing class, org.apache.hadoop.hbase.Stoppable, for more usage examples.
The following shows 10 code examples of Stoppable.stop, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
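Every example below passes a StoppableImplementation into the chore or cleaner under test and later shuts it down through Stoppable.stop(String). As a reference point, here is a minimal sketch of such an implementation, assuming only the two-method Stoppable contract (stop(why) records the shutdown request, isStopped() reports it); the actual helper class in the HBase test utilities may differ in detail.

import org.apache.hadoop.hbase.Stoppable;

public class StoppableImplementation implements Stoppable {
  private volatile boolean stopped = false;

  @Override
  public void stop(String why) {
    // record the shutdown request; running chores poll isStopped() and exit
    this.stopped = true;
  }

  @Override
  public boolean isStopped() {
    return this.stopped;
  }
}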
Example 1: testStoppedCleanerDoesNotDeleteFiles
import org.apache.hadoop.hbase.Stoppable; // import the package/class the method depends on

@Test
public void testStoppedCleanerDoesNotDeleteFiles() throws Exception {
  Stoppable stop = new StoppableImplementation();
  Configuration conf = UTIL.getConfiguration();
  Path testDir = UTIL.getDataTestDir();
  FileSystem fs = UTIL.getTestFileSystem();
  String confKey = "hbase.test.cleaner.delegates";
  conf.set(confKey, AlwaysDelete.class.getName());

  AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);

  // also create a file in the top level directory
  Path topFile = new Path(testDir, "topFile");
  fs.create(topFile).close();
  assertTrue("Test file didn't get created.", fs.exists(topFile));

  // stop the chore
  stop.stop("testing stop");
  // run the chore
  chore.chore();

  // test that the file still exists
  assertTrue("File got deleted while chore was stopped", fs.exists(topFile));
}
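Example 1 configures the chore with a delegate class named AlwaysDelete but does not show it. A minimal sketch of such a delegate, assuming it follows the BaseHFileCleanerDelegate pattern of the HBase cleaner framework (in older HBase versions the overridden method takes a Path rather than a FileStatus):

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;

public class AlwaysDelete extends BaseHFileCleanerDelegate {
  @Override
  public boolean isFileDeletable(FileStatus fStat) {
    return true; // approve every file for deletion
  }
}

With every file approved for deletion, the only way topFile can survive is if the stopped chore really did nothing, which is exactly what the test asserts.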
Example 2: runCleaner
import org.apache.hadoop.hbase.Stoppable; // import the package/class the method depends on

/**
 * Schedules the cleaner, waits for it to finish a pass over the files, then stops it.
 * @param cleaner the HFileCleaner chore to run
 * @param finished latch counted down once the cleaner has checked all the files
 * @param stop the Stoppable used to shut the cleaner down
 */
private void runCleaner(HFileCleaner cleaner, CountDownLatch finished, Stoppable stop)
    throws InterruptedException {
  final ChoreService choreService = new ChoreService("CLEANER_SERVER_NAME");
  // run the cleaner
  choreService.scheduleChore(cleaner);
  // wait for the cleaner to check all the files
  finished.await();
  // stop the cleaner
  stop.stop("");
}
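The finished latch must be counted down by something that observes the cleaner's progress. One hypothetical way to wire it is a delegate that counts down when the cleaner inspects a file; delegate classes are instantiated reflectively from the configuration, so the latch is static here. All names in this sketch are illustrative and are not taken from the original test.

import java.util.concurrent.CountDownLatch;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;

public class LatchCleanerDelegate extends BaseHFileCleanerDelegate {
  public static final CountDownLatch FINISHED = new CountDownLatch(1);

  @Override
  public boolean isFileDeletable(FileStatus fStat) {
    FINISHED.countDown(); // signal that the cleaner reached the files
    return false;         // keep the file; the test only needs the signal
  }
}

A test would register such a delegate under the cleaner's plugin key (for the HFileCleaner that is hbase.master.hfilecleaner.plugins) and pass LatchCleanerDelegate.FINISHED as the finished argument.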
Example 3: testFromClientSideWhileSplitting
import org.apache.hadoop.hbase.Stoppable; // import the package/class the method depends on

/**
 * Tests that the client sees meta table changes as atomic during splits
 */
@Test
public void testFromClientSideWhileSplitting() throws Throwable {
  LOG.info("Starting testFromClientSideWhileSplitting");
  final TableName TABLENAME = TableName.valueOf("testFromClientSideWhileSplitting");
  final byte[] FAMILY = Bytes.toBytes("family");

  // SplitTransaction will update the meta table by offlining the parent region, and adding info
  // for daughters.
  Table table = TEST_UTIL.createTable(TABLENAME, FAMILY);

  Stoppable stopper = new StoppableImplementation();
  RegionSplitter regionSplitter = new RegionSplitter(table);
  RegionChecker regionChecker = new RegionChecker(CONF, stopper, TABLENAME);
  final ChoreService choreService = new ChoreService("TEST_SERVER");

  choreService.scheduleChore(regionChecker);
  regionSplitter.start();

  // wait until the splitter is finished
  regionSplitter.join();
  stopper.stop(null);

  if (regionChecker.ex != null) {
    throw regionChecker.ex;
  }
  if (regionSplitter.ex != null) {
    throw regionSplitter.ex;
  }

  // one final check
  regionChecker.verify();
}
Example 4: testFromClientSideWhileSplitting
import org.apache.hadoop.hbase.Stoppable; // import the package/class the method depends on

/**
 * Tests that the client sees meta table changes as atomic during splits
 */
@Test
public void testFromClientSideWhileSplitting() throws Throwable {
  LOG.info("Starting testFromClientSideWhileSplitting");
  final byte[] TABLENAME = Bytes.toBytes("testFromClientSideWhileSplitting");
  final byte[] FAMILY = Bytes.toBytes("family");

  // SplitTransaction will update the meta table by offlining the parent region, and adding info
  // for daughters.
  HTable table = TEST_UTIL.createTable(TABLENAME, FAMILY);

  Stoppable stopper = new StoppableImplementation();
  RegionSplitter regionSplitter = new RegionSplitter(table);
  RegionChecker regionChecker = new RegionChecker(conf, stopper, TABLENAME);

  regionChecker.start();
  regionSplitter.start();

  // wait until the splitter is finished
  regionSplitter.join();
  stopper.stop(null);

  if (regionChecker.ex != null) {
    throw regionChecker.ex;
  }
  if (regionSplitter.ex != null) {
    throw regionSplitter.ex;
  }

  // one final check
  regionChecker.verify();
}
Example 5: runCleaner
import org.apache.hadoop.hbase.Stoppable; // import the package/class the method depends on

/**
 * Starts the cleaner, waits for it to finish a pass over the files, then stops it.
 * @param cleaner the HFileCleaner to run
 * @param finished latch counted down once the cleaner has checked all the files
 * @param stop the Stoppable used to shut the cleaner down
 */
private void runCleaner(HFileCleaner cleaner, CountDownLatch finished, Stoppable stop)
    throws InterruptedException {
  // run the cleaner
  cleaner.start();
  // wait for the cleaner to check all the files
  finished.await();
  // stop the cleaner
  stop.stop("");
}
Example 6: testFromClientSideWhileSplitting
import org.apache.hadoop.hbase.Stoppable; // import the package/class the method depends on

/**
 * Tests that the client sees meta table changes as atomic during splits
 */
@Test
public void testFromClientSideWhileSplitting() throws Throwable {
  LOG.info("Starting testFromClientSideWhileSplitting");
  final TableName TABLENAME = TableName.valueOf("testFromClientSideWhileSplitting");
  final byte[] FAMILY = Bytes.toBytes("family");

  // SplitTransaction will update the meta table by offlining the parent region, and adding info
  // for daughters.
  Table table = TEST_UTIL.createTable(TABLENAME, FAMILY);

  Stoppable stopper = new StoppableImplementation();
  RegionSplitter regionSplitter = new RegionSplitter(table);
  RegionChecker regionChecker = new RegionChecker(conf, stopper, TABLENAME);

  regionChecker.start();
  regionSplitter.start();

  // wait until the splitter is finished
  regionSplitter.join();
  stopper.stop(null);

  if (regionChecker.ex != null) {
    throw regionChecker.ex;
  }
  if (regionSplitter.ex != null) {
    throw regionSplitter.ex;
  }

  // one final check
  regionChecker.verify();
}
Example 7: testFromClientSideWhileSplitting
import org.apache.hadoop.hbase.Stoppable; // import the package/class the method depends on

/**
 * Tests that the client sees meta table changes as atomic during splits
 */
@Test
public void testFromClientSideWhileSplitting() throws Throwable {
  LOG.info("Starting testFromClientSideWhileSplitting");
  final TableName TABLENAME = TableName.valueOf("testFromClientSideWhileSplitting");
  final byte[] FAMILY = Bytes.toBytes("family");

  // SplitTransaction will update the meta table by offlining the parent region, and adding info
  // for daughters.
  HTable table = TEST_UTIL.createTable(TABLENAME, FAMILY);

  Stoppable stopper = new StoppableImplementation();
  RegionSplitter regionSplitter = new RegionSplitter(table);
  RegionChecker regionChecker = new RegionChecker(conf, stopper, TABLENAME);

  regionChecker.start();
  regionSplitter.start();

  // wait until the splitter is finished
  regionSplitter.join();
  stopper.stop(null);

  if (regionChecker.ex != null) {
    throw regionChecker.ex;
  }
  if (regionSplitter.ex != null) {
    throw regionSplitter.ex;
  }

  // one final check
  regionChecker.verify();
}
Example 8: testCleaningRace
import org.apache.hadoop.hbase.Stoppable; // import the package/class the method depends on

/**
 * Test HFileArchiver.resolveAndArchive() race condition HBASE-7643
 */
@Test
public void testCleaningRace() throws Exception {
  final long TEST_TIME = 20 * 1000;
  final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");

  Configuration conf = UTIL.getMiniHBaseCluster().getMaster().getConfiguration();
  Path rootDir = UTIL.getDataTestDirOnTestFS("testCleaningRace");
  FileSystem fs = UTIL.getTestFileSystem();

  Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
  Path regionDir = new Path(FSUtils.getTableDir(new Path("./"),
      TableName.valueOf("table")), "abcdef");
  Path familyDir = new Path(regionDir, "cf");

  Path sourceRegionDir = new Path(rootDir, regionDir);
  fs.mkdirs(sourceRegionDir);

  Stoppable stoppable = new StoppableImplementation();

  // The cleaner should be looping without long pauses to reproduce the race condition.
  HFileCleaner cleaner = new HFileCleaner(1, stoppable, conf, fs, archiveDir);
  try {
    choreService.scheduleChore(cleaner);

    // Keep creating/archiving new files while the cleaner is running in the other thread
    long startTime = System.currentTimeMillis();
    for (long fid = 0; (System.currentTimeMillis() - startTime) < TEST_TIME; ++fid) {
      Path file = new Path(familyDir, String.valueOf(fid));
      Path sourceFile = new Path(rootDir, file);
      Path archiveFile = new Path(archiveDir, file);

      fs.createNewFile(sourceFile);

      try {
        // Try to archive the file
        HFileArchiver.archiveRegion(fs, rootDir,
            sourceRegionDir.getParent(), sourceRegionDir);

        // The archiver succeeded: the file is no longer in the original location
        // but it is in the archive location.
        LOG.debug("hfile=" + fid + " should be in the archive");
        assertTrue(fs.exists(archiveFile));
        assertFalse(fs.exists(sourceFile));
      } catch (IOException e) {
        // The archiver was unable to archive the file, probably the HBASE-7643 race
        // condition. In this case the file should not be archived and should still
        // be in the original location.
        LOG.debug("hfile=" + fid + " should be in the source location");
        assertFalse(fs.exists(archiveFile));
        assertTrue(fs.exists(sourceFile));

        // Avoid picking this file up again in the next iteration
        fs.delete(sourceFile, false);
      }
    }
  } finally {
    stoppable.stop("test end");
    cleaner.cancel(true);
    choreService.shutdown();
    fs.delete(rootDir, true);
  }
}
Example 9: testCleaningRace
import org.apache.hadoop.hbase.Stoppable; // import the package/class the method depends on

/**
 * Test HFileArchiver.resolveAndArchive() race condition HBASE-7643
 */
@Test
public void testCleaningRace() throws Exception {
  final long TEST_TIME = 20 * 1000;

  Configuration conf = UTIL.getMiniHBaseCluster().getMaster().getConfiguration();
  Path rootDir = UTIL.getDataTestDir("testCleaningRace");
  FileSystem fs = UTIL.getTestFileSystem();

  Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
  Path regionDir = new Path("table", "abcdef");
  Path familyDir = new Path(regionDir, "cf");

  Path sourceRegionDir = new Path(rootDir, regionDir);
  fs.mkdirs(sourceRegionDir);

  Stoppable stoppable = new StoppableImplementation();

  // The cleaner should be looping without long pauses to reproduce the race condition.
  HFileCleaner cleaner = new HFileCleaner(1, stoppable, conf, fs, archiveDir);
  try {
    cleaner.start();

    // Keep creating/archiving new files while the cleaner is running in the other thread
    long startTime = System.currentTimeMillis();
    for (long fid = 0; (System.currentTimeMillis() - startTime) < TEST_TIME; ++fid) {
      Path file = new Path(familyDir, String.valueOf(fid));
      Path sourceFile = new Path(rootDir, file);
      Path archiveFile = new Path(archiveDir, file);

      fs.createNewFile(sourceFile);

      try {
        // Try to archive the file
        HFileArchiver.archiveRegion(fs, rootDir,
            sourceRegionDir.getParent(), sourceRegionDir);

        // The archiver succeeded: the file is no longer in the original location
        // but it is in the archive location.
        LOG.debug("hfile=" + fid + " should be in the archive");
        assertTrue(fs.exists(archiveFile));
        assertFalse(fs.exists(sourceFile));
      } catch (IOException e) {
        // The archiver was unable to archive the file, probably the HBASE-7643 race
        // condition. In this case the file should not be archived and should still
        // be in the original location.
        LOG.debug("hfile=" + fid + " should be in the source location");
        assertFalse(fs.exists(archiveFile));
        assertTrue(fs.exists(sourceFile));

        // Avoid picking this file up again in the next iteration
        fs.delete(sourceFile, false);
      }
    }
  } finally {
    stoppable.stop("test end");
    cleaner.join();
    fs.delete(rootDir, true);
  }
}
Example 10: testCleaningRace
import org.apache.hadoop.hbase.Stoppable; // import the package/class the method depends on

/**
 * Test HFileArchiver.resolveAndArchive() race condition HBASE-7643
 */
@Test
public void testCleaningRace() throws Exception {
  final long TEST_TIME = 20 * 1000;

  Configuration conf = UTIL.getMiniHBaseCluster().getMaster().getConfiguration();
  Path rootDir = UTIL.getDataTestDirOnTestFS("testCleaningRace");
  FileSystem fs = UTIL.getTestFileSystem();

  Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
  Path regionDir = new Path(FSUtils.getTableDir(new Path("./"),
      TableName.valueOf("table")), "abcdef");
  Path familyDir = new Path(regionDir, "cf");

  Path sourceRegionDir = new Path(rootDir, regionDir);
  fs.mkdirs(sourceRegionDir);

  Stoppable stoppable = new StoppableImplementation();

  // The cleaner should be looping without long pauses to reproduce the race condition.
  HFileCleaner cleaner = new HFileCleaner(1, stoppable, conf, fs, archiveDir);
  try {
    cleaner.start();

    // Keep creating/archiving new files while the cleaner is running in the other thread
    long startTime = System.currentTimeMillis();
    for (long fid = 0; (System.currentTimeMillis() - startTime) < TEST_TIME; ++fid) {
      Path file = new Path(familyDir, String.valueOf(fid));
      Path sourceFile = new Path(rootDir, file);
      Path archiveFile = new Path(archiveDir, file);

      fs.createNewFile(sourceFile);

      try {
        // Try to archive the file
        HFileArchiver.archiveRegion(fs, rootDir,
            sourceRegionDir.getParent(), sourceRegionDir);

        // The archiver succeeded: the file is no longer in the original location
        // but it is in the archive location.
        LOG.debug("hfile=" + fid + " should be in the archive");
        assertTrue(fs.exists(archiveFile));
        assertFalse(fs.exists(sourceFile));
      } catch (IOException e) {
        // The archiver was unable to archive the file, probably the HBASE-7643 race
        // condition. In this case the file should not be archived and should still
        // be in the original location.
        LOG.debug("hfile=" + fid + " should be in the source location");
        assertFalse(fs.exists(archiveFile));
        assertTrue(fs.exists(sourceFile));

        // Avoid picking this file up again in the next iteration
        fs.delete(sourceFile, false);
      }
    }
  } finally {
    stoppable.stop("test end");
    cleaner.join();
    fs.delete(rootDir, true);
  }
}