This article collects typical usage examples of the Java method org.apache.hadoop.hbase.Server.stop. If you have been wondering what Server.stop does, how to use it, and where to find examples, the curated code samples here may help. You can also read further about the containing class, org.apache.hadoop.hbase.Server.
Three code examples of Server.stop are shown below, sorted by popularity by default.
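Before the examples, here is a minimal sketch of the pattern they all share: Server.stop(String) takes a human-readable reason for the shutdown, and in test code the call usually sits in a finally block (or at the end of the test) so the server is stopped even if the test body throws. MockServer comes from the examples below (it is a test double, not a production class); the wrapper class and method names in this sketch are illustrative assumptions.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.Server;

public class ServerStopSketch {
  public void runWithServer() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    // MockServer is the test double used by the examples below.
    Server server = new MockServer(htu);
    try {
      // ... exercise code that needs a Server instance ...
    } finally {
      // stop(why) requests shutdown; the reason string is typically logged.
      server.stop("test finished");
    }
  }
}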
Example 1: testCleanParent
import org.apache.hadoop.hbase.Server; // import the class the example method depends on
@Test
public void testCleanParent() throws IOException, InterruptedException {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  setRootDirAndCleanIt(htu, "testCleanParent");
  Server server = new MockServer(htu);
  try {
    MasterServices services = new MockMasterServices(server);
    CatalogJanitor janitor = new CatalogJanitor(server, services);
    // Create regions.
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("table"));
    htd.addFamily(new HColumnDescriptor("f"));
    HRegionInfo parent =
        new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
    HRegionInfo splita =
        new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
    HRegionInfo splitb =
        new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
    // Test that when both daughter regions are in place we do not remove the parent.
    Result r = createResult(parent, splita, splitb);
    // Add a reference under splita's directory so we don't clear out the parent.
    Path rootdir = services.getMasterFileSystem().getRootDir();
    Path tabledir = FSUtils.getTableDir(rootdir, htd.getTableName());
    Path storedir = HStore.getStoreHomedir(tabledir, splita,
        htd.getColumnFamilies()[0].getName());
    Reference ref = Reference.createTopReference(Bytes.toBytes("ccc"));
    long now = System.currentTimeMillis();
    // Reference name has this format: StoreFile#REF_NAME_PARSER
    Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
    FileSystem fs = services.getMasterFileSystem().getFileSystem();
    Path path = ref.write(fs, p);
    assertTrue(fs.exists(path));
    // With the reference in place, the parent must not be cleaned.
    assertFalse(janitor.cleanParent(parent, r));
    // Remove the reference file and try again.
    assertTrue(fs.delete(p, true));
    assertTrue(janitor.cleanParent(parent, r));
  } finally {
    server.stop("shutdown");
  }
}
Example 2: testArchiveOldRegion
import org.apache.hadoop.hbase.Server; // import the class the example method depends on
@Test
public void testArchiveOldRegion() throws Exception {
  String table = "table";
  HBaseTestingUtility htu = new HBaseTestingUtility();
  setRootDirAndCleanIt(htu, "testCleanParent");
  Server server = new MockServer(htu);
  MasterServices services = new MockMasterServices(server);
  // Create the janitor.
  CatalogJanitor janitor = new CatalogJanitor(server, services);
  // Create regions.
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
  htd.addFamily(new HColumnDescriptor("f"));
  HRegionInfo parent = new HRegionInfo(htd.getTableName(),
      Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
  HRegionInfo splita = new HRegionInfo(htd.getTableName(),
      Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
  HRegionInfo splitb = new HRegionInfo(htd.getTableName(),
      Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
  // Test that when both daughter regions are in place we do not remove the parent.
  Result parentMetaRow = createResult(parent, splita, splitb);
  FileSystem fs = FileSystem.get(htu.getConfiguration());
  Path rootdir = services.getMasterFileSystem().getRootDir();
  // We have to set the root directory since it is used in HFileDisposer to figure out how to
  // get to the archive directory. Otherwise it just seems to pick the first root directory it
  // can find (so the single test passes, but the full suite breaks).
  FSUtils.setRootDir(fs.getConf(), rootdir);
  Path tabledir = FSUtils.getTableDir(rootdir, htd.getTableName());
  Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName());
  Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
      tabledir, htd.getColumnFamilies()[0].getName());
  LOG.debug("Table dir:" + tabledir);
  LOG.debug("Store dir:" + storedir);
  LOG.debug("Store archive dir:" + storeArchive);
  // Add a couple of store files that we can check for.
  FileStatus[] mockFiles = addMockStoreFiles(2, services, storedir);
  // Get the current store files for comparison.
  FileStatus[] storeFiles = fs.listStatus(storedir);
  int index = 0;
  for (FileStatus file : storeFiles) {
    LOG.debug("Have store file:" + file.getPath());
    assertEquals("Got unexpected store file", mockFiles[index].getPath(),
        storeFiles[index].getPath());
    index++;
  }
  // Clean the parent.
  assertTrue(janitor.cleanParent(parent, parentMetaRow));
  LOG.debug("Finished cleanup of parent region");
  // Now check that the files have actually been archived.
  FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
  logFiles("store files", storeFiles);
  logFiles("archived files", archivedStoreFiles);
  assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);
  // Cleanup.
  FSUtils.delete(fs, rootdir, true);
  services.stop("Test finished");
  server.stop("Test finished");
  janitor.cancel(true);
}
Example 3: testDuplicateHFileResolution
import org.apache.hadoop.hbase.Server; // import the class the example method depends on
/**
 * Test that adding a store file with the same name as one that has already been archived
 * causes the previously archived file to be backed up with a timestamp.
 */
@Test
public void testDuplicateHFileResolution() throws Exception {
  String table = "table";
  HBaseTestingUtility htu = new HBaseTestingUtility();
  setRootDirAndCleanIt(htu, "testCleanParent");
  Server server = new MockServer(htu);
  MasterServices services = new MockMasterServices(server);
  // Create the janitor.
  CatalogJanitor janitor = new CatalogJanitor(server, services);
  // Create regions.
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
  htd.addFamily(new HColumnDescriptor("f"));
  HRegionInfo parent = new HRegionInfo(htd.getTableName(),
      Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
  HRegionInfo splita = new HRegionInfo(htd.getTableName(),
      Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
  HRegionInfo splitb = new HRegionInfo(htd.getTableName(),
      Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
  // Test that when both daughter regions are in place we do not remove the parent.
  Result r = createResult(parent, splita, splitb);
  FileSystem fs = FileSystem.get(htu.getConfiguration());
  Path rootdir = services.getMasterFileSystem().getRootDir();
  // We have to set the root directory since it is used in HFileDisposer to figure out how to
  // get to the archive directory. Otherwise it just seems to pick the first root directory it
  // can find (so the single test passes, but the full suite breaks).
  FSUtils.setRootDir(fs.getConf(), rootdir);
  Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable());
  Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName());
  System.out.println("Old root:" + rootdir);
  System.out.println("Old table:" + tabledir);
  System.out.println("Old store:" + storedir);
  Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
      tabledir, htd.getColumnFamilies()[0].getName());
  System.out.println("Old archive:" + storeArchive);
  // Enable archiving and make sure that files get archived.
  addMockStoreFiles(2, services, storedir);
  // Get the current store files for comparison.
  FileStatus[] storeFiles = fs.listStatus(storedir);
  // Clean the parent.
  assertTrue(janitor.cleanParent(parent, r));
  // Now check that the files have actually been archived.
  FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
  assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);
  // Now add store files with the same names as before to check the backup behavior.
  addMockStoreFiles(2, services, storedir);
  // Clean the parent again.
  assertTrue(janitor.cleanParent(parent, r));
  // Check that the duplicates were archived and the originals backed up with timestamps.
  archivedStoreFiles = fs.listStatus(storeArchive);
  assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs, true);
  // Cleanup.
  services.stop("Test finished");
  server.stop("shutdown");
  janitor.cancel(true);
}