This article collects typical usages of the Java method org.apache.hadoop.hbase.HBaseTestingUtility.getConfiguration. If you have been wondering what HBaseTestingUtility.getConfiguration does, how to use it, or what real-world calls look like, the curated method examples below may help. You can also read further about the enclosing class, org.apache.hadoop.hbase.HBaseTestingUtility.
The following shows 15 code examples of the HBaseTestingUtility.getConfiguration method, sorted by popularity by default.
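Before the examples, here is a minimal sketch of the pattern they all share: getConfiguration() returns the utility's shared Configuration object, and settings must be applied to it before the mini cluster (or mini DFS/ZK cluster) is started, since the daemons read the configuration at startup. The class name and the configuration key tuned below are illustrative, not taken from the examples:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class GetConfigurationSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // Tune the shared Configuration *before* startMiniCluster();
    // daemons read it at startup, so later changes may not take effect.
    Configuration conf = util.getConfiguration();
    conf.setInt("hbase.regionserver.handler.count", 10); // illustrative key
    util.startMiniCluster();
    try {
      // run test logic against the mini cluster here
    } finally {
      util.shutdownMiniCluster();
    }
  }
}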
Example 1: setUpBefore
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on

@Before
public void setUpBefore() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  TEST_UTIL.getConfiguration().setInt("dfs.datanode.max.xceivers", 9192);
  TEST_UTIL.startMiniCluster(3);
  conf = TEST_UTIL.getConfiguration();
  this.connection = ConnectionFactory.createConnection(conf);
  assertEquals(0, TEST_UTIL.getHBaseAdmin().listTables().length);
  // set up the table
  table = TableName.valueOf(TABLE_BASE + "-" + tableIdx);
  tableIdx++;
  htbl = setupTable(table);
  populateTable(htbl);
  assertEquals(5, scanMeta());
  LOG.info("Table " + table + " has " + tableRowCount(conf, table) + " entries.");
  assertEquals(16, tableRowCount(conf, table));
  TEST_UTIL.getHBaseAdmin().disableTable(table);
  assertEquals(1, TEST_UTIL.getHBaseAdmin().listTables().length);
}
Example 2: verifyFileInDirWithStoragePolicy
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on

private void verifyFileInDirWithStoragePolicy(final String policy) throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  Configuration conf = htu.getConfiguration();
  conf.set(HConstants.WAL_STORAGE_POLICY, policy);
  MiniDFSCluster cluster = htu.startMiniDFSCluster(1);
  try {
    assertTrue(FSUtils.isHDFS(conf));
    FileSystem fs = FileSystem.get(conf);
    Path testDir = htu.getDataTestDirOnTestFS("testArchiveFile");
    fs.mkdirs(testDir);
    FSUtils.setStoragePolicy(fs, conf, testDir, HConstants.WAL_STORAGE_POLICY,
        HConstants.DEFAULT_WAL_STORAGE_POLICY);
    String file = UUID.randomUUID().toString();
    Path p = new Path(testDir, file);
    WriteDataToHDFS(fs, p, 4096);
    // cleanupFile asserts existence before deleting
    cleanupFile(fs, testDir);
  } finally {
    cluster.shutdown();
  }
}
Example 3: setupBeforeClass
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on

@BeforeClass
public static void setupBeforeClass() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  Configuration conf = TEST_UTIL.getConfiguration();
  // Up the handlers; this test needs more than usual.
  conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
  enableSecurity(conf);
  verifyConfiguration(conf);
  // We expect 0.98 scanning semantics
  conf.setBoolean(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT, false);
  TEST_UTIL.startMiniCluster();
  TEST_UTIL.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME.getName(), 50000);
  READER = User.createUserForTesting(conf, "reader", new String[0]);
  LIMITED = User.createUserForTesting(conf, "limited", new String[0]);
  DENIED = User.createUserForTesting(conf, "denied", new String[0]);
}
Example 4: testReadAndWriteHRegionInfoFile
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on

@Test
public void testReadAndWriteHRegionInfoFile() throws IOException, InterruptedException {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
  Path basedir = htu.getDataTestDir();
  FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration());
  // Create a region. That'll write the .regioninfo file.
  HRegion r = HRegion.createHRegion(hri, basedir, htu.getConfiguration(),
      fsTableDescriptors.get(TableName.META_TABLE_NAME));
  // Get modtime on the file.
  long modtime = getModTime(r);
  HRegion.closeHRegion(r);
  Thread.sleep(1001);
  r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME),
      null, htu.getConfiguration());
  // Ensure the file is not written a second time.
  long modtime2 = getModTime(r);
  assertEquals(modtime, modtime2);
  // Now load the file.
  HRegionInfo deserializedHri = HRegionFileSystem.loadRegionInfoFileContent(
      r.getRegionFileSystem().getFileSystem(), r.getRegionFileSystem().getRegionDir());
  assertTrue(hri.equals(deserializedHri));
}
Example 5: setUpBeforeClass
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on

@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  conf = TEST_UTIL.getConfiguration();
  TEST_UTIL.startMiniDFSCluster(3);
  cluster = TEST_UTIL.getDFSCluster();
  fs = cluster.getFileSystem();
}
Example 6: setUpBeforeClass
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on

@BeforeClass
public static void setUpBeforeClass() throws Exception {
  utility = new HBaseTestingUtility();
  utility.startMiniZKCluster();
  conf = utility.getConfiguration();
  zkw = HBaseTestingUtility.getZooKeeperWatcher(utility);
  String replicationZNodeName = conf.get("zookeeper.znode.replication", "replication");
  replicationZNode = ZKUtil.joinZNode(zkw.baseZNode, replicationZNodeName);
  KEY_ONE = initPeerClusterState("/hbase1");
  KEY_TWO = initPeerClusterState("/hbase2");
}
Example 7: setupBeforeClass
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on

@BeforeClass
public static void setupBeforeClass() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  TEST_UTIL.startMiniZKCluster();
  // register token type for protocol
  SecurityInfo.addInfo(AuthenticationProtos.AuthenticationService.getDescriptor().getName(),
      new SecurityInfo("hbase.test.kerberos.principal",
          AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN));
  // security settings are only added after startup so that ZK does not require SASL
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set("hadoop.security.authentication", "kerberos");
  conf.set("hbase.security.authentication", "kerberos");
  conf.setBoolean(HADOOP_SECURITY_AUTHORIZATION, true);
  server = new TokenServer(conf);
  serverThread = new Thread(server);
  Threads.setDaemonThreadRunning(serverThread,
      "TokenServer:" + server.getServerName().toString());
  // wait for startup
  while (!server.isStarted() && !server.isStopped()) {
    Thread.sleep(10);
  }
  server.rpcServer.refreshAuthManager(new PolicyProvider() {
    @Override
    public Service[] getServices() {
      return new Service[] {
          new Service("security.client.protocol.acl",
              AuthenticationProtos.AuthenticationService.BlockingInterface.class) };
    }
  });
  ZKClusterId.setClusterId(server.getZooKeeper(), clusterId);
  secretManager = (AuthenticationTokenSecretManager) server.getSecretManager();
  while (secretManager.getCurrentKey() == null) {
    Thread.sleep(1);
  }
}
Example 8: setupBeforeClass
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on

@BeforeClass
public static void setupBeforeClass() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  TEST_UTIL.startMiniZKCluster();
  Configuration conf = TEST_UTIL.getConfiguration();
  ZooKeeperWatcher zk = newZK(conf, "server1", new MockAbortable());
  AuthenticationTokenSecretManagerForTest[] tmp = new AuthenticationTokenSecretManagerForTest[2];
  tmp[0] = new AuthenticationTokenSecretManagerForTest(
      conf, zk, "server1", 60 * 60 * 1000, 60 * 1000);
  tmp[0].start();
  zk = newZK(conf, "server2", new MockAbortable());
  tmp[1] = new AuthenticationTokenSecretManagerForTest(
      conf, zk, "server2", 60 * 60 * 1000, 60 * 1000);
  tmp[1].start();
  while (KEY_MASTER == null) {
    for (int i = 0; i < 2; i++) {
      if (tmp[i].isMaster()) {
        KEY_MASTER = tmp[i];
        KEY_SLAVE = tmp[(i + 1) % 2];
        break;
      }
    }
    Thread.sleep(500);
  }
  LOG.info("Master is " + KEY_MASTER.getName() + ", slave is " + KEY_SLAVE.getName());
}
Example 9: testDeleteAndExists
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on

@Test
public void testDeleteAndExists() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  Configuration conf = htu.getConfiguration();
  conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
  FileSystem fs = FileSystem.get(conf);
  FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
  // verify that the file is created with the expected permissions
  String file = UUID.randomUUID().toString();
  Path p = new Path(htu.getDataTestDir(), "temptarget" + File.separator + file);
  Path p1 = new Path(htu.getDataTestDir(), "temppath" + File.separator + file);
  try {
    FSDataOutputStream out = FSUtils.create(conf, fs, p, perms, null);
    out.close();
    assertTrue("The created file should be present", FSUtils.isExists(fs, p));
    // delete the file with recursive set to false; only the file itself is deleted
    FSUtils.delete(fs, p, false);
    // create another file
    FSDataOutputStream out1 = FSUtils.create(conf, fs, p1, perms, null);
    out1.close();
    // delete the file with recursive set to true; still, only the file is deleted
    FSUtils.delete(fs, p1, true);
    assertFalse("The deleted file should no longer be present", FSUtils.isExists(fs, p1));
  } finally {
    // clean up
    FSUtils.delete(fs, p, true);
    FSUtils.delete(fs, p1, true);
  }
}
Example 10: createTableAndSnapshot
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on

protected static void createTableAndSnapshot(HBaseTestingUtility util, TableName tableName,
    String snapshotName, byte[] startRow, byte[] endRow, int numRegions)
    throws Exception {
  try {
    util.deleteTable(tableName);
  } catch (Exception ex) {
    // ignore
  }
  if (numRegions > 1) {
    util.createTable(tableName, FAMILIES, 1, startRow, endRow, numRegions);
  } else {
    util.createTable(tableName, FAMILIES);
  }
  Admin admin = util.getHBaseAdmin();
  // put some data in the table
  HTable table = new HTable(util.getConfiguration(), tableName);
  util.loadTable(table, FAMILIES);
  Path rootDir = FSUtils.getRootDir(util.getConfiguration());
  FileSystem fs = rootDir.getFileSystem(util.getConfiguration());
  SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName,
      Arrays.asList(FAMILIES), null, snapshotName, rootDir, fs, true);
  // load different values
  byte[] value = Bytes.toBytes("after_snapshot_value");
  util.loadTable(table, FAMILIES, value);
  // cause a flush to create new files in the region
  admin.flush(tableName);
  table.close();
}
Example 11: setUp
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on

@Before
public void setUp() throws Exception {
  testUtil = new HBaseTestingUtility();
  testUtil.startMiniDFSCluster(1);
  testUtil.startMiniZKCluster(1);
  testUtil.createRootDir();
  cluster = new LocalHBaseCluster(testUtil.getConfiguration(), 0, 0);
}
Example 12: testRenameAndSetModifyTime
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on

@Test
public void testRenameAndSetModifyTime() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  Configuration conf = htu.getConfiguration();
  MiniDFSCluster cluster = htu.startMiniDFSCluster(1);
  assertTrue(FSUtils.isHDFS(conf));
  FileSystem fs = FileSystem.get(conf);
  Path testDir = htu.getDataTestDirOnTestFS("testArchiveFile");
  String file = UUID.randomUUID().toString();
  Path p = new Path(testDir, file);
  FSDataOutputStream out = fs.create(p);
  out.close();
  assertTrue("The created file should be present", FSUtils.isExists(fs, p));
  long expect = System.currentTimeMillis() + 1000;
  assertNotEquals(expect, fs.getFileStatus(p).getModificationTime());
  ManualEnvironmentEdge mockEnv = new ManualEnvironmentEdge();
  mockEnv.setValue(expect);
  EnvironmentEdgeManager.injectEdge(mockEnv);
  try {
    String dstFile = UUID.randomUUID().toString();
    Path dst = new Path(testDir, dstFile);
    assertTrue(FSUtils.renameAndSetModifyTime(fs, p, dst));
    assertFalse("The moved file should not be present", FSUtils.isExists(fs, p));
    assertTrue("The dst file should be present", FSUtils.isExists(fs, dst));
    assertEquals(expect, fs.getFileStatus(dst).getModificationTime());
    cluster.shutdown();
  } finally {
    EnvironmentEdgeManager.reset();
  }
}
Example 13: setupBeforeClass
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on

@BeforeClass
public static void setupBeforeClass() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  CONF = TEST_UTIL.getConfiguration();
  CONF.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      RowCountEndpoint.class.getName());
  TEST_UTIL.startMiniCluster();
  TEST_UTIL.createTable(TEST_TABLE, new byte[][] { TEST_FAMILY });
}
Example 14: MockServer
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on

/**
 * @param htu Testing utility to use
 * @param zkw If true, create a ZooKeeperWatcher
 * @throws ZooKeeperConnectionException
 * @throws IOException
 */
public MockServer(final HBaseTestingUtility htu, final boolean zkw)
    throws ZooKeeperConnectionException, IOException {
  this.htu = htu;
  this.zk = zkw
      ? new ZooKeeperWatcher(htu.getConfiguration(), NAME.toString(), this, true)
      : null;
}
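For context, a hedged usage sketch of this constructor (the enclosing MockServer class, its NAME field, and the teardown shown are assumed from the test harness, not shown above):

// Hypothetical usage inside a test; assumes MockServer implements Server
// and that the utility can start a mini ZooKeeper cluster.
HBaseTestingUtility htu = new HBaseTestingUtility();
htu.startMiniZKCluster();
MockServer server = new MockServer(htu, true); // true => also create a ZooKeeperWatcher
// ... exercise code that needs a Server with a live ZooKeeperWatcher ...
htu.shutdownMiniZKCluster();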
Example 15: doTestWithMapReduce
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on

public static void doTestWithMapReduce(HBaseTestingUtility util, TableName tableName,
    String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions,
    int expectedNumSplits, boolean shutdownCluster) throws Exception {
  // create the table and snapshot
  createTableAndSnapshot(util, tableName, snapshotName, startRow, endRow, numRegions);
  if (shutdownCluster) {
    util.shutdownMiniHBaseCluster();
  }
  try {
    // create the job
    Job job = new Job(util.getConfiguration());
    Scan scan = new Scan(startRow, endRow); // limit the scan
    job.setJarByClass(util.getClass());
    TableMapReduceUtil.addDependencyJars(job.getConfiguration(),
        TestTableSnapshotInputFormat.class);
    TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
        scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
        NullWritable.class, job, true, tableDir);
    job.setReducerClass(TestTableSnapshotInputFormat.TestTableSnapshotReducer.class);
    job.setNumReduceTasks(1);
    job.setOutputFormatClass(NullOutputFormat.class);
    Assert.assertTrue(job.waitForCompletion(true));
  } finally {
    if (!shutdownCluster) {
      util.getHBaseAdmin().deleteSnapshot(snapshotName);
      util.deleteTable(tableName);
    }
  }
}