This article collects typical usage examples of the Java class org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper. If you are unsure what RecoverableZooKeeper is for or how to use it, the curated examples below may help.
The RecoverableZooKeeper class belongs to the org.apache.hadoop.hbase.zookeeper package. Fifteen code examples are shown below, sorted by popularity by default.
Example 1: testZNodeACLs
import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; // import the required package/class
private void testZNodeACLs() throws IOException, KeeperException, InterruptedException {
  ZooKeeperWatcher watcher = new ZooKeeperWatcher(conf, "IntegrationTestZnodeACLs", null);
  RecoverableZooKeeper zk = ZKUtil.connect(this.conf, watcher);
  String baseZNode = watcher.baseZNode;
  LOG.info("");
  LOG.info("***********************************************************************************");
  LOG.info("Checking ZK permissions, root znode: " + baseZNode);
  LOG.info("***********************************************************************************");
  LOG.info("");
  checkZnodePermsRecursive(watcher, zk, baseZNode);
  LOG.info("Checking ZK permissions: SUCCESS");
}
Example 2: checkZnodePermsRecursive
import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; // import the required package/class
private void checkZnodePermsRecursive(ZooKeeperWatcher watcher,
    RecoverableZooKeeper zk, String znode) throws KeeperException, InterruptedException {
  boolean expectedWorldReadable = watcher.isClientReadable(znode);
  assertZnodePerms(zk, znode, expectedWorldReadable);
  try {
    List<String> children = zk.getChildren(znode, false);
    for (String child : children) {
      checkZnodePermsRecursive(watcher, zk, ZKUtil.joinZNode(znode, child));
    }
  } catch (KeeperException ke) {
    // if we are not authenticated for listChildren, it is fine.
    if (ke.code() != Code.NOAUTH) {
      throw ke;
    }
  }
}
Example 3: testGuavaConflict
import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; // import the required package/class
/**
 * Ensure Armeria's dependencies do not cause trouble with hbase-shaded-client.
 *
 * @see <a href="https://issues.apache.org/jira/browse/HBASE-14963">HBASE-14963</a>
 */
@Test(expected = NotAllMetaRegionsOnlineException.class)
public void testGuavaConflict() throws Exception {
  // Make sure Armeria is available in the class path.
  assertThat(Version.identify(Server.class.getClassLoader())).isNotNull();
  // Make sure newer Guava is available in the class path.
  assertThat(Stopwatch.class.getDeclaredConstructor().getModifiers()).is(new Condition<>(
      value -> !Modifier.isPublic(value),
      "Recent Guava Stopwatch should have non-public default constructor."));
  final MetaTableLocator locator = new MetaTableLocator();
  final ZooKeeperWatcher zkw = mock(ZooKeeperWatcher.class);
  final RecoverableZooKeeper zk = mock(RecoverableZooKeeper.class);
  when(zkw.getRecoverableZooKeeper()).thenReturn(zk);
  when(zk.exists(any(), any())).thenReturn(new Stat(0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0));
  locator.waitMetaRegionLocation(zkw, 100);
}
Example 4: testZNodeACLs
import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; // import the required package/class
private void testZNodeACLs() throws IOException, KeeperException, InterruptedException {
  ZKWatcher watcher = new ZKWatcher(conf, "IntegrationTestZnodeACLs", null);
  RecoverableZooKeeper zk = ZKUtil.connect(this.conf, watcher);
  String baseZNode = watcher.znodePaths.baseZNode;
  LOG.info("");
  LOG.info("***********************************************************************************");
  LOG.info("Checking ZK permissions, root znode: " + baseZNode);
  LOG.info("***********************************************************************************");
  LOG.info("");
  checkZnodePermsRecursive(watcher, zk, baseZNode);
  LOG.info("Checking ZK permissions: SUCCESS");
}
Example 5: checkZnodePermsRecursive
import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; // import the required package/class
private void checkZnodePermsRecursive(ZKWatcher watcher,
    RecoverableZooKeeper zk, String znode) throws KeeperException, InterruptedException {
  boolean expectedWorldReadable = watcher.znodePaths.isClientReadable(znode);
  assertZnodePerms(zk, znode, expectedWorldReadable);
  try {
    List<String> children = zk.getChildren(znode, false);
    for (String child : children) {
      checkZnodePermsRecursive(watcher, zk, ZNodePaths.joinZNode(znode, child));
    }
  } catch (KeeperException ke) {
    // if we are not authenticated for listChildren, it is fine.
    if (ke.code() != Code.NOAUTH && ke.code() != Code.NONODE) {
      throw ke;
    }
  }
}
Example 6: cleanZookeeper
import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; // import the required package/class
/**
 * Deletes just the Splice-specific paths in ZooKeeper. Does not delete HBase paths.
 */
public static void cleanZookeeper() throws InterruptedException, KeeperException {
  RecoverableZooKeeper rzk = getRecoverableZooKeeper();
  String rootPath = HConfiguration.getConfiguration().getSpliceRootPath();
  for (String path : HConfiguration.zookeeperPaths) {
    path = rootPath + path;
    if (rzk.exists(path, false) != null) {
      for (String child : rzk.getChildren(path, false)) {
        for (String grandChild : rzk.getChildren(path + "/" + child, false)) {
          rzk.delete(path + "/" + child + "/" + grandChild, -1);
        }
        rzk.delete(path + "/" + child, -1);
      }
      rzk.delete(path, -1);
    }
  }
}
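The helper above touches only znodes under the Splice root, so it is suitable for resetting state between test runs. A minimal usage sketch, assuming the method lives on a ZkUtils utility class (as the later examples suggest); the wrapper class and method names here are illustrative:
import org.apache.zookeeper.KeeperException;

public class SpliceTestTeardown {
  // Hypothetical teardown hook: wipe Splice-specific znodes between test runs.
  public static void resetSpliceState() {
    try {
      ZkUtils.cleanZookeeper(); // removes only paths under the Splice root
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt(); // restore the interrupt flag
      throw new RuntimeException("Interrupted while cleaning ZooKeeper", e);
    } catch (KeeperException e) {
      throw new RuntimeException("ZooKeeper error while cleaning Splice paths", e);
    }
  }
}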
Example 7: assertZnodePerms
import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; // import the required package/class
private void assertZnodePerms(RecoverableZooKeeper zk, String znode,
    boolean expectedWorldReadable) throws KeeperException, InterruptedException {
  Stat stat = new Stat();
  List<ACL> acls = zk.getZooKeeper().getACL(znode, stat);
  String[] superUsers = superUser == null ? null : superUser.split(",");
  LOG.info("Checking ACLs for znode:" + znode + " acls:" + acls);
  for (ACL acl : acls) {
    int perms = acl.getPerms();
    Id id = acl.getId();
    // At most 3 ACLs should be set, for 3 Ids: one for everyone, one for the superuser,
    // and one for the hbase user
    if (Ids.ANYONE_ID_UNSAFE.equals(id)) {
      // "everyone" should be set only if we expect this znode to be world readable
      assertTrue(expectedWorldReadable);
      // assert that anyone can only read
      assertEquals(perms, Perms.READ);
    } else if (superUsers != null && ZooKeeperWatcher.isSuperUserId(superUsers, id)) {
      // assert that the superuser has all the permissions
      assertEquals(perms, Perms.ALL);
    } else if (new Id("sasl", masterPrincipal).equals(id)) {
      // hbase.master.kerberos.principal?
      assertEquals(perms, Perms.ALL);
    } else {
      fail("An ACL was found which is not expected for the znode:" + znode + " , ACL:" + acl);
    }
  }
}
Example 8: getRecoverableZooKeeper
import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; // import the required package/class
/**
 * Gets a direct interface to a ZooKeeper instance.
 *
 * @return a direct interface to ZooKeeper.
 */
public static RecoverableZooKeeper getRecoverableZooKeeper() {
  try {
    return zkManager.getRecoverableZooKeeper();
  } catch (ZooKeeperConnectionException e) {
    LOG.error("Unable to connect to ZooKeeper, aborting", e);
    throw new RuntimeException(e);
  }
}
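Because connection failures surface as an unchecked RuntimeException, callers can use the returned handle directly without checked connection handling. A small, hypothetical probe built on this helper (ZkUtils as the owning class is an assumption):
import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
import org.apache.zookeeper.KeeperException;

public class ZkProbe {
  // Hypothetical helper: check a znode through the shared RecoverableZooKeeper.
  public static boolean znodeExists(String path) throws KeeperException, InterruptedException {
    RecoverableZooKeeper rzk = ZkUtils.getRecoverableZooKeeper();
    return rzk.exists(path, false) != null; // false = do not set a watch
  }
}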
Example 9: recursiveSafeCreate
import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; // import the required package/class
public static boolean recursiveSafeCreate(String path, byte[] bytes, List<ACL> acls, CreateMode createMode)
    throws InterruptedException, KeeperException {
  if (path == null || path.length() <= 0)
    return true; // nothing to do, we've gone all the way to the root
  RecoverableZooKeeper rzk = getRecoverableZooKeeper();
  try {
    return safeCreate(path, bytes, acls, createMode, rzk);
  } catch (KeeperException e) {
    if (e.code() == KeeperException.Code.NONODE) {
      // the parent node doesn't exist, so recursively create it, then try to create this node again
      String parent = path.substring(0, path.lastIndexOf('/'));
      recursiveSafeCreate(parent, new byte[]{}, acls, CreateMode.PERSISTENT);
      return safeCreate(path, bytes, acls, createMode, rzk);
    } else {
      throw e;
    }
  }
}
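A sketch of calling this to materialize a nested path in one shot; the path, payload, and open ACL are illustrative only, and ZkUtils as the owning class is an assumption:
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;

public class RecursiveCreateDemo {
  public static void main(String[] args) throws InterruptedException, KeeperException {
    // Missing ancestors of /splice/demo/leaf are created as persistent znodes
    // with empty data; the leaf itself gets the given payload.
    ZkUtils.recursiveSafeCreate("/splice/demo/leaf", "hello".getBytes(),
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
  }
}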
Example 10: safeCreate
import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; // import the required package/class
public static boolean safeCreate(String path, byte[] bytes, List<ACL> acls, CreateMode createMode, RecoverableZooKeeper zooKeeper)
    throws KeeperException, InterruptedException {
  try {
    zooKeeper.create(path, bytes, acls, createMode);
    return true;
  } catch (KeeperException ke) {
    if (ke.code() != KeeperException.Code.NODEEXISTS)
      throw ke;
    else
      return true; // the node already exists, which is the desired end state anyway
  }
}
Example 11: validZookeeper
import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; // import the required package/class
public static boolean validZookeeper() throws InterruptedException, KeeperException {
  RecoverableZooKeeper rzk = getRecoverableZooKeeper();
  String rootPath = HConfiguration.getConfiguration().getSpliceRootPath();
  for (String path : HConfiguration.zookeeperPaths) {
    if (rzk.exists(rootPath + path, false) == null)
      return false;
  }
  return true;
}
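This check pairs naturally with recursiveSafeCreate from Example 9: a bootstrap routine can verify the expected layout and create whatever is missing. A hypothetical sketch, assuming both helpers live on ZkUtils; the open ACL is for illustration only:
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;

public class SpliceZkBootstrap {
  // Hypothetical guard: recreate any missing Splice znodes at startup.
  public static void ensureLayout() throws InterruptedException, KeeperException {
    if (!ZkUtils.validZookeeper()) {
      String root = HConfiguration.getConfiguration().getSpliceRootPath();
      for (String path : HConfiguration.zookeeperPaths) {
        ZkUtils.recursiveSafeCreate(root + path, new byte[]{},
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
      }
    }
  }
}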
Example 12: loadEnvironment
import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; // import the required package/class
public static HBaseSIEnvironment loadEnvironment(Clock clock, RecoverableZooKeeper rzk) throws IOException {
  HBaseSIEnvironment env = INSTANCE;
  if (env == null) {
    synchronized (HBaseSIEnvironment.class) {
      env = INSTANCE;
      if (env == null) {
        env = INSTANCE = new HBaseSIEnvironment(rzk, clock);
      }
    }
  }
  return env;
}
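This is the classic double-checked locking idiom: the unsynchronized read avoids taking the lock on the hot path, and the second check inside the synchronized block guarantees the constructor runs at most once. Under the Java memory model this is only safe if INSTANCE is declared volatile; the field declaration is not shown in this excerpt, so that is assumed. A hypothetical call site:
import java.io.IOException;
import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;

public class SIBootstrap {
  // Every caller receives the same environment; construction happens at most once.
  public static HBaseSIEnvironment startSI(Clock clock, RecoverableZooKeeper rzk) throws IOException {
    return HBaseSIEnvironment.loadEnvironment(clock, rzk);
  }
}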
Example 13: HBaseSIEnvironment
import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; // import the required package/class
@SuppressWarnings("unchecked")
public HBaseSIEnvironment(RecoverableZooKeeper rzk, Clock clock) throws IOException {
  ByteComparisons.setComparator(HBaseComparator.INSTANCE);
  this.config = HConfiguration.getConfiguration();
  this.timestampSource = new ZkTimestampSource(config, rzk);
  this.partitionCache = PartitionCacheService.loadPartitionCache(config);
  this.partitionFactory = TableFactoryService.loadTableFactory(clock, this.config, partitionCache);
  TxnNetworkLayerFactory txnNetworkLayerFactory = TableFactoryService.loadTxnNetworkLayer(this.config);
  this.txnStore = new CoprocessorTxnStore(txnNetworkLayerFactory, timestampSource, null);
  int completedTxnCacheSize = config.getCompletedTxnCacheSize();
  int completedTxnConcurrency = config.getCompletedTxnConcurrency();
  this.txnSupplier = new CompletedTxnCacheSupplier(txnStore, completedTxnCacheSize, completedTxnConcurrency);
  this.txnStore.setCache(txnSupplier);
  this.opFactory = HOperationFactory.INSTANCE;
  this.txnOpFactory = new SimpleTxnOperationFactory(exceptionFactory(), opFactory);
  this.clock = clock;
  this.fileSystem = new HNIOFileSystem(FileSystem.get((Configuration) config.getConfigSource().unwrapDelegate()), exceptionFactory());
  this.snowflakeFactory = new HSnowflakeFactory();
  this.clusterHealthFactory = new HClusterHealthFactory(rzk);
  this.ignoreTxnSupplier = new IgnoreTxnSupplier(partitionFactory, txnOpFactory);
  this.keepAlive = new QueuedKeepAliveScheduler(config.getTransactionKeepAliveInterval(),
      config.getTransactionTimeout(),
      config.getTransactionKeepAliveThreads(),
      txnStore);
  siDriver = SIDriver.loadDriver(this);
}
Example 14: captureIncrementalChanges
import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; // import the required package/class
/**
 * An HFile is eligible for incremental backup if:
 * 1) a full backup is ongoing, the flush was not triggered by the prepare phase, and the backup
 *    for this region is done;
 * 2) no backup is ongoing, but a previous full/incremental backup exists; or
 * 3) an incremental backup is ongoing.
 *
 * @param fileName
 * @throws StandardException
 */
public static void captureIncrementalChanges(Configuration conf,
                                             HRegion region,
                                             String path,
                                             FileSystem fs,
                                             Path rootDir,
                                             Path backupDir,
                                             String tableName,
                                             String fileName,
                                             boolean preparing) throws StandardException {
  boolean shouldRegister = false;
  try {
    RecoverableZooKeeper zooKeeper = ZkUtils.getRecoverableZooKeeper();
    String spliceBackupPath = HConfiguration.getConfiguration().getBackupPath();
    if (BackupUtils.existsDatabaseBackup(fs, rootDir)) {
      if (LOG.isDebugEnabled()) {
        SpliceLogUtils.debug(LOG, "There exists a successful full or incremental backup in the system");
      }
      shouldRegister = true;
    } else if (zooKeeper.exists(spliceBackupPath, false) != null) {
      if (LOG.isDebugEnabled()) {
        SpliceLogUtils.debug(LOG, "A backup is running");
      }
      shouldRegister = true;
    }
    if (shouldRegister) {
      registerHFile(conf, fs, backupDir, region, fileName);
    }
  } catch (Exception e) {
    e.printStackTrace();
    throw Exceptions.parseException(e);
  }
}
Example 15: publishServer
import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; // import the required package/class
private void publishServer(RecoverableZooKeeper rzk, ServerName serverName, String hostname, int port)
    throws InterruptedException, KeeperException {
  String root = HConfiguration.getConfiguration().getSpliceRootPath();
  try {
    HostAndPort hostAndPort = HostAndPort.fromParts(hostname, port);
    masterPath = root + HBaseConfiguration.OLAP_SERVER_PATH + "/" + serverName;
    rzk.create(masterPath, Bytes.toBytes(hostAndPort.toString()), ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    rzk.getData(masterPath, this, null); // sets a watch on our own registration
  } catch (Exception e) {
    LOG.error("Couldn't register OlapServer due to unexpected exception", e);
    throw e;
  }
}
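Because the znode is created EPHEMERAL, it disappears when the ZooKeeper session ends, and rzk.getData(masterPath, this, null) registers the enclosing object as a watcher on it. The excerpt does not show the watcher callback; the sketch below is a hypothetical implementation, assuming the enclosing class implements Watcher and keeps rzk, serverName, hostname, and port as fields:
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;

@Override
public void process(WatchedEvent event) {
  // Hypothetical: if our ephemeral registration vanished (e.g. after a session expiry), re-publish it.
  if (event.getType() == Watcher.Event.EventType.NodeDeleted
      && masterPath.equals(event.getPath())) {
    try {
      publishServer(rzk, serverName, hostname, port);
    } catch (InterruptedException | KeeperException e) {
      LOG.error("Failed to re-publish OlapServer", e);
    }
  }
}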