本文整理汇总了Java中org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster类的典型用法代码示例。如果您正苦于以下问题:Java MiniZooKeeperCluster类的具体用法?Java MiniZooKeeperCluster怎么用?Java MiniZooKeeperCluster使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
MiniZooKeeperCluster类属于org.apache.hadoop.hbase.zookeeper包,在下文中一共展示了MiniZooKeeperCluster类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testMasterRestartAfterNameSpaceEnablingNodeIsCreated
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; //导入依赖的package包/类
@Test
public void testMasterRestartAfterNameSpaceEnablingNodeIsCreated() throws Exception {
  // Step 1: start mini zk cluster.
  MiniZooKeeperCluster zkCluster = TEST_UTIL.startMiniZKCluster();
  // Step 2: add an orphaned system table ZNODE before HBase starts, so the
  // master must cope with pre-existing state on startup.
  TableName tableName = TableName.valueOf("hbase:namespace");
  ZooKeeperWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
  String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString());
  ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder();
  builder.setState(ZooKeeperProtos.Table.State.ENABLED);
  byte[] data = ProtobufUtil.prependPBMagic(builder.build().toByteArray());
  ZKUtil.createSetData(zkw, znode, data);
  // FIX: the original concatenated the byte[] directly, which logs its identity
  // hash (e.g. "[B@1f2a3b") rather than the payload; render the bytes instead.
  LOG.info("Create an orphaned Znode " + znode + " with data "
      + java.util.Arrays.toString(data));
  // Step 3: link the zk cluster to hbase cluster.
  TEST_UTIL.setZkCluster(zkCluster);
  // Step 4: start hbase cluster and expect master to start successfully.
  TEST_UTIL.startMiniCluster();
  assertTrue(TEST_UTIL.getHBaseCluster().getLiveMasterThreads().size() == 1);
}
示例2: startMiniZKCluster
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; //导入依赖的package包/类
/**
 * Starts a mini ZooKeeper cluster rooted at the given data directory. If the
 * property "test.hbase.zookeeper.property.clientPort" is set, the port it
 * names is used as the default ZooKeeper client port.
 *
 * @param dir data directory for the ZooKeeper servers
 * @param zooKeeperServerNum number of ZooKeeper servers to launch
 * @return the running cluster, also cached in {@code this.zkCluster}
 * @throws IOException if a cluster is already running
 */
private MiniZooKeeperCluster startMiniZKCluster(final File dir, int zooKeeperServerNum)
    throws Exception {
  if (this.zkCluster != null) {
    throw new IOException("Cluster already running at " + dir);
  }
  this.passedZkCluster = false;
  this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
  final int configuredPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
  if (configuredPort > 0) {
    // A port given in the config file takes precedence.
    this.zkCluster.setDefaultClientPort(configuredPort);
  }
  final int boundPort = this.zkCluster.startup(dir, zooKeeperServerNum);
  // Publish the actual bound port so HBase clients can find the ensemble.
  this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.toString(boundPort));
  return this.zkCluster;
}
示例3: startMiniZKCluster
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; //导入依赖的package包/类
/**
 * Starts a single-server mini ZooKeeper cluster under {@code testBaseDir}/zk.
 * If the property "test.hbase.zookeeper.property.clientPort" is set, the port
 * it names is used as the default ZooKeeper client port.
 *
 * @return the running cluster, also cached in {@code this.zkCluster}
 * @throws IOException if a cluster is already running
 */
public MiniZooKeeperCluster startMiniZKCluster()
    throws Exception {
  final File zkDataPath = new File(testBaseDir, "zk");
  if (this.zkCluster != null) {
    throw new IOException("Cluster already running at " + zkDataPath);
  }
  this.zkCluster = new MiniZooKeeperCluster(conf);
  final int configuredPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
  if (configuredPort > 0) {
    // A port given in the config file takes precedence.
    this.zkCluster.setDefaultClientPort(configuredPort);
  }
  // Exactly one server for this variant.
  final int boundPort = this.zkCluster.startup(zkDataPath, 1);
  this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.toString(boundPort));
  LOG.info("MiniZooKeeperCluster started");
  return this.zkCluster;
}
示例4: startMiniZKCluster
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; //导入依赖的package包/类
/**
 * Starts a mini ZooKeeper ensemble under {@code dir}. If the property
 * "test.hbase.zookeeper.property.clientPort" is set, the port it names is
 * used as the default ZooKeeper client port.
 *
 * @param dir data directory for the ZooKeeper servers
 * @param zooKeeperServerNum number of ZooKeeper servers to launch
 * @param clientPortList explicit client ports to assign, or null; entries
 *        beyond {@code zooKeeperServerNum} are ignored
 * @return the running cluster, also cached in {@code this.zkCluster}
 * @throws IOException if a cluster is already running
 */
private MiniZooKeeperCluster startMiniZKCluster(File dir, int zooKeeperServerNum,
    int[] clientPortList) throws Exception {
  if (this.zkCluster != null) {
    throw new IOException("Cluster already running at " + dir);
  }
  this.passedZkCluster = false;
  this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
  final int configuredPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
  if (configuredPort > 0) {
    // A port given in the config file takes precedence.
    this.zkCluster.setDefaultClientPort(configuredPort);
  }
  if (clientPortList != null) {
    // Ignore any client ports beyond the number of servers being started.
    final int usablePorts = Math.min(clientPortList.length, zooKeeperServerNum);
    for (int i = 0; i < usablePorts; i++) {
      this.zkCluster.addClientPort(clientPortList[i]);
    }
  }
  final int boundPort = this.zkCluster.startup(dir, zooKeeperServerNum);
  this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.toString(boundPort));
  return this.zkCluster;
}
示例5: setUpBeforeClass
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; //导入依赖的package包/类
@BeforeClass
public static void setUpBeforeClass() throws Exception {
// One-time fixture: a standalone mini ZooKeeper plus an embedded Solr, wired
// together so a SolrJ cloud client can reach "collection1".
// Dedicated scratch directory for the ZK data files.
ZK_DIR = new File(System.getProperty("java.io.tmpdir") + File.separator + "resulttosolrmapperfactory.zktest");
ZK_CLIENT_PORT = getFreePort();
ZK_CLUSTER = new MiniZooKeeperCluster();
ZK_CLUSTER.setDefaultClientPort(ZK_CLIENT_PORT);
ZK_CLUSTER.startup(ZK_DIR);
// Solr is pointed at the ZK cluster started above via its client port.
SOLR_TEST_UTILITY = new SolrTestingUtility(ZK_CLIENT_PORT, NetUtils.getFreePort());
SOLR_TEST_UTILITY.start();
// Upload schema/config from test resources, then create one core backing
// the "collection1" collection. NOTE(review): resources are loaded relative
// to ResultToSolrMapperFactoryTest — confirm they exist on the test classpath.
SOLR_TEST_UTILITY.uploadConfig("config1",
Resources.toByteArray(Resources.getResource(ResultToSolrMapperFactoryTest.class, "schema.xml")),
Resources.toByteArray(Resources.getResource(ResultToSolrMapperFactoryTest.class, "solrconfig.xml")));
SOLR_TEST_UTILITY.createCore("collection1_core1", "collection1", "config1", 1);
// Cloud client used by the tests; defaults all requests to "collection1".
COLLECTION1 = new CloudSolrClient.Builder().withZkHost(SOLR_TEST_UTILITY.getZkConnectString()).build();
COLLECTION1.setDefaultCollection("collection1");
}
示例6: startMiniZKCluster
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; //导入依赖的package包/类
/**
 * Brings up a mini ZooKeeper ensemble in {@code dir}. When the property
 * "test.hbase.zookeeper.property.clientPort" is present, its value becomes
 * the default ZooKeeper client port.
 *
 * @param dir data directory for the ZooKeeper servers
 * @param zooKeeperServerNum how many ZooKeeper servers to run
 * @param clientPortList explicit per-server client ports, or null; surplus
 *        entries past {@code zooKeeperServerNum} are dropped
 * @return the running cluster, also stored in {@code this.zkCluster}
 * @throws IOException if a cluster is already running
 */
private MiniZooKeeperCluster startMiniZKCluster(final File dir,
    final int zooKeeperServerNum,
    final int[] clientPortList)
    throws Exception {
  if (this.zkCluster != null) {
    throw new IOException("Cluster already running at " + dir);
  }
  this.passedZkCluster = false;
  this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
  final int portFromConf = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
  if (portFromConf > 0) {
    // Honor an explicitly configured port over the generated default.
    this.zkCluster.setDefaultClientPort(portFromConf);
  }
  if (clientPortList != null) {
    // Register at most one explicit port per server; extras are ignored.
    final int portsToUse = Math.min(clientPortList.length, zooKeeperServerNum);
    for (int idx = 0; idx < portsToUse; idx++) {
      this.zkCluster.addClientPort(clientPortList[idx]);
    }
  }
  final int activePort = this.zkCluster.startup(dir, zooKeeperServerNum);
  this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
      Integer.toString(activePort));
  return this.zkCluster;
}
示例7: testMiniZooKeeperWithOneServer
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; //导入依赖的package包/类
@Test
public void testMiniZooKeeperWithOneServer() throws Exception {
  // A single-server cluster has no backups, so killing the active server
  // must report that no replacement exists (-1).
  final HBaseTestingUtility util = new HBaseTestingUtility();
  final MiniZooKeeperCluster singleServerCluster = util.startMiniZKCluster();
  try {
    assertEquals(0, singleServerCluster.getBackupZooKeeperServerNum());
    assertTrue((singleServerCluster.killCurrentActiveZooKeeperServer() == -1));
  } finally {
    // Always tear the cluster down, even if an assertion fails.
    util.shutdownMiniZKCluster();
  }
}
示例8: startMiniZKCluster
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; //导入依赖的package包/类
/**
 * Launches a mini ZooKeeper ensemble in {@code dir} and records the client
 * port in the configuration under {@link HConstants#ZOOKEEPER_CLIENT_PORT}.
 *
 * @param dir data directory for the ZooKeeper servers
 * @param zooKeeperServerNum number of ZooKeeper servers to launch
 * @return the running cluster, also cached in {@code this.zkCluster}
 * @throws IOException if a cluster is already running
 */
private MiniZooKeeperCluster startMiniZKCluster(final File dir, int zooKeeperServerNum)
    throws Exception {
  if (this.zkCluster != null) {
    throw new IOException("Cluster already running at " + dir);
  }
  this.passedZkCluster = false;
  this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
  final int activePort = this.zkCluster.startup(dir, zooKeeperServerNum);
  this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.toString(activePort));
  return this.zkCluster;
}
示例9: testMiniZooKeeper
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; //导入依赖的package包/类
// Exercises MiniZooKeeperCluster failover accounting: a single-server cluster
// cannot fail over, while a 5-server cluster promotes backups as actives are
// killed. The assertion order matters — each kill changes the counters.
@Test public void testMiniZooKeeper() throws Exception {
HBaseTestingUtility hbt = new HBaseTestingUtility();
MiniZooKeeperCluster cluster1 = hbt.startMiniZKCluster();
try {
// One server => no backups; killing the active returns -1 (nothing left).
assertEquals(0, cluster1.getBackupZooKeeperServerNum());
assertTrue((cluster1.killCurrentActiveZooKeeperServer() == -1));
} finally {
hbt.shutdownMiniZKCluster();
}
// set up zookeeper cluster with 5 zk servers
MiniZooKeeperCluster cluster2 = hbt.startMiniZKCluster(5);
int defaultClientPort = 21818;
cluster2.setDefaultClientPort(defaultClientPort);
try {
assertEquals(4, cluster2.getBackupZooKeeperServerNum());
// killing the current active zk server
// Each kill returns the new active server's client port (>= the default).
assertTrue((cluster2.killCurrentActiveZooKeeperServer() >= defaultClientPort));
assertTrue((cluster2.killCurrentActiveZooKeeperServer() >= defaultClientPort));
// Two actives killed: 3 servers remain, 2 of them backups.
assertEquals(2, cluster2.getBackupZooKeeperServerNum());
assertEquals(3, cluster2.getZooKeeperServerNum());
// killing the backup zk servers
cluster2.killOneBackupZooKeeperServer();
cluster2.killOneBackupZooKeeperServer();
assertEquals(0, cluster2.getBackupZooKeeperServerNum());
assertEquals(1, cluster2.getZooKeeperServerNum());
// killing the last zk server
assertTrue((cluster2.killCurrentActiveZooKeeperServer() == -1));
// this should do nothing.
cluster2.killOneBackupZooKeeperServer();
// With every server gone the backup count reports -1, total reports 0.
assertEquals(-1, cluster2.getBackupZooKeeperServerNum());
assertEquals(0, cluster2.getZooKeeperServerNum());
} finally {
hbt.shutdownMiniZKCluster();
}
}
示例10: setUpBeforeClass
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; //导入依赖的package包/类
@BeforeClass
public static void setUpBeforeClass() throws Exception {
// One-time fixture: two HBase mini clusters sharing one ZK ensemble, with
// cluster 1 replicating to cluster 2 through three peers.
conf1 = HBaseConfiguration.create();
conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
conf1.setLong("replication.source.sleepforretries", 100);
// Each WAL is about 120 bytes
// Buffer of 200 bytes therefore holds at most one WAL entry at a time,
// forcing the source to exercise its back-pressure path.
conf1.setInt(HConstants.REPLICATION_SOURCE_TOTAL_BUFFER_KEY, 200);
conf1.setLong("replication.source.per.peer.node.bandwidth", 100L);
utility1 = new HBaseTestingUtility(conf1);
utility1.startMiniZKCluster();
MiniZooKeeperCluster miniZK = utility1.getZkCluster();
// Watchers are created for their side effect of initializing the znode
// hierarchy; the references are intentionally discarded.
new ZKWatcher(conf1, "cluster1", null, true);
conf2 = new Configuration(conf1);
// Distinct znode parents let both clusters share the single ZK ensemble.
conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
utility2 = new HBaseTestingUtility(conf2);
utility2.setZkCluster(miniZK);
new ZKWatcher(conf2, "cluster2", null, true);
// NOTE(review): admin1 is never closed; consider try-with-resources if
// ReplicationAdmin holds a connection — verify against the HBase version in use.
ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
ReplicationPeerConfig rpc = new ReplicationPeerConfig();
rpc.setClusterKey(utility2.getClusterKey());
utility1.startMiniCluster(1, 1);
utility2.startMiniCluster(1, 1);
// All three peers reuse the same peer config, i.e. the same target cluster.
admin1.addPeer("peer1", rpc, null);
admin1.addPeer("peer2", rpc, null);
admin1.addPeer("peer3", rpc, null);
}
示例11: setUpBeforeClass
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; //导入依赖的package包/类
@BeforeClass
public static void setUpBeforeClass() throws Exception {
// One-time fixture: standalone mini ZK on a free port, plus an IndexerModel
// rooted at /ngdata/hbaseindexer for the tests to manipulate.
ZK_DIR = new File(System.getProperty("java.io.tmpdir") + File.separator + "hbaseindexer.zktest");
ZK_CLIENT_PORT = getFreePort();
ZK_CLUSTER = new MiniZooKeeperCluster();
ZK_CLUSTER.setDefaultClientPort(ZK_CLIENT_PORT);
ZK_CLUSTER.startup(ZK_DIR);
// 15s session timeout for the shared ZK handle.
ZK = ZkUtil.connect("localhost:" + ZK_CLIENT_PORT, 15000);
INDEXER_MODEL = new IndexerModelImpl(ZK, "/ngdata/hbaseindexer");
}
示例12: setUpBeforeClass
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; //导入依赖的package包/类
@BeforeClass
public static void setUpBeforeClass() throws Exception {
// One-time fixture: a fresh standalone mini ZK cluster on a free port.
ZK_DIR = new File(System.getProperty("java.io.tmpdir") + File.separator + "hbaseindexer.zklocktest");
// Wipe any state left behind by a previous run so the test starts clean.
FileUtils.deleteDirectory(ZK_DIR);
ZK_CLIENT_PORT = getFreePort();
ZK_CLUSTER = new MiniZooKeeperCluster();
ZK_CLUSTER.setDefaultClientPort(ZK_CLIENT_PORT);
ZK_CLUSTER.startup(ZK_DIR);
}
示例13: setZKCluster
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; //导入依赖的package包/类
/**
 * Injects an externally managed mini ZooKeeper cluster for this instance to
 * use instead of starting its own.
 *
 * @param zkcluster the already-running cluster to adopt
 */
void setZKCluster(final MiniZooKeeperCluster zkcluster) {
this.zkcluster = zkcluster;
}
示例14: setUpBeforeClass
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; //导入依赖的package包/类
@BeforeClass
public static void setUpBeforeClass() throws Exception {
// One-time fixture: three HBase testing utilities sharing a single mini ZK
// ensemble (distinguished by znode parent), plus a table descriptor with one
// replicated and one non-replicated column family.
conf1 = HBaseConfiguration.create();
conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
// smaller block size and capacity to trigger more operations
// and test them
conf1.setInt("hbase.regionserver.hlog.blocksize", 1024*20);
conf1.setInt("replication.source.size.capacity", 1024);
conf1.setLong("replication.source.sleepforretries", 100);
conf1.setInt("hbase.regionserver.maxlogs", 10);
conf1.setLong("hbase.master.logcleaner.ttl", 10);
conf1.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
conf1.setBoolean("dfs.support.append", true);
conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
// Counter coprocessor lets the test observe replicated operations.
conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
"org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter");
utility1 = new HBaseTestingUtility(conf1);
utility1.startMiniZKCluster();
MiniZooKeeperCluster miniZK = utility1.getZkCluster();
// Watchers are created for their initialization side effects only; the
// references are intentionally discarded.
new ZooKeeperWatcher(conf1, "cluster1", null, true);
conf2 = new Configuration(conf1);
conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
conf3 = new Configuration(conf1);
conf3.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/3");
// Clusters 2 and 3 reuse cluster 1's ZK ensemble.
utility2 = new HBaseTestingUtility(conf2);
utility2.setZkCluster(miniZK);
new ZooKeeperWatcher(conf2, "cluster2", null, true);
utility3 = new HBaseTestingUtility(conf3);
utility3.setZkCluster(miniZK);
new ZooKeeperWatcher(conf3, "cluster3", null, true);
// famName replicates globally; noRepfamName keeps the default (no) scope.
table = new HTableDescriptor(tableName);
HColumnDescriptor fam = new HColumnDescriptor(famName);
fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
table.addFamily(fam);
fam = new HColumnDescriptor(noRepfamName);
table.addFamily(fam);
}
示例15: setUpBeforeClass
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; //导入依赖的package包/类
@BeforeClass
public static void setUpBeforeClass() throws Exception {
// One-time fixture: two replicating HBase clusters (shared mini ZK) with
// tag-aware codecs and coprocessors at source and sink, plus one pre-split
// test table created on each cluster.
// hfile v3 is required for cell tags.
conf1.setInt("hfile.format.version", 3);
conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
conf1.setInt("replication.source.size.capacity", 10240);
conf1.setLong("replication.source.sleepforretries", 100);
conf1.setInt("hbase.regionserver.maxlogs", 10);
conf1.setLong("hbase.master.logcleaner.ttl", 10);
conf1.setInt("zookeeper.recovery.retry", 1);
conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
conf1.setBoolean("dfs.support.append", true);
conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
conf1.setInt("replication.stats.thread.period.seconds", 5);
conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
// Codec that preserves tags across the replication wire.
conf1.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
TestCoprocessorForTagsAtSource.class.getName());
utility1 = new HBaseTestingUtility(conf1);
utility1.startMiniZKCluster();
MiniZooKeeperCluster miniZK = utility1.getZkCluster();
// Have to reget conf1 in case zk cluster location different
// than default
conf1 = utility1.getConfiguration();
replicationAdmin = new ReplicationAdmin(conf1);
LOG.info("Setup first Zk");
// Base conf2 on conf1 so it gets the right zk cluster.
conf2 = HBaseConfiguration.create(conf1);
conf2.setInt("hfile.format.version", 3);
conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
conf2.setBoolean("dfs.support.append", true);
conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
conf2.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
// Sink-side coprocessor to verify tags arrive at the peer.
conf2.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
TestCoprocessorForTagsAtSink.class.getName());
utility2 = new HBaseTestingUtility(conf2);
utility2.setZkCluster(miniZK);
// Cluster 2 becomes replication peer "2" of cluster 1.
replicationAdmin.addPeer("2", utility2.getClusterKey());
LOG.info("Setup second Zk");
utility1.startMiniCluster(2);
utility2.startMiniCluster(2);
// The same table layout (3 versions, globally-scoped family) is created on
// both clusters so edits can replicate 1 -> 2.
HTableDescriptor table = new HTableDescriptor(TABLE_NAME);
HColumnDescriptor fam = new HColumnDescriptor(FAMILY);
fam.setMaxVersions(3);
fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
table.addFamily(fam);
try (Connection conn = ConnectionFactory.createConnection(conf1);
Admin admin = conn.getAdmin()) {
admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
}
try (Connection conn = ConnectionFactory.createConnection(conf2);
Admin admin = conn.getAdmin()) {
admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
}
htable1 = utility1.getConnection().getTable(TABLE_NAME);
htable2 = utility2.getConnection().getTable(TABLE_NAME);
}