This article collects typical usage examples of the Java method kafka.zk.EmbeddedZookeeper.connectString. If you are wondering how to use EmbeddedZookeeper.connectString in Java, or are looking for concrete examples of the method in real code, the curated samples below may help. You can also browse further usage examples of the enclosing class, kafka.zk.EmbeddedZookeeper.
The following presents 10 code examples of the EmbeddedZookeeper.connectString method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
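Before the project-specific examples, here is a minimal, self-contained sketch of the pattern they all share: start an in-process ZooKeeper, pass zkServer.connectString() wherever a ZooKeeper address is needed (the ZkClient and the broker configuration), and tear everything down afterwards. It targets the pre-0.9 Kafka test utilities used throughout this page (TestZKUtils, TestUtils, MockTime from the kafka test jar); the class name EmbeddedKafkaFixture and its stop() method are illustrative assumptions, not code from any of the projects below.

import kafka.server.KafkaConfig;
import kafka.server.KafkaServer;
import kafka.utils.MockTime;
import kafka.utils.TestUtils;
import kafka.utils.TestZKUtils;
import kafka.utils.ZKStringSerializer$;
import kafka.zk.EmbeddedZookeeper;
import org.I0Itec.zkclient.ZkClient;

public class EmbeddedKafkaFixture {  // hypothetical name, not from the examples below

    private EmbeddedZookeeper zkServer;
    private ZkClient zkClient;
    private KafkaServer kafkaServer;

    public void start() {
        // Start an in-process ZooKeeper on the address chosen by TestZKUtils ...
        zkServer = new EmbeddedZookeeper(TestZKUtils.zookeeperConnect());
        // ... and use connectString() to reach it from a ZkClient and from the broker config.
        zkClient = new ZkClient(zkServer.connectString(), 30000, 30000, ZKStringSerializer$.MODULE$);
        KafkaConfig config = new KafkaConfig(TestUtils.createBrokerConfig(0, TestUtils.choosePort(), true));
        kafkaServer = TestUtils.createServer(config, new MockTime());
    }

    public void stop() {
        kafkaServer.shutdown();  // stop the broker before its ZooKeeper
        zkClient.close();
        zkServer.shutdown();     // assumed available on this EmbeddedZookeeper version
    }
}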
Example 1: initialize
import kafka.zk.EmbeddedZookeeper; // import the package/class this method depends on
public void initialize() {
    if (initialized) {
        throw new IllegalStateException("Context has already been initialized");
    }
    // Start an embedded ZooKeeper and connect a ZkClient to it via connectString()
    zkServer = new EmbeddedZookeeper(TestZKUtils.zookeeperConnect());
    zkClient = new ZkClient(zkServer.connectString(), 10000, 10000, ZKStringSerializer$.MODULE$);
    // Start a single Kafka broker on a free port
    port = TestUtils.choosePort();
    KafkaConfig config = new KafkaConfig(TestUtils.createBrokerConfig(brokerId, port, true));
    Time mock = new MockTime();
    kafkaServer = new KafkaServer(config, mock);
    kafkaServer.startup();
    initialized = true;
}
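Example 1 shows only the startup side; the snippet does not include a matching teardown. A plausible counterpart, assuming the same fields and the shutdown methods exposed by this Kafka version (the method name close() is an assumption):

public void close() {
    if (!initialized) {
        return;                   // nothing to tear down
    }
    kafkaServer.shutdown();       // stop the broker first
    kafkaServer.awaitShutdown();  // assumed available on kafka.server.KafkaServer
    zkClient.close();             // release the ZooKeeper client
    zkServer.shutdown();          // then stop the embedded ZooKeeper
    initialized = false;
}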
Example 2: start
import kafka.zk.EmbeddedZookeeper; // import the package/class this method depends on
public void start() throws IOException {
    String zkConnect = TestZKUtils.zookeeperConnect();
    zkServer = new EmbeddedZookeeper(zkConnect);
    zkClient = new ZkClient(zkServer.connectString(), 30000, 30000, ZKStringSerializer$.MODULE$);
    brokerPort = TestUtils.choosePort();
    logger.info("{}", brokerPort);
    Properties props = TestUtils.createBrokerConfig(BROKER_ID, brokerPort, true);
    // props.setProperty("zookeeper.connect", String.valueOf(zkPort));
    props.setProperty("zookeeper.session.timeout.ms", "30000");
    props.setProperty("zookeeper.connection.timeout.ms", "30000");
    logger.info("{}", props);
    KafkaConfig config = new KafkaConfig(props);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
}
Example 3: startServer
import kafka.zk.EmbeddedZookeeper; // import the package/class this method depends on
public static void startServer() throws RuntimeException {
    if (serverStarted && serverClosed) {
        throw new RuntimeException("Kafka test server has already been closed. Cannot generate Kafka server twice.");
    }
    if (!serverStarted) {
        serverStarted = true;
        zkConnect = TestZKUtils.zookeeperConnect();
        zkServer = new EmbeddedZookeeper(zkConnect);
        zkClient = new ZkClient(zkServer.connectString(), 30000, 30000, ZKStringSerializer$.MODULE$);
        kafkaPort = TestUtils.choosePort();
        Properties props = TestUtils.createBrokerConfig(brokerId, kafkaPort, true);
        KafkaConfig config = new KafkaConfig(props);
        Time mock = new MockTime();
        kafkaServer = TestUtils.createServer(config, mock);
    }
}
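Example 3 guards restarts with the serverStarted and serverClosed flags but never shows where serverClosed is set. A hedged sketch of the missing counterpart, assuming the same static fields:

public static void stopServer() {
    if (serverStarted && !serverClosed) {
        serverClosed = true;      // makes the guard in startServer() reject any further start
        kafkaServer.shutdown();
        zkClient.close();
        zkServer.shutdown();
    }
}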
Example 4: start
import kafka.zk.EmbeddedZookeeper; // import the package/class this method depends on
@Override
public void start() {
    // setup Zookeeper
    if (zookeeperConnectString == null) {
        String zkConnect = TestZKUtils.zookeeperConnect();
        zkServer = new EmbeddedZookeeper(zkConnect);
        zookeeperConnectString = zkServer.connectString();
    }
    zkClient = new ZkClient(zookeeperConnectString, 30000, 30000, ZKStringSerializer$.MODULE$);
    // setup Broker
    Properties props = TestUtils.createBrokerConfig(0, brokerPort, true);
    KafkaConfig config = new KafkaConfig(props);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
    for (Topic topic : getTopics()) {
        try {
            createTopic(topic.name, topic.numPartitions, true);
        } catch (InterruptedException e) {
            throw new RuntimeException("Unable to create topic", e);
        }
    }
    postStartCallback.apply(this);
}
Example 5: startServer
import kafka.zk.EmbeddedZookeeper; // import the package/class this method depends on
public static void startServer() throws RuntimeException {
    if (serverStarted && serverClosed) {
        throw new RuntimeException("Kafka test server has already been closed. Cannot generate Kafka server twice.");
    }
    if (!serverStarted) {
        serverStarted = true;
        zkConnect = TestZKUtils.zookeeperConnect();
        zkServer = new EmbeddedZookeeper(zkConnect);
        zkClient = new ZkClient(zkServer.connectString(), 30000, 30000, ZKStringSerializer$.MODULE$);
        kafkaPort = TestUtils.choosePort();
        Properties props = TestUtils.createBrokerConfig(brokerId, kafkaPort, true);
        KafkaConfig config = new KafkaConfig(props);
        Time mock = new MockTime();
        kafkaServer = TestUtils.createServer(config, mock);
    }
}
Example 6: setup
import kafka.zk.EmbeddedZookeeper; // import the package/class this method depends on
@BeforeClass
public static void setup() {
    zkServer = new EmbeddedZookeeper(TestZKUtils.zookeeperConnect());
    zkClient = new ZkClient(zkServer.connectString(), 30000, 30000, ZKStringSerializer$.MODULE$);
    kafkaServer = TestUtils.createServer(new KafkaConfig(TestUtils.createBrokerConfig(0, brokerPort, true)),
            new MockTime());
    kafkaServers = new ArrayList<>();
    kafkaServers.add(kafkaServer);
}
Example 7: startZookeeper
import kafka.zk.EmbeddedZookeeper; // import the package/class this method depends on
public void startZookeeper() {
    zkConnect = TestZKUtils.zookeeperConnect();
    try {
        zkServer = new EmbeddedZookeeper(zkConnect);
    } catch (Exception ex) {
        String msg = Utils.format("Error starting zookeeper {}: {}", zkConnect, ex);
        throw new RuntimeException(msg, ex);
    }
    zkClient = new ZkClient(zkServer.connectString(), 30000, 30000, ZKStringSerializer$.MODULE$);
}
Example 8: startZookeeper
import kafka.zk.EmbeddedZookeeper; // import the package/class this method depends on
public static void startZookeeper() {
    zkConnect = TestZKUtils.zookeeperConnect();
    try {
        zkServer = new EmbeddedZookeeper(zkConnect);
    } catch (Exception ex) {
        String msg = Utils.format("Error starting zookeeper {}: {}", zkConnect, ex);
        throw new RuntimeException(msg, ex);
    }
    zkClient = new ZkClient(zkServer.connectString(), 30000, 30000, ZKStringSerializer$.MODULE$);
}
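Neither startZookeeper() variant shows the corresponding shutdown. Assuming the same fields, a minimal teardown sketch could be:

public static void stopZookeeper() {
    if (zkClient != null) {
        zkClient.close();
    }
    if (zkServer != null) {
        zkServer.shutdown();      // assumed: shutdown() exists on this EmbeddedZookeeper version
    }
    zkClient = null;
    zkServer = null;
}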
Example 9: testKafkaTransport
import kafka.zk.EmbeddedZookeeper; // import the package/class this method depends on
@Test
public void testKafkaTransport() throws Exception {
    String topic = "zipkin";

    // Kafka setup
    EmbeddedZookeeper zkServer = new EmbeddedZookeeper(TestZKUtils.zookeeperConnect());
    ZkClient zkClient = new ZkClient(zkServer.connectString(), 30000, 30000, ZKStringSerializer$.MODULE$);
    Properties props = TestUtils.createBrokerConfig(0, TestUtils.choosePort(), false);
    KafkaConfig config = new KafkaConfig(props);
    KafkaServer kafkaServer = TestUtils.createServer(config, new MockTime());
    Buffer<KafkaServer> servers = JavaConversions.asScalaBuffer(Collections.singletonList(kafkaServer));
    TestUtils.createTopic(zkClient, topic, 1, 1, servers, new Properties());
    zkClient.close();
    TestUtils.waitUntilMetadataIsPropagated(servers, topic, 0, 5000);

    // HTrace
    HTraceConfiguration hTraceConfiguration = HTraceConfiguration.fromKeyValuePairs(
        "sampler.classes", "AlwaysSampler",
        "span.receiver.classes", ZipkinSpanReceiver.class.getName(),
        "zipkin.kafka.metadata.broker.list", config.advertisedHostName() + ":" + config.advertisedPort(),
        "zipkin.kafka.topic", topic,
        ZipkinSpanReceiver.TRANSPORT_CLASS_KEY, KafkaTransport.class.getName()
    );
    final Tracer tracer = new Tracer.Builder("test-tracer")
        .tracerPool(new TracerPool("test-tracer-pool"))
        .conf(hTraceConfiguration)
        .build();
    String scopeName = "test-kafka-transport-scope";
    TraceScope traceScope = tracer.newScope(scopeName);
    traceScope.close();
    tracer.close();

    // Kafka consumer
    Properties consumerProps = new Properties();
    consumerProps.put("zookeeper.connect", props.getProperty("zookeeper.connect"));
    consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "testing.group");
    consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "smallest");
    ConsumerConnector connector =
        kafka.consumer.Consumer.createJavaConsumerConnector(new kafka.consumer.ConsumerConfig(consumerProps));
    Map<String, Integer> topicCountMap = new HashMap<>();
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector.createMessageStreams(topicCountMap);
    ConsumerIterator<byte[], byte[]> it = streams.get(topic).get(0).iterator();

    // Test
    Assert.assertTrue("We should have one message in Kafka", it.hasNext());
    Span span = new Span();
    new TDeserializer(new TBinaryProtocol.Factory()).deserialize(span, it.next().message());
    Assert.assertEquals("The span name should match our scope description", span.getName(), scopeName);

    kafkaServer.shutdown();
}
Example 10: before
import kafka.zk.EmbeddedZookeeper; // import the package/class this method depends on
@Before
public void before(TestContext context) throws Exception {
    vertx = Vertx.vertx();
    // Setup Zookeeper
    logger.info("Starting Zookeeper");
    zookeeper = new EmbeddedZookeeper("zookeeper.dev:" + kafka.utils.TestUtils.choosePort());
    final Async async = context.async();
    // Setup Kafka brokers
    List<Integer> kafkaPorts = new ArrayList<>();
    // -1 for any available port
    kafkaPorts.add(-1);
    kafkaPorts.add(-1);
    kafkaPorts.add(-1);
    final int replicationFactor = 2;
    final int numberOfPartitions = 5;
    Properties properties = new Properties();
    properties.setProperty("offsets.topic.replication.factor", String.valueOf(replicationFactor));
    properties.setProperty("zookeeper.connection.timeout.ms", "10000");
    properties.setProperty("num.partitions", String.valueOf(numberOfPartitions));
    logger.info("Starting Kafka cluster");
    kafkaServer = new EmbeddedKafkaServer(vertx, zookeeper.connectString(), properties, kafkaPorts);
    kafkaServer.start(asyncResult -> {
        logger.info("Kafka cluster started, setting up topics...");
        kafkaServer.createTopic(topic);
        System.out.println(kafkaServer.metaDataDump());
        System.out.println("Partition: " + 0 + ": lead broker is: " + kafkaServer.leadBroker(topic, 0));
        // Create Kafka producer
        Properties kafkaProducerProps = new Properties();
        kafkaProducerProps.put("bootstrap.servers", kafkaServer.getBootstrapServers());
        kafkaProducerProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        kafkaProducerProps.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
        producer = new KafkaProducer<>(kafkaProducerProps);
        // deleting zookeeper information to make sure the consumer starts from the beginning
        // see https://stackoverflow.com/questions/14935755/how-to-get-data-from-old-offset-point-in-kafka
        kafkaServer.zkClient().delete("/consumers/" + groupId);
        async.complete();
    });
}