本文整理汇总了Java中org.apache.htrace.core.HTraceConfiguration.fromKeyValuePairs方法的典型用法代码示例。如果您正苦于以下问题:Java HTraceConfiguration.fromKeyValuePairs方法的具体用法?Java HTraceConfiguration.fromKeyValuePairs怎么用?Java HTraceConfiguration.fromKeyValuePairs使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.htrace.core.HTraceConfiguration的用法示例。
在下文中一共展示了HTraceConfiguration.fromKeyValuePairs方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testParseHostPort
import org.apache.htrace.core.HTraceConfiguration; //导入方法依赖的package包/类
// Verifies that Conf resolves a "host:port" address string into the expected
// InetSocketAddress endpoint for hostnames, IPv4 literals, and bracketed IPv6 literals.
@Test(timeout = 60000)
public void testParseHostPort() throws Exception {
    assertEndpoint("example.com:8080", "example.com", 8080);
    assertEndpoint("127.0.0.1:8081", "127.0.0.1", 8081);
    assertEndpoint("[ff02:0:0:0:0:0:0:12]:9096", "ff02:0:0:0:0:0:0:12", 9096);
}

// Builds a Conf from the given host:port string and asserts the parsed endpoint.
private static void assertEndpoint(String hostPort, String expectedHost,
        int expectedPort) throws Exception {
    InetSocketAddress addr = new Conf(
        HTraceConfiguration.fromKeyValuePairs(Conf.ADDRESS_KEY, hostPort)).endpoint;
    Assert.assertEquals(expectedHost, addr.getHostName());
    Assert.assertEquals(expectedPort, addr.getPort());
}
示例2: verifyFail
import org.apache.htrace.core.HTraceConfiguration; //导入方法依赖的package包/类
// Asserts that constructing a Conf from an invalid host:port string throws
// IOException; any other outcome fails the test.
private static void verifyFail(String hostPort) {
    boolean rejected = false;
    try {
        new Conf(HTraceConfiguration.fromKeyValuePairs(
            Conf.ADDRESS_KEY, hostPort));
    } catch (IOException ignored) {
        // The bad configuration was rejected, as expected.
        rejected = true;
    }
    if (!rejected) {
        Assert.fail("Expected bad host:port configuration " + hostPort +
            " to fail, but it succeeded.");
    }
}
示例3: testKafkaTransport
import org.apache.htrace.core.HTraceConfiguration; //导入方法依赖的package包/类
// End-to-end test of ZipkinSpanReceiver's Kafka transport: spans closed by the
// tracer should arrive on the configured Kafka topic as Thrift-serialized
// Zipkin spans. Spins up an embedded ZooKeeper + Kafka broker, traces one
// scope, then consumes the topic and checks the span name.
@Test
public void testKafkaTransport() throws Exception {
    String topic = "zipkin";
    // Kafka setup: embedded ZooKeeper, a single broker, and the target topic.
    EmbeddedZookeeper zkServer = new EmbeddedZookeeper(TestZKUtils.zookeeperConnect());
    ZkClient zkClient = new ZkClient(zkServer.connectString(), 30000, 30000, ZKStringSerializer$.MODULE$);
    Properties props = TestUtils.createBrokerConfig(0, TestUtils.choosePort(), false);
    KafkaConfig config = new KafkaConfig(props);
    KafkaServer kafkaServer = TestUtils.createServer(config, new MockTime());
    Buffer<KafkaServer> servers = JavaConversions.asScalaBuffer(Collections.singletonList(kafkaServer));
    TestUtils.createTopic(zkClient, topic, 1, 1, servers, new Properties());
    zkClient.close();
    TestUtils.waitUntilMetadataIsPropagated(servers, topic, 0, 5000);
    // HTrace: always-sample so the single scope below is guaranteed to emit a span,
    // and point the receiver's Kafka transport at the embedded broker.
    HTraceConfiguration hTraceConfiguration = HTraceConfiguration.fromKeyValuePairs(
        "sampler.classes", "AlwaysSampler",
        "span.receiver.classes", ZipkinSpanReceiver.class.getName(),
        "zipkin.kafka.metadata.broker.list", config.advertisedHostName() + ":" + config.advertisedPort(),
        "zipkin.kafka.topic", topic,
        ZipkinSpanReceiver.TRANSPORT_CLASS_KEY, KafkaTransport.class.getName()
    );
    final Tracer tracer = new Tracer.Builder("test-tracer")
        .tracerPool(new TracerPool("test-tracer-pool"))
        .conf(hTraceConfiguration)
        .build();
    String scopeName = "test-kafka-transport-scope";
    TraceScope traceScope = tracer.newScope(scopeName);
    traceScope.close();
    // Closing the tracer flushes the span receiver, pushing the span to Kafka.
    tracer.close();
    // Kafka consumer: read back what the receiver published.
    Properties consumerProps = new Properties();
    consumerProps.put("zookeeper.connect", props.getProperty("zookeeper.connect"));
    consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "testing.group");
    // Start from the earliest offset so the already-published span is visible.
    consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "smallest");
    ConsumerConnector connector =
        kafka.consumer.Consumer.createJavaConsumerConnector(new kafka.consumer.ConsumerConfig(consumerProps));
    Map<String, Integer> topicCountMap = new HashMap<>();
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector.createMessageStreams(topicCountMap);
    ConsumerIterator<byte[], byte[]> it = streams.get(topic).get(0).iterator();
    // Test: exactly the traced scope should have been serialized to the topic.
    Assert.assertTrue("We should have one message in Kafka", it.hasNext());
    Span span = new Span();
    new TDeserializer(new TBinaryProtocol.Factory()).deserialize(span, it.next().message());
    Assert.assertEquals("The span name should match our scope description", span.getName(), scopeName);
    // Fix: shut down the consumer, broker, and embedded ZooKeeper — the original
    // left the connector and zkServer running, leaking threads/sockets across tests.
    connector.shutdown();
    kafkaServer.shutdown();
    zkServer.shutdown();
}