

Java Buffer Class Code Examples

This article collects typical usage examples of the scala.collection.mutable.Buffer class in Java code. If you have been wondering what the Buffer class is for, how to call it from Java, or what real-world usage looks like, the curated class examples below may help.


The Buffer class belongs to the scala.collection.mutable package. Ten code examples of the class are shown below, sorted by popularity by default.
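Most of the examples that follow share one pattern: a Java java.util.List is converted into a Scala Buffer so that it can be passed to a Scala API (Kafka, Play, a Scala async MySQL driver, and so on). As a quick orientation, here is a minimal, self-contained sketch of the two conversion styles used below; it assumes a Scala 2.10/2.11-era scala-library on the classpath, where both JavaConversions and JavaConverters are available.

import java.util.Arrays;
import java.util.List;

import scala.collection.JavaConversions;
import scala.collection.JavaConverters;
import scala.collection.mutable.Buffer;

public class BufferConversionSketch {
    public static void main(String[] args) {
        List<String> javaList = Arrays.asList("a", "b", "c");

        // Implicit-conversion helper used by most examples below (deprecated since Scala 2.12).
        Buffer<String> viaConversions = JavaConversions.asScalaBuffer(javaList);

        // Explicit converter API, as used in Example 10.
        Buffer<String> viaConverters = JavaConverters.asScalaBufferConverter(javaList).asScala();

        System.out.println(viaConversions.size() + " / " + viaConverters.size());
    }
}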

Example 1: testServlet

import scala.collection.mutable.Buffer; // import the required package/class
@Test
public void testServlet() throws Exception {
    DefaultCamelSwaggerServlet servlet = new DefaultCamelSwaggerServlet();
    
    Buffer<RestDefinition> list = servlet.getRestDefinitions(null);
    assertEquals(1, list.size());
    RestDefinition rest = list.iterator().next();
    checkRestDefinition(rest);

    // get the RestDefinition by using the camel context id
    list = servlet.getRestDefinitions(context.getName());
    assertEquals(1, list.size());
    rest = list.iterator().next();
    checkRestDefinition(rest);
    
    RestDefinition rest2 = context.getRestDefinitions().get(0);
    checkRestDefinition(rest2);
}
 
Developer ID: HydAu, Project: Camel, Lines of code: 19, Source file: DefaultCamelSwaggerServletTest.java

Example 2: getAllPartitionIds

import scala.collection.mutable.Buffer; // import the required package/class
/**
 * Get the list of partition IDs for the given topic.
 * @param topic the topic name
 * @return the partition IDs of the topic
 */
public static List<Integer> getAllPartitionIds(String topic) {
	// Wrap the single topic in a Scala Buffer so it can be passed to the Scala API.
	List<String> list = new ArrayList<String>();
	list.add(topic);
	Buffer<String> buffer = JavaConversions.asScalaBuffer(list);

	// Ask ZooKeeper for the partitions of each requested topic.
	Map<String, Seq<Object>> topicPartMap = JavaConversions.asJavaMap(ZkUtils.getPartitionsForTopics(getZkClient(), buffer));
	List<Object> javaList = JavaConversions.asJavaList(topicPartMap.get(topic));

	// Convert the Scala Seq of partition ids back into a Java List<Integer>.
	List<Integer> retList = new ArrayList<Integer>();
	for (Object obj : javaList) {
		retList.add((Integer) obj);
	}

	return retList;
}
 
Developer ID: linzhaoming, Project: easyframe-msg, Lines of code: 21, Source file: AdminUtil.java
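A hedged usage sketch of the helper above (the topic name is an assumption, and it presumes a ZooKeeper instance reachable through getZkClient()):

// Hypothetical call site; the topic name is illustrative.
List<Integer> partitionIds = AdminUtil.getAllPartitionIds("my-topic");
for (Integer id : partitionIds) {
    System.out.println("partition: " + id);
}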

Example 3: startBroker

import scala.collection.mutable.Buffer; // import the required package/class
private KafkaServer startBroker(Properties props) {
    // KafkaServer expects a Scala Buffer of metrics reporters; convert an empty Java list.
    List<KafkaMetricsReporter> kmrList = new ArrayList<>();
    Buffer<KafkaMetricsReporter> metricsList = scala.collection.JavaConversions.asScalaBuffer(kmrList);
    KafkaServer server = new KafkaServer(new KafkaConfig(props), new SystemTime(), Option.<String>empty(), metricsList);
    server.startup();
    return server;
}
 
Developer ID: deeplearning4j, Project: deeplearning4j, Lines of code: 9, Source file: EmbeddedKafkaCluster.java
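For context, a hedged sketch of how such an embedded broker might be configured and started; the property names follow the classic (pre-1.0) Kafka broker configuration, and all values here are assumptions for a local test setup:

// Illustrative configuration only; the port, log path, and ZooKeeper address are assumptions.
Properties props = new Properties();
props.put("broker.id", "0");
props.put("port", "9092");
props.put("log.dir", "/tmp/embedded-kafka-logs");
props.put("zookeeper.connect", "localhost:2181");
KafkaServer broker = startBroker(props);
// ... exercise the broker on localhost:9092 ...
broker.shutdown();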

Example 4: startBroker

import scala.collection.mutable.Buffer; // import the required package/class
private KafkaServer startBroker(Properties props) {
    List<KafkaMetricsReporter> kmrList = new ArrayList<>();
    Buffer<KafkaMetricsReporter> metricsList = scala.collection.JavaConversions.asScalaBuffer(kmrList);
    KafkaServer server = new KafkaServer(new KafkaConfig(props), new SystemTime(), Option.<String>empty(), metricsList);
    server.startup();
    return server;
}
 
Developer ID: wildfly-extras, Project: wildfly-camel, Lines of code: 8, Source file: EmbeddedKafkaBroker.java

Example 5: reassignPartition

import scala.collection.mutable.Buffer; // import the required package/class
/**
 * Re-assigns a topic/partition to the given remaining brokers.
 * remainingBrokers is a list of the IDs of the brokers that the topic/partition should be moved to.
 *
 * Thus, if remainingBrokers = [1, 2], the topic will be moved to brokers 1 and 2.
 *
 * @see https://kafka.apache.org/documentation.html#basic_ops_cluster_expansion
 * @see https://cwiki.apache.org/confluence/display/KAFKA/Replication+tools#Replicationtools-6.ReassignPartitionsTool
 */
public boolean reassignPartition(String topic, int partition, List<Object> remainingBrokers) {
  ZkClient client = new ZkClient(zkConnects, 10000, 10000, ZKStringSerializer$.MODULE$);

  TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);

  Buffer<Object> seqs = asScalaBuffer(remainingBrokers);
  Map<TopicAndPartition, Seq<Object>> map = new HashMap<>();
  map.put(topicAndPartition, seqs);
  scala.collection.mutable.Map<TopicAndPartition, Seq<Object>> x = asScalaMap(map);
  ReassignPartitionsCommand command = new ReassignPartitionsCommand(client, x);

  return command.reassignPartitions();
}
 
Developer ID: DemandCube, Project: Scribengin, Lines of code: 23, Source file: KafkaTool.java
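A hedged sketch of a call site (the topic name and broker IDs are assumptions, and kafkaTool stands for an instance of the surrounding class). Note that the broker IDs are passed as Object because Scala's Seq[Int] parameter appears as Seq&lt;Object&gt; on the Java side after erasure:

// Hypothetical usage; "my-topic", partition 0, and brokers 1 and 2 are illustrative.
List<Object> remainingBrokers = Arrays.<Object>asList(1, 2);
boolean started = kafkaTool.reassignPartition("my-topic", 0, remainingBrokers);
System.out.println("reassignment started: " + started);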

Example 6: reassignPartitionReplicas

import scala.collection.mutable.Buffer; // import the required package/class
public boolean reassignPartitionReplicas(String topic, int partition, Integer ... brokerId) {
  ZkClient client = new ZkClient(zkConnects, 10000, 10000, ZKStringSerializer$.MODULE$);
  TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);

  Buffer<Object> seqs = asScalaBuffer(Arrays.asList((Object[])brokerId));
  Map<TopicAndPartition, Seq<Object>> map = new HashMap<>();
  map.put(topicAndPartition, seqs);
  ReassignPartitionsCommand command = new ReassignPartitionsCommand(client, asScalaMap(map));
  return command.reassignPartitions();
}
 
Developer ID: DemandCube, Project: Scribengin, Lines of code: 11, Source file: KafkaTool.java

Example 7: testKafkaTransport

import scala.collection.mutable.Buffer; // import the required package/class
@Test
public void testKafkaTransport() throws Exception {

  String topic = "zipkin";
  // Kafka setup
  EmbeddedZookeeper zkServer = new EmbeddedZookeeper(TestZKUtils.zookeeperConnect());
  ZkClient zkClient = new ZkClient(zkServer.connectString(), 30000, 30000, ZKStringSerializer$.MODULE$);
  Properties props = TestUtils.createBrokerConfig(0, TestUtils.choosePort(), false);
  KafkaConfig config = new KafkaConfig(props);
  KafkaServer kafkaServer = TestUtils.createServer(config, new MockTime());

  Buffer<KafkaServer> servers = JavaConversions.asScalaBuffer(Collections.singletonList(kafkaServer));
  TestUtils.createTopic(zkClient, topic, 1, 1, servers, new Properties());
  zkClient.close();
  TestUtils.waitUntilMetadataIsPropagated(servers, topic, 0, 5000);

  // HTrace
  HTraceConfiguration hTraceConfiguration = HTraceConfiguration.fromKeyValuePairs(
      "sampler.classes", "AlwaysSampler",
      "span.receiver.classes", ZipkinSpanReceiver.class.getName(),
      "zipkin.kafka.metadata.broker.list", config.advertisedHostName() + ":" + config.advertisedPort(),
      "zipkin.kafka.topic", topic,
      ZipkinSpanReceiver.TRANSPORT_CLASS_KEY, KafkaTransport.class.getName()
  );

  final Tracer tracer = new Tracer.Builder("test-tracer")
      .tracerPool(new TracerPool("test-tracer-pool"))
      .conf(hTraceConfiguration)
      .build();

  String scopeName = "test-kafka-transport-scope";
  TraceScope traceScope = tracer.newScope(scopeName);
  traceScope.close();
  tracer.close();

  // Kafka consumer
  Properties consumerProps = new Properties();
  consumerProps.put("zookeeper.connect", props.getProperty("zookeeper.connect"));
  consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "testing.group");
  consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "smallest");
  ConsumerConnector connector =
      kafka.consumer.Consumer.createJavaConsumerConnector(new kafka.consumer.ConsumerConfig(consumerProps));
  Map<String, Integer> topicCountMap = new HashMap<>();
  topicCountMap.put(topic, 1);
  Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector.createMessageStreams(topicCountMap);
  ConsumerIterator<byte[], byte[]> it = streams.get(topic).get(0).iterator();

  // Test
  Assert.assertTrue("We should have one message in Kafka", it.hasNext());
  Span span = new Span();
  new TDeserializer(new TBinaryProtocol.Factory()).deserialize(span, it.next().message());
  Assert.assertEquals("The span name should match our scope description", span.getName(), scopeName);

  kafkaServer.shutdown();

}
 
Developer ID: apache, Project: incubator-htrace, Lines of code: 57, Source file: ITZipkinReceiver.java

Example 8: sendPreparedStatement

import scala.collection.mutable.Buffer; // import the required package/class
public ComposableFuture<QueryResult> sendPreparedStatement(final String query, final List<Object> values) {
  final Buffer<Object> scalaValues = JavaConversions.asScalaBuffer(values);
  return ScalaFutureHelper.from(() -> conn.sendPreparedStatement(query, scalaValues));
}
 
Developer ID: outbrain, Project: ob1k, Lines of code: 5, Source file: MySqlAsyncConnection.java
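A hedged usage sketch (the SQL statement, the parameter values, and the connection variable are assumptions); the Java parameter list is converted to a Scala Buffer before being handed to the underlying Scala async driver:

// Hypothetical call; 'connection' stands for a MySqlAsyncConnection instance.
List<Object> params = Arrays.<Object>asList("jane@example.com", 42);
ComposableFuture<QueryResult> result =
        connection.sendPreparedStatement("SELECT id FROM users WHERE email = ? AND age = ?", params);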

Example 9: sendPreparedStatement

import scala.collection.mutable.Buffer; // import the required package/class
@Override
public ComposableFuture<QueryResult> sendPreparedStatement(final String query, final List<Object> values) {
  final Buffer<Object> scalaValues = JavaConversions.asScalaBuffer(values);
  return ScalaFutureHelper.from(() -> _pool.sendPreparedStatement(query, scalaValues));
}
 
Developer ID: outbrain, Project: ob1k, Lines of code: 6, Source file: MySqlConnectionPool.java

Example 10: get

import scala.collection.mutable.Buffer; // import the required package/class
/**
* Translates a message.
*
* Uses `java.text.MessageFormat` internally to format the message.
*
* @param lang the message lang
* @param key the message key
* @param args the message arguments
* @return the formatted message or a default rendering if the key wasn't defined
*/
public static String get(Lang lang, String key, Object... args) {
    Buffer<Object> scalaArgs = scala.collection.JavaConverters.asScalaBufferConverter(Arrays.asList(args)).asScala();
    return play.api.i18n.Messages.apply(key, scalaArgs, lang);
}
 
Developer ID: vangav, Project: vos_backend, Lines of code: 15, Source file: Messages.java
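A hedged usage sketch (the message key and its pattern are assumptions; the key would have to exist in the application's messages file, and lang is whatever Lang instance the caller already has, for example the request language):

// Hypothetical key; assumes the messages file defines e.g. home.greeting=Hello, {0}!
String greeting = Messages.get(lang, "home.greeting", "Jane");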


Note: The scala.collection.mutable.Buffer class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and distribution and use should follow each project's License. Do not reproduce without permission.