

Java TopicFilter Class Code Examples

This article collects typical usage examples of the Java class kafka.consumer.TopicFilter. If you are wondering what the TopicFilter class is for, or how to use it in practice, the curated examples below should help.


The TopicFilter class belongs to the kafka.consumer package. The 15 code examples below demonstrate its use, sorted by popularity by default.
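Before the individual examples, here is a minimal standalone sketch of the API they all share: a TopicFilter is either a Whitelist or a Blacklist built from a comma-separated list of topic regular expressions, and it is passed to ConsumerConnector.createMessageStreamsByFilter to select which topics feed the returned streams. The ZooKeeper address, group ID, and topic pattern below are placeholders, not values taken from any of the projects cited in this article.

import java.util.List;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.KafkaStream;
import kafka.consumer.TopicFilter;
import kafka.consumer.Whitelist;
import kafka.javaapi.consumer.ConsumerConnector;

public class TopicFilterSketch {
  public static void main(String[] args) {
    // Placeholder connection settings; point these at a real cluster.
    Properties props = new Properties();
    props.put("zookeeper.connect", "localhost:2181");
    props.put("group.id", "example-group");

    ConsumerConnector connector =
        Consumer.createJavaConsumerConnector(new ConsumerConfig(props));

    // A Whitelist keeps topics matching a comma-separated regex list;
    // a Blacklist keeps everything that does not match.
    TopicFilter filter = new Whitelist("metrics\\..*,events");

    // Request one stream; the filter decides which topics feed it.
    List<KafkaStream<byte[], byte[]>> streams =
        connector.createMessageStreamsByFilter(filter, 1);
    System.out.println("Streams created: " + streams.size());

    connector.shutdown();
  }
}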

Example 1: runNoData

import kafka.consumer.TopicFilter; // import the required package/class
@Test
public void runNoData() throws Exception {
  when(iterator.hasNext()).thenReturn(false);

  final KafkaRpcPluginThread writer = Mockito.spy(
      new KafkaRpcPluginThread(group, 1, TOPICS));
  writer.run();
  verify(tsdb, never()).addPoint(anyString(), anyLong(), anyLong(), anyMap());
  verify(tsdb, never()).addHistogramPoint(anyString(), anyLong(), 
      any(byte[].class), anyMap());
  verify(tsdb, never()).addAggregatePoint(anyString(), anyLong(), anyLong(), 
      anyMap(), anyBoolean(), anyString(), anyString(), anyString());
  verify(consumer_connector, times(1))
    .createMessageStreamsByFilter(any(TopicFilter.class), anyInt());
  verify(writer, times(1)).shutdown();
  verify(consumer_connector, times(1)).shutdown();
}
 
Developer: OpenTSDB, Project: opentsdb-rpc-kafka, Lines: 18, Source: TestKafkaRpcPluginThread.java

Example 2: runNoDataRestart

import kafka.consumer.TopicFilter; // import the required package/class
@Test
public void runNoDataRestart() throws Exception {
  when(iterator.hasNext()).thenReturn(false);

  final KafkaRpcPluginThread writer = Mockito.spy(
      new KafkaRpcPluginThread(group, 1, TOPICS));
  writer.run();
  writer.run();
  verify(tsdb, never()).addPoint(anyString(), anyLong(), anyLong(), anyMap());
  verify(tsdb, never()).addHistogramPoint(anyString(), anyLong(), 
      any(byte[].class), anyMap());
  verify(tsdb, never()).addAggregatePoint(anyString(), anyLong(), anyLong(), 
      anyMap(), anyBoolean(), anyString(), anyString(), anyString());
  verify(consumer_connector, times(2))
    .createMessageStreamsByFilter(any(TopicFilter.class), anyInt());
  verify(writer, times(2)).shutdown();
  verify(consumer_connector, times(2)).shutdown();
}
 
Developer: OpenTSDB, Project: opentsdb-rpc-kafka, Lines: 19, Source: TestKafkaRpcPluginThread.java

Example 3: runNoStreams

import kafka.consumer.TopicFilter; // import the required package/class
@Test
public void runNoStreams() throws Exception {
  when(stream_list.get(0))
          .thenThrow(new ArrayIndexOutOfBoundsException());

  KafkaRpcPluginThread writer = Mockito.spy(
      new KafkaRpcPluginThread(group, 1, TOPICS));
  writer.run();
  verify(tsdb, never()).addPoint(anyString(), anyLong(), anyLong(), anyMap());
  verify(tsdb, never()).addHistogramPoint(anyString(), anyLong(), 
      any(byte[].class), anyMap());
  verify(tsdb, never()).addAggregatePoint(anyString(), anyLong(), anyLong(), 
      anyMap(), anyBoolean(), anyString(), anyString(), anyString());
  verify(consumer_connector, times(1))
    .createMessageStreamsByFilter(any(TopicFilter.class), anyInt());
  verify(writer, times(1)).shutdown();
  verify(consumer_connector, times(1)).shutdown();
}
 
Developer: OpenTSDB, Project: opentsdb-rpc-kafka, Lines: 19, Source: TestKafkaRpcPluginThread.java

Example 4: verifyMessageRead

import kafka.consumer.TopicFilter; // import the required package/class
private void verifyMessageRead(final KafkaRpcPluginThread writer,
                               final boolean requeued) {
  verify(writer, times(1)).shutdown();
  verify(consumer_connector, times(1)).shutdown();
  verify(consumer_connector, times(1))
    .createMessageStreamsByFilter(any(TopicFilter.class), anyInt());
  verify(iterator, times(2)).hasNext();
  if (requeued) {
    verify(requeue, times(1)).handleError( 
        any(IncomingDataPoint.class), any(Exception.class));
  } else {
    verify(requeue, never()).handleError(
        any(IncomingDataPoint.class), any(Exception.class));
  }
  if (data != null) {
    verify(rate_limiter, times(1)).acquire();
  }
}
 
Developer: OpenTSDB, Project: opentsdb-rpc-kafka, Lines: 19, Source: TestKafkaRpcPluginThread.java

Example 5: onStart

import kafka.consumer.TopicFilter; // import the required package/class
@Override
protected void onStart(DbzNode node) {
    logger.debug("DATABASES: Starting and subscribing to '{}'...", Topic.SCHEMA_UPDATES);
    // Add a single-threaded consumer that will read the "schema-updates" topic to get all database schema updates.
    // We use a unique group ID so that we get *all* the messages on this topic.
    int numThreads = 1;
    String groupId = "databases-" + node.id(); // unique so that all clients see all messages
    TopicFilter topicFilter = Topics.anyOf(Topic.SCHEMA_UPDATES);
    node.subscribe(groupId, topicFilter, numThreads, (topic, partition, offset, key, msg) -> {
        Document updatedSchema = Message.getAfter(msg);
        DatabaseId dbId = Identifier.parseDatabaseId(key);
        activeDatabases.put(dbId.asString(), new ActiveDatabase(dbId, updatedSchema));
        logger.debug("DATABASES: Cached active database '{}'...", dbId);
        return true;
    });
}
 
Developer: rhauch, Project: debezium-proto, Lines: 17, Source: DbzDatabases.java

Example 6: MessageReader

import kafka.consumer.TopicFilter; // import the required package/class
public MessageReader(SecorConfig config, OffsetTracker offsetTracker) throws
        UnknownHostException {
    mConfig = config;
    mOffsetTracker = offsetTracker;

    mConsumerConnector = Consumer.createJavaConsumerConnector(createConsumerConfig());

    if (!mConfig.getKafkaTopicBlacklist().isEmpty() && !mConfig.getKafkaTopicFilter().isEmpty()) {
        throw new RuntimeException("Topic filter and blacklist cannot be both specified.");
    }
    TopicFilter topicFilter = !mConfig.getKafkaTopicBlacklist().isEmpty()
            ? new Blacklist(mConfig.getKafkaTopicBlacklist())
            : new Whitelist(mConfig.getKafkaTopicFilter());
    LOG.debug("Use TopicFilter {}({})", topicFilter.getClass(), topicFilter);
    List<KafkaStream<byte[], byte[]>> streams =
        mConsumerConnector.createMessageStreamsByFilter(topicFilter);
    KafkaStream<byte[], byte[]> stream = streams.get(0);
    mIterator = stream.iterator();
    mLastAccessTime = new HashMap<TopicPartition, Long>();
    StatsUtil.setLabel("secor.kafka.consumer.id", IdUtil.getConsumerId());
    mTopicPartitionForgetSeconds = mConfig.getTopicPartitionForgetSeconds();
    mCheckMessagesPerSecond = mConfig.getMessagesPerSecond() / mConfig.getConsumerThreads();
    mKafkaMessageTimestampFactory = new KafkaMessageTimestampFactory(mConfig.getKafkaMessageTimestampClass());
}
 
Developer: pinterest, Project: secor, Lines: 24, Source: MessageReader.java

Example 7: runConsumerRuntimeException

import kafka.consumer.TopicFilter; // import the required package/class
@Test
public void runConsumerRuntimeException() throws Exception {
  when(consumer_connector.createMessageStreamsByFilter(
      (TopicFilter) any(), anyInt())).thenThrow(
          new RuntimeException("Foobar"));
  KafkaRpcPluginThread writer = Mockito.spy(
      new KafkaRpcPluginThread(group, 1, TOPICS));
  writer.run();
  
  verify(writer, times(1)).shutdown();
  verify(consumer_connector, times(1)).shutdown();
}
 
Developer: OpenTSDB, Project: opentsdb-rpc-kafka, Lines: 13, Source: TestKafkaRpcPluginThread.java

Example 8: runConsumerException

import kafka.consumer.TopicFilter; // import the required package/class
@Test(expected = Exception.class)
public void runConsumerException() throws Exception {
  when(consumer_connector.createMessageStreamsByFilter(
      (TopicFilter) any(), anyInt())).thenThrow(
          new Exception("Foobar"));
  KafkaRpcPluginThread writer = Mockito.spy(
      new KafkaRpcPluginThread(group, 1, TOPICS));
  writer.run();
  
  verify(writer, times(1)).shutdown();
  verify(consumer_connector, times(1)).shutdown();
}
 
Developer: OpenTSDB, Project: opentsdb-rpc-kafka, Lines: 13, Source: TestKafkaRpcPluginThread.java

Example 9: init

import kafka.consumer.TopicFilter; // import the required package/class
private void init() {
  // register kafka offset lag metrics, one Gauge is for per consumer level granularity
  MetricRegistry registry = Metrics.getRegistry();
  try {
    fetchedMsgCounter = registry.meter("kafkaIngesterConsumer." + this.getName() + "-msgFetchRate");
    failedToIngestCounter = registry.meter("kafkaIngesterConsumer." + this.getName() + "-failedToIngest");
    kafkaOffsetLagGauge =
        registry.register("kafkaIngesterConsumer." + this.getName() + "-kafkaOffsetLag", new JmxAttributeGauge(
            new ObjectName(maxLagMetricName), "Value"));
  } catch (MalformedObjectNameException | IllegalArgumentException e) {
    logger.error("Register failure for metrics of KafkaIngesterConsumer", e);
  }

  TopicFilter topicFilter = new Whitelist(AuditConfig.AUDIT_TOPIC_NAME);
  logger.info("{}: Topic filter is {}", getName(), AuditConfig.AUDIT_TOPIC_NAME);
  this.consumer = Consumer.createJavaConsumerConnector(createConsumerConfig());
  KafkaStream<byte[], byte[]> stream = consumer.createMessageStreamsByFilter(topicFilter, 1).get(0);
  iterator = stream.iterator();
  logger.info("KafkaIngesterConsumer thread {} is initialized successfully", getName());

  if (AuditConfig.INGESTER_ENABLE_DEDUP) {
    deduplicator =
        new Deduplicator(threadId, AuditConfig.INGESTER_REDIS_HOST, AuditConfig.INGESTER_REDIS_PORT,
            AuditConfig.INGESTER_REDIS_KEY_TTL_SEC, AuditConfig.INGESTER_DUP_HOST_PREFIX,
            AuditConfig.INGESTER_HOSTS_WITH_DUP);
    deduplicator.open();
  } else {
    deduplicator = null;
  }
}
 
Developer: uber, Project: chaperone, Lines: 31, Source: KafkaIngesterConsumer.java

Example 10: WildcardTopicCount

import kafka.consumer.TopicFilter; // import the required package/class
public WildcardTopicCount(ZKConnector<?> zkClient, String consumerIdString,
		TopicFilter topicFilter, Integer numStreams) {
	this.consumerIdString = consumerIdString;
	this.numStreams = numStreams;
	this.topicFilter = topicFilter;
	this.zkClient = zkClient;
}
 
Developer: pulsarIO, Project: druid-kafka-ext, Lines: 8, Source: TopicCount.java

Example 11: anyOf

import kafka.consumer.TopicFilter; // import the required package/class
public static TopicFilter anyOf( String...topics) {
    StringJoiner joiner = new StringJoiner(",");
    for ( String topic : topics ) {
        joiner.add(topic);
    }
    return new Whitelist(joiner.toString());
}
 
Developer: rhauch, Project: debezium-proto, Lines: 8, Source: Topics.java

Example 12: noneOf

import kafka.consumer.TopicFilter; // import the required package/class
public static TopicFilter noneOf( String...topics) {
    StringJoiner joiner = new StringJoiner(",");
    for ( String topic : topics ) {
        joiner.add(topic);
    }
    return new Blacklist(joiner.toString());
}
 
Developer: rhauch, Project: debezium-proto, Lines: 8, Source: Topics.java
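The anyOf and noneOf helpers simply join their arguments into the comma-separated pattern string that Whitelist and Blacklist expect. A hypothetical call site is sketched below; the topic names are invented for illustration, and the Topics class from Examples 11 and 12 is assumed to be on the classpath.

import kafka.consumer.TopicFilter;

public class TopicsUsageSketch {
  public static void main(String[] args) {
    // Match either of two hypothetical topics.
    TopicFilter updates = Topics.anyOf("schema-updates", "ddl-events");
    // Match everything except a hypothetical internal topic.
    TopicFilter everythingElse = Topics.noneOf("internal-metrics");
    System.out.println(updates + " and " + everythingElse);
  }
}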

Example 13: Subscriber

import kafka.consumer.TopicFilter; // import the required package/class
public Subscriber(String groupId, TopicFilter topicFilter, int numThreads, Deserializer<KeyType> keyDecoder,
                  Deserializer<MessageType> messageDecoder, MessageConsumer<KeyType, MessageType> consumer) {
    this.topicFilter = topicFilter;
    this.keyDecoder = keyDecoder;
    this.messageDecoder = messageDecoder;
    this.consumer = consumer;
}
 
Developer: rhauch, Project: debezium-proto, Lines: 8, Source: InMemorySyncMessageBus.java

Example 14: before

import kafka.consumer.TopicFilter; // import the required package/class
@SuppressWarnings("unchecked")
@Before
public void before() throws Exception {
  tsdb = PowerMockito.mock(TSDB.class);
  config = new KafkaRpcPluginConfig(new Config(false));
  group = mock(KafkaRpcPluginGroup.class);
  message = mock(MessageAndMetadata.class);
  rate_limiter = mock(RateLimiter.class);
  requeue = mock(KafkaStorageExceptionHandler.class);
  counters = new ConcurrentHashMap<String, Map<String, AtomicLong>>();
  deserializer = new JSONDeserializer();
  
  consumer_connector = mock(ConsumerConnector.class);

  mockStatic(Consumer.class);
  when(Consumer.createJavaConsumerConnector((ConsumerConfig) any()))
          .thenReturn(consumer_connector);
  
  when(tsdb.getConfig()).thenReturn(config);
  when(tsdb.getStorageExceptionHandler()).thenReturn(requeue);
  
  parent = mock(KafkaRpcPlugin.class);
  when(parent.getHost()).thenReturn(LOCALHOST);
  when(parent.getTSDB()).thenReturn(tsdb);
  when(parent.getConfig()).thenReturn(config);
  when(parent.getNamespaceCounters()).thenReturn(counters);
  when(parent.trackMetricPrefix()).thenReturn(true);
  
  when(group.getParent()).thenReturn(parent);
  when(group.getRateLimiter()).thenReturn(rate_limiter);
  when(group.getGroupID()).thenReturn(GROUPID);
  when(group.getConsumerType()).thenReturn(TsdbConsumerType.RAW);
  when(group.getDeserializer()).thenReturn(deserializer);
  
  config.overrideConfig(KafkaRpcPluginConfig.KAFKA_CONFIG_PREFIX 
      + "zookeeper.connect", ZKS);
  
  stream_list = mock(List.class);
  when(consumer_connector.createMessageStreamsByFilter(
      (TopicFilter) any(), anyInt())).thenReturn(stream_list);

  final KafkaStream<byte[], byte[]> stream = mock(KafkaStream.class);
  when(stream_list.get(0)).thenReturn(stream);

  iterator = mock(ConsumerIterator.class);
  when(stream.iterator()).thenReturn(iterator);

  when(iterator.hasNext()).thenReturn(true).thenReturn(false);
  when(iterator.next()).thenReturn(message);
  
  PowerMockito.mockStatic(ConsumerConfig.class);
  PowerMockito.whenNew(ConsumerConfig.class).withAnyArguments()
    .thenReturn(mock(ConsumerConfig.class));
  
  PowerMockito.mockStatic(Consumer.class);
  when(Consumer.createJavaConsumerConnector(any(ConsumerConfig.class)))
    .thenReturn(consumer_connector);
}
 
Developer: OpenTSDB, Project: opentsdb-rpc-kafka, Lines: 59, Source: TestKafkaRpcPluginThread.java

Example 15: constructTopicCount

import kafka.consumer.TopicFilter; // import the required package/class
@SuppressWarnings("unchecked")
public static TopicCount constructTopicCount(ZKConnector<?> zkClient, String group,
		String consumerId) {
	KafkaZKData.ZKGroupDirs dirs = new KafkaZKData.ZKGroupDirs(group);
	String subscriptionPattern = null;
	Map<String, Integer> topMap = null;
	try {
		String topicCountString = zkClient.readData(dirs.consumerRegistryDir() + "/" + consumerId);
		ObjectMapper mapper = new ObjectMapper();
		TypeReference<Map<String, Object>> typeMap = new TypeReference<Map<String, Object>>() {
		};
		Map<String, Object> jsonObj = mapper.reader(typeMap).readValue(
				topicCountString);
		if (jsonObj == null)
			throw new KafkaZKException("error constructing TopicCount : "
					+ topicCountString);
		Object pattern = jsonObj.get("pattern");
		if (pattern == null)
			throw new KafkaZKException("error constructing TopicCount : "
					+ topicCountString);
		subscriptionPattern = (String) pattern;
		Object sub = jsonObj.get("subscription");
		if (sub == null)
			throw new KafkaZKException("error constructing TopicCount : "
					+ topicCountString);
		topMap = (Map<String, Integer>) sub;

	} catch (Throwable t) {
		throw new KafkaZKException(t);
	}

	boolean hasWhiteList = whiteListPattern.equals(subscriptionPattern);
	boolean hasBlackList = blackListPattern.equals(subscriptionPattern);

	if (topMap.isEmpty() || !(hasWhiteList || hasBlackList)) {
		return new StaticTopicCount(consumerId, topMap);
	} else {
		String regex = null;
		Integer numStreams = -1;
		for (Entry<String, Integer> entity : topMap.entrySet()) {
			regex = entity.getKey();
			numStreams = entity.getValue();
			break;
		}
		TopicFilter filter = hasWhiteList ? new Whitelist(regex)
				: new Blacklist(regex);

		return new WildcardTopicCount(zkClient, consumerId, filter,
				numStreams);
	}

}
 
Developer: pulsarIO, Project: druid-kafka-ext, Lines: 53, Source: TopicCount.java
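For context, the JSON document that constructTopicCount reads from the consumer registry path looks roughly like the sketch below. The field names "pattern" and "subscription" come straight from the parsing code above; the concrete pattern values ("white_list", "black_list", and a static variant) are inferred from the whiteListPattern/blackListPattern comparisons and should be treated as an assumption about this codebase, and additional fields may also be present in a real registry entry.

import java.util.Map;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

public class TopicCountJsonSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical registry entry: "pattern" selects the filter type and
    // "subscription" maps a topic regex to the number of streams.
    String topicCountString =
        "{\"pattern\":\"white_list\",\"subscription\":{\"metrics\\\\..*\":2}}";

    ObjectMapper mapper = new ObjectMapper();
    Map<String, Object> jsonObj = mapper.readValue(
        topicCountString, new TypeReference<Map<String, Object>>() {});

    System.out.println("pattern = " + jsonObj.get("pattern"));
    System.out.println("subscription = " + jsonObj.get("subscription"));
  }
}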


Note: The kafka.consumer.TopicFilter examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, and copyright in the source code remains with the original authors. Consult each project's license before distributing or using the code. Do not reproduce this article without permission.