This article collects and summarizes typical usage examples of the Java class org.apache.storm.spout.SpoutOutputCollector. If you have been wondering what SpoutOutputCollector is for, or how to use it in practice, the curated class examples below may help.
The SpoutOutputCollector class belongs to the org.apache.storm.spout package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
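As background for the examples: Storm hands a SpoutOutputCollector to a spout in ISpout.open(); the spout stores it and calls emit() from nextTuple(), and passing a message id with emit() lets Storm route ack()/fail() callbacks back to the spout. The following minimal spout is a sketch of that pattern; the class name, stream field, and emitted values are invented for illustration and are not taken from the examples below.

import java.util.Map;
import java.util.UUID;

import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichSpout;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Values;

// Hypothetical minimal spout illustrating the SpoutOutputCollector lifecycle.
public class SentenceSpout extends BaseRichSpout {
    private SpoutOutputCollector collector;

    @Override
    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
        // Storm hands the collector to the spout here; keep a reference for nextTuple().
        this.collector = collector;
    }

    @Override
    public void nextTuple() {
        // Emitting with a message id enables at-least-once processing: Storm calls
        // ack()/fail() on this spout with the same id once the tuple tree completes.
        String msgId = UUID.randomUUID().toString();
        collector.emit(new Values("hello storm"), msgId);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("sentence"));
    }
}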
Example 1: open

import org.apache.storm.spout.SpoutOutputCollector; // import the required package/class

@Override
public void open(Map stormConf, TopologyContext context,
        SpoutOutputCollector collector) {
    partitionField = ConfUtils.getString(stormConf,
            ESStatusBucketFieldParamName, "_routing");
    bucketSortField = ConfUtils.getString(stormConf,
            ESStatusBucketSortFieldParamName, bucketSortField);
    totalSortField = ConfUtils.getString(stormConf,
            ESStatusGlobalSortFieldParamName);
    maxURLsPerBucket = ConfUtils.getInt(stormConf,
            ESStatusMaxURLsParamName, 1);
    maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
            10);
    super.open(stormConf, context, collector);
}
Example 2: open

import org.apache.storm.spout.SpoutOutputCollector; // import the required package/class

/**
 * Initialize the collector.
 */
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    this.collector = collector;
    this.zkConnect = (String) conf.get(Constants.StormConfigKey.ZKCONNECT);
    this.topologyId = (String) conf.get(Constants.StormConfigKey.FULL_SPLITTER_TOPOLOGY_ID);
    this.isGlobal = this.topologyId.toLowerCase().contains(ZkTopoConfForFullPull.GLOBAL_FULLPULLER_TOPO_PREFIX);
    if (this.isGlobal) {
        this.zkMonitorRootNodePath = Constants.FULL_PULL_MONITOR_ROOT_GLOBAL;
    } else {
        this.zkMonitorRootNodePath = Constants.FULL_PULL_MONITOR_ROOT;
    }
    LOG.info("topologyId:{} zkConnect:{} zkTopoRoot:{}", topologyId, zkConnect, zkTopoRoot);
    try {
        loadRunningConf(null);
        // Check for leftover pending pull tasks. If any exist, resolve them: send a resume
        // message to notify the appender, record the resolution in ZK, and log the detected
        // pending tasks to make troubleshooting easier.
        // Pending tasks that have been resolved are removed from the pending queue so they
        // are not reprocessed endlessly.
        FullPullHelper.updatePendingTasksTrackInfo(zkService, dsName, null,
                DataPullConstants.FULLPULL_PENDING_TASKS_OP_CRASHED_NOTIFY);
    } catch (Exception e) {
        LOG.error("Failed to initialize spout for topology " + topologyId, e);
        throw new InitializationException();
    }
    LOG.info("Topology {} is started!", topologyId);
}
Example 3: open

import org.apache.storm.spout.SpoutOutputCollector; // import the required package/class

/**
 * Initialize the collector.
 */
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    this.collector = collector;
    this.zkConnect = (String) conf.get(Constants.StormConfigKey.ZKCONNECT);
    this.topologyId = (String) conf.get(Constants.StormConfigKey.FULL_PULLER_TOPOLOGY_ID);
    this.isGlobal = this.topologyId.toLowerCase().contains(ZkTopoConfForFullPull.GLOBAL_FULLPULLER_TOPO_PREFIX);
    if (this.isGlobal) {
        this.zkMonitorRootNodePath = Constants.FULL_PULL_MONITOR_ROOT_GLOBAL;
    } else {
        this.zkMonitorRootNodePath = Constants.FULL_PULL_MONITOR_ROOT;
    }
    try {
        loadRunningConf(null);
        // Check for leftover pending pull tasks. If any exist, resolve them: send a resume
        // message to notify the appender, record the resolution in ZK, and log the detected
        // pending tasks to make troubleshooting easier.
        // Pending tasks that have been resolved are removed from the pending queue so they
        // are not reprocessed endlessly.
        FullPullHelper.updatePendingTasksTrackInfo(zkService, dsName, null, DataPullConstants.FULLPULL_PENDING_TASKS_OP_CRASHED_NOTIFY);
    } catch (Exception e) {
        LOG.error("Failed to initialize spout for topology " + topologyId, e);
        throw new InitializationException();
    }
    LOG.info("Topology {} is started!", topologyId);
}
Example 4: open

import org.apache.storm.spout.SpoutOutputCollector; // import the required package/class

@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    initialized = false;
    this.context = context;
    // Spout internals
    this.collector = collector;
    numUncommittedOffsets = 0;
    // Offset management
    firstPollOffsetStrategy = kafkaSpoutConfig.getFirstPollOffsetStrategy();
    // With AutoCommitMode, offsets would be committed periodically in the background by the Kafka consumer.
    // consumerAutoCommitMode = kafkaSpoutConfig.isConsumerAutoCommitMode();
    // Always set to false here: the spout commits offsets itself.
    consumerAutoCommitMode = false;
    // Retries management
    retryService = kafkaSpoutConfig.getRetryService();
    if (!consumerAutoCommitMode) { // If it is auto commit, no need to commit offsets manually
        commitTimer = new Timer(TIMER_DELAY_MS, kafkaSpoutConfig.getOffsetsCommitPeriodMs(), TimeUnit.MILLISECONDS);
    }
    refreshSubscriptionTimer = new Timer(TIMER_DELAY_MS, kafkaSpoutConfig.getPartitionRefreshPeriodMs(), TimeUnit.MILLISECONDS);
    acked = new HashMap<>();
    emitted = new HashSet<>();
    waitingToEmit = Collections.emptyListIterator();
    LOG.info("Kafka Spout opened with the following configuration: {}", kafkaSpoutConfig);
}
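A note on the consumerAutoCommitMode flag above: forcing it to false means the Kafka consumer never commits offsets on its own; the spout commits them itself (driven by commitTimer) only after the corresponding tuples have been acked, preserving at-least-once delivery. As a hedged illustration of what that flag corresponds to on the Kafka side, the sketch below builds a plain kafka-clients consumer with auto-commit disabled; the class name, broker address, and group id are placeholder values, not taken from the example.

import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

// Hypothetical sketch: what consumerAutoCommitMode = false means at the Kafka level.
public class ManualCommitConsumerSketch {
    public static KafkaConsumer<String, String> buildConsumer() {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "storm-kafka-spout");       // placeholder group id
        // Disable auto-commit: the caller commits offsets explicitly, e.g. only
        // after the corresponding tuples have been acked.
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        return new KafkaConsumer<>(props);
    }
}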
Example 5: open

import org.apache.storm.spout.SpoutOutputCollector; // import the required package/class

@Override
public void open(@SuppressWarnings("rawtypes") Map conf, TopologyContext context,
        SpoutOutputCollector collector) {
    String configYaml = (String) conf.get(ConfigurationConstants.CHATALYTICS_CONFIG.txt);
    ChatAlyticsConfig config = YamlUtils.readChatAlyticsConfigFromString(configYaml);
    LOG.info("Loaded config...");
    hipchatDao = HipChatApiDAOFactory.getHipChatApiDao(config);
    LOG.info("Got HipChat API DAO...");
    dbDao = ChatAlyticsDAOFactory.createChatAlyticsDao(config);
    LOG.info("Got database DAO...");
    dtz = DateTimeZone.forID(config.timeZone);
    this.collector = collector;
}
Example 6: open

import org.apache.storm.spout.SpoutOutputCollector; // import the required package/class

@VisibleForTesting
protected void open(SlackConfig slackConfig, IChatApiDAO slackDao,
        WebSocketContainer webSocketContainer, TopologyContext context,
        SpoutOutputCollector collector) {
    this.slackDao = slackDao;
    this.collector = collector;
    String startDateNullable = slackConfig.startDate;
    // get start date, if there is one
    if (startDateNullable != null) {
        this.startDate = Optional.of(DateTime.parse(startDateNullable));
    } else {
        this.startDate = Optional.absent();
    }
    openRealtimeConnection(slackConfig, webSocketContainer);
}
Example 7: open

import org.apache.storm.spout.SpoutOutputCollector; // import the required package/class

@VisibleForTesting
protected void open(SlackBackfillerConfig chatConfig, IChatApiDAO slackApiDao,
        IChatAlyticsDAO dbDao, TopologyContext context,
        SpoutOutputCollector collector) {
    this.granularityMins = chatConfig.granularityMins;
    this.collector = collector;
    this.slackDao = slackApiDao;
    this.dbDao = dbDao;
    // get start date
    if (chatConfig.startDate != null) {
        this.initDate = DateTime.parse(chatConfig.startDate);
    }
    // get end date, if there is one
    if (chatConfig.endDate != null) {
        this.endDate = DateTime.parse(chatConfig.endDate);
    }
}
Example 8: setup

import org.apache.storm.spout.SpoutOutputCollector; // import the required package/class

@Override
protected void setup() throws Exception {
    super.internalSetup();
    super.producerBaseSetup();
    pulsarSpoutConf = new PulsarSpoutConfiguration();
    pulsarSpoutConf.setServiceUrl(serviceUrl);
    pulsarSpoutConf.setTopic(topic);
    pulsarSpoutConf.setSubscriptionName(subscriptionName);
    pulsarSpoutConf.setMessageToValuesMapper(messageToValuesMapper);
    pulsarSpoutConf.setFailedRetriesTimeout(1, TimeUnit.SECONDS);
    pulsarSpoutConf.setMaxFailedRetries(2);
    pulsarSpoutConf.setSharedConsumerEnabled(true);
    pulsarSpoutConf.setMetricsTimeIntervalInSecs(60);
    consumerConf = new ConsumerConfiguration();
    consumerConf.setSubscriptionType(SubscriptionType.Shared);
    spout = new PulsarSpout(pulsarSpoutConf, new ClientConfiguration(), consumerConf);
    mockCollector = new MockSpoutOutputCollector();
    SpoutOutputCollector collector = new SpoutOutputCollector(mockCollector);
    TopologyContext context = mock(TopologyContext.class);
    when(context.getThisComponentId()).thenReturn("test-spout-" + methodName);
    when(context.getThisTaskId()).thenReturn(0);
    spout.open(Maps.newHashMap(), context, collector);
    producer = pulsarClient.createProducer(topic);
}
Example 9: testSharedConsumer

import org.apache.storm.spout.SpoutOutputCollector; // import the required package/class

@Test
public void testSharedConsumer() throws Exception {
    PersistentTopicStats topicStats = admin.persistentTopics().getStats(topic);
    Assert.assertEquals(topicStats.subscriptions.get(subscriptionName).consumers.size(), 1);
    PulsarSpout otherSpout = new PulsarSpout(pulsarSpoutConf, new ClientConfiguration(), consumerConf);
    MockSpoutOutputCollector otherMockCollector = new MockSpoutOutputCollector();
    SpoutOutputCollector collector = new SpoutOutputCollector(otherMockCollector);
    TopologyContext context = mock(TopologyContext.class);
    when(context.getThisComponentId()).thenReturn("test-spout-" + methodName);
    when(context.getThisTaskId()).thenReturn(1);
    otherSpout.open(Maps.newHashMap(), context, collector);
    topicStats = admin.persistentTopics().getStats(topic);
    Assert.assertEquals(topicStats.subscriptions.get(subscriptionName).consumers.size(), 1);
    otherSpout.close();
    topicStats = admin.persistentTopics().getStats(topic);
    Assert.assertEquals(topicStats.subscriptions.get(subscriptionName).consumers.size(), 1);
}
Example 10: testNoSharedConsumer

import org.apache.storm.spout.SpoutOutputCollector; // import the required package/class

@Test
public void testNoSharedConsumer() throws Exception {
    PersistentTopicStats topicStats = admin.persistentTopics().getStats(topic);
    Assert.assertEquals(topicStats.subscriptions.get(subscriptionName).consumers.size(), 1);
    pulsarSpoutConf.setSharedConsumerEnabled(false);
    PulsarSpout otherSpout = new PulsarSpout(pulsarSpoutConf, new ClientConfiguration(), consumerConf);
    MockSpoutOutputCollector otherMockCollector = new MockSpoutOutputCollector();
    SpoutOutputCollector collector = new SpoutOutputCollector(otherMockCollector);
    TopologyContext context = mock(TopologyContext.class);
    when(context.getThisComponentId()).thenReturn("test-spout-" + methodName);
    when(context.getThisTaskId()).thenReturn(1);
    otherSpout.open(Maps.newHashMap(), context, collector);
    topicStats = admin.persistentTopics().getStats(topic);
    Assert.assertEquals(topicStats.subscriptions.get(subscriptionName).consumers.size(), 2);
    otherSpout.close();
    topicStats = admin.persistentTopics().getStats(topic);
    Assert.assertEquals(topicStats.subscriptions.get(subscriptionName).consumers.size(), 1);
}
Example 11: testFailedConsumer

import org.apache.storm.spout.SpoutOutputCollector; // import the required package/class

@Test
public void testFailedConsumer() throws Exception {
    PulsarSpoutConfiguration pulsarSpoutConf = new PulsarSpoutConfiguration();
    pulsarSpoutConf.setServiceUrl(serviceUrl);
    pulsarSpoutConf.setTopic("persistent://invalidTopic");
    pulsarSpoutConf.setSubscriptionName(subscriptionName);
    pulsarSpoutConf.setMessageToValuesMapper(messageToValuesMapper);
    pulsarSpoutConf.setFailedRetriesTimeout(1, TimeUnit.SECONDS);
    pulsarSpoutConf.setMaxFailedRetries(2);
    pulsarSpoutConf.setSharedConsumerEnabled(false);
    pulsarSpoutConf.setMetricsTimeIntervalInSecs(60);
    ConsumerConfiguration consumerConf = new ConsumerConfiguration();
    consumerConf.setSubscriptionType(SubscriptionType.Shared);
    PulsarSpout spout = new PulsarSpout(pulsarSpoutConf, new ClientConfiguration(), consumerConf);
    MockSpoutOutputCollector mockCollector = new MockSpoutOutputCollector();
    SpoutOutputCollector collector = new SpoutOutputCollector(mockCollector);
    TopologyContext context = mock(TopologyContext.class);
    when(context.getThisComponentId()).thenReturn("new-test" + methodName);
    when(context.getThisTaskId()).thenReturn(0);
    try {
        spout.open(Maps.newHashMap(), context, collector);
        fail("should have failed as consumer creation failed");
    } catch (IllegalStateException e) {
        // Ok
    }
}
Example 12: DRPCQuerySubscriber

import org.apache.storm.spout.SpoutOutputCollector; // import the required package/class

/**
 * Creates and initializes a Subscriber that reads from the DRPC servers. Intended to be used inside a Storm
 * spout in a Storm topology.
 *
 * @param config The config containing the String function in {@link DRPCConfig#DRPC_FUNCTION}, the Storm configuration
 *               {@link Map} as {@link com.yahoo.bullet.storm.BulletStormConfig#STORM_CONFIG} and the Storm
 *               {@link TopologyContext} as {@link com.yahoo.bullet.storm.BulletStormConfig#STORM_CONTEXT}.
 * @param maxUnCommittedQueries The maximum number of queries that can be read without committing them.
 */
public DRPCQuerySubscriber(BulletConfig config, int maxUnCommittedQueries) {
    super(maxUnCommittedQueries);
    collector = new DRPCOutputCollector();
    emittedIDs = new HashMap<>();
    // Get the Storm Config that has all the relevant cluster settings and properties
    Map stormConfig = config.getRequiredConfigAs(DRPCConfig.STORM_CONFIG, Map.class);
    // Get the TopologyContext
    TopologyContext context = config.getRequiredConfigAs(DRPCConfig.STORM_CONTEXT, TopologyContext.class);
    // Wrap the collector in a SpoutOutputCollector (it just delegates to the underlying DRPCOutputCollector)
    SpoutOutputCollector spoutOutputCollector = new SpoutOutputCollector(collector);
    // Get the DRPC function we should subscribe to
    String function = config.getRequiredConfigAs(DRPCConfig.DRPC_FUNCTION, String.class);
    spout = new DRPCSpout(function);
    spout.open(stormConfig, context, spoutOutputCollector);
}
Example 13: open

import org.apache.storm.spout.SpoutOutputCollector; // import the required package/class

public void open(Map map, TopologyContext topologyContext, SpoutOutputCollector spoutOutputCollector) {
    _collector = spoutOutputCollector;
    Properties props = new Properties();
    props.put("zookeeper.connect", conf.get(OSMIngest.ZOOKEEPERS));
    props.put("group.id", groupId);
    props.put("zookeeper.sync.time.ms", "200");
    props.put("auto.commit.interval.ms", "1000");
    ConsumerConfig consumerConfig = new ConsumerConfig(props);
    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConfig);
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<String, String>>> consumerMap = consumer.createMessageStreams(topicCountMap, new StringDecoder(new VerifiableProperties()), new StringDecoder(new VerifiableProperties()));
    List<KafkaStream<String, String>> streams = consumerMap.get(topic);
    // Exactly one stream was requested for the topic; fail fast here rather than
    // dereferencing a null stream below.
    if (streams.size() != 1) {
        log.error("Streams should be of size 1");
        throw new IllegalStateException("Expected exactly one Kafka stream for topic " + topic + ", got " + streams.size());
    }
    KafkaStream<String, String> stream = streams.get(0);
    kafkaIterator = stream.iterator();
}
Example 14: open

import org.apache.storm.spout.SpoutOutputCollector; // import the required package/class

@SuppressWarnings("rawtypes")
@Override
public void open(final Map conf, final TopologyContext context, final SpoutOutputCollector collector) {
    super.open(conf, context, collector);
    Object configuredPath = conf.get(INPUT_FILE_PATH);
    if (configuredPath != null) {
        this.path = (String) configuredPath;
    }
    try {
        this.reader = new BufferedReader(new FileReader(this.path));
    } catch (final FileNotFoundException e) {
        throw new RuntimeException(e);
    }
}
Example 15: init

import org.apache.storm.spout.SpoutOutputCollector; // import the required package/class

@Before
public void init() {
    mySqlConfig = new MySqlConfig.Builder("testDatabase").build();
    zkBinLogStateConfig = new ZkBinLogStateConfig.Builder("my-spout").build();
    mySqlSpoutConfig = new MySqlSpoutConfig(mySqlConfig, zkBinLogStateConfig);
    mockStormConfig = Mockito.mock(Map.class);
    mockTopologyContext = Mockito.mock(TopologyContext.class);
    mockSpoutOutputCollector = Mockito.mock(SpoutOutputCollector.class);
    internalBuffer = new LinkedBlockingQueue();
    mockMySqlClient = Mockito.mock(MySqlClient.class);
    mockZkClient = Mockito.mock(ZkClient.class);
    mockOpenReplicatorClient = Mockito.mock(OpenReplicatorClient.class);
    mockClientFactory = Mockito.mock(ClientFactory.class);
}
}