This article collects typical usage examples of the Java class org.apache.storm.spout.SchemeAsMultiScheme. If you are unsure what SchemeAsMultiScheme is for or how to use it, the curated examples below should help.
The SchemeAsMultiScheme class belongs to the org.apache.storm.spout package. The 14 code examples that follow demonstrate its use, sorted by popularity.
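Every example below follows the same core pattern: a Scheme that deserializes one Kafka message into a single tuple (typically StringScheme) is wrapped in SchemeAsMultiScheme, which adapts it to the MultiScheme interface that the Kafka spout configuration expects. Here is a minimal sketch of that pattern in isolation; the class name SchemeAsMultiSchemeSketch and the buildSpout helper are illustrative placeholders, not taken from any example below.

import java.util.UUID;
import org.apache.storm.kafka.BrokerHosts;
import org.apache.storm.kafka.KafkaSpout;
import org.apache.storm.kafka.SpoutConfig;
import org.apache.storm.kafka.StringScheme;
import org.apache.storm.kafka.ZkHosts;
import org.apache.storm.spout.SchemeAsMultiScheme;

public class SchemeAsMultiSchemeSketch {
    // StringScheme decodes each raw Kafka message into a single-field ("str")
    // tuple; SchemeAsMultiScheme adapts it to the MultiScheme interface that
    // the spout's config expects, emitting one tuple list per message.
    public static KafkaSpout buildSpout(String zkConnect, String topic) {
        BrokerHosts hosts = new ZkHosts(zkConnect);
        SpoutConfig conf = new SpoutConfig(hosts, topic, "/" + topic, UUID.randomUUID().toString());
        conf.scheme = new SchemeAsMultiScheme(new StringScheme());
        return new KafkaSpout(conf);
    }
}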
Example 1: buildTopology
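Builds a Trident topology that reads strings from Kafka through a transactional spout, splits them into words, counts the words within the supplied window, and prints each windowed count tuple to stdout.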
import org.apache.storm.spout.SchemeAsMultiScheme; // import the required package/class
public static StormTopology buildTopology(WindowsStoreFactory windowStore, WindowConfig windowConfig) throws Exception {
    TridentKafkaConfig config = new TridentKafkaConfig(new ZkHosts("localhost:2181"), "test");
    config.scheme = new SchemeAsMultiScheme(new StringScheme());
    config.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    TransactionalTridentKafkaSpout spout = new TransactionalTridentKafkaSpout(config);
    TridentTopology topology = new TridentTopology();
    Stream stream = topology.newStream("spout1", spout)
            .each(new Fields("str"), new Split(), new Fields("word"))
            .window(windowConfig, windowStore, new Fields("word"), new CountAsAggregator(), new Fields("count"))
            .peek(new Consumer() {
                @Override
                public void accept(TridentTuple tuple) {
                    // Print each windowed count tuple as "[value0,value1,...]"
                    System.out.print("[");
                    for (int index = 0; index < tuple.size(); index++) {
                        System.out.print(tuple.get(index));
                        if (index < (tuple.size() - 1))
                            System.out.print(",");
                    }
                    System.out.println("]");
                }
            });
    return topology.build();
}
Author: PacktPublishing, Project: Practical-Real-time-Processing-and-Analytics, Lines: 26, Source: TridentWindowingInmemoryStoreTopology.java
Example 2: main
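Builds and locally submits a Trident topology that filters out certain phone numbers, extracts per-device byte counts, sums the bytes per phone, keeps the top ten totals with the FirstN assembly, and prints them with Debug.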
import org.apache.storm.spout.SchemeAsMultiScheme; // import the required package/class
public static void main(String[] args) {
    TridentKafkaConfig config = new TridentKafkaConfig(new ZkHosts("localhost:2181"), "storm-trident-diy");
    config.scheme = new SchemeAsMultiScheme(new StringScheme());
    config.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    TransactionalTridentKafkaSpout spout = new TransactionalTridentKafkaSpout(config);
    TridentTopology topology = new TridentTopology();
    topology.newStream("spout", spout)
            .filter(new ExcludePhoneNumber())
            .each(new Fields("str"), new DeviceInfoExtractor(), new Fields("phone", "bytes"))
            .groupBy(new Fields("phone"))
            .aggregate(new Fields("bytes", "phone"), new Sum(), new Fields("sum"))
            .applyAssembly(new FirstN(10, "sum"))
            .each(new Fields("phone", "sum"), new Debug());
    Config config1 = new Config();
    config1.setNumWorkers(3);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("storm-trident-diy", config1, topology.build());
}
Author: PacktPublishing, Project: Practical-Real-time-Processing-and-Analytics, Lines: 21, Source: TridentDIY.java
Example 3: main
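A basic topology that consumes the sensor-data topic via a KafkaSpout and forwards each message to an Elasticsearch bolt, running on a LocalCluster.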
import org.apache.storm.spout.SchemeAsMultiScheme; // import the required package/class
public static void main(String[] args) throws InterruptedException {
    Config config = new Config();
    config.setNumWorkers(3);
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    String zkConnString = "localhost:2181";
    String topicName = "sensor-data";
    BrokerHosts hosts = new ZkHosts(zkConnString);
    SpoutConfig spoutConfig = new SpoutConfig(hosts, topicName, "/" + topicName, UUID.randomUUID().toString());
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
    topologyBuilder.setSpout("spout", kafkaSpout, 1);
    topologyBuilder.setBolt("es-bolt", new ElasticSearchBolt(), 1).shuffleGrouping("spout");
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("storm-es-example", config, topologyBuilder.createTopology());
}
Author: PacktPublishing, Project: Practical-Real-time-Processing-and-Analytics, Lines: 20, Source: SensorTopology.java
Example 4: main
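Reads telecom records from Kafka, parses them in ParseAndUsageBolt, and writes the resulting usage and TDR streams to two separate Cassandra bolts.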
import org.apache.storm.spout.SchemeAsMultiScheme; // import the required package/class
public static void main(String[] args) {
    Config config = new Config();
    config.setNumWorkers(3);
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    String zkConnString = "localhost:2181";
    String topicName = "storm-diy";
    BrokerHosts hosts = new ZkHosts(zkConnString);
    SpoutConfig spoutConfig = new SpoutConfig(hosts, topicName, "/" + topicName, UUID.randomUUID().toString());
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
    topologyBuilder.setSpout("spout", kafkaSpout, 1);
    topologyBuilder.setBolt("parser", new ParseAndUsageBolt(), 1).shuffleGrouping("spout");
    topologyBuilder.setBolt("usageCassandra", new UsageCassandraBolt("localhost", "usage"), 1).shuffleGrouping("parser", "usagestream");
    topologyBuilder.setBolt("tdrCassandra", new TDRCassandraBolt("localhost", "tdr"), 1).shuffleGrouping("parser", "tdrstream");
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("storm-diy", config, topologyBuilder.createTopology());
}
Author: PacktPublishing, Project: Practical-Real-time-Processing-and-Analytics, Lines: 22, Source: TelecomProcessorTopology.java
Example 5: buildTopology
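Builds a fully configuration-driven topology: the topic, consumer group, ZooKeeper servers, and the parallelism and task counts of the spout and bolts are all read from a Configuration object.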
import org.apache.storm.spout.SchemeAsMultiScheme; // import the required package/class
private static TopologyBuilder buildTopology() throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    String topicName = Configuration.getConfig().getString("rtc.mq.spout.topic");
    String groupName = Configuration.getConfig().getString("rtc.mq.spout.group");
    BrokerHosts hosts = new ZkHosts(Configuration.getConfig().getString("rtc.zk.hosts"));
    SpoutConfig spoutConfig = new SpoutConfig(hosts, topicName, "/consumers", groupName);
    spoutConfig.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    spoutConfig.zkServers = Arrays.asList(Configuration.getConfig().getString("rtc.storm.zkServers").split(","));
    spoutConfig.zkPort = Configuration.getConfig().getInt("rtc.storm.zkPort");
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
    builder.setSpout("MQSpout", kafkaSpout, Configuration.getConfig().getInt("rtc.storm.spout.parallelismHint"))
           .setNumTasks(Configuration.getConfig().getInt("rtc.storm.spout.task"));
    builder.setBolt("ExtractBolt", new ExtractBolt(), Configuration.getConfig().getInt("rtc.storm.extract.bolt.parallelismHint"))
           .setNumTasks(Configuration.getConfig().getInt("rtc.storm.extract.bolt.task"))
           .shuffleGrouping("MQSpout");
    builder.setBolt("Statistic", new StatisticBolt(), Configuration.getConfig().getInt("rtc.storm.statistic.bolt.parallelismHint"))
           .setNumTasks(Configuration.getConfig().getInt("rtc.storm.statistic.bolt.task"))
           .fieldsGrouping("ExtractBolt", new Fields("hashKeys"));
    // builder.setBolt("Alarm", new AlarmBolt(), Configuration.getConfig().getInt("rtc.storm.alarm.bolt.parallelismHint")).setNumTasks(Configuration.getConfig().getInt("rtc.storm.alarm.bolt.task")).fieldsGrouping("Statistic", new Fields("EventName"));
    return builder;
}
Example 6: buildSpout
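Exposes a KafkaSpout as a Spring bean; the accompanying comment explains how the spout tracks per-partition offsets in ZooKeeper.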
import org.apache.storm.spout.SchemeAsMultiScheme; // import the required package/class
@Bean("kafkaSpout")
public KafkaSpout buildSpout() {
    super.setId("kafkaSpout");
    /**
     * new ZkHosts(brokerZkStr): brokerZkStr is the comma-separated ZooKeeper ensemble of the Kafka cluster
     * topic: the topic this spout subscribes to, i.e. where messages are read from
     * The spout combines the config's zkRoot and id to build, for each Kafka partition,
     * the ZooKeeper path under which that partition's offset is stored, e.g. /zkRoot/id/partitionId.
     * {@link PartitionManager#committedPath()}
     * zkRoot: the ZooKeeper root path under which offsets are saved
     * id: set a fixed id if the spout should resume from the same offsets after a restart
     * Note: newer Kafka versions no longer store offsets in ZooKeeper, and writing
     * offsets to ZooKeeper is discouraged as inefficient.
     */
    SpoutConfig spoutConf = new SpoutConfig(new ZkHosts(brokerZkStr), topic, zkRoot, "kafkaSpout");
    spoutConf.scheme = new SchemeAsMultiScheme(new StringScheme());
    return new KafkaSpout(spoutConf);
}
Example 7: main
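A self-contained consumer that wires a KafkaSpout to a logger bolt and submits the topology to a LocalCluster, falling back to hard-coded defaults for local testing.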
import org.apache.storm.spout.SchemeAsMultiScheme; // import the required package/class
public static void main(String[] args) {
    if (args.length == 0) {
        // No command-line input: fall back to local test defaults
        args = new String[] {"localhost:2181", "storm-test-topic", "/brokers", "storm-consumer"};
    }
    // Log program usage and exit if there are fewer than 4 command-line arguments
    if (args.length < 4) {
        LOG.fatal("Incorrect number of arguments. Required arguments: <zk-hosts> <kafka-topic> <zk-path> <clientid>");
        System.exit(1);
    }
    // Build the spout configuration from the command-line parameters
    final BrokerHosts zkrHosts = new ZkHosts(args[0]);
    final String kafkaTopic = args[1];
    final String zkRoot = args[2];
    final String clientId = args[3];
    final SpoutConfig spoutConfig = new SpoutConfig(zkrHosts, kafkaTopic, zkRoot, clientId);
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    // Build a topology that consumes messages from Kafka and prints them to the console
    final TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout("kafka-spout", new KafkaSpout(spoutConfig));
    // Route the spout's output to a logger bolt that logs each consumed message
    topologyBuilder.setBolt("kafka-message", new LoggerBolt()).globalGrouping("kafka-spout");
    final LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("kafka-topology", new Config(), topologyBuilder.createTopology());
}
Example 8: getActionTopology
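Builds an action topology with separate trigger and status Kafka spouts feeding a chain of scheduling, executing, provisioning, and feed-calling bolts, the chain being grouped on roadMapId.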
import org.apache.storm.spout.SchemeAsMultiScheme; // import the required package/class
protected StormTopology getActionTopology(String zkhost) {
    BrokerHosts hosts = new ZkHosts(zkhost);
    TopologyBuilder builder = new TopologyBuilder();
    // trigger spout settings
    SpoutConfig triggerConfig = new SpoutConfig(hosts, TOPICS[3], "/" + TOPICS[3], UUID.randomUUID().toString());
    triggerConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    triggerConfig.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    triggerConfig.ignoreZkOffsets = true;
    // status spout settings
    SpoutConfig statusConfig = new SpoutConfig(hosts, TOPICS[4], "/" + TOPICS[4], UUID.randomUUID().toString());
    statusConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    statusConfig.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    statusConfig.ignoreZkOffsets = true;
    // Set spouts
    builder.setSpout("trigger-spout", new KafkaSpout(triggerConfig));
    builder.setSpout("status-spout", new KafkaSpout(statusConfig));
    // Set bolts
    builder.setBolt("scheduling-bolt", new SchedulingBolt())
           .shuffleGrouping("trigger-spout");
    builder.setBolt("status-bolt", new StatusBolt(), 4)
           .shuffleGrouping("status-spout");
    builder.setBolt("execute-code-bolt", new ExecutingBolt()).fieldsGrouping("scheduling-bolt", new Fields("roadMapId"));
    builder.setBolt("provisioning-bolt", new ProvisioningBolt()).fieldsGrouping("execute-code-bolt", new Fields("roadMapId"));
    builder.setBolt("calling-feed-bolt", new CallingFeedBolt()).fieldsGrouping("provisioning-bolt", new Fields("roadMapId"));
    return builder.createTopology();
}
Example 9: getActionTopology
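Identical to Example 8 except that the scheduling and status bolts subscribe with allGrouping instead of shuffleGrouping, so every task of those bolts receives every tuple.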
import org.apache.storm.spout.SchemeAsMultiScheme; // import the required package/class
protected StormTopology getActionTopology(String zkhost) {
    BrokerHosts hosts = new ZkHosts(zkhost);
    TopologyBuilder builder = new TopologyBuilder();
    // trigger spout settings
    SpoutConfig triggerConfig = new SpoutConfig(hosts, TOPICS[3], "/" + TOPICS[3], UUID.randomUUID().toString());
    triggerConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    triggerConfig.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    triggerConfig.ignoreZkOffsets = true;
    // status spout settings
    SpoutConfig statusConfig = new SpoutConfig(hosts, TOPICS[4], "/" + TOPICS[4], UUID.randomUUID().toString());
    statusConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    statusConfig.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    statusConfig.ignoreZkOffsets = true;
    // Set spouts
    builder.setSpout("trigger-spout", new KafkaSpout(triggerConfig));
    builder.setSpout("status-spout", new KafkaSpout(statusConfig));
    // Set bolts
    builder.setBolt("scheduling-bolt", new SchedulingBolt())
           .allGrouping("trigger-spout");
    builder.setBolt("status-bolt", new StatusBolt(), 4)
           .allGrouping("status-spout");
    builder.setBolt("execute-code-bolt", new ExecutingBolt()).fieldsGrouping("scheduling-bolt", new Fields("roadMapId"));
    builder.setBolt("provisioning-bolt", new ProvisioningBolt()).fieldsGrouping("execute-code-bolt", new Fields("roadMapId"));
    builder.setBolt("calling-feed-bolt", new CallingFeedBolt()).fieldsGrouping("provisioning-bolt", new Fields("roadMapId"));
    return builder.createTopology();
}
Example 10: constructKafkaSpoutConf
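Constructs only the SpoutConfig; the property-driven values are commented out in favor of hard-coded ones.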
import org.apache.storm.spout.SchemeAsMultiScheme; // import the required package/class
private SpoutConfig constructKafkaSpoutConf() {
    // BrokerHosts hosts = new ZkHosts(topologyConfig.getProperty("kafka.zookeeper.host.port"));
    BrokerHosts hosts = new ZkHosts("localhost:2181");
    /*
    String topic = topologyConfig.getProperty("kafka.topic");
    String zkRoot = topologyConfig.getProperty("kafka.zkRoot");
    String consumerGroupId = topologyConfig.getProperty("kafka.consumer.group.id");
    */
    String topic = "addresses";
    String zkRoot = "";
    String consumerGroupId = "group1";
    SpoutConfig spoutConfig = new SpoutConfig(hosts, topic, zkRoot, consumerGroupId);
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    return spoutConfig;
}
Example 11: getTopology
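A benchmark word-count Trident topology whose spout, split, and count parallelism are read from the benchmark configuration.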
import org.apache.storm.spout.SchemeAsMultiScheme; // import the required package/class
@Override
public StormTopology getTopology(Config config) {
    final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
    final int splitNum = BenchmarkUtils.getInt(config, SPLIT_NUM, DEFAULT_SPLIT_BOLT_NUM);
    final int countNum = BenchmarkUtils.getInt(config, COUNT_NUM, DEFAULT_COUNT_BOLT_NUM);
    spout = new TransactionalTridentKafkaSpout(
            KafkaUtils.getTridentKafkaConfig(config, new SchemeAsMultiScheme(new StringScheme())));
    TridentTopology trident = new TridentTopology();
    trident.newStream("wordcount", spout).name("sentence").parallelismHint(spoutNum).shuffle()
           .each(new Fields(StringScheme.STRING_SCHEME_KEY), new WordSplit(), new Fields("word"))
           .parallelismHint(splitNum)
           .groupBy(new Fields("word"))
           .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
           .parallelismHint(countNum);
    /* trident.newStream("wordcount", spout)
           .each(new Fields(StringScheme.STRING_SCHEME_KEY), new WordSplit(), new Fields("word"))
           .groupBy(new Fields("word"))
           .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count")); */
    return trident.build();
}
Example 12: getTopology
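A benchmark page-view topology that counts views per URL, with spout, view-bolt, and count-bolt parallelism taken from the benchmark configuration.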
import org.apache.storm.spout.SchemeAsMultiScheme; // import the required package/class
@Override
public StormTopology getTopology(Config config) {
    final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
    final int viewBoltNum = BenchmarkUtils.getInt(config, VIEW_NUM, DEFAULT_VIEW_BOLT_NUM);
    final int cntBoltNum = BenchmarkUtils.getInt(config, COUNT_NUM, DEFAULT_COUNT_BOLT_NUM);
    spout = new KafkaSpout(KafkaUtils.getSpoutConfig(
            config, new SchemeAsMultiScheme(new StringScheme())));
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SPOUT_ID, spout, spoutNum);
    builder.setBolt(VIEW_ID, new PageViewBolt(Item.URL, Item.ONE), viewBoltNum)
           .localOrShuffleGrouping(SPOUT_ID);
    builder.setBolt(COUNT_ID, new WordCount.Count(), cntBoltNum)
           .fieldsGrouping(VIEW_ID, new Fields(Item.URL.toString()));
    return builder.createTopology();
}
Example 13: getTopology
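A benchmark topology that extracts page-view statuses and filters them through a FilterBolt keyed on HTTP status 404, grouped on the status field.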
import org.apache.storm.spout.SchemeAsMultiScheme; // import the required package/class
@Override
public StormTopology getTopology(Config config) {
    final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
    final int pvBoltNum = BenchmarkUtils.getInt(config, VIEW_NUM, DEFAULT_PV_BOLT_NUM);
    final int filterBoltNum = BenchmarkUtils.getInt(config, FILTER_NUM, DEFAULT_FILTER_BOLT_NUM);
    spout = new KafkaSpout(KafkaUtils.getSpoutConfig(
            config, new SchemeAsMultiScheme(new StringScheme())));
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SPOUT_ID, spout, spoutNum);
    builder.setBolt(VIEW_ID, new PageViewBolt(Item.STATUS, Item.ALL), pvBoltNum)
           .localOrShuffleGrouping(SPOUT_ID);
    builder.setBolt(FILTER_ID, new FilterBolt<Integer>(404), filterBoltNum)
           .fieldsGrouping(VIEW_ID, new Fields(Item.STATUS.toString()));
    return builder.createTopology();
}
Example 14: getTopology
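A benchmark topology that matches incoming sentences against a configurable pattern and counts the matching sentences.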
import org.apache.storm.spout.SchemeAsMultiScheme; // import the required package/class
@Override
public StormTopology getTopology(Config config) {
    final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
    final int matBoltNum = BenchmarkUtils.getInt(config, FM_NUM, DEFAULT_MAT_BOLT_NUM);
    final int cntBoltNum = BenchmarkUtils.getInt(config, CM_NUM, DEFAULT_CNT_BOLT_NUM);
    final String ptnString = (String) Utils.get(config, PATTERN_STRING, DEFAULT_PATTERN_STR);
    spout = new KafkaSpout(KafkaUtils.getSpoutConfig(config, new SchemeAsMultiScheme(new StringScheme())));
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SPOUT_ID, spout, spoutNum);
    builder.setBolt(FM_ID, new FindMatchingSentence(ptnString), matBoltNum)
           .localOrShuffleGrouping(SPOUT_ID);
    builder.setBolt(CM_ID, new CountMatchingSentence(), cntBoltNum)
           .fieldsGrouping(FM_ID, new Fields(FindMatchingSentence.FIELDS));
    return builder.createTopology();
}