本文整理汇总了Java中backtype.storm.spout.SchemeAsMultiScheme类的典型用法代码示例。如果您正苦于以下问题:Java SchemeAsMultiScheme类的具体用法?Java SchemeAsMultiScheme怎么用?Java SchemeAsMultiScheme使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
SchemeAsMultiScheme类属于backtype.storm.spout包,在下文中一共展示了SchemeAsMultiScheme类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: constructKafkaSpoutConf
import backtype.storm.spout.SchemeAsMultiScheme; //导入依赖的package包/类
/**
 * Builds the SpoutConfig for the Kafka spout used by this topology.
 *
 * <p>Connection details are currently hard-coded (localhost Zookeeper, topic
 * "addresses", consumer group "group1"); the property-driven variant is kept
 * below as commented-out reference.
 *
 * @return a SpoutConfig with a String deserialization scheme attached
 */
private SpoutConfig constructKafkaSpoutConf() {
    // Property-driven alternative, kept for reference:
    // BrokerHosts hosts = new ZkHosts(topologyConfig.getProperty("kafka.zookeeper.host.port"));
    BrokerHosts brokerHosts = new ZkHosts("localhost:2181");
    /*
    String topic = topologyConfig.getProperty("kafka.topic");
    String zkRoot = topologyConfig.getProperty("kafka.zkRoot");
    String consumerGroupId = topologyConfig.getProperty("kafka.consumer.group.id");
    */
    String kafkaTopic = "addresses";
    String zookeeperRoot = "";
    String groupId = "group1";
    SpoutConfig cfg = new SpoutConfig(brokerHosts, kafkaTopic, zookeeperRoot, groupId);
    // Decode each Kafka message as a single UTF-8 string tuple.
    cfg.scheme = new SchemeAsMultiScheme(new StringScheme());
    return cfg;
}
示例2: stringStreamFromKafkaWithTime
import backtype.storm.spout.SchemeAsMultiScheme; //导入依赖的package包/类
/**
 * Creates a Kafka-backed stream of strings, each wrapped with an ingestion
 * timestamp by the downstream "addTime" bolt.
 *
 * <p>The spout reads {@code topics} via the Zookeeper ensemble in
 * {@code zkConStr}, using a random UUID as consumer id (offsets are not
 * shared across runs). {@code kafkaServers}, {@code group} and
 * {@code componentId} are accepted for interface compatibility but unused here.
 *
 * @param offset when it ends with "smallest", consumption starts from the
 *               earliest available offset; otherwise from the latest
 * @return an operator rooted at the "addTime" bolt
 */
@Override
public WorkloadOperator<WithTime<String>> stringStreamFromKafkaWithTime(String zkConStr,
                                                                        String kafkaServers,
                                                                        String group,
                                                                        String topics,
                                                                        String offset,
                                                                        String componentId,
                                                                        int parallelism) {
    conf.setNumWorkers(parallelism);
    SpoutConfig kafkaConf =
            new SpoutConfig(new ZkHosts(zkConStr), topics, "/" + topics, UUID.randomUUID().toString());
    kafkaConf.scheme = new SchemeAsMultiScheme(new StringScheme());
    // Latest offsets by default; oldest when "smallest" was requested.
    kafkaConf.startOffsetTime = offset.endsWith("smallest")
            ? kafka.api.OffsetRequest.EarliestTime()
            : kafka.api.OffsetRequest.LatestTime();
    kafkaConf.fetchSizeBytes = 1024;
    kafkaConf.bufferSizeBytes = 1024;
    topologyBuilder.setSpout("spout", new KafkaSpout(kafkaConf), parallelism);
    topologyBuilder.setBolt("addTime", new WithTimeBolt<String>(), parallelism).localOrShuffleGrouping("spout");
    return new StormOperator<>(topologyBuilder, "addTime", parallelism);
}
示例3: pointStreamFromKafka
import backtype.storm.spout.SchemeAsMultiScheme; //导入依赖的package包/类
/**
 * Creates a Kafka-backed stream of {@code Point} objects: a Kafka spout feeds
 * raw strings into an {@code ExtractPointBolt} that performs the conversion.
 *
 * <p>NOTE(review): {@code kafkaServers}, {@code group} and {@code componentId}
 * are never read in this implementation — presumably kept for interface
 * compatibility with other engines; confirm against the interface contract.
 */
@Override
public WorkloadOperator<Point> pointStreamFromKafka(String zkConStr, String kafkaServers, String group, String topics, String offset, String componentId, int parallelism) {
conf.setNumWorkers(parallelism);
BrokerHosts hosts = new ZkHosts(zkConStr);
// Random UUID as consumer id: each run gets fresh offsets in Zookeeper.
SpoutConfig spoutConfig = new SpoutConfig(hosts, topics, "/" + topics, UUID.randomUUID().toString());
spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
// Default to the newest messages ...
spoutConfig.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
// ... unless the caller asked for the oldest ("smallest").
if (offset.endsWith("smallest")) {
spoutConfig.startOffsetTime = kafka.api.OffsetRequest.EarliestTime();
}
// Small fetch/buffer sizes (1 KiB) — benchmark-oriented settings.
spoutConfig.fetchSizeBytes = 1024;
spoutConfig.bufferSizeBytes = 1024;
// spoutConfig.ignoreZkOffsets = true;
topologyBuilder.setSpout("spout", new KafkaSpout(spoutConfig), parallelism);
topologyBuilder.setBolt("extractPoint", new ExtractPointBolt(), parallelism).localOrShuffleGrouping("spout");
// The returned operator is rooted at the "extractPoint" bolt.
return new StormOperator<>(topologyBuilder, "extractPoint", parallelism);
}
示例4: stringStreamFromKafka
import backtype.storm.spout.SchemeAsMultiScheme; //导入依赖的package包/类
/**
 * Creates a plain string stream from Kafka, always starting at the latest
 * available offsets.
 *
 * <p>{@code kafkaServers}, {@code group} and {@code offset} are accepted for
 * interface compatibility but not used by this implementation.
 *
 * @param componentId id under which the spout is registered; also the root of
 *                    the returned operator
 * @return an operator rooted at the Kafka spout
 */
@Override
public WorkloadOperator<String> stringStreamFromKafka(String zkConStr,
                                                      String kafkaServers,
                                                      String group,
                                                      String topics,
                                                      String offset,
                                                      String componentId,
                                                      int parallelism) {
    conf.setNumWorkers(parallelism);
    SpoutConfig kafkaConf =
            new SpoutConfig(new ZkHosts(zkConStr), topics, "/" + topics, UUID.randomUUID().toString());
    kafkaConf.scheme = new SchemeAsMultiScheme(new StringScheme());
    kafkaConf.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    // Benchmark-sized fetch and buffer (1 KiB each).
    kafkaConf.fetchSizeBytes = 1024;
    kafkaConf.bufferSizeBytes = 1024;
    topologyBuilder.setSpout(componentId, new KafkaSpout(kafkaConf), parallelism);
    return new StormOperator<>(topologyBuilder, componentId, parallelism);
}
示例5: main
import backtype.storm.spout.SchemeAsMultiScheme; //导入依赖的package包/类
/**
 * Local demo entry point: windowed word count over the Kafka topic "WordCount".
 *
 * <p>Topology: KafkaSpout -> split -> counter (x3) -> aggregator, where the
 * aggregator also receives the counter's "tick" stream via allGrouping.
 *
 * <p>NOTE(review): the LocalCluster is never shut down, so this process runs
 * until killed — confirm that is intended for this example.
 */
public static void main(String[] args) throws WorkloadException {
TopologyBuilder builder = new TopologyBuilder();
BrokerHosts hosts = new ZkHosts("localhost:2181");
// Random UUID consumer id: offsets are not reused between runs.
SpoutConfig spoutConfig = new SpoutConfig(hosts, "WordCount", "/" + "WordCount", UUID.randomUUID().toString());
spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
// Always start from the configured offset, ignoring offsets stored in Zookeeper.
spoutConfig.ignoreZkOffsets = true;
builder.setSpout("spout", new KafkaSpout(spoutConfig));
builder.setBolt("split", new SplitSentence()).shuffleGrouping("spout");
builder.setBolt("counter", new CounterBolt(), 3).shuffleGrouping("split");
// Aggregator consumes both the counter's default stream (by word) and its tick stream.
builder.setBolt("aggregator", new AggregatorBolt(), 1)
.fieldsGrouping("counter", Utils.DEFAULT_STREAM_ID, new Fields("word"))
.allGrouping("counter", "tick");
Config conf = new Config();
conf.setDebug(true);
conf.setNumWorkers(3);
LocalCluster cluster = new LocalCluster();
cluster.submitTopology("kafka-spout", conf, builder.createTopology());
}
示例6: main
import backtype.storm.spout.SchemeAsMultiScheme; //导入依赖的package包/类
/**
 * Local demo entry point: simple word count over the Kafka topic "WordCount".
 *
 * <p>Topology: KafkaSpout -> split -> counter (x3), with the counter grouped by
 * the "wordCountPair" field so equal words land on the same task.
 *
 * <p>NOTE(review): the LocalCluster is never shut down, so this process runs
 * until killed — confirm that is intended for this example.
 */
public static void main(String[] args) throws WorkloadException {
TopologyBuilder builder = new TopologyBuilder();
BrokerHosts hosts = new ZkHosts("localhost:2181");
// Random UUID consumer id: offsets are not reused between runs.
SpoutConfig spoutConfig = new SpoutConfig(hosts, "WordCount", "/" + "WordCount", UUID.randomUUID().toString());
spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
// Always start from the configured offset, ignoring offsets stored in Zookeeper.
spoutConfig.ignoreZkOffsets = true;
builder.setSpout("spout", new KafkaSpout(spoutConfig));
builder.setBolt("split", new SplitSentence()).shuffleGrouping("spout");
builder.setBolt("counter", new CounterBolt(), 3).fieldsGrouping("split", new Fields("wordCountPair"));
Config conf = new Config();
conf.setDebug(true);
conf.setNumWorkers(3);
LocalCluster cluster = new LocalCluster();
cluster.submitTopology("kafka-spout", conf, builder.createTopology());
}
示例7: configureKafkaSpout
import backtype.storm.spout.SchemeAsMultiScheme; //导入依赖的package包/类
public static void configureKafkaSpout(TopologyBuilder builder, String zkHostString, String kafkaTopic,
String kafkaStartOffset, int parallelismHint, String spoutName,
String spoutScheme) {
LOG.info("KAFKASPOUT: Configuring the KafkaSpout");
// Configure the KafkaSpout
SpoutConfig spoutConfig = new SpoutConfig(new ZkHosts(zkHostString),
kafkaTopic, // Kafka topic to read from
"/" + kafkaTopic, // Root path in Zookeeper for the spout to store consumer offsets
UUID.randomUUID().toString()); // ID for storing consumer offsets in Zookeeper
try {
spoutConfig.scheme = new SchemeAsMultiScheme(getSchemeFromClassName(spoutScheme));
} catch(Exception e) {
LOG.error("ERROR: Unable to create instance of scheme: " + spoutScheme);
e.printStackTrace();
}
setKafkaOffset(spoutConfig, kafkaStartOffset);
KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
// Add the spout and bolt to the topology
builder.setSpout(spoutName, kafkaSpout, parallelismHint);
}
示例8: buildTopology
import backtype.storm.spout.SchemeAsMultiScheme; //导入依赖的package包/类
public StormTopology buildTopology(Properties properties) {
// Load properties for the storm topology
String kafkaTopic = properties.getProperty("kafka.topic");
SpoutConfig kafkaConfig = new SpoutConfig(kafkaBrokerHosts, kafkaTopic, "", "storm");
kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
TopologyBuilder builder = new TopologyBuilder();
// Specific audit logs analysis bolts
AuditLoginsCounterBolt loginCounterbolt = new AuditLoginsCounterBolt();
AuditParserBolt auditParserBolt = new AuditParserBolt();
// Elastic search bolt
TupleMapper tupleMapper = new DefaultTupleMapper();
ElasticSearchBolt elasticSearchBolt = new ElasticSearchBolt(tupleMapper);
// Topology scheme: KafkaSpout -> auditParserBolt -> loginCounterBolt -> elasticSearchBolt
builder.setSpout("KafkaSpout", new KafkaSpout(kafkaConfig), 1);
builder.setBolt("ParseBolt", auditParserBolt, 1).shuffleGrouping("KafkaSpout");
builder.setBolt("CountBolt", loginCounterbolt, 1).shuffleGrouping("ParseBolt");
builder.setBolt("ElasticSearchBolt", elasticSearchBolt, 1)
.fieldsGrouping("CountBolt", new Fields("id", "index", "type", "document"));
return builder.createTopology();
}
示例9: getTopology
import backtype.storm.spout.SchemeAsMultiScheme; //导入依赖的package包/类
@Override
public StormTopology getTopology(Config config) {
final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
final int splitNum = BenchmarkUtils.getInt(config, SPLIT_NUM, DEFAULT_SPLIT_BOLT_NUM);
final int countNum = BenchmarkUtils.getInt(config, COUNT_NUM, DEFAULT_COUNT_BOLT_NUM);
spout = new TransactionalTridentKafkaSpout(
KafkaUtils.getTridentKafkaConfig(config, new SchemeAsMultiScheme(new StringScheme())));
TridentTopology trident = new TridentTopology();
trident.newStream("wordcount", spout).name("sentence").parallelismHint(spoutNum).shuffle()
.each(new Fields(StringScheme.STRING_SCHEME_KEY), new WordSplit(), new Fields("word"))
.parallelismHint(splitNum)
.groupBy(new Fields("word"))
.persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
.parallelismHint(countNum);
/* trident.newStream("wordcount", spout)
.each(new Fields(StringScheme.STRING_SCHEME_KEY), new WordSplit(), new Fields("word"))
.groupBy(new Fields("word"))
.persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"));*/
return trident.build();
}
示例10: getTopology
import backtype.storm.spout.SchemeAsMultiScheme; //导入依赖的package包/类
@Override
public StormTopology getTopology(Config config) {
final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
final int viewBoltNum = BenchmarkUtils.getInt(config, VIEW_NUM, DEFAULT_VIEW_BOLT_NUM);
final int cntBoltNum = BenchmarkUtils.getInt(config, COUNT_NUM, DEFAULT_COUNT_BOLT_NUM);
spout = new KafkaSpout(KafkaUtils.getSpoutConfig(
config, new SchemeAsMultiScheme(new StringScheme())));
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout(SPOUT_ID, spout, spoutNum);
builder.setBolt(VIEW_ID, new PageViewBolt(Item.URL, Item.ONE), viewBoltNum)
.localOrShuffleGrouping(SPOUT_ID);
builder.setBolt(COUNT_ID, new WordCount.Count(), cntBoltNum)
.fieldsGrouping(VIEW_ID, new Fields(Item.URL.toString()));
return builder.createTopology();
}
示例11: getTopology
import backtype.storm.spout.SchemeAsMultiScheme; //导入依赖的package包/类
@Override
public StormTopology getTopology(Config config) {
final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
final int pvBoltNum = BenchmarkUtils.getInt(config, VIEW_NUM, DEFAULT_PV_BOLT_NUM);
final int filterBoltNum = BenchmarkUtils.getInt(config, FILTER_NUM, DEFAULT_FILTER_BOLT_NUM);
spout = new KafkaSpout(KafkaUtils.getSpoutConfig(
config, new SchemeAsMultiScheme(new StringScheme())));
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout(SPOUT_ID, spout, spoutNum);
builder.setBolt(VIEW_ID, new PageViewBolt(Item.STATUS, Item.ALL), pvBoltNum)
.localOrShuffleGrouping(SPOUT_ID);
builder.setBolt(FILTER_ID, new FilterBolt<Integer>(404), filterBoltNum)
.fieldsGrouping(VIEW_ID, new Fields(Item.STATUS.toString()));
return builder.createTopology();
}
示例12: getTopology
import backtype.storm.spout.SchemeAsMultiScheme; //导入依赖的package包/类
@Override
public StormTopology getTopology(Config config) {
final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
final int matBoltNum = BenchmarkUtils.getInt(config, FM_NUM, DEFAULT_MAT_BOLT_NUM);
final int cntBoltNum = BenchmarkUtils.getInt(config, CM_NUM, DEFAULT_CNT_BOLT_NUM);
final String ptnString = (String) Utils.get(config, PATTERN_STRING, DEFAULT_PATTERN_STR);
spout = new KafkaSpout(KafkaUtils.getSpoutConfig(config, new SchemeAsMultiScheme(new StringScheme())));
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout(SPOUT_ID, spout, spoutNum);
builder.setBolt(FM_ID, new FindMatchingSentence(ptnString), matBoltNum)
.localOrShuffleGrouping(SPOUT_ID);
builder.setBolt(CM_ID, new CountMatchingSentence(), cntBoltNum)
.fieldsGrouping(FM_ID, new Fields(FindMatchingSentence.FIELDS));
return builder.createTopology();
}
示例13: getTopology
import backtype.storm.spout.SchemeAsMultiScheme; //导入依赖的package包/类
@Override
public StormTopology getTopology(Config config) {
final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
final int pvBoltNum = BenchmarkUtils.getInt(config, VIEW_NUM, DEFAULT_PV_BOLT_NUM);
final int uvBoltNum = BenchmarkUtils.getInt(config, UNIQUER_NUM, DEFAULT_UV_BOLT_NUM);
final int winLen = BenchmarkUtils.getInt(config, WINDOW_LENGTH, DEFAULT_WINDOW_LENGTH_IN_SEC);
final int emitFreq = BenchmarkUtils.getInt(config, EMIT_FREQ, DEFAULT_EMIT_FREQ_IN_SEC);
spout = new KafkaSpout(KafkaUtils.getSpoutConfig(
config, new SchemeAsMultiScheme(new StringScheme())));
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout(SPOUT_ID, spout, spoutNum);
builder.setBolt(VIEW_ID, new PageViewBolt(Item.URL, Item.USER), pvBoltNum)
.localOrShuffleGrouping(SPOUT_ID);
builder.setBolt(UNIQUER_ID, new UniqueVisitorBolt(winLen, emitFreq), uvBoltNum)
.fieldsGrouping(VIEW_ID, new Fields(Item.URL.toString()));
return builder.createTopology();
}
示例14: configureKafkaSpout
import backtype.storm.spout.SchemeAsMultiScheme; //导入依赖的package包/类
public static void configureKafkaSpout(TopologyBuilder builder, String zkHostString, String kafkaTopic, String kafkaStartOffset) {
// Configure the KafkaSpout
SpoutConfig spoutConfig = new SpoutConfig(new ZkHosts(zkHostString),
kafkaTopic, // Kafka topic to read from
"/" + kafkaTopic, // Root path in Zookeeper for the spout to store consumer offsets
UUID.randomUUID().toString()); // ID for storing consumer offsets in Zookeeper
//spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
spoutConfig.scheme = new SchemeAsMultiScheme(new JsonScheme());
// Allow for passing in an offset time
// startOffsetTime has a bug that ignores the special -2 value
if(kafkaStartOffset == "-2") {
spoutConfig.forceFromStart = true;
} else if (kafkaStartOffset != null) {
spoutConfig.startOffsetTime = Long.parseLong(kafkaStartOffset);
}
KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
// Add the spout and bolt to the topology
builder.setSpout("kafkaspout", kafkaSpout, 1);
}
示例15: initializeKafkaSpout
import backtype.storm.spout.SchemeAsMultiScheme; //导入依赖的package包/类
private boolean initializeKafkaSpout(String name) {
try {
BrokerHosts zk = new ZkHosts(config.getString("kafka.zk"));
String input_topic = config.getString("spout.kafka.topic");
SpoutConfig kafkaConfig = new SpoutConfig(zk, input_topic, "",
input_topic);
kafkaConfig.scheme = new SchemeAsMultiScheme(new RawScheme());
kafkaConfig.forceFromStart = Boolean.valueOf("True");
kafkaConfig.startOffsetTime = -1;
builder.setSpout(name, new KafkaSpout(kafkaConfig),
config.getInt("spout.kafka.parallelism.hint")).setNumTasks(
config.getInt("spout.kafka.num.tasks"));
} catch (Exception e) {
e.printStackTrace();
System.exit(0);
}
return true;
}