This article collects typical usage examples of the Java class storm.kafka.StringScheme. If you have been wondering what StringScheme is for, how to use it, or what working code looks like, the curated class examples below should help.
The StringScheme class belongs to the storm.kafka package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
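For orientation before the examples: StringScheme deserializes each raw Kafka message into a one-field tuple whose field name is StringScheme.STRING_SCHEME_KEY ("str"), decoding the payload as a UTF-8 string. Because SpoutConfig expects a MultiScheme, it is always wrapped in a SchemeAsMultiScheme, as every example below does. A minimal sketch of both sides of that contract (host, topic, and group names are placeholders):

// Spout side: decode each Kafka payload as a UTF-8 string tuple.
SpoutConfig cfg = new SpoutConfig(new ZkHosts("localhost:2181"), "my-topic", "/kafka", "my-group");
cfg.scheme = new SchemeAsMultiScheme(new StringScheme());

// Bolt side: read the payload through the scheme's single output field.
String payload = tuple.getStringByField(StringScheme.STRING_SCHEME_KEY); // field "str"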
Example 1: constructKafkaSpoutConf
import storm.kafka.StringScheme; // import the required package/class
private SpoutConfig constructKafkaSpoutConf() {
    // BrokerHosts hosts = new ZkHosts(topologyConfig.getProperty("kafka.zookeeper.host.port"));
    BrokerHosts hosts = new ZkHosts("localhost:2181");
    /*
    String topic = topologyConfig.getProperty("kafka.topic");
    String zkRoot = topologyConfig.getProperty("kafka.zkRoot");
    String consumerGroupId = topologyConfig.getProperty("kafka.consumer.group.id");
    */
    String topic = "addresses";
    String zkRoot = "";
    String consumerGroupId = "group1";
    SpoutConfig spoutConfig = new SpoutConfig(hosts, topic, zkRoot, consumerGroupId);
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    return spoutConfig;
}
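A hedged sketch of how the returned config is typically consumed: KafkaSpout comes from storm.kafka, while AddressParserBolt is a hypothetical downstream bolt, not part of the original example.

// Wire the config into a KafkaSpout (AddressParserBolt is hypothetical).
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("kafkaSpout", new KafkaSpout(constructKafkaSpoutConf()), 1);
builder.setBolt("addressParser", new AddressParserBolt(), 1).shuffleGrouping("kafkaSpout");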
Example 2: buildTopology
import storm.kafka.StringScheme; // import the required package/class
public StormTopology buildTopology(Properties properties) {
    // Load properties for the storm topology
    String kafkaTopic = properties.getProperty("kafka.topic");
    SpoutConfig kafkaConfig = new SpoutConfig(kafkaBrokerHosts, kafkaTopic, "", "storm");
    kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    TopologyBuilder builder = new TopologyBuilder();
    // Specific audit logs analysis bolts
    AuditLoginsCounterBolt loginCounterbolt = new AuditLoginsCounterBolt();
    AuditParserBolt auditParserBolt = new AuditParserBolt();
    // Elastic search bolt
    TupleMapper tupleMapper = new DefaultTupleMapper();
    ElasticSearchBolt elasticSearchBolt = new ElasticSearchBolt(tupleMapper);
    // Topology scheme: KafkaSpout -> auditParserBolt -> loginCounterBolt -> elasticSearchBolt
    builder.setSpout("KafkaSpout", new KafkaSpout(kafkaConfig), 1);
    builder.setBolt("ParseBolt", auditParserBolt, 1).shuffleGrouping("KafkaSpout");
    builder.setBolt("CountBolt", loginCounterbolt, 1).shuffleGrouping("ParseBolt");
    builder.setBolt("ElasticSearchBolt", elasticSearchBolt, 1)
           .fieldsGrouping("CountBolt", new Fields("id", "index", "type", "document"));
    return builder.createTopology();
}
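Note that the final hop uses fieldsGrouping on ("id", "index", "type", "document"), so tuples describing the same Elasticsearch document are always routed to the same bolt task. An illustrative alternative (not from the original code) that trades that affinity for plain load balancing:

// Illustrative variant: shuffleGrouping spreads writes evenly across ES
// writer tasks but no longer pins a given document to a single task.
builder.setBolt("ElasticSearchBolt", elasticSearchBolt, 1)
       .shuffleGrouping("CountBolt");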
Example 3: getTopology
import storm.kafka.StringScheme; // import the required package/class
@Override
public StormTopology getTopology(Config config) {
    final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
    final int splitNum = BenchmarkUtils.getInt(config, SPLIT_NUM, DEFAULT_SPLIT_BOLT_NUM);
    final int countNum = BenchmarkUtils.getInt(config, COUNT_NUM, DEFAULT_COUNT_BOLT_NUM);
    spout = new TransactionalTridentKafkaSpout(
            KafkaUtils.getTridentKafkaConfig(config, new SchemeAsMultiScheme(new StringScheme())));
    TridentTopology trident = new TridentTopology();
    trident.newStream("wordcount", spout).name("sentence").parallelismHint(spoutNum).shuffle()
           .each(new Fields(StringScheme.STRING_SCHEME_KEY), new WordSplit(), new Fields("word"))
           .parallelismHint(splitNum)
           .groupBy(new Fields("word"))
           .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
           .parallelismHint(countNum);
    /* trident.newStream("wordcount", spout)
           .each(new Fields(StringScheme.STRING_SCHEME_KEY), new WordSplit(), new Fields("word"))
           .groupBy(new Fields("word"))
           .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count")); */
    return trident.build();
}
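WordSplit is referenced here (and again in Example 9) but not shown. A plausible minimal implementation of such a Trident function, offered as an assumption rather than the benchmark's actual code, splits each sentence tuple into word tuples:

import storm.trident.operation.BaseFunction;
import storm.trident.operation.TridentCollector;
import storm.trident.tuple.TridentTuple;
import backtype.storm.tuple.Values;

// Hypothetical sketch of WordSplit: emits one tuple per whitespace-separated word.
public class WordSplit extends BaseFunction {
    @Override
    public void execute(TridentTuple tuple, TridentCollector collector) {
        for (String word : tuple.getString(0).split("\\s+")) {
            if (!word.isEmpty()) {
                collector.emit(new Values(word));
            }
        }
    }
}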
Example 4: getTopology
import storm.kafka.StringScheme; // import the required package/class
@Override
public StormTopology getTopology(Config config) {
    final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
    final int viewBoltNum = BenchmarkUtils.getInt(config, VIEW_NUM, DEFAULT_VIEW_BOLT_NUM);
    final int cntBoltNum = BenchmarkUtils.getInt(config, COUNT_NUM, DEFAULT_COUNT_BOLT_NUM);
    spout = new KafkaSpout(KafkaUtils.getSpoutConfig(
            config, new SchemeAsMultiScheme(new StringScheme())));
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SPOUT_ID, spout, spoutNum);
    builder.setBolt(VIEW_ID, new PageViewBolt(Item.URL, Item.ONE), viewBoltNum)
           .localOrShuffleGrouping(SPOUT_ID);
    builder.setBolt(COUNT_ID, new WordCount.Count(), cntBoltNum)
           .fieldsGrouping(VIEW_ID, new Fields(Item.URL.toString()));
    return builder.createTopology();
}
Example 5: getTopology
import storm.kafka.StringScheme; // import the required package/class
@Override
public StormTopology getTopology(Config config) {
    final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
    final int pvBoltNum = BenchmarkUtils.getInt(config, VIEW_NUM, DEFAULT_PV_BOLT_NUM);
    final int filterBoltNum = BenchmarkUtils.getInt(config, FILTER_NUM, DEFAULT_FILTER_BOLT_NUM);
    spout = new KafkaSpout(KafkaUtils.getSpoutConfig(
            config, new SchemeAsMultiScheme(new StringScheme())));
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SPOUT_ID, spout, spoutNum);
    builder.setBolt(VIEW_ID, new PageViewBolt(Item.STATUS, Item.ALL), pvBoltNum)
           .localOrShuffleGrouping(SPOUT_ID);
    builder.setBolt(FILTER_ID, new FilterBolt<Integer>(404), filterBoltNum)
           .fieldsGrouping(VIEW_ID, new Fields(Item.STATUS.toString()));
    return builder.createTopology();
}
Example 6: getTopology
import storm.kafka.StringScheme; // import the required package/class
@Override
public StormTopology getTopology(Config config) {
    final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
    final int matBoltNum = BenchmarkUtils.getInt(config, FM_NUM, DEFAULT_MAT_BOLT_NUM);
    final int cntBoltNum = BenchmarkUtils.getInt(config, CM_NUM, DEFAULT_CNT_BOLT_NUM);
    final String ptnString = (String) Utils.get(config, PATTERN_STRING, DEFAULT_PATTERN_STR);
    spout = new KafkaSpout(KafkaUtils.getSpoutConfig(config, new SchemeAsMultiScheme(new StringScheme())));
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SPOUT_ID, spout, spoutNum);
    builder.setBolt(FM_ID, new FindMatchingSentence(ptnString), matBoltNum)
           .localOrShuffleGrouping(SPOUT_ID);
    builder.setBolt(CM_ID, new CountMatchingSentence(), cntBoltNum)
           .fieldsGrouping(FM_ID, new Fields(FindMatchingSentence.FIELDS));
    return builder.createTopology();
}
Example 7: getTopology
import storm.kafka.StringScheme; // import the required package/class
@Override
public StormTopology getTopology(Config config) {
    final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
    final int pvBoltNum = BenchmarkUtils.getInt(config, VIEW_NUM, DEFAULT_PV_BOLT_NUM);
    final int uvBoltNum = BenchmarkUtils.getInt(config, UNIQUER_NUM, DEFAULT_UV_BOLT_NUM);
    final int winLen = BenchmarkUtils.getInt(config, WINDOW_LENGTH, DEFAULT_WINDOW_LENGTH_IN_SEC);
    final int emitFreq = BenchmarkUtils.getInt(config, EMIT_FREQ, DEFAULT_EMIT_FREQ_IN_SEC);
    spout = new KafkaSpout(KafkaUtils.getSpoutConfig(
            config, new SchemeAsMultiScheme(new StringScheme())));
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SPOUT_ID, spout, spoutNum);
    builder.setBolt(VIEW_ID, new PageViewBolt(Item.URL, Item.USER), pvBoltNum)
           .localOrShuffleGrouping(SPOUT_ID);
    builder.setBolt(UNIQUER_ID, new UniqueVisitorBolt(winLen, emitFreq), uvBoltNum)
           .fieldsGrouping(VIEW_ID, new Fields(Item.URL.toString()));
    return builder.createTopology();
}
Example 8: main
import storm.kafka.StringScheme; // import the required package/class
public static void main(String[] args) {
    TopologyBuilder builder = new TopologyBuilder();
    SpoutConfig spoutConf = new SpoutConfig(new ZkHosts("localhost:2181", "/brokers"), "test", "/kafkastorm", "KafkaSpout");
    spoutConf.scheme = new SchemeAsMultiScheme(new StringScheme());
    spoutConf.forceFromStart = true;
    builder.setSpout("KafkaSpout", new KafkaSpout(spoutConf), 3);
    builder.setBolt("KafkaBolt", new PrinterBolt(), 3).shuffleGrouping("KafkaSpout");
    Config conf = new Config();
    // conf.setDebug(true);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("kafka-test", conf, builder.createTopology());
    Utils.sleep(60000);
    cluster.shutdown();
}
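PrinterBolt is not defined in the snippet; a minimal stand-in, assumed from the name rather than taken from the original project, just dumps every tuple to stdout:

import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Tuple;

// Hypothetical PrinterBolt: logs each incoming tuple and emits nothing.
public class PrinterBolt extends BaseBasicBolt {
    @Override
    public void execute(Tuple tuple, BasicOutputCollector collector) {
        System.out.println(tuple);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // terminal bolt: no output fields
    }
}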
Example 9: buildTopology
import storm.kafka.StringScheme; // import the required package/class
public StormTopology buildTopology(LocalDRPC drpc) {
    TridentKafkaConfig kafkaConfig = new TridentKafkaConfig(brokerHosts, "storm-sentence", "storm");
    kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    TransactionalTridentKafkaSpout kafkaSpout = new TransactionalTridentKafkaSpout(kafkaConfig);
    TridentTopology topology = new TridentTopology();
    TridentState wordCounts = topology.newStream("kafka", kafkaSpout).shuffle()
            .each(new Fields("str"), new WordSplit(), new Fields("word"))
            .groupBy(new Fields("word"))
            .persistentAggregate(new HazelCastStateFactory(), new Count(), new Fields("aggregates_words"))
            .parallelismHint(2);
    topology.newDRPCStream("words", drpc)
            .each(new Fields("args"), new Split(), new Fields("word"))
            .groupBy(new Fields("word"))
            .stateQuery(wordCounts, new Fields("word"), new MapGet(), new Fields("count"))
            .each(new Fields("count"), new FilterNull())
            .aggregate(new Fields("count"), new Sum(), new Fields("sum"));
    return topology.build();
}
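A hedged sketch of exercising the DRPC stream in local mode; it assumes access to an instance of the surrounding class, and the query string is illustrative:

// Illustrative local DRPC query against the "words" function defined above.
LocalDRPC drpc = new LocalDRPC();
LocalCluster cluster = new LocalCluster();
cluster.submitTopology("wordcount-drpc", new Config(), buildTopology(drpc));
// execute() blocks and returns the aggregated counts for the requested words.
System.out.println(drpc.execute("words", "cat dog the cat"));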
Example 10: TridentKafkaSpout
import storm.kafka.StringScheme; // import the required package/class
/**
 * Constructor
 *
 * @param config  Config file to read properties from
 * @param section Section of the Kafka config file to read properties from
 */
public TridentKafkaSpout(ConfigData config, String section) {
    _kafkaConfig = new TridentKafkaConfig(new ZkHosts(config.getZkHost()), config.getTopic(section), "stormKafka");
    _kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    _kafkaConfig.bufferSizeBytes = config.getFetchSizeKafka();
    _kafkaConfig.fetchSizeBytes = config.getFetchSizeKafka();
    _kafkaConfig.forceFromStart = false;
}
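The forceFromStart flag at the end decides whether consumer offsets already committed to ZooKeeper are honored. A short sketch of the two settings; the field names are the storm-kafka KafkaConfig ones, and the pairing of forceFromStart with startOffsetTime is my reading of the old storm-kafka behavior (Example 15 below combines them explicitly):

// As above: resume from the offset last committed to ZooKeeper.
_kafkaConfig.forceFromStart = false;

// Alternative: ignore committed offsets and replay from startOffsetTime.
_kafkaConfig.forceFromStart = true;
_kafkaConfig.startOffsetTime = kafka.api.OffsetRequest.EarliestTime();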
Example 11: TridentKafkaSpoutNmsp
import storm.kafka.StringScheme; // import the required package/class
/**
 * Constructor
 *
 * @param config  Config file to read properties from
 * @param section Section of the Kafka config file to read properties from
 */
public TridentKafkaSpoutNmsp(ConfigData config, String section) {
    _kafkaConfig = new TridentKafkaConfig(new ZkHosts(config.getZkHost()), config.getTopic(section), "stormKafka");
    _kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    _kafkaConfig.bufferSizeBytes = config.getFetchSizeKafkaNmsp();
    _kafkaConfig.fetchSizeBytes = config.getFetchSizeKafkaNmsp();
    _kafkaConfig.forceFromStart = false;
}
Example 12: TridentKafkaSpoutLocation
import storm.kafka.StringScheme; // import the required package/class
/**
 * Constructor
 *
 * @param config  Config file to read properties from
 * @param section Section of the Kafka config file to read properties from
 */
public TridentKafkaSpoutLocation(ConfigData config, String section) {
    _kafkaConfig = new TridentKafkaConfig(new ZkHosts(config.getZkHost()), config.getTopic(section), "stormKafka");
    _kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    _kafkaConfig.bufferSizeBytes = config.getFetchSizeKafkaLocation();
    _kafkaConfig.fetchSizeBytes = config.getFetchSizeKafkaLocation();
    _kafkaConfig.forceFromStart = false;
}
Example 13: main
import storm.kafka.StringScheme; // import the required package/class
/**
 * @param args
 * http://www.programcreek.com/java-api-examples/index.php?api=storm.kafka.KafkaSpout
 */
public static void main(String[] args) {
    try {
        // Configure the spout and set its parallelism; this value controls how
        // many executor threads the spout gets across the cluster.
        String zkhost = "wxb-1:2181,wxb-2:2181,wxb-3:2181";
        String topic = "order";
        String groupId = "id";
        int spoutNum = 3;
        int boltNum = 1;
        ZkHosts zkHosts = new ZkHosts(zkhost); // the ZooKeeper ensemble Kafka registers with
        SpoutConfig spoutConfig = new SpoutConfig(zkHosts, topic, "/order", groupId); // creates the ZooKeeper nodes /order and /id
        spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
        KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("spout", kafkaSpout, spoutNum);
        builder.setBolt("check", new CheckOrderBolt(), boltNum).shuffleGrouping("spout");
        builder.setBolt("counter", new CounterBolt(), boltNum).shuffleGrouping("check");
        Config config = new Config();
        config.setDebug(true);
        if (args != null && args.length > 0) {
            config.setNumWorkers(2);
            StormSubmitter.submitTopology(args[0], config, builder.createTopology());
        } else {
            config.setMaxTaskParallelism(2);
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("Wordcount-Topology", config, builder.createTopology());
            Thread.sleep(500000);
            cluster.shutdown();
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 14: constructKafkaSpoutConf
import storm.kafka.StringScheme; // import the required package/class
private SpoutConfig constructKafkaSpoutConf() {
    // BrokerHosts hosts = new ZkHosts(topologyConfig.getProperty("kafka.zookeeper.host.port"));
    BrokerHosts hosts = new ZkHosts("localhost:2181");
    String topic = "properties";
    String zkRoot = "";
    String consumerGroupId = "group1";
    SpoutConfig spoutConfig = new SpoutConfig(hosts, topic, zkRoot, consumerGroupId);
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    return spoutConfig;
}
Example 15: main
import storm.kafka.StringScheme; // import the required package/class
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
    String zkIp = "localhost";
    String nimbusHost = "sandbox.hortonworks.com";
    String zookeeperHost = zkIp + ":2181";
    ZkHosts zkHosts = new ZkHosts(zookeeperHost);
    List<String> zkServers = new ArrayList<String>();
    zkServers.add(zkIp);
    SpoutConfig kafkaConfig = new SpoutConfig(zkHosts, "spertus-flight-events", "/spertus-flights-events", "flight_id");
    kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    kafkaConfig.startOffsetTime = kafka.api.OffsetRequest.EarliestTime();
    kafkaConfig.zkServers = zkServers;
    kafkaConfig.zkRoot = "/spertus-flight-events";
    kafkaConfig.zkPort = 2181;
    kafkaConfig.forceFromStart = true;
    KafkaSpout kafkaSpout = new KafkaSpout(kafkaConfig);
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("flight-events", kafkaSpout, 1);
    builder.setBolt("flight-stats", new GetFlightStatsBolt(), 1).shuffleGrouping("flight-events");
    Map<String, Object> conf = new HashMap<String, Object>();
    conf.put(backtype.storm.Config.TOPOLOGY_WORKERS, 4);
    conf.put(backtype.storm.Config.TOPOLOGY_DEBUG, true);
    if (args != null && args.length > 0) {
        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("flight-topology", conf, builder.createTopology());
    }
}
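The raw Map works because submitTopology accepts any Map of configuration keys, but backtype.storm.Config provides the same keys behind typed setters; an equivalent sketch:

// Equivalent configuration using the typed helpers on backtype.storm.Config:
Config conf = new Config();
conf.setNumWorkers(4); // same key as TOPOLOGY_WORKERS
conf.setDebug(true);   // same key as TOPOLOGY_DEBUG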