This article collects typical usage examples of the Java method storm.trident.TridentTopology.build. If you are wondering what TridentTopology.build does, how to use it, or want ready-made examples, the curated samples below should help. You can also browse more usage examples of the enclosing class, storm.trident.TridentTopology.
The following 15 code examples of TridentTopology.build are sorted by popularity by default. Upvote the examples you like or find useful; your feedback helps the system recommend better Java samples.
Example 1: buildTopology
import storm.trident.TridentTopology; // import for the class this method depends on
public static StormTopology buildTopology()
{
    TridentTopology topology = new TridentTopology();
    RandomWordSpout spout1 = new RandomWordSpout();
    // "faltu" is never referenced again, but it is the stream's unique name,
    // which Trident uses to track the stream's state metadata.
    Stream inputStream = topology.newStream("faltu", spout1);
    /**
     * partitionPersist : The partitionPersist operation updates a source of state.
     * It returns a TridentState object, which can then be used in stateQuery operations elsewhere in the topology.
     * Args:
     * StateFactory instance - the factory implements the makeState API and should return an instance of State.
     * Fields list - the fields to persist; these fields must be present in the input stream.
     * StateUpdater instance - the StateUpdater applies the updates to the underlying State.
     */
    inputStream
        .partitionPersist(new RedisStoreStateFactory(), new Fields("randomWord"), new RedisStoreStateUpdater());
    return topology.build();
}
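None of these examples show how the built topology is actually run. Below is a minimal driver sketch, assuming the Storm 0.9.x-era packages (backtype.storm.*) that match the storm.trident namespace; the class name, topology name, and config values are illustrative, not part of the original example.

import backtype.storm.Config;
import backtype.storm.LocalCluster;

public class LocalRunner {
    public static void main(String[] args) throws Exception {
        Config conf = new Config();
        conf.setMaxSpoutPending(20); // throttle the number of in-flight Trident batches
        LocalCluster cluster = new LocalCluster();
        // buildTopology() is the method above, assumed to be in scope here.
        cluster.submitTopology("trident-example", conf, buildTopology());
        Thread.sleep(60_000); // let the topology run for a minute before shutting down
        cluster.shutdown();
    }
}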
Example 2: buildTopology
import storm.trident.TridentTopology; // import for the class this method depends on
public static StormTopology buildTopology()
{
    TridentTopology topology = new TridentTopology();
    RandomPhraseSpout spout1 = new RandomPhraseSpout();
    // "dumbo" is the stream's unique name; Trident uses it for state/metadata bookkeeping.
    Stream inputStream = topology.newStream("dumbo", spout1).parallelismHint(20);
    /**
     * persistentAggregate : The persistentAggregate operation updates a source of state.
     * persistentAggregate is an additional abstraction built on top of partitionPersist that knows how to take a
     * Trident aggregator and use it to apply updates to the source of state.
     * Args:
     * StateFactory instance - the factory implements the makeState API and should return an instance of State.
     * Fields list - the input fields for the aggregation; these fields must be present in the input stream.
     * Aggregator instance - a Trident aggregator (e.g. a CombinerAggregator such as Count) that computes the updates.
     * Fields list - the output field(s) the aggregation emits.
     */
    inputStream
        // The input stream generated by spout1 has a field called randomPhrase.
        // RandomPhraseSplitter takes a randomPhrase and additionally emits a field called randomWord into the stream.
        .each(new Fields("randomPhrase"), new RandomPhraseSplitter(), new Fields("randomWord")).parallelismHint(6);
    return topology.build();
}
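The Javadoc above describes persistentAggregate, but this example stops at each(). A minimal sketch of what the aggregation step could look like, reusing the stock MemoryMapState and Count classes that appear in the later examples rather than a Redis-backed state; illustrative only:

// Count occurrences of each randomWord in an in-memory map state.
TridentState wordCounts = inputStream
    .each(new Fields("randomPhrase"), new RandomPhraseSplitter(), new Fields("randomWord"))
    .groupBy(new Fields("randomWord"))
    .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"));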
Example 3: buildTopology
import storm.trident.TridentTopology; // import for the class this method depends on
public static StormTopology buildTopology(LocalDRPC drpc) {
    FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3,
        new Values("the cow jumped over the moon"),
        new Values("the man went to the store and bought some candy"),
        new Values("four score and seven years ago"),
        new Values("how many apples can you eat"),
        new Values("to be or not to be the person"));
    spout.setCycle(true);
    TridentTopology topology = new TridentTopology();
    TridentState wordCounts = topology.newStream("spout1", spout).parallelismHint(16)
        .each(new Fields("sentence"), new Split(), new Fields("word"))
        .groupBy(new Fields("word"))
        .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
        .parallelismHint(16);
    topology.newDRPCStream("words", drpc)
        .each(new Fields("args"), new Split(), new Fields("word"))
        .groupBy(new Fields("word"))
        .stateQuery(wordCounts, new Fields("word"), new MapGet(), new Fields("count"))
        .each(new Fields("count"), new FilterNull())
        .aggregate(new Fields("count"), new Sum(), new Fields("sum"));
    return topology.build();
}
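The "words" DRPC stream is queried from the driver. A minimal local-mode sketch in the spirit of storm-starter's TridentWordCount; the topology name and query string are illustrative:

LocalDRPC drpc = new LocalDRPC();
LocalCluster cluster = new LocalCluster();
cluster.submitTopology("wordCounter", new Config(), buildTopology(drpc));
// The DRPC argument arrives in the "args" field; Split turns it into lookup keys.
System.out.println(drpc.execute("words", "cat the dog jumped"));
drpc.shutdown();
cluster.shutdown();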
Example 4: buildTopology
import storm.trident.TridentTopology; // import for the class this method depends on
public static StormTopology buildTopology(LocalDRPC drpc) {
    FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3,
        new Values("the$$cow$$jumped$$over$$the$$moon"),
        new Values("the$$man$$went$$to$$the$$store$$and$$bought$$some$$candy"),
        new Values("four$$score$$and$$seven$$years$$ago"),
        new Values("how$$many$$apples$$can$$you$$eat"),
        new Values("to$$be$$or$$not$$to$$be$$the$$person"));
    spout.setCycle(true);
    TridentTopology topology = new TridentTopology();
    TridentState wordCounts = topology.newStream("spout1", spout)
        .each(new Fields("sentence"), new Split(), new Fields("word"))
        .groupBy(new Fields("word"))
        .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
        .parallelismHint(6);
    topology.newDRPCStream("words", drpc).each(new Fields("args"), new Split(), new Fields("word"))
        .groupBy(new Fields("word"))
        .stateQuery(wordCounts, new Fields("word"), new MapGet(), new Fields("count"))
        .each(new Fields("count"), new FilterNull())
        .aggregate(new Fields("count"), new Sum(), new Fields("sum"));
    return topology.build();
}
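Split is used throughout these examples but never shown. Here is a sketch modeled on the storm-starter version, which tokenizes on spaces; note that Example 4 feeds sentences delimited with "$$", so its Split would presumably split on that token instead:

import storm.trident.operation.BaseFunction;
import storm.trident.operation.TridentCollector;
import storm.trident.tuple.TridentTuple;
import backtype.storm.tuple.Values;

public class Split extends BaseFunction {
    @Override
    public void execute(TridentTuple tuple, TridentCollector collector) {
        // Emit one tuple per non-empty token of the input sentence.
        for (String word : tuple.getString(0).split(" ")) {
            if (word.length() > 0) {
                collector.emit(new Values(word));
            }
        }
    }
}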
Example 5: buildTopology
import storm.trident.TridentTopology; // import for the class this method depends on
public static StormTopology buildTopology(Config conf) {
    ITridentSpout<Object> spout;
    if (!ConfigUtil.getBoolean(conf, "spout.redis", false)) {
        spout = new OneSentencePerBatchSpout();
    } else {
        String host = (String) conf.get("redis.host");
        int port = ((Number) conf.get("redis.port")).intValue();
        String queue = (String) conf.get("redis.queue");
        // The Redis-backed spout construction is omitted in this snippet;
        // left as null, this branch is not runnable as shown.
        spout = null;
    }
    TridentTopology topology = new TridentTopology();
    topology.newStream("spout", spout).parallelismHint(ConfigUtil.getInt(conf, "spout.parallelism", 1))
        .each(new Fields("sentence"), new Split(), new Fields("word"))
        .parallelismHint(ConfigUtil.getInt(conf, "split.parallelism", 1))
        .groupBy(new Fields("word"))
        .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
        .parallelismHint(ConfigUtil.getInt(conf, "counter.parallelism", 1));
    return topology.build();
}
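ConfigUtil is project-specific, but the keys it reads come straight from the storm Config passed in. A hypothetical driver could populate them like this; key names are taken from the snippet, values are illustrative:

Config conf = new Config();
conf.put("spout.redis", false);     // take the OneSentencePerBatchSpout branch
conf.put("spout.parallelism", 4);
conf.put("split.parallelism", 8);
conf.put("counter.parallelism", 8);
StormTopology topology = buildTopology(conf);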
Example 6: buildTopology
import storm.trident.TridentTopology; // import for the class this method depends on
@Override
protected StormTopology buildTopology() {
    FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3,
        new Values("the cow jumped over the moon"),
        new Values("the man went to the store and bought some candy"),
        new Values("four score and seven years ago"),
        new Values("how many apples can you eat"),
        new Values("to be or not to be the person"));
    spout.setCycle(true);
    ESIndexState.Factory<Tweet> factory = new ESIndexState.Factory<>(getLocalClient(), Tweet.class);
    TridentTopology topology = new TridentTopology();
    TridentState state = topology.newStream("tweets", spout)
        .partitionPersist(factory, new Fields("sentence"), new ESIndexUpdater(new MyTridentTupleMapper()));
    topology.newDRPCStream("search", drpc)
        .each(new Fields("args"), new ExtractSearchArgs(), new Fields("query", "indices", "types"))
        .groupBy(new Fields("query", "indices", "types"))
        .stateQuery(state, new Fields("query", "indices", "types"), new QuerySearchIndexQuery(), new Fields("tweet"))
        .each(new Fields("tweet"), new FilterNull())
        .each(new Fields("tweet"), new CreateJson(), new Fields("json"))
        .project(new Fields("json"));
    return topology.build();
}
Example 7: buildTopology
import storm.trident.TridentTopology; // import for the class this method depends on
/**
 * Topology builder.
 * @return the assembled StormTopology
 */
public static StormTopology buildTopology() {
    TridentTopology topology = new TridentTopology();
    // urlsState is created here but not queried in this snippet.
    TridentState urlsState = topology.newStaticState(new UrlsDBFactory());
    // define the stream
    topology.newStream("countStream", new Spout())
        .each(new Fields(Spout.CLICK), new LongUrlEmitter(), new Fields(LongUrlEmitter.URL))
        // .parallelismHint(5)
        .partitionBy(new Fields(LongUrlEmitter.URL))
        .partitionPersist(new UrlsDBFactory(), new Fields(LongUrlEmitter.URL), new ProbCountBolt(),
            new Fields(LongUrlEmitter.URL, ProbCountBolt.CURR_URL_COUNT))
        // .parallelismHint(5)
        .newValuesStream()
        .shuffle()
        .aggregate(new Fields(LongUrlEmitter.URL, ProbCountBolt.CURR_URL_COUNT),
            new FirstNAggregator(1, ProbCountBolt.CURR_URL_COUNT, true),
            new Fields(LongUrlEmitter.URL, ProbCountBolt.CURR_URL_COUNT))
        .each(new Fields(LongUrlEmitter.URL, ProbCountBolt.CURR_URL_COUNT), new Debug());
    return topology.build();
}
Example 8: getTopology
import storm.trident.TridentTopology; // import for the class this method depends on
@Override
public StormTopology getTopology(Config config) {
    final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
    final int splitNum = BenchmarkUtils.getInt(config, SPLIT_NUM, DEFAULT_SPLIT_BOLT_NUM);
    final int countNum = BenchmarkUtils.getInt(config, COUNT_NUM, DEFAULT_COUNT_BOLT_NUM);
    spout = new TransactionalTridentKafkaSpout(
        KafkaUtils.getTridentKafkaConfig(config, new SchemeAsMultiScheme(new StringScheme())));
    TridentTopology trident = new TridentTopology();
    trident.newStream("wordcount", spout).name("sentence").parallelismHint(spoutNum).shuffle()
        .each(new Fields(StringScheme.STRING_SCHEME_KEY), new WordSplit(), new Fields("word"))
        .parallelismHint(splitNum)
        .groupBy(new Fields("word"))
        .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
        .parallelismHint(countNum);
    /* The same pipeline without explicit parallelism hints:
    trident.newStream("wordcount", spout)
        .each(new Fields(StringScheme.STRING_SCHEME_KEY), new WordSplit(), new Fields("word"))
        .groupBy(new Fields("word"))
        .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count")); */
    return trident.build();
}
Example 9: buildDevicesTopology
import storm.trident.TridentTopology; // import for the class this method depends on
/**
 * Creates a topology with device-id and count (whole numbers) as tuple
 * fields in a stream, and finally generates result streams based on the
 * min and max of the device-id and count values.
 */
public static StormTopology buildDevicesTopology() {
    String deviceID = "device-id";
    String count = "count";
    Fields allFields = new Fields(deviceID, count);
    RandomNumberGeneratorSpout spout = new RandomNumberGeneratorSpout(allFields, 10, 1000);
    TridentTopology topology = new TridentTopology();
    Stream devicesStream = topology.newStream("devicegen-spout", spout).each(allFields, new Debug("##### devices"));
    devicesStream.minBy(deviceID).each(allFields, new Debug("#### device with min id"));
    devicesStream.maxBy(count).each(allFields, new Debug("#### device with max count"));
    return topology.build();
}
Example 10: buildTopology
import storm.trident.TridentTopology; // import for the class this method depends on
public static StormTopology buildTopology(LocalDRPC drpc) {
    FixedBatchSpout spout = new FixedBatchSpout(new Fields("word"), 3,
        new Values("the cow jumped over the moon"),
        new Values("the man went to the store and bought some candy"),
        new Values("four score and seven years ago"),
        new Values("how many apples can you eat"),
        new Values("to be or not to be the person"));
    spout.setCycle(true);
    TridentTopology topology = new TridentTopology();
    TridentState wordCounts = topology.newStream("spout1", spout).parallelismHint(16)
        .flatMap(split)
        .map(toUpper)
        .filter(theFilter)
        .peek(new Consumer() {
            @Override
            public void accept(TridentTuple input) {
                System.out.println(input.getString(0));
            }
        })
        .groupBy(new Fields("word"))
        .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
        .parallelismHint(16);
    topology.newDRPCStream("words", drpc).flatMap(split).groupBy(new Fields("args"))
        .stateQuery(wordCounts, new Fields("args"), new MapGet(), new Fields("count"))
        .filter(new FilterNull())
        .aggregate(new Fields("count"), new Sum(), new Fields("sum"));
    return topology.build();
}
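The helpers split, toUpper, and theFilter are referenced but not defined above. A sketch of the three, modeled on storm-starter's TridentMapExample; note that the functional Stream API (flatMap/map/filter/peek) belongs to Storm 1.x Trident (org.apache.storm.trident.operation.{FlatMapFunction, MapFunction, Filter, BaseFilter}), not the older storm.trident package named in this article:

// Splits a sentence into one tuple per word.
private static FlatMapFunction split = new FlatMapFunction() {
    @Override
    public Iterable<Values> execute(TridentTuple input) {
        List<Values> valuesList = new ArrayList<>();
        for (String word : input.getString(0).split(" ")) {
            valuesList.add(new Values(word));
        }
        return valuesList;
    }
};

// Upper-cases each word.
private static MapFunction toUpper = new MapFunction() {
    @Override
    public Values execute(TridentTuple input) {
        return new Values(input.getStringByField("word").toUpperCase());
    }
};

// Keeps only the tuples whose word is "THE".
private static Filter theFilter = new BaseFilter() {
    @Override
    public boolean isKeep(TridentTuple tuple) {
        return tuple.getString(0).equals("THE");
    }
};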
Example 11: buildTopology
import storm.trident.TridentTopology; // import for the class this method depends on
public static StormTopology buildTopology(TransactionalTridentKafkaSpout spout) throws IOException {
    TridentTopology topology = new TridentTopology();
    TridentState count = topology
        .newStream("tweets", spout)
        .each(new Fields("str"), new ParseTweet(), new Fields("text", "content", "user"))
        .project(new Fields("content", "user"))
        .each(new Fields("content"), new OnlyHashtags())
        .each(new Fields("user"), new OnlyEnglish())
        .each(new Fields("content", "user"), new ExtractFollowerClassAndContentName(), new Fields("followerClass", "contentName"))
        .parallelismHint(3)
        .groupBy(new Fields("followerClass", "contentName"))
        .persistentAggregate(new HazelCastStateFactory(), new Count(), new Fields("count"))
        .parallelismHint(3);
    topology
        .newDRPCStream("hashtag_count")
        .each(new Constants<String>("< 100", "< 10K", "< 100K", ">= 100K"), new Fields("followerClass"))
        .stateQuery(count, new Fields("followerClass", "args"), new MapGet(), new Fields("count"));
    return topology.build();
}
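This DRPC stream is registered without a LocalDRPC handle, so it would be queried through a DRPC server. A hypothetical client call; host, port, and the argument value are illustrative:

// backtype.storm.utils.DRPCClient, pointed at a DRPC server from the cluster config.
DRPCClient client = new DRPCClient("drpc.example.com", 3772);
// Constants fans the query out across the four follower classes, so one
// call returns the stored count of this content name for each class.
System.out.println(client.execute("hashtag_count", "someContentName"));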
Example 12: buildVehiclesTopology
import storm.trident.TridentTopology; // import for the class this method depends on
/**
 * Creates a topology which demonstrates min/max operations on a stream of
 * tuples containing vehicle and driver fields, with values of type
 * {@link TridentMinMaxOfDevicesTopology.Vehicle} and
 * {@link TridentMinMaxOfDevicesTopology.Driver} respectively.
 */
public static StormTopology buildVehiclesTopology() {
    Fields driverField = new Fields(Driver.FIELD_NAME);
    Fields vehicleField = new Fields(Vehicle.FIELD_NAME);
    Fields allFields = new Fields(Vehicle.FIELD_NAME, Driver.FIELD_NAME);
    FixedBatchSpout spout = new FixedBatchSpout(allFields, 10, Vehicle.generateVehicles(20));
    spout.setCycle(true);
    TridentTopology topology = new TridentTopology();
    Stream vehiclesStream = topology.newStream("spout1", spout).each(allFields, new Debug("##### vehicles"));
    Stream slowVehiclesStream = vehiclesStream.min(new SpeedComparator())
        .each(vehicleField, new Debug("#### slowest vehicle"));
    Stream slowDriversStream = slowVehiclesStream.project(driverField)
        .each(driverField, new Debug("##### slowest driver"));
    vehiclesStream.max(new SpeedComparator()).each(vehicleField, new Debug("#### fastest vehicle"))
        .project(driverField).each(driverField, new Debug("##### fastest driver"));
    vehiclesStream.max(new EfficiencyComparator()).each(vehicleField, new Debug("#### efficient vehicle"));
    return topology.build();
}
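Stream.min and Stream.max take a Comparator over TridentTuples. A sketch of SpeedComparator along the lines of storm-starter's min/max vehicles example, assuming a Vehicle bean exposing an int maxSpeed field:

import java.io.Serializable;
import java.util.Comparator;
import storm.trident.tuple.TridentTuple;

// Orders tuples by the speed of the Vehicle they carry.
public class SpeedComparator implements Comparator<TridentTuple>, Serializable {
    @Override
    public int compare(TridentTuple tuple1, TridentTuple tuple2) {
        Vehicle vehicle1 = (Vehicle) tuple1.getValueByField(Vehicle.FIELD_NAME);
        Vehicle vehicle2 = (Vehicle) tuple2.getValueByField(Vehicle.FIELD_NAME);
        return Integer.compare(vehicle1.maxSpeed, vehicle2.maxSpeed);
    }
}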
Example 13: getTopology
import storm.trident.TridentTopology; // import for the class this method depends on
public StormTopology getTopology(Config config) {
    this.spout = new FixedBatchSpout(new Fields("sentence"), 20,
        new Values("one two"),
        new Values("two three"),
        new Values("three four"),
        new Values("four five"),
        new Values("five six"));
    TridentTopology trident = new TridentTopology();
    trident.newStream("wordcount", spout).name("sentence").parallelismHint(1).shuffle()
        .each(new Fields("sentence"), new Split(), new Fields("word"))
        .parallelismHint(1)
        .groupBy(new Fields("word"))
        .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
        .parallelismHint(1);
    return trident.build();
}
Example 14: buildTopology
import storm.trident.TridentTopology; // import for the class this method depends on
private static StormTopology buildTopology(final MlStormSpout mlStormSpout,
                                           final int parallelism,
                                           final int pcaRowWidth,
                                           final int numPrincipalComponents,
                                           final FieldTemplate template) {
    final TridentTopology topology = new TridentTopology();
    final Stream sensorStream = topology.newStream(FieldTemplate.FieldConstants.PCA.PCA, mlStormSpout);
    final StateFactory pcaFactory = new WindowedPcaFactory(pcaRowWidth, numPrincipalComponents, template);
    final TridentState principalComponents = sensorStream
        .partitionPersist(pcaFactory, new Fields(template.getKeyField(), template.getFeatureVectorField()), new PrincipalComponentUpdater(template))
        .parallelismHint(parallelism);
    topology.newDRPCStream(FieldTemplate.FieldConstants.PCA.PCA_DRPC)
        .broadcast()
        .stateQuery(principalComponents, new Fields(FieldTemplate.FieldConstants.ARGS), new PrincipalComponentsQuery(), new Fields(FieldTemplate.FieldConstants.PCA.PCA_COMPONENTS))
        .project(new Fields(FieldTemplate.FieldConstants.PCA.PCA_COMPONENTS))
        .aggregate(new Fields(FieldTemplate.FieldConstants.PCA.PCA_COMPONENTS), new PrincipalComponentsAggregator(), new Fields(FieldTemplate.FieldConstants.PCA.PCA_EIGEN))
        .project(new Fields(FieldTemplate.FieldConstants.PCA.PCA_EIGEN));
    return topology.build();
}
Example 15: buildTopology
import storm.trident.TridentTopology; // import for the class this method depends on
public static StormTopology buildTopology(WindowsStoreFactory windowStore, WindowConfig windowConfig)
        throws Exception {
    FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3,
        new Values("the cow jumped over the moon"),
        new Values("the man went to the store and bought some candy"),
        new Values("four score and seven years ago"),
        new Values("how many apples can you eat"),
        new Values("to be or not to be the person"));
    spout.setCycle(true);
    TridentTopology topology = new TridentTopology();
    Stream stream = topology.newStream("spout1", spout).parallelismHint(16)
        .each(new Fields("sentence"), new Split(), new Fields("word"))
        .window(windowConfig, windowStore, new Fields("word"), new CountAsAggregator(), new Fields("count"))
        .peek(new Consumer() {
            @Override
            public void accept(TridentTuple input) {
                LOG.info("Received tuple: [{}]", input);
            }
        });
    return topology.build();
}
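The caller supplies the window store and window definition. A sketch of typical values, assuming the Storm 1.x windowing classes (org.apache.storm.trident.windowing.*), where Trident windowing was introduced:

// Keep window tuples in memory; persistent WindowsStoreFactory implementations also exist.
WindowsStoreFactory windowStore = new InMemoryWindowsStoreFactory();
// Sliding window over the last 1000 tuples, advancing every 100 tuples.
WindowConfig windowConfig = SlidingCountWindow.of(1000, 100);
StormTopology topology = buildTopology(windowStore, windowConfig);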