本文整理汇总了Java中org.apache.flink.storm.api.FlinkTopology类的典型用法代码示例。如果您正苦于以下问题:Java FlinkTopology类的具体用法?Java FlinkTopology怎么用?Java FlinkTopology使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
FlinkTopology类属于org.apache.flink.storm.api包,在下文中一共展示了FlinkTopology类的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: main
import org.apache.flink.storm.api.FlinkTopology; //导入依赖的package包/类
public static void main(final String[] args) throws Exception {
    // Bail out early when the command-line arguments are not valid.
    if (!ExclamationTopology.parseParameters(args)) {
        return;
    }

    // Assemble the topology with the plain Storm builder API.
    final TopologyBuilder stormTopology = ExclamationTopology.buildTopology();

    // Configure and execute the program on a local Flink cluster.
    final Config config = new Config();
    config.put(ExclamationBolt.EXCLAMATION_COUNT, ExclamationTopology.getExclamation());
    config.put(FlinkLocalCluster.SUBMIT_BLOCKING, true); // only required to stabilize integration test

    final FlinkLocalCluster localCluster = FlinkLocalCluster.getLocalCluster();
    localCluster.submitTopology(TOPOLOGY_ID, config, FlinkTopology.createTopology(stormTopology));
    localCluster.shutdown();
}
示例2: main
import org.apache.flink.storm.api.FlinkTopology; //导入依赖的package包/类
public static void main(final String[] args) throws AlreadyAliveException, InvalidTopologyException,
        NotAliveException {
    if (!WordCountTopology.parseParameters(args)) {
        return;
    }

    // Build the topology the Storm way.
    final TopologyBuilder stormTopology = WordCountTopology.buildTopology();

    // Point the client at the Flink JobManager; the host can be changed to a remote address.
    final Config clientConfig = new Config();
    clientConfig.put(Config.NIMBUS_HOST, "localhost");
    // use default flink jobmanger.rpc.port
    clientConfig.put(Config.NIMBUS_THRIFT_PORT, 6123);

    // Submit to the Flink cluster, let the job run briefly, then tear it down.
    final FlinkClient flinkClient = FlinkClient.getConfiguredClient(clientConfig);
    flinkClient.submitTopology(topologyId, uploadedJarLocation, FlinkTopology.createTopology(stormTopology));
    Utils.sleep(5 * 1000);
    flinkClient.killTopology(topologyId);
}
示例3: testProgram
import org.apache.flink.storm.api.FlinkTopology; //导入依赖的package包/类
@Override
protected void testProgram() throws Exception {
    // Pipeline: MetaDataSpout -> VerifyMetaDataBolt (local-or-shuffle) -> VerifyMetaDataBolt (shuffle).
    final TopologyBuilder topology = new TopologyBuilder();
    topology.setSpout(spoutId, new MetaDataSpout(), 2);
    topology.setBolt(boltId1, new VerifyMetaDataBolt(), 2)
            .localOrShuffleGrouping(spoutId, MetaDataSpout.STREAM_ID);
    topology.setBolt(boltId2, new VerifyMetaDataBolt())
            .shuffleGrouping(boltId1, VerifyMetaDataBolt.STREAM_ID);

    final FlinkLocalCluster localCluster = FlinkLocalCluster.getLocalCluster();
    localCluster.submitTopology(topologyId, null, FlinkTopology.createTopology(topology));

    // run topology for 5 seconds
    Utils.sleep(5 * 1000);
    localCluster.shutdown();

    // The bolts record any metadata mismatch in a shared flag; fail if one occurred.
    Assert.assertFalse(VerifyMetaDataBolt.errorOccured);
}
示例4: testProgram
import org.apache.flink.storm.api.FlinkTopology; //导入依赖的package包/类
@Override
protected void testProgram() throws Exception {
    // resultPath may carry a "scheme:" prefix; keep only the trailing path segment
    // (presumably strips a URI scheme — TODO confirm against the base test class).
    final String[] pathParts = this.resultPath.split(":");
    final String sinkFile = pathParts[pathParts.length - 1];

    // Pipeline: finite random spout -> TaskIdBolt (fields grouping) -> file sink.
    final TopologyBuilder topology = new TopologyBuilder();
    topology.setSpout(spoutId, new FiniteRandomSpout(0, 10, 2));
    topology.setBolt(boltId, new TaskIdBolt(), 2)
            .fieldsGrouping(spoutId, FiniteRandomSpout.STREAM_PREFIX + 0, new Fields("number"));
    topology.setBolt(sinkId, new BoltFileSink(sinkFile)).shuffleGrouping(boltId);

    final FlinkLocalCluster localCluster = FlinkLocalCluster.getLocalCluster();
    final Config config = new Config();
    config.put(FlinkLocalCluster.SUBMIT_BLOCKING, true); // only required to stabilize integration test
    localCluster.submitTopology(topologyId, config, FlinkTopology.createTopology(topology));
    localCluster.shutdown();
}
示例5: testProgram
import org.apache.flink.storm.api.FlinkTopology; //导入依赖的package包/类
@Override
protected void testProgram() throws Exception {
    final TopologyBuilder topology = new TopologyBuilder();

    // Three finite random sources feeding one merger bolt.
    topology.setSpout(spoutId1, new FiniteRandomSpout(0, 10));
    topology.setSpout(spoutId2, new FiniteRandomSpout(1, 8));
    topology.setSpout(spoutId3, new FiniteRandomSpout(2, 13));
    topology.setBolt(boltId, new MergerBolt())
            .shuffleGrouping(spoutId1, FiniteRandomSpout.STREAM_PREFIX + 0)
            .shuffleGrouping(spoutId2, FiniteRandomSpout.STREAM_PREFIX + 0)
            .shuffleGrouping(spoutId3, FiniteRandomSpout.STREAM_PREFIX + 0);

    // resultPath may carry a "scheme:" prefix; keep only the trailing path segment.
    final String[] pathParts = this.resultPath.split(":");
    final String sinkFile = pathParts[pathParts.length - 1];
    topology.setBolt(sinkId, new BoltFileSink(sinkFile)).shuffleGrouping(boltId);

    // Execute the merged topology on a local Flink cluster.
    final FlinkLocalCluster localCluster = FlinkLocalCluster.getLocalCluster();
    final Config config = new Config();
    config.put(FlinkLocalCluster.SUBMIT_BLOCKING, true); // only required to stabilize integration test
    localCluster.submitTopology(topologyId, config, FlinkTopology.createTopology(topology));
    localCluster.shutdown();
}
示例6: main
import org.apache.flink.storm.api.FlinkTopology; //导入依赖的package包/类
public static void main(final String[] args) throws Exception {
    // Validate arguments before doing any work.
    if (!SplitSpoutTopology.parseParameters(args)) {
        return;
    }

    // Build the topology the Storm way and run it locally.
    final TopologyBuilder stormTopology = SplitSpoutTopology.buildTopology();
    final FlinkLocalCluster localCluster = FlinkLocalCluster.getLocalCluster();
    localCluster.submitTopology(topologyId, null, FlinkTopology.createTopology(stormTopology));

    // run topology for 5 seconds
    Utils.sleep(5 * 1000);
    localCluster.shutdown();
}
示例7: main
import org.apache.flink.storm.api.FlinkTopology; //导入依赖的package包/类
public static void main(final String[] args) throws Exception {
    // Validate arguments before doing any work.
    if (!SplitBoltTopology.parseParameters(args)) {
        return;
    }

    // Build the topology the Storm way and run it locally.
    final TopologyBuilder stormTopology = SplitBoltTopology.buildTopology();
    final FlinkLocalCluster localCluster = FlinkLocalCluster.getLocalCluster();
    localCluster.submitTopology(topologyId, null, FlinkTopology.createTopology(stormTopology));

    // run topology for 5 seconds
    Utils.sleep(5 * 1000);
    localCluster.shutdown();
}
示例8: main
import org.apache.flink.storm.api.FlinkTopology; //导入依赖的package包/类
public static void main(String[] args) throws Exception {
    // Pipeline: file spout -> parser bolt -> Cassandra sink bolt, translated to a Flink job.
    final TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new FileSpout("/tmp/device-data.txt"), 1);
    builder.setBolt("parser", new ParserBolt(), 1).shuffleGrouping("spout");
    builder.setBolt("tdrCassandra", new TDRCassandraBolt("localhost", "tdr"), 1)
           .shuffleGrouping("parser", "tdrstream");

    // Convert the Storm topology into a Flink program and execute it directly.
    FlinkTopology.createTopology(builder).execute();
}
开发者ID:PacktPublishing,项目名称:Practical-Real-time-Processing-and-Analytics,代码行数:10,代码来源:FlinkStormExample.java
示例9: main
import org.apache.flink.storm.api.FlinkTopology; //导入依赖的package包/类
public static void main(String[] args) throws Exception {
    // Four OAuth credentials are mandatory.
    if (args.length < 4) {
        System.err.println(
                "Usage: PrintSampleStream <consumer-key> <consumer-secret> <access-token> <access-token-secret>");
        return;
    }

    final String consumerKey = args[0];
    final String consumerSecret = args[1];
    final String accessToken = args[2];
    final String accessTokenSecret = args[3];
    // keywords start with the 5th parameter
    final String[] keyWords = Arrays.copyOfRange(args, 4, args.length);

    // Twitter sample spout feeding a simple printer bolt.
    final TopologyBuilder topology = new TopologyBuilder();
    topology.setSpout("twitter", new TwitterSampleSpout(consumerKey, consumerSecret,
            accessToken, accessTokenSecret, keyWords));
    topology.setBolt("print", new PrinterBolt()).shuffleGrouping("twitter");

    // Run locally for 10 seconds, then shut down.
    final Config config = new Config();
    final FlinkLocalCluster localCluster = FlinkLocalCluster.getLocalCluster();
    localCluster.submitTopology("Print", config, FlinkTopology.createTopology(topology));
    Utils.sleep(10 * 1000);
    localCluster.shutdown();
}
示例10: main
import org.apache.flink.storm.api.FlinkTopology; //导入依赖的package包/类
public static void main(final String[] args) throws Exception {
    if (!WordCountTopology.parseParameters(args)) {
        return;
    }

    // Build the topology the Storm way.
    final TopologyBuilder stormTopology = WordCountTopology.buildTopology();

    // Execute the program on a Flink cluster. JobManager host/port may be set
    // manually or left blank; if not set and
    // - executed within Java, default values "localhost" and "6123" are set by FlinkSubmitter
    // - executed via bin/flink values from flink-conf.yaml are set by FlinkSubmitter.
    // conf.put(Config.NIMBUS_HOST, "localhost");
    // conf.put(Config.NIMBUS_THRIFT_PORT, new Integer(6123));
    final Config submitConfig = new Config();

    // The user jar file must be specified via JVM argument if executed via Java.
    // => -Dstorm.jar=target/WordCount-StormTopology.jar
    // If bin/flink is used, the jar file is detected automatically.
    FlinkSubmitter.submitTopology(topologyId, submitConfig, FlinkTopology.createTopology(stormTopology));

    // Give the job a moment to run, then kill it through the configured client.
    Thread.sleep(5 * 1000);
    FlinkClient.getConfiguredClient(submitConfig).killTopology(topologyId);
}
示例11: main
import org.apache.flink.storm.api.FlinkTopology; //导入依赖的package包/类
public static void main(final String[] args) throws Exception {
    // Validate arguments before doing any work.
    if (!WordCountTopology.parseParameters(args)) {
        return;
    }

    // Build the topology the Storm way (without the optional sink variant).
    final TopologyBuilder stormTopology = WordCountTopology.buildTopology(false);

    // Run locally in blocking mode so the program only returns once the job finishes.
    final FlinkLocalCluster localCluster = FlinkLocalCluster.getLocalCluster();
    final Config config = new Config();
    config.put(FlinkLocalCluster.SUBMIT_BLOCKING, true); // only required to stabilize integration test
    localCluster.submitTopology(topologyId, config, FlinkTopology.createTopology(stormTopology));
    localCluster.shutdown();
}
示例12: main
import org.apache.flink.storm.api.FlinkTopology; //导入依赖的package包/类
public static void main(final String[] args) throws Exception {
    // Validate arguments before doing any work.
    if (!WordCountTopology.parseParameters(args)) {
        return;
    }

    // Build the topology the Storm way.
    final TopologyBuilder stormTopology = WordCountTopology.buildTopology();

    // Run locally in blocking mode so the program only returns once the job finishes.
    final FlinkLocalCluster localCluster = FlinkLocalCluster.getLocalCluster();
    final Config config = new Config();
    config.put(FlinkLocalCluster.SUBMIT_BLOCKING, true); // only required to stabilize integration test
    localCluster.submitTopology(topologyId, config, FlinkTopology.createTopology(stormTopology));
    localCluster.shutdown();
}
示例13: main
import org.apache.flink.storm.api.FlinkTopology; //导入依赖的package包/类
public static void main(String[] args) throws Exception {
    // Two feeder spouts: one produces (id, gender, hobbies), the other (id, age).
    final FeederSpout genderSpout = new FeederSpout(new Fields("id", "gender", "hobbies"));
    final FeederSpout ageSpout = new FeederSpout(new Fields("id", "age"));

    final Config conf = new Config();
    // only required to stabilize integration test
    conf.put(FlinkLocalCluster.SUBMIT_BLOCKING, true);

    final TopologyBuilder builder = new TopologyBuilder();
    // Wrap both spouts so the streams terminate cleanly, then join them on "id".
    final NullTerminatingSpout finalGenderSpout = new NullTerminatingSpout(genderSpout);
    final NullTerminatingSpout finalAgeSpout = new NullTerminatingSpout(ageSpout);
    builder.setSpout("gender", finalGenderSpout);
    builder.setSpout("age", finalAgeSpout);
    builder.setBolt("join", new SingleJoinBolt(new Fields("gender", "age")))
           .fieldsGrouping("gender", new Fields("id"))
           .fieldsGrouping("age", new Fields("id"));

    // Emit the join result either to a file (if an argument was given) or to stdout.
    if (args.length > 0) {
        builder.setBolt("fileOutput", new BoltFileSink(args[0], new TupleOutputFormatter()))
               .shuffleGrouping("join");
    } else {
        builder.setBolt("print", new PrinterBolt()).shuffleGrouping("join");
    }

    // Feed ten test persons: alternating genders, cycling hobbies, and ages fed in reverse order.
    final String[] hobbies = new String[] {"reading", "biking", "travelling", "watching tv"};
    for (int i = 0; i < 10; i++) {
        final String gender = (i % 2 == 0) ? "male" : "female";
        genderSpout.feed(new Values(i, gender, hobbies[i % hobbies.length]));
    }
    for (int i = 9; i >= 0; i--) {
        ageSpout.feed(new Values(i, i + 20));
    }

    // Run the join topology on a local Flink cluster (blocking), then shut down.
    final FlinkLocalCluster cluster = FlinkLocalCluster.getLocalCluster();
    cluster.submitTopology("joinTopology", conf, FlinkTopology.createTopology(builder));
    cluster.shutdown();
}