This page collects typical usage examples of the Java class org.apache.storm.tuple.Fields. If you are wondering what the Fields class is for, or how to use it, the curated examples below may help.
The Fields class lives in the org.apache.storm.tuple package. Fifteen code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code samples.
Example 1: buildTopology

import org.apache.storm.tuple.Fields; // import the dependent package/class

public static StormTopology buildTopology(WindowsStoreFactory windowStore, WindowConfig windowConfig) throws Exception {
    TridentKafkaConfig config = new TridentKafkaConfig(new ZkHosts("localhost:2181"), "test");
    config.scheme = new SchemeAsMultiScheme(new StringScheme());
    config.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    TransactionalTridentKafkaSpout spout = new TransactionalTridentKafkaSpout(config);

    TridentTopology topology = new TridentTopology();
    Stream stream = topology.newStream("spout1", spout)
            .each(new Fields("str"), new Split(), new Fields("word"))
            .window(windowConfig, windowStore, new Fields("word"), new CountAsAggregator(), new Fields("count"))
            .peek(new Consumer() {
                @Override
                public void accept(TridentTuple tuple) {
                    System.out.print("[");
                    for (int index = 0; index < tuple.size(); index++) {
                        System.out.print(tuple.get(index));
                        if (index < (tuple.size() - 1)) {
                            System.out.print(",");
                        }
                    }
                    System.out.println("]");
                }
            });
    return topology.build();
}
Developer ID: PacktPublishing; Project: Practical-Real-time-Processing-and-Analytics; Lines: 26; Source: TridentWindowingInmemoryStoreTopology.java
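The Split function used by this example is defined elsewhere in the project. A minimal sketch, assuming it tokenizes the incoming "str" field on whitespace and emits one "word" per token, could look like this:

import org.apache.storm.trident.operation.BaseFunction;
import org.apache.storm.trident.operation.TridentCollector;
import org.apache.storm.trident.tuple.TridentTuple;
import org.apache.storm.tuple.Values;

public class Split extends BaseFunction {
    @Override
    public void execute(TridentTuple tuple, TridentCollector collector) {
        // Assumed behavior: split the input sentence on whitespace and
        // emit one tuple per non-empty token for the declared "word" field.
        for (String word : tuple.getString(0).split("\\s+")) {
            if (!word.isEmpty()) {
                collector.emit(new Values(word));
            }
        }
    }
}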
Example 2: main

import org.apache.storm.tuple.Fields; // import the dependent package/class

public static void main(String[] args) {
    FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3,
            new Values("this is simple example of trident topology"),
            new Values("this example count same words"));
    spout.setCycle(true);
    // TransactionalTridentKafkaSpout spout = new TransactionalTridentKafkaSpout(new TridentKafkaConfig(new ZkHosts("localhost:9091"), "test"));

    TridentTopology topology = new TridentTopology();
    MemoryMapState.Factory stateFactory = new MemoryMapState.Factory();
    topology.newStream("spout1", spout)
            .each(new Fields("sentence"), new Split(), new Fields("word"))
            .groupBy(new Fields("word"))
            .persistentAggregate(stateFactory, new Count(), new Fields("count"))
            .newValuesStream()
            .filter(new DisplayOutputFilter())
            .parallelismHint(6);

    Config config = new Config();
    config.setNumWorkers(3);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("storm-trident-example", config, topology.build());
}
Developer ID: PacktPublishing; Project: Practical-Real-time-Processing-and-Analytics; Lines: 22; Source: BasicTridentTopology.java
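DisplayOutputFilter is likewise defined elsewhere. Because it is applied after newValuesStream(), a plausible sketch is a BaseFilter that logs each tuple and keeps everything; the assumption that the stream carries ("word", "count") pairs and the log format are both guesses:

import org.apache.storm.trident.operation.BaseFilter;
import org.apache.storm.trident.tuple.TridentTuple;

public class DisplayOutputFilter extends BaseFilter {
    @Override
    public boolean isKeep(TridentTuple tuple) {
        // Assumed behavior: print the aggregated (word, count) pair and keep all tuples.
        System.out.println(tuple.getString(0) + " -> " + tuple.getLong(1));
        return true;
    }
}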
Example 3: main

import org.apache.storm.tuple.Fields; // import the dependent package/class

public static void main(String[] args) {
    TridentKafkaConfig config = new TridentKafkaConfig(new ZkHosts("localhost:2181"), "storm-trident-diy");
    config.scheme = new SchemeAsMultiScheme(new StringScheme());
    config.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    TransactionalTridentKafkaSpout spout = new TransactionalTridentKafkaSpout(config);

    TridentTopology topology = new TridentTopology();
    topology.newStream("spout", spout)
            .filter(new ExcludePhoneNumber())
            .each(new Fields("str"), new DeviceInfoExtractor(), new Fields("phone", "bytes"))
            .groupBy(new Fields("phone"))
            .aggregate(new Fields("bytes", "phone"), new Sum(), new Fields("sum"))
            .applyAssembly(new FirstN(10, "sum"))
            .each(new Fields("phone", "sum"), new Debug());

    Config config1 = new Config();
    config1.setNumWorkers(3);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("storm-trident-diy", config1, topology.build());
}
Developer ID: PacktPublishing; Project: Practical-Real-time-Processing-and-Analytics; Lines: 21; Source: TridentDIY.java
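ExcludePhoneNumber and DeviceInfoExtractor come from the book's companion code and are not reproduced here. For illustration only, the filter could be a BaseFilter over the raw "str" field; the hard-coded number in the predicate below is purely hypothetical:

import org.apache.storm.trident.operation.BaseFilter;
import org.apache.storm.trident.tuple.TridentTuple;

public class ExcludePhoneNumber extends BaseFilter {
    @Override
    public boolean isKeep(TridentTuple tuple) {
        // Hypothetical predicate: drop records mentioning one specific phone number.
        return !tuple.getString(0).contains("0123456789");
    }
}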
Example 4: buildTopology

import org.apache.storm.tuple.Fields; // import the dependent package/class

private static TopologyBuilder buildTopology() throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    String topicName = Configuration.getConfig().getString("rtc.mq.spout.topic");
    String groupName = Configuration.getConfig().getString("rtc.mq.spout.group");

    BrokerHosts hosts = new ZkHosts(Configuration.getConfig().getString("rtc.zk.hosts"));
    SpoutConfig spoutConfig = new SpoutConfig(hosts, topicName, "/consumers", groupName);
    spoutConfig.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    spoutConfig.zkServers = Arrays.asList(Configuration.getConfig().getString("rtc.storm.zkServers").split(","));
    spoutConfig.zkPort = Configuration.getConfig().getInt("rtc.storm.zkPort");
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);

    builder.setSpout("MQSpout", kafkaSpout, Configuration.getConfig().getInt("rtc.storm.spout.parallelismHint"))
            .setNumTasks(Configuration.getConfig().getInt("rtc.storm.spout.task"));
    builder.setBolt("ExtractBolt", new ExtractBolt(), Configuration.getConfig().getInt("rtc.storm.extract.bolt.parallelismHint"))
            .setNumTasks(Configuration.getConfig().getInt("rtc.storm.extract.bolt.task"))
            .shuffleGrouping("MQSpout");
    builder.setBolt("Statistic", new StatisticBolt(), Configuration.getConfig().getInt("rtc.storm.statistic.bolt.parallelismHint"))
            .setNumTasks(Configuration.getConfig().getInt("rtc.storm.statistic.bolt.task"))
            .fieldsGrouping("ExtractBolt", new Fields("hashKeys"));
    // builder.setBolt("Alarm", new AlarmBolt(), Configuration.getConfig().getInt("rtc.storm.alarm.bolt.parallelismHint")).setNumTasks(Configuration.getConfig().getInt("rtc.storm.alarm.bolt.task")).fieldsGrouping("Statistic", new Fields("EventName"));
    return builder;
}
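This buildTopology() returns a TopologyBuilder instead of submitting it. A hypothetical main method that submits the result (the topology name "rtc-topology" is invented) might be:

import org.apache.storm.Config;
import org.apache.storm.StormSubmitter;

public static void main(String[] args) throws Exception {
    Config config = new Config();
    // Hypothetical submission to a real cluster; the name is made up.
    StormSubmitter.submitTopology("rtc-topology", config, buildTopology().createTopology());
}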
Example 5: buildTopology

import org.apache.storm.tuple.Fields; // import the dependent package/class

public static StormTopology buildTopology() {
    FakeTweetSpout spout = new FakeTweetSpout(10);
    TridentTopology topology = new TridentTopology();
    topology.newStream("spout1", spout)
            .shuffle()
            .each(new Fields("text", "Country"), new TridentUtility.TweetFilter())
            .groupBy(new Fields("Country"))
            .aggregate(new Fields("Country"), new Count(), new Fields("count"))
            .each(new Fields("count"), new TridentUtility.Print())
            .parallelismHint(2);
    return topology.build();
}
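TridentUtility.TweetFilter and TridentUtility.Print are helpers from the same project. A sketch of the filter, assuming it merely drops tuples with a missing text or Country value, is:

import org.apache.storm.trident.operation.BaseFilter;
import org.apache.storm.trident.tuple.TridentTuple;

public class TweetFilter extends BaseFilter {
    @Override
    public boolean isKeep(TridentTuple tuple) {
        // Assumed predicate: keep only tweets with both a text and a Country value.
        return tuple.getString(0) != null && tuple.getString(1) != null;
    }
}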
Example 6: buildTopology

import org.apache.storm.tuple.Fields; // import the dependent package/class

public static StormTopology buildTopology(LocalDRPC drpc) {
    FakeTweetSpout spout = new FakeTweetSpout(10);
    TridentTopology topology = new TridentTopology();
    TridentState countryCount = topology.newStream("spout1", spout)
            .shuffle()
            .each(new Fields("text", "Country"), new TridentUtility.TweetFilter())
            .groupBy(new Fields("Country"))
            .persistentAggregate(new MemoryMapState.Factory(), new Fields("Country"), new Count(), new Fields("count"))
            .parallelismHint(2);

    try {
        Thread.sleep(2000);
    } catch (InterruptedException e) {
    }

    topology.newDRPCStream("Count", drpc)
            .each(new Fields("args"), new TridentUtility.Split(), new Fields("Country"))
            .stateQuery(countryCount, new Fields("Country"), new MapGet(), new Fields("count"))
            .each(new Fields("count"), new FilterNull());
    return topology.build();
}
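The DRPC stream declared above can be driven from a LocalDRPC instance. A sketch of such a driver follows; the sleep duration and the query arguments "Japan India" are invented:

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.LocalDRPC;

public static void main(String[] args) throws Exception {
    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("drpc-demo", new Config(), buildTopology(drpc));
    Thread.sleep(5000);
    // Query the persistent per-country counts through the "Count" DRPC function.
    System.out.println(drpc.execute("Count", "Japan India"));
    cluster.shutdown();
}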
Example 7: filterMessage

import org.apache.storm.tuple.Fields; // import the dependent package/class

@Test
public void filterMessage() throws Exception {
    RabbitMqMessageScheme rabbitMqMessageScheme = new SingleStreamRabbitMqMessageScheme() {
        @Override
        public void prepare(Map config, TopologyContext context) {
            // no operation
        }

        @Override
        public List<Object> convertToTuple(Envelope envelope, AMQP.BasicProperties properties, byte[] body) throws Exception {
            return null;
        }

        @Override
        public Fields getOutputFields() {
            return new Fields("stringField");
        }

        @Override
        public void cleanup() {
            // no operation
        }
    };
    StreamedTuple streamedTuple = rabbitMqMessageScheme.convertToStreamedTuple(null, null, null);
    assertNull(streamedTuple);
}
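For the opposite case, a convertToTuple that returns a non-null list should produce a non-null StreamedTuple; assuming the message body is UTF-8 text destined for the single "stringField", the override could read:

import java.nio.charset.StandardCharsets;
import java.util.Collections;

@Override
public List<Object> convertToTuple(Envelope envelope, AMQP.BasicProperties properties, byte[] body) {
    // Assumed happy path: turn the raw message body into a single-field tuple.
    return Collections.singletonList(new String(body, StandardCharsets.UTF_8));
}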
Example 8: declareOutputFields

import org.apache.storm.tuple.Fields; // import the dependent package/class

@Test
public void declareOutputFields() {
    Map<String, Fields> outputFields = new HashMap<>(2);
    String streamId1 = "testStream1";
    String streamId2 = "testStream2";
    Fields fields1 = new Fields("field1", "field2");
    Fields fields2 = new Fields("field3", "field4");
    outputFields.put(streamId1, fields1);
    outputFields.put(streamId2, fields2);

    RabbitMqSpout rabbitMqSpout = new RabbitMqSpout(rabbitMqChannelProvider, new EmptyRabbitMqMessageScheme() {
        @Override
        public Map<String, Fields> getStreamsOutputFields() {
            return outputFields;
        }
    });

    OutputFieldsDeclarer mockOutputFieldsDeclarer = mock(OutputFieldsDeclarer.class);
    rabbitMqSpout.declareOutputFields(mockOutputFieldsDeclarer);

    verify(mockOutputFieldsDeclarer, times(1)).declareStream(streamId1, fields1);
    verify(mockOutputFieldsDeclarer, times(1)).declareStream(streamId2, fields2);
}
Example 9: provideOutputFields

import org.apache.storm.tuple.Fields; // import the dependent package/class

/**
 * Provides various inputs to be split.
 */
@DataProvider
public static Object[][] provideOutputFields() throws InstantiationException, IllegalAccessException {
    return new Object[][] {
        // String inputs: these get split and trimmed.
        { "key,value", new String[] {"key", "value"} },
        { "key, value", new String[] {"key", "value"} },
        { " key , value ,", new String[] {"key", "value"} },

        // Lists of Strings are used as-is.
        { Lists.newArrayList("key", "value"), new String[] {"key", "value"} },
        { Lists.newArrayList(" key ", " value"), new String[] {" key ", " value"} },
        { Lists.newArrayList("key,value", "another"), new String[] {"key,value", "another"} },

        // Fields inputs are used as-is.
        { new Fields("key", "value"), new String[] {"key", "value"} },
        { new Fields(" key ", " value"), new String[] {" key ", " value"} },
        { new Fields("key,value ", "another"), new String[] {"key,value ", "another"} },
    };
}
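A consuming test is not shown on this page. A hypothetical one in junit-dataprovider style (both OutputFieldParser and its parse method are invented stand-ins for the real class under test) would pair each input with its expected field names:

@Test
@UseDataProvider("provideOutputFields")
public void testOutputFieldParsing(final Object input, final String[] expectedFields) {
    // Hypothetical: the parser under test turns any supported input into a Fields instance.
    Fields fields = OutputFieldParser.parse(input); // invented helper
    assertEquals(Arrays.asList(expectedFields), fields.toList());
}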
Example 10: declareOutputFields

import org.apache.storm.tuple.Fields; // import the dependent package/class

@Override
public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
    try {
        Global.loadingDataStructure("/Users/yuxiao/項目/stormSQL/code/SQLonStorm/src/main/resources/createtabledata.txt");
        HashMap<String, MTable> dataBase = Global.DataBase;
        MTable jData_Product = dataBase.get("JData_Product");
        this.descOfOutputFields = new ArrayList<String>();
        this.descOfOutputFields.add("Table");
        for (MField mField : jData_Product.getField()) {
            this.descOfOutputFields.add(mField.getName());
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
    Fields fields = new Fields(this.descOfOutputFields);
    outputFieldsDeclarer.declare(fields);
}
Example 11: declareOutputFields

import org.apache.storm.tuple.Fields; // import the dependent package/class

@Override
public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
    try {
        Global.loadingDataStructure("/Users/yuxiao/項目/stormSQL/code/SQLonStorm/src/main/resources/createtabledata.txt");
        HashMap<String, MTable> dataBase = Global.DataBase;
        MTable jData_Action_201602 = dataBase.get("JData_Action_201602");
        this.descOfOutputFields = new ArrayList<String>();
        this.descOfOutputFields.add("Table");
        for (MField mField : jData_Action_201602.getField()) {
            this.descOfOutputFields.add(mField.getName());
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
    Fields fields = new Fields(this.descOfOutputFields);
    outputFieldsDeclarer.declare(fields);
}
Example 12: main

import org.apache.storm.tuple.Fields; // import the dependent package/class

public static void main(String[] args) throws Exception {
    String mode = "";
    if (args.length > 0) {
        mode = args[0];
    }

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("sentence-spout", new FixedSentenceSpout());
    // SentenceSpout --> SplitSentenceBolt
    builder.setBolt("split-bolt", new SplitSentenceBolt()).shuffleGrouping("sentence-spout");
    // SplitSentenceBolt --> WordCountBolt
    builder.setBolt("count-bolt", new WordCountBolt()).fieldsGrouping("split-bolt", new Fields("word"));
    // WordCountBolt --> DisplayBolt
    builder.setBolt("display-bolt", new DisplayBolt()).globalGrouping("count-bolt");

    Config config = new Config();
    if (mode.equals("cluster")) {
        System.out.println("submitting on cluster mode");
        StormSubmitter.submitTopology("word-count-topology", config, builder.createTopology());
    } else {
        System.out.println("submitting on local mode");
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("word-count-topology", config, builder.createTopology());
        Thread.sleep(20000);
        cluster.killTopology("word-count-topology");
        cluster.shutdown();
    }
}
Developer ID: PacktPublishing; Project: Practical-Real-time-Processing-and-Analytics; Lines: 33; Source: BasicStormWordCountExample.java
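The bolts themselves are defined in the book's companion code. Because "count-bolt" receives a fieldsGrouping on "word", every occurrence of a given word reaches the same task, so a per-task in-memory map suffices. A minimal sketch of such a WordCountBolt (the book's actual implementation may differ):

import java.util.HashMap;
import java.util.Map;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;

public class WordCountBolt extends BaseRichBolt {
    private OutputCollector collector;
    private Map<String, Long> counts;

    @Override
    public void prepare(Map config, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
        this.counts = new HashMap<>();
    }

    @Override
    public void execute(Tuple tuple) {
        // Increment the running count for this word and emit the updated total.
        String word = tuple.getStringByField("word");
        Long count = counts.merge(word, 1L, Long::sum);
        collector.emit(new Values(word, count));
        collector.ack(tuple);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("word", "count"));
    }
}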
Example 13: main

import org.apache.storm.tuple.Fields; // import the dependent package/class

public static void main(String[] args) {
    FixedBatchSpout spout = new FixedBatchSpout(new Fields("a", "b"), 3,
            new Values(1, 2), new Values(3, 4), new Values(7, 3));
    spout.setCycle(false);

    TridentTopology topology = new TridentTopology();
    topology.newStream("spout1", spout)
            .each(new Fields("a", "b"), new PerformDiffFunction(), new Fields("d"))
            .peek(new Consumer() {
                @Override
                public void accept(TridentTuple tuple) {
                    System.out.print("[");
                    for (int index = 0; index < tuple.size(); index++) {
                        System.out.print(tuple.get(index));
                        if (index < (tuple.size() - 1)) {
                            System.out.print(",");
                        }
                    }
                    System.out.println("]");
                }
            });

    Config config = new Config();
    config.setNumWorkers(3);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("storm-trident-example", config, topology.build());
}
Developer ID: PacktPublishing; Project: Practical-Real-time-Processing-and-Analytics; Lines: 29; Source: TridentOperations.java
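PerformDiffFunction is not listed here. Judging from its name and its use over fields ("a", "b") to emit "d", a plausible sketch (the subtraction order is an assumption) is:

import org.apache.storm.trident.operation.BaseFunction;
import org.apache.storm.trident.operation.TridentCollector;
import org.apache.storm.trident.tuple.TridentTuple;
import org.apache.storm.tuple.Values;

public class PerformDiffFunction extends BaseFunction {
    @Override
    public void execute(TridentTuple tuple, TridentCollector collector) {
        // Assumed behavior: emit the absolute difference of the two input fields as "d".
        int a = tuple.getInteger(0);
        int b = tuple.getInteger(1);
        collector.emit(new Values(Math.abs(a - b)));
    }
}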
Example 14: main

import org.apache.storm.tuple.Fields; // import the dependent package/class

public static void main(String[] args) {
    FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3,
            new Values("this is simple example of trident topology"),
            new Values("this example count same words"));
    spout.setCycle(false);

    TridentTopology topology = new TridentTopology();
    topology.newStream("spout1", spout)
            .flatMap(new SplitMapFunction())
            .map(new UpperCase())
            .peek(new Consumer() {
                @Override
                public void accept(TridentTuple tuple) {
                    System.out.print("[");
                    for (int index = 0; index < tuple.size(); index++) {
                        System.out.print(tuple.get(index));
                        if (index < (tuple.size() - 1)) {
                            System.out.print(",");
                        }
                    }
                    System.out.println("]");
                }
            });

    Config config = new Config();
    config.setNumWorkers(3);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("storm-trident-example", config, topology.build());
}
Developer ID: PacktPublishing; Project: Practical-Real-time-Processing-and-Analytics; Lines: 28; Source: TridentOperationsMap.java
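SplitMapFunction and UpperCase are small functional-style operations. Sketches consistent with their use here, assuming whitespace tokenization, follow; each class would normally live in its own file:

import java.util.ArrayList;
import java.util.List;
import org.apache.storm.trident.operation.FlatMapFunction;
import org.apache.storm.trident.operation.MapFunction;
import org.apache.storm.trident.tuple.TridentTuple;
import org.apache.storm.tuple.Values;

// Splits each sentence into one Values per word (assumed whitespace tokenization).
public class SplitMapFunction implements FlatMapFunction {
    @Override
    public Iterable<Values> execute(TridentTuple input) {
        List<Values> words = new ArrayList<>();
        for (String word : input.getString(0).split("\\s+")) {
            words.add(new Values(word));
        }
        return words;
    }
}

// Upper-cases the single input field.
class UpperCase implements MapFunction {
    @Override
    public Values execute(TridentTuple input) {
        return new Values(input.getString(0).toUpperCase());
    }
}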
Example 15: declareOutputFields

import org.apache.storm.tuple.Fields; // import the dependent package/class

@Override
public void declareOutputFields(OutputFieldsDeclarer declarer) {
    super.declareOutputFields(declarer);
    // The output of this module is the list of fields to index,
    // with at least the URL and the text content.
    declarer.declare(new Fields("url", "content", "metadata", "text"));
}