

Java Fields Class Code Examples

This article collects and summarizes typical usage of the Java class backtype.storm.tuple.Fields. If you are wondering what the Fields class does, how to use it, or what real-world usage looks like, the selected code examples below may help.


The Fields class belongs to the backtype.storm.tuple package. A total of 15 code examples of the Fields class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
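Before diving into the examples, here is a minimal sketch of the two places where Fields typically shows up: a bolt's declareOutputFields, which names the positions of the values it emits, and fieldsGrouping, which keys routing on one of those declared fields. This snippet is illustrative only; the class and field names are not taken from any of the projects below.

import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;

// Illustrative bolt: passes the incoming "word" through and attaches a count of 1.
public class WordEmitBolt extends BaseBasicBolt {

    @Override
    public void execute(Tuple input, BasicOutputCollector collector) {
        collector.emit(new Values(input.getStringByField("word"), 1));
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // Fields gives names to the positions of the values emitted above.
        declarer.declare(new Fields("word", "count"));
    }
}

// Downstream wiring (illustrative): tuples with the same "word" value always
// reach the same task of the counting bolt.
//   builder.setBolt("count", new SomeCountBolt(), 2)
//          .fieldsGrouping("word-emit", new Fields("word"));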

Example 1: main

import backtype.storm.tuple.Fields; // import the required package/class
public static void main(String[] args) throws Exception {

    Config conf = new Config();
    int spout_Parallelism_hint = 1;
    int split_Parallelism_hint = 2;
    int count_Parallelism_hint = 2;

    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("spout", new RaceSentenceSpout(), spout_Parallelism_hint);
    builder.setBolt("split", new SplitSentence(), split_Parallelism_hint).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), count_Parallelism_hint).fieldsGrouping("split", new Fields("word"));
    String topologyName = RaceConfig.JstormTopologyName;

    try {
        StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
        //begin by Young

        //end by Young
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
}
 
Developer ID: yangliguang, Project: preliminary.demo, Lines: 25, Source: RaceTopology.java

Example 2: testSimpleWriteTopology

import backtype.storm.tuple.Fields; // import the required package/class
@Test
public void testSimpleWriteTopology() throws Exception {
    List doc1 = Collections.singletonList(ImmutableMap.of("one", 1, "two", 2));
    List doc2 = Collections.singletonList(ImmutableMap.of("OTP", "Otopeni", "SFO", "San Fran"));

    String target = index + "/simple-write";
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("test-spout-1", new TestSpout(ImmutableList.of(doc2, doc1), new Fields("doc")));
    builder.setBolt("es-bolt-1", new TestBolt(new EsBolt(target, conf))).shuffleGrouping("test-spout-1");

    MultiIndexSpoutStormSuite.run(index + "simple", builder.createTopology(), COMPONENT_HAS_COMPLETED);

    COMPONENT_HAS_COMPLETED.waitFor(1, TimeValue.timeValueSeconds(10));

    RestUtils.refresh(index);
    assertTrue(RestUtils.exists(target));
    String results = RestUtils.get(target + "/_search?");
    assertThat(results, containsString("SFO"));
}
 
Developer ID: xushjie1987, Project: es-hadoop-v2.2.0, Lines: 20, Source: AbstractStormSimpleBoltTests.java

Example 3: testSimpleWriteTopology

import backtype.storm.tuple.Fields; // import the required package/class
@Test
public void testSimpleWriteTopology() throws Exception {
    List doc1 = Collections.singletonList("{\"reason\" : \"business\",\"airport\" : \"SFO\"}");
    List doc2 = Collections.singletonList("{\"participants\" : 5,\"airport\" : \"OTP\"}");

    String target = index + "/json-simple-write";
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("test-spout-1", new TestSpout(ImmutableList.of(doc1, doc2), new Fields("json")));
    builder.setBolt("es-bolt-1", new TestBolt(new EsBolt(target, conf))).shuffleGrouping("test-spout-1");

    MultiIndexSpoutStormSuite.run(index + "json-simple", builder.createTopology(), COMPONENT_HAS_COMPLETED);

    COMPONENT_HAS_COMPLETED.waitFor(1, TimeValue.timeValueSeconds(10));

    RestUtils.refresh(index);
    assertTrue(RestUtils.exists(target));
    String results = RestUtils.get(target + "/_search?");
    assertThat(results, containsString("SFO"));
}
 
Developer ID: xushjie1987, Project: es-hadoop-v2.2.0, Lines: 20, Source: AbstractStormJsonSimpleBoltTests.java

Example 4: test1WriteIndexPattern

import backtype.storm.tuple.Fields; // import the required package/class
@Test
public void test1WriteIndexPattern() throws Exception {
    List doc1 = ImmutableList.of("one", "1", "two", "2", "number", 1);
    List doc2 = ImmutableList.of("OTP", "Otopeni", "SFO", "San Fran", "number", 2);

    String target = index + "/write-{number}";
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("test-spout-3", new TestSpout(ImmutableList.of(doc2, doc1), new Fields("key1", "val1", "key2",
            "val2", "key3", "number")));
    builder.setBolt("es-bolt-3", new TestBolt(new EsBolt(target, conf))).shuffleGrouping("test-spout-3");

    MultiIndexSpoutStormSuite.run(index + "write-pattern", builder.createTopology(), COMPONENT_HAS_COMPLETED);

    COMPONENT_HAS_COMPLETED.waitFor(1, TimeValue.timeValueSeconds(20));

    Thread.sleep(1000);
    RestUtils.refresh(index);
    assertTrue(RestUtils.exists(index + "/write-1"));
    assertTrue(RestUtils.exists(index + "/write-2"));

    String results = RestUtils.get(index + "/write-1" + "/_search?");
    assertThat(results, containsString("two"));

    results = RestUtils.get(index + "/write-2" + "/_search?");
    assertThat(results, containsString("SFO"));
}
 
Developer ID: xushjie1987, Project: es-hadoop-v2.2.0, Lines: 27, Source: AbstractStormIndexPatternBoltTests.java

Example 5: macVendorTest

import backtype.storm.tuple.Fields; // import the required package/class
@Test
public void macVendorTest() throws FileNotFoundException {

    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/flows.json").getPath());
    File checkFlow = new File(Thread.currentThread().getContextClassLoader().getResource("dataCheck/macVendorFlows.json").getPath());

    Scanner flows = new Scanner(fileFlow);
    Scanner checkFlows = new Scanner(checkFlow);

    MacVendorFunction._ouiFilePath = Thread.currentThread().getContextClassLoader().getResource("db/oui-vendors").getPath();

    List<String> fieldsFlow = new ArrayList<String>();

    fieldsFlow.add("flows");
    fieldsFlow.add("macVendor");

    LocalDRPC drpc = new LocalDRPC();

    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new MacVendorFunction(), new Fields("macVendor"))
            .each(new Fields(fieldsFlow), new MergeMapsFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));

    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());

    while (flows.hasNextLine()) {
        String stormFlow = drpc.execute("test", flows.nextLine());
        stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
        Assert.assertEquals(checkFlows.nextLine(), stormFlow);
    }
}
 
Developer ID: redBorder, Project: rb-bi, Lines: 39, Source: TopologyFunctionTest.java

Example 6: nonTimestampTest

import backtype.storm.tuple.Fields; // import the required package/class
@Test
public void nonTimestampTest() throws FileNotFoundException {

    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/nonTimestampFlows.json").getPath());

    Scanner flows = new Scanner(fileFlow);


    List<String> fieldsFlow = new ArrayList<String>();

    fieldsFlow.add("flows");

    LocalDRPC drpc = new LocalDRPC();

    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new CheckTimestampFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));

    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());

    while (flows.hasNextLine()) {
        String stormFlow = drpc.execute("test", flows.nextLine());
        stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
        Assert.assertEquals(true, stormFlow.contains("timestamp"));
    }
}
 
Developer ID: redBorder, Project: rb-bi, Lines: 34, Source: TopologyFunctionTest.java

Example 7: analizeHttpUrlTest

import backtype.storm.tuple.Fields; // import the required package/class
@Test
public void analizeHttpUrlTest() throws FileNotFoundException {

    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/httpFlows.json").getPath());
    File checkFlow = new File(Thread.currentThread().getContextClassLoader().getResource("dataCheck/httpFlows.json").getPath());

    Scanner flows = new Scanner(fileFlow);
    Scanner checkFlows = new Scanner(checkFlow);

    List<String> fieldsFlow = new ArrayList<String>();

    fieldsFlow.add("flows");
    fieldsFlow.add("httpUrlMap");

    LocalDRPC drpc = new LocalDRPC();

    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new AnalizeHttpUrlFunction(), new Fields("httpUrlMap"))
            .each(new Fields(fieldsFlow), new MergeMapsFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));

    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());

    while (flows.hasNextLine()) {
        String stormFlow = drpc.execute("test", flows.nextLine());
        stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
        Assert.assertEquals(checkFlows.nextLine(), stormFlow);
    }
}
 
Developer ID: redBorder, Project: rb-bi, Lines: 37, Source: TopologyFunctionTest.java

Example 8: declareOutputFields

import backtype.storm.tuple.Fields; // import the required package/class
@Override
public void declareOutputFields(OutputFieldsDeclarer declarer) {
    LOG.info("START declareOutputFields");
    if (terminalBolt || EmptyUtils.nullOrEmpty(outputFieldNames)) {
        LOG.info("This is TERMINAL BOLT");
        terminalBolt = true;
        return;
    }

    declarer.declare(new Fields(outputFieldNames));
}
 
Developer ID: qiozas, Project: sourcevirtues-samples, Lines: 12, Source: MorphlinesBolt.java

Example 9: declareOutputFields

import backtype.storm.tuple.Fields; // import the required package/class
public void declareOutputFields(OutputFieldsDeclarer declarer) {
	declarer.declareStream(ChannelTopology.TRANSFER_STREAM, 
			new Fields("channel","code","timestamp","num","ratio"));
	declarer.declareStream(ChannelTopology.OPENTSDB_STREAM, 
			new Fields("channel","code","timestamp","num","ratio"));	
	declarer.declareStream(ChannelTopology.HBASE_STREAM, 
			new Fields("rowkey","column","columnvalue"));
}
 
Developer ID: zhai3516, Project: storm-demos, Lines: 9, Source: CalculateBolt.java
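The bolt above only declares three named streams; the code that consumes them lives in the topology class, which is not shown here. Below is a hedged sketch of what that subscription could look like, using the three-argument grouping overloads. The component names and the downstream writer bolts are assumptions, not code from the storm-demos project.

TopologyBuilder builder = new TopologyBuilder();
// CalculateBolt is the class shown above; "parser" is an assumed upstream component.
builder.setBolt("calculate", new CalculateBolt(), 2).shuffleGrouping("parser");
// Each writer subscribes to exactly one of the declared streams by its stream id.
builder.setBolt("hbase-writer", new SomeHBaseBolt(), 1)
       .fieldsGrouping("calculate", ChannelTopology.HBASE_STREAM, new Fields("rowkey"));
builder.setBolt("opentsdb-writer", new SomeOpenTsdbBolt(), 1)
       .shuffleGrouping("calculate", ChannelTopology.OPENTSDB_STREAM);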

Example 10: main

import backtype.storm.tuple.Fields; // import the required package/class
public static void main(String[] args) {
    LocalCluster cluster = new LocalCluster();

    /* begin young-define */
    Config conf = new Config();
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new SpoutLocal(), 1);
    builder.setBolt("split", new SplitSentenceLocal(), 1).shuffleGrouping("spout");
    builder.setBolt("count", new WordCountLocal(), 1).fieldsGrouping("split", new Fields("word"));
    /* end young-define */

    // Recommended: add this line so that every bolt/spout runs with a parallelism of 1
    conf.put(Config.TOPOLOGY_MAX_TASK_PARALLELISM, 1);

    // Submit the topology
    cluster.submitTopology("SequenceTest", conf, builder.createTopology());

    // Wait for 1 minute; after that the topology and the cluster are stopped.
    // Increase this value as needed while debugging.
    try {
        Thread.sleep(60000);
    } catch (InterruptedException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

    // Kill the topology
    cluster.killTopology("SequenceTest");

    cluster.shutdown();
}
 
Developer ID: yangliguang, Project: preliminary.demo, Lines: 33, Source: RaceTopologyLocal.java

Example 11: main

import backtype.storm.tuple.Fields; // import the required package/class
public static void main(String[] args) 
    throws Exception {
  
  Config conf = new Config();
  LocalCluster cluster = new LocalCluster();
  
  TridentTopology topology = new TridentTopology();

  Stream movingAvgStream =
    topology.newStream("ticks-spout", buildSpout())
    .each(new Fields("stock-ticks"), new TickParser(), new Fields("price"))
    .aggregate(new Fields("price"), new CalculateAverage(), new Fields("count"));

  cluster.submitTopology("moving-avg", conf, topology.build());
}
 
Developer ID: amitchmca, Project: hadooparchitecturebook, Lines: 16, Source: MovingAvgLocalTopologyRunner.java

Example 12: main

import backtype.storm.tuple.Fields; // import the required package/class
public static void main(String[] args) {
	try{
		TopologyBuilder topologyBuilder = new TopologyBuilder();
		topologyBuilder.setSpout("spout-number", new ProduceRecordSpout(Type.NUMBER, new String[]{"111 222 333", "80966 31"}), 1);
		topologyBuilder.setSpout("spout-string", new ProduceRecordSpout(Type.STRING, new String[]{"abc ddd fasko", "hello the world"}), 1);
		topologyBuilder.setSpout("spout-sign", new ProduceRecordSpout(Type.SIGN, new String[]{"++ -*% *** @@", "{+-} ^#######"}), 1);

		topologyBuilder.setBolt("bolt-splitter", new SplitRecordBolt(), 2)
				.shuffleGrouping("spout-number")
				.shuffleGrouping("spout-string")
				.shuffleGrouping("spout-sign");

		topologyBuilder.setBolt("bolt-distributor", new DistributeWordByTypeBolt(), 1)
				.fieldsGrouping("bolt-splitter", new Fields("type"));

		topologyBuilder.setBolt("bolt-number-saver", new SaveDataBolt(Type.NUMBER), 1)
				.shuffleGrouping("bolt-distributor", "stream-number-saver");
		topologyBuilder.setBolt("bolt-string-saver", new SaveDataBolt(Type.STRING), 1)
				.shuffleGrouping("bolt-distributor", "stream-string-saver");
		topologyBuilder.setBolt("bolt-sign-saver", new SaveDataBolt(Type.SIGN), 1)
				.shuffleGrouping("bolt-distributor", "stream-sign-saver");

		Config config = new Config();
		config.setDebug(false);
		
		if(args != null && args.length>0){
			config.setNumWorkers(4);
			StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
		}else{
			config.setMaxTaskParallelism(2);
			LocalCluster cluster = new LocalCluster();
			cluster.submitTopology("test", config, topologyBuilder.createTopology());
		}
		
	}catch(Exception e){
		e.printStackTrace();
	}
}
 
Developer ID: cutoutsy, Project: miner, Lines: 39, Source: TopologyMain.java

Example 13: declareOutputFields

import backtype.storm.tuple.Fields; // import the required package/class
@Override
public void declareOutputFields(OutputFieldsDeclarer arg0) {
  arg0.declare(new Fields("trade"));
  arg0.declareStream("oddstream", new Fields("trade"));
  arg0.declareStream("evenstream", new Fields("trade"));

}
 
Developer ID: techysoul, Project: java, Lines: 8, Source: DeliveryCheckBolt.java
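declareOutputFields only registers the default stream and the two named streams; the matching execute() has to emit to those stream ids explicitly. The following is a sketch of what that could look like, assuming the bolt extends BaseBasicBolt and that odd/even routing is decided per trade; it is not the project's actual logic (with a BaseRichBolt, the collector type would be OutputCollector instead).

@Override
public void execute(Tuple tuple, BasicOutputCollector collector) {
    String trade = tuple.getString(0);
    // Default stream, matching declare(new Fields("trade")) above.
    collector.emit(new Values(trade));
    // Named streams, matching the declareStream() calls above.
    if (trade.length() % 2 != 0) {
        collector.emit("oddstream", new Values(trade));
    } else {
        collector.emit("evenstream", new Values(trade));
    }
}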

Example 14: TestSpout

import backtype.storm.tuple.Fields; // import the required package/class
public TestSpout(List<List> tuples, Fields output) {
    this.tuples = tuples;
    this.fields = output;
    this.spout = null;
    DONE_TUPLE = new ArrayList(output.size());
    for (int i = 0; i < output.size(); i++) {
        DONE_TUPLE.add(DONE);
    }
}
 
Developer ID: xushjie1987, Project: es-hadoop-v2.2.0, Lines: 10, Source: TestSpout.java

Example 15: format

import backtype.storm.tuple.Fields; // import the required package/class
@Override
public byte[] format(Tuple tuple) {
    StringBuilder sb = new StringBuilder();
    Fields fields = this.fields == null ? tuple.getFields() : this.fields;
    int size = fields.size();
    for(int i = 0; i < size; i++){
        sb.append(tuple.getValueByField(fields.get(i)));
        if(i != size - 1){
            sb.append(this.fieldDelimiter);
        }
    }
    sb.append(this.recordDelimiter);
    return sb.toString().getBytes();
}
 
Developer ID: lovelock, Project: storm-demo, Lines: 15, Source: DelimitedRecordFormat.java


Note: The backtype.storm.tuple.Fields class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.