

Java Config.get Method Code Examples

This article collects typical usage examples of the Java method backtype.storm.Config.get, gathered from open-source projects. If you are unsure what Config.get does, how to call it, or want to see it in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, backtype.storm.Config.


Seven code examples of Config.get are shown below, ordered by popularity.
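Before the examples, a minimal sketch of the method's basic contract may help: Config extends HashMap<String, Object>, so get returns an untyped Object that callers cast and null-check themselves. The key choices and the default value below are illustrative only, not taken from any of the projects cited later.

import backtype.storm.Config;

public class ConfigGetSketch {
    public static void main(String[] args) {
        Config conf = new Config();
        conf.put(Config.TOPOLOGY_NAME, "demo-topology");

        // get() returns Object, so the caller casts to the expected type
        String name = (String) conf.get(Config.TOPOLOGY_NAME);

        // for an absent key, get() returns null; supply a default explicitly
        Object workersObj = conf.get(Config.TOPOLOGY_WORKERS);
        int workers = (workersObj == null) ? 1 : ((Number) workersObj).intValue();

        System.out.println(name + " with " + workers + " worker(s)");
    }
}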

Example 1: SetRemoteTopology

import backtype.storm.Config; // import the package/class this method depends on
public void SetRemoteTopology() throws AlreadyAliveException,
		InvalidTopologyException, TopologyAssignException {
	Config conf = getConf();
	StormTopology topology = buildTopology();

	conf.put(Config.STORM_CLUSTER_MODE, "distributed");
	String streamName = (String) conf.get(Config.TOPOLOGY_NAME);
	if (streamName == null) {
		streamName = "SequenceTest";
	}

	// pick the messaging transport based on the topology name
	if (streamName.contains("zeromq")) {
		conf.put(Config.STORM_MESSAGING_TRANSPORT,
				"com.alibaba.jstorm.message.zeroMq.MQContext");
	} else {
		conf.put(Config.STORM_MESSAGING_TRANSPORT,
				"com.alibaba.jstorm.message.netty.NettyContext");
	}

	StormSubmitter.submitTopology(streamName, conf, topology);
}
 
Author: zhangjunfang, Project: jstorm-0.9.6.3-, Lines: 24, Source: SequenceTopologyTool.java

Example 2: checkConfiguration

import backtype.storm.Config; // import the package/class this method depends on
private void checkConfiguration(Config stormConf) {

	// ensure that a value has been set for the agent name and that that
	// agent name is the first value in the agents we advertise for robot
	// rules parsing
	String agentName = (String) stormConf.get("http.agent.name");
	if (agentName == null || agentName.trim().length() == 0) {
		String message = "Fetcher: No agents listed in 'http.agent.name'"
				+ " property.";
		LOG.error(message);
		throw new IllegalArgumentException(message);
	}
}
 
Author: zaizi, Project: alfresco-apache-storm-demo, Lines: 14, Source: FetcherBolt.java
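The null-or-blank guard in Example 2 is a recurring pattern for required settings. A hedged generalization, assuming nothing beyond Config.get itself (the helper name requireNonBlank is ours, not from the project above):

import backtype.storm.Config;

public final class ConfigChecks {

    // Hypothetical helper: fail fast when a required string property
    // is missing or blank, mirroring the check in Example 2.
    public static String requireNonBlank(Config conf, String key) {
        String value = (String) conf.get(key);
        if (value == null || value.trim().length() == 0) {
            throw new IllegalArgumentException("No value set for required property '" + key + "'");
        }
        return value;
    }
}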

Example 3: buildTopology

import backtype.storm.Config; // import the package/class this method depends on
public static StormTopology buildTopology(Config conf, LocalDRPC drpc) {

    TridentTopology topology = new TridentTopology();

    // Kafka Spout
    BrokerHosts zk = new ZkHosts(conf.get(CrawlerConfig.KAFKA_CONSUMER_HOST_NAME) + ":" + conf.get(CrawlerConfig.KAFKA_CONSUMER_HOST_PORT));
    TridentKafkaConfig kafkaConfig = new TridentKafkaConfig(zk, (String) conf.get(CrawlerConfig.KAFKA_TOPIC_DOCUMENT_NAME));
    kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    OpaqueTridentKafkaSpout spout = new OpaqueTridentKafkaSpout(kafkaConfig);

    // ElasticSearch Persistent State
    Settings esSettings = ImmutableSettings.settingsBuilder()
            .put("storm.elasticsearch.cluster.name", conf.get(CrawlerConfig.ELASTICSEARCH_CLUSTER_NAME))
            .put("storm.elasticsearch.hosts", conf.get(CrawlerConfig.ELASTICSEARCH_HOST_NAME) + ":" + conf.get(CrawlerConfig.ELASTICSEARCH_HOST_PORT))
            .build();
    StateFactory esStateFactory = new ESIndexState.Factory<JSONObject>(new ClientFactory.NodeClient(esSettings.getAsMap()), JSONObject.class);
    TridentState esStaticState = topology.newStaticState(esStateFactory);

    String esIndex = (String) conf.get(CrawlerConfig.ELASTICSEARCH_INDEX_NAME);
    topology.newStream("docstream", spout)
            .each(new Fields("str"), new SplitDocStreamArgs(), new Fields("filename", "task", "user", "content"))
            .each(new Fields("filename", "task", "user"), new PrintFilter("Kafka"))
            .each(new Fields("filename", "task", "user", "content"), new PrepareDocForElasticSearch(), new Fields("index", "type", "id", "source"))
            .partitionPersist(esStateFactory, new Fields("index", "type", "id", "source"), new ESIndexUpdater<String>(new ESTridentTupleMapper()), new Fields());

    return topology.build();
}
 
Author: preems, Project: realtime-event-processing, Lines: 28, Source: DocEventProcessingTopology.java
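Examples 3, 4, and 7 all build a host:port endpoint by concatenating two config values; since get returns Object, the concatenation relies on each value's toString(). A small sketch of that idiom, extracted for clarity (the class and method names here are hypothetical, not from the projects):

import backtype.storm.Config;

public final class Endpoints {

    // Hypothetical utility distilled from the topologies above: join two
    // config entries into a "host:port" string, failing if either is unset.
    public static String endpoint(Config conf, String hostKey, String portKey) {
        Object host = conf.get(hostKey);
        Object port = conf.get(portKey);
        if (host == null || port == null) {
            throw new IllegalArgumentException(hostKey + " and " + portKey + " must both be set");
        }
        return host + ":" + port;
    }
}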

Example 4: buildTopology

import backtype.storm.Config; // import the package/class this method depends on
public static StormTopology buildTopology(Config conf, LocalDRPC localDrpc) {
    TridentTopology topology = new TridentTopology();

    //Kafka Spout
    BrokerHosts zk = new ZkHosts(conf.get(CrawlerConfig.KAFKA_CONSUMER_HOST_NAME) + ":" + conf.get(CrawlerConfig.KAFKA_CONSUMER_HOST_PORT));
    TridentKafkaConfig kafkaConfig = new TridentKafkaConfig(zk, (String) conf.get(CrawlerConfig.KAFKA_TOPIC_NAME));
    kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    OpaqueTridentKafkaSpout spout = new OpaqueTridentKafkaSpout(kafkaConfig);

    //ElasticSearch Persistent State
    Settings esSettings = ImmutableSettings.settingsBuilder()
            .put("storm.elasticsearch.cluster.name", conf.get(CrawlerConfig.ELASTICSEARCH_CLUSTER_NAME))
            .put("storm.elasticsearch.hosts", conf.get(CrawlerConfig.ELASTICSEARCH_HOST_NAME) + ":" + conf.get(CrawlerConfig.ELASTICSEARCH_HOST_PORT))
            .build();
    StateFactory esStateFactory = new ESIndexState.Factory<String>(new ClientFactory.NodeClient(esSettings.getAsMap()), String.class);
    TridentState esStaticState = topology.newStaticState(esStateFactory);

    //Topology
    topology.newStream("crawlKafkaSpout", spout).parallelismHint(5)
             //Splits url and depth information on receiving from Kafka
            .each(new Fields("str"), new SplitKafkaInput(), new Fields("url", "depth"))
            //Bloom Filter. Filters already crawled URLs
            .each(new Fields("url"), new URLFilter())
            //Download and Parse Webpage
            .each(new Fields("url"), new GetAdFreeWebPage(), new Fields("content_html", "title", "href"))//TODO Optimize
            //Add Href URls to Kafka queue
            .each(new Fields("href", "depth"), new KafkaProducerFilter())//TODO Replace with kafka persistent state.
            //Insert to Elasticsearch
            .each(new Fields("url", "content_html", "title"), new PrepareForElasticSearch(), new Fields("index", "type", "id", "source"))
            .partitionPersist(esStateFactory, new Fields("index", "type", "id", "source"), new ESIndexUpdater<String>(new ESTridentTupleMapper()))
    ;

    //DRPC
    topology.newDRPCStream("search", localDrpc)
            .each(new Fields("args"), new SplitDRPCArgs(), new Fields("query_input"))
            .each(new Fields("query_input"), new BingAutoSuggest(0), new Fields("query_preProcessed"))//TODO return List of expanded query
            .each(new Fields("query_preProcessed"), new PrepareSearchQuery(), new Fields("query", "indices", "types"))
            .groupBy(new Fields("query", "indices", "types"))
            .stateQuery(esStaticState, new Fields("query", "indices", "types"), new QuerySearchIndexQuery(), new Fields("results"))
    ;

    return topology.build();
}
 
Author: skalmadka, Project: web-crawler, Lines: 44, Source: WebCrawlerTopology.java

Example 5: test

import backtype.storm.Config; // import the package/class this method depends on
@Test(timeout = 30000)
public void test() throws AlreadyAliveException, InvalidTopologyException, IOException {
	Config conf = new Config();
	
	if(System.getProperty("user.dir").endsWith("JUnitLoop")) {
		conf.put(FileReaderSpout.INPUT_FILE_NAME, "../aeolus/queries/lrb/src/test/resources/xway-");
		conf.put(SpoutDataFileOutputBolt.OUTPUT_DIR_NAME, "../aeolus/queries/lrb/src/test/resources");
	} else {
		conf.put(FileReaderSpout.INPUT_FILE_NAME, "src/test/resources/xway-");
		conf.put(SpoutDataFileOutputBolt.OUTPUT_DIR_NAME, "src/test/resources");
	}
	
	LinkedList<String> inputFiles = new LinkedList<String>();
	for(int i = 0; i < 10; ++i) {
		inputFiles.add(i + "-sample.dat");
	}
	conf.put(FileReaderSpout.INPUT_FILE_SUFFIXES, inputFiles);
	
	
	
	TopologyBuilder builder = new TopologyBuilder();
	final int dop = 1 + this.r.nextInt(10);
	builder.setSpout("Spout", new FileReaderSpout(), new Integer(dop));
	SpoutDataFileOutputBolt sink = new SpoutDataFileOutputBolt();
	builder.setBolt("Sink", new TimestampMerger(sink, 0), new Integer(1)).shuffleGrouping("Spout")
		.allGrouping("Spout", TimestampMerger.FLUSH_STREAM_ID);
	
	
	
	LocalCluster cluster = new LocalCluster();
	cluster.submitTopology("LR-SpoutTest", conf, builder.createTopology());
	Utils.sleep(10 * 1000);
	cluster.deactivate("LR-SpoutTest");
	Utils.sleep(1000);
	cluster.killTopology("LR-SpoutTest");
	Utils.sleep(5 * 1000); // give "kill" some time to clean up; otherwise, test might hang and time out
	cluster.shutdown();
	
	
	
	BufferedReader reader = new BufferedReader(new FileReader(
		(String)conf.get(SpoutDataFileOutputBolt.OUTPUT_DIR_NAME) + File.separator + "result.dat"));
	LinkedList<String> result = new LinkedList<String>();
	String line;
	while((line = reader.readLine()) != null) {
		result.add(line);
	}
	reader.close();
	
	LinkedList<String> expectedResult = new LinkedList<String>();
	for(String file : inputFiles) {
		reader = new BufferedReader(new FileReader((String)conf.get(FileReaderSpout.INPUT_FILE_NAME) + file));
		while((line = reader.readLine()) != null) {
			int p1 = line.indexOf(",");
			int p2 = line.indexOf(",", p1 + 1);
			expectedResult.add(line.substring(p1 + 1, p2) + "," + line);
		}
		reader.close();
	}
	Collections.sort(expectedResult);
	expectedResult.add("FLUSH");
	
	Assert.assertEquals(expectedResult, result);
}
 
Author: mjsax, Project: aeolus, Lines: 65, Source: FileReaderSpoutITCase.java

Example 6: main

import backtype.storm.Config; // import the package/class this method depends on
public static void main(String[] args) throws IOException {
  Properties props = StormSamoaUtils.getProperties();

  String uploadedJarLocation = props.getProperty(StormJarSubmitter.UPLOADED_JAR_LOCATION_KEY);
  if (uploadedJarLocation == null) {
    logger.error("Invalid properties file. It must have key {}",
        StormJarSubmitter.UPLOADED_JAR_LOCATION_KEY);
    return;
  }

  List<String> tmpArgs = new ArrayList<String>(Arrays.asList(args));
  int numWorkers = StormSamoaUtils.numWorkers(tmpArgs);

  args = tmpArgs.toArray(new String[0]);
  StormTopology stormTopo = StormSamoaUtils.argsToTopology(args);

  Config conf = new Config();
  conf.putAll(Utils.readStormConfig());
  conf.putAll(Utils.readCommandLineOpts());
  conf.setDebug(false);
  conf.setNumWorkers(numWorkers);

  String profilerOption =
      props.getProperty(StormTopologySubmitter.YJP_OPTIONS_KEY);
  if (profilerOption != null) {
    String topoWorkerChildOpts = (String) conf.get(Config.TOPOLOGY_WORKER_CHILDOPTS);
    StringBuilder optionBuilder = new StringBuilder();
    if (topoWorkerChildOpts != null) {
      optionBuilder.append(topoWorkerChildOpts);
      optionBuilder.append(' ');
    }
    optionBuilder.append(profilerOption);
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, optionBuilder.toString());
  }

  Map<String, Object> myConfigMap = new HashMap<String, Object>(conf);
  StringWriter out = new StringWriter();

  try {
    JSONValue.writeJSONString(myConfigMap, out);
  } catch (IOException e) {
    System.out.println("Error in writing JSONString");
    e.printStackTrace();
    return;
  }

  Config config = new Config();
  config.putAll(Utils.readStormConfig());

  NimbusClient nc = NimbusClient.getConfiguredClient(config);
  String topologyName = stormTopo.getTopologyName();
  try {
    System.out.println("Submitting topology with name: "
        + topologyName);
    nc.getClient().submitTopology(topologyName, uploadedJarLocation,
        out.toString(), stormTopo.getStormBuilder().createTopology());
    System.out.println(topologyName + " is successfully submitted");

  } catch (AlreadyAliveException aae) {
    System.out.println("Fail to submit " + topologyName
        + "\nError message: " + aae.get_msg());
  } catch (InvalidTopologyException ite) {
    System.out.println("Invalid topology for " + topologyName);
    ite.printStackTrace();
  } catch (TException te) {
    System.out.println("Texception for " + topologyName);
    te.printStackTrace();
  }
}
 
Author: apache, Project: incubator-samoa, Lines: 70, Source: StormTopologySubmitter.java
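Example 6 demonstrates the read-modify-write idiom on a config entry: read the current Config.TOPOLOGY_WORKER_CHILDOPTS value, append to it, and put the merged string back. A condensed sketch of just that idiom (the extra JVM flag is an arbitrary placeholder):

import backtype.storm.Config;

public class ChildOptsSketch {
    public static void main(String[] args) {
        Config conf = new Config();
        String extraOpt = "-XX:+PrintGCDetails"; // placeholder JVM option

        // read the existing worker JVM options (may be null), append ours,
        // and write the merged string back under the same key
        String current = (String) conf.get(Config.TOPOLOGY_WORKER_CHILDOPTS);
        String merged = (current == null) ? extraOpt : current + " " + extraOpt;
        conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, merged);

        System.out.println(conf.get(Config.TOPOLOGY_WORKER_CHILDOPTS));
    }
}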

Example 7: buildTopology

import backtype.storm.Config; // import the package/class this method depends on
public static StormTopology buildTopology(Config conf, LocalDRPC localDrpc) {
    TridentTopology topology = new TridentTopology();

    //Kafka Spout
    BrokerHosts zk = new ZkHosts(conf.get(CrawlerConfig.KAFKA_CONSUMER_HOST_NAME) + ":" + conf.get(CrawlerConfig.KAFKA_CONSUMER_HOST_PORT));
    TridentKafkaConfig kafkaConfig = new TridentKafkaConfig(zk, (String) conf.get(CrawlerConfig.KAFKA_TOPIC_NAME));
    kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    //kafkaConfig.ignoreZkOffsets=true;
    OpaqueTridentKafkaSpout spout = new OpaqueTridentKafkaSpout(kafkaConfig);

    //ElasticSearch Persistent State
    Settings esSettings = ImmutableSettings.settingsBuilder()
            .put("storm.elasticsearch.cluster.name", conf.get(CrawlerConfig.ELASTICSEARCH_CLUSTER_NAME))
            .put("storm.elasticsearch.hosts", conf.get(CrawlerConfig.ELASTICSEARCH_HOST_NAME) + ":" + conf.get(CrawlerConfig.ELASTICSEARCH_HOST_PORT))
            .build();
    StateFactory esStateFactory = new ESIndexState.Factory<JSONObject>(new ClientFactory.NodeClient(esSettings.getAsMap()), JSONObject.class);
    TridentState esStaticState = topology.newStaticState(esStateFactory);

    //Topology
    topology.newStream("crawlKafkaSpout", spout).parallelismHint(5)
            //Splits words on receiving from Kafka
            .each(new Fields("str"), new SplitFunction(), new Fields("url", "depth", "task", "user"))
            .each(new Fields("str"), new PrintFilter("Kafka"))
            //Bloom Filter, Filters already crawled URLs
            .each(new Fields("url", "task"), new URLFilter())
            //Download and Parse Webpage
            .each(new Fields("url"), new GetAdFreeWebPage(), new Fields("content_html", "title", "href"))
            //Sending URLs present in the page into the kafka queue.
            .each(new Fields("href", "depth", "task", "user"), new KafkaProducerFilter())
            //Insert to Elasticsearch
            .each(new Fields("url", "content_html", "title", "task", "user"), new PrepareForElasticSearch(), new Fields("index", "type", "id", "source"))
            .partitionPersist(esStateFactory, new Fields("index", "type", "id", "source"), new ESIndexUpdater<String>(new ESTridentTupleMapper()), new Fields())
            ;

    //DRPC
    topology.newDRPCStream("search", localDrpc)
            .each(new Fields("args"), new SplitDRPCArgs(), new Fields("query_input", "task"))
            .each(new Fields("query_input"), new BingAutoSuggest(0), new Fields("query_preProcessed"))
            .each(new Fields("query_preProcessed", "task"), new PrepareSearchQuery(), new Fields("query", "indices", "types"))
            .groupBy(new Fields("query", "indices", "types"))
            .stateQuery(esStaticState, new Fields("query", "indices", "types"), new QuerySearchIndexQuery(), new Fields("results"))
            ;

    return topology.build();
}
 
Author: preems, Project: realtime-event-processing, Lines: 46, Source: URLEventProcessingTopology.java


Note: The backtype.storm.Config.get examples in this article were collected from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from community-contributed open-source projects; copyright remains with the original authors, and distribution or reuse should follow each project's license. Please do not republish without permission.