

Java Utils.readStormConfig Method Code Examples

This article collects typical usage examples of the Java method org.apache.storm.utils.Utils.readStormConfig, drawn from open-source projects. If you are unsure what Utils.readStormConfig does or how to use it in practice, the curated examples below should help; you can also explore further usage of org.apache.storm.utils.Utils, the class that defines the method.


Seven code examples of Utils.readStormConfig are shown below, ordered by popularity.
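All seven share one pattern: load the cluster configuration with Utils.readStormConfig, optionally overlay overrides, and hand the merged map to a client. The following is a minimal sketch of that pattern, not taken from any of the projects below; it assumes a reachable Nimbus at the address configured in storm.yaml.

import java.util.Map;

import org.apache.storm.generated.Nimbus;
import org.apache.storm.utils.NimbusClient;
import org.apache.storm.utils.Utils;

public class ReadStormConfigSketch {
    public static void main(String[] args) throws Exception {
        // Load defaults.yaml merged with storm.yaml from the classpath.
        Map clusterConf = Utils.readStormConfig();
        // Overlay any -Dstorm.options=... overrides from the command line.
        clusterConf.putAll(Utils.readCommandLineOpts());
        // Connect to Nimbus using the merged configuration.
        Nimbus.Client client = NimbusClient.getConfiguredClient(clusterConf).getClient();
        System.out.println(client.getClusterInfo());
    }
}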

Example 1: execute

import org.apache.storm.utils.Utils; // import the package/class the method depends on
private long execute(String arg, PrintWriter writer) throws TException, DRPCExecutionException {
  LOG.debug(String.format("executing %s('%s')", function, arg));
  try {
    DRPCClient client = new DRPCClient(Utils.readStormConfig(), DRPC_SERVER, port);
    long start = System.currentTimeMillis();
    String result = client.execute(function, arg);
    long end = System.currentTimeMillis();
    long latency = end - start;
    writer.println(String.format("%s('%s') = %s, latency = %d ms", function, arg, result, latency));
    writer.flush();
    return latency;
  } catch (Exception e) {
    e.printStackTrace();
    return -1;
  }
}
 
Developer ID: MBtech, Project: stormbenchmark, Lines of code: 17, Source file: DRPCMetricsCollector.java
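One caveat in Example 1: the DRPCClient is created per call and never closed, leaking a Thrift connection each time. Below is a hedged variant of just the connection handling, relying on the close() method DRPCClient inherits from ThriftClient; the method name executeOnce and its parameters are illustrative, not from the project.

import org.apache.storm.utils.DRPCClient;
import org.apache.storm.utils.Utils;

// Measure one DRPC call and always release the underlying Thrift connection.
private long executeOnce(String function, String arg, String drpcServer, int port) {
  try {
    DRPCClient client = new DRPCClient(Utils.readStormConfig(), drpcServer, port);
    try {
      long start = System.currentTimeMillis();
      client.execute(function, arg);
      return System.currentTimeMillis() - start;
    } finally {
      client.close(); // close the Thrift transport
    }
  } catch (Exception e) {
    e.printStackTrace();
    return -1;
  }
}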

Example 2: main

import org.apache.storm.utils.Utils; // import the package/class the method depends on
public static void main(String[] args) throws Exception {

    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("spout", new FastRandomSentenceSpout(), 4);

    builder.setBolt("split", new SplitSentence(), 4).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), 4).fieldsGrouping("split", new Fields("word"));

    Config conf = new Config();
    conf.registerMetricsConsumer(org.apache.storm.metric.LoggingMetricsConsumer.class);

    String name = "wc-test";
    if (args != null && args.length > 0) {
        name = args[0];
    }

    conf.setNumWorkers(1);
    StormSubmitter.submitTopologyWithProgressBar(name, conf, builder.createTopology());

    Map clusterConf = Utils.readStormConfig();
    clusterConf.putAll(Utils.readCommandLineOpts());
    Nimbus.Client client = NimbusClient.getConfiguredClient(clusterConf).getClient();

    //Sleep for 5 mins
    for (int i = 0; i < 10; i++) {
        Thread.sleep(30 * 1000);
        printMetrics(client, name);
    }
    kill(client, name);
  }
 
Developer ID: ziyunhx, Project: storm-net-adapter, Lines of code: 32, Source file: FastWordCountTopology.java

Example 3: C

import org.apache.storm.utils.Utils; // import the package/class the method depends on
public C(Map conf) {
  Map clusterConf = Utils.readStormConfig();
  if (conf != null) {
    clusterConf.putAll(conf);
  }
  Boolean isLocal = (Boolean)clusterConf.get("run.local");
  if (isLocal != null && isLocal) {
    _local = new LocalCluster();
  } else {
    _client = NimbusClient.getConfiguredClient(clusterConf).getClient();
  }
}
 
Developer ID: ziyunhx, Project: storm-net-adapter, Lines of code: 13, Source file: ThroughputVsLatency.java
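The constructor above is a handy pattern: the same benchmark harness runs against an in-process LocalCluster or a remote Nimbus, switched purely by a run.local key layered on top of the defaults that Utils.readStormConfig loads.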

Example 4: main

import org.apache.storm.utils.Utils; // import the package/class the method depends on
public static void main(String[] args) throws Exception {

    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("spout", new InOrderSpout(), 8);
    builder.setBolt("count", new Check(), 8).fieldsGrouping("spout", new Fields("c1"));

    Config conf = new Config();
    conf.registerMetricsConsumer(org.apache.storm.metric.LoggingMetricsConsumer.class);

    String name = "in-order-test";
    if (args != null && args.length > 0) {
        name = args[0];
    }

    conf.setNumWorkers(1);
    StormSubmitter.submitTopologyWithProgressBar(name, conf, builder.createTopology());

    Map clusterConf = Utils.readStormConfig();
    clusterConf.putAll(Utils.readCommandLineOpts());
    Nimbus.Client client = NimbusClient.getConfiguredClient(clusterConf).getClient();

    // Sleep for 25 mins (50 iterations of 30 s)
    for (int i = 0; i < 50; i++) {
        Thread.sleep(30 * 1000);
        printMetrics(client, name);
    }
    kill(client, name);
  }
 
Developer ID: ziyunhx, Project: storm-net-adapter, Lines of code: 30, Source file: InOrderDeliveryTest.java

Example 5: createStormConf

import org.apache.storm.utils.Utils; // import the package/class the method depends on
/**
 * Create the configuration properties Storm needs.
 */
@SuppressWarnings("unchecked")
public Map<String, Object> createStormConf()
{
    Map<String, Object> conf = Maps.newHashMap();

    Map<String, Object> stormConf = Utils.readStormConfig();
    if (stormConf != null)
    {
        conf.putAll(stormConf);
    }

    conf.put(Config.NIMBUS_HOST, nimbusHost);
    conf.put(Config.NIMBUS_THRIFT_PORT, nimbusPort);
    conf.put(Config.STORM_THRIFT_TRANSPORT_PLUGIN, thriftTransportPlugin);
    conf.put(TASK_KILL_WAIT_SECONDS, killWaitingSeconds);
    conf.put(Config.TOPOLOGY_WORKERS, workerNumber);
    conf.put(Config.STORM_ZOOKEEPER_SERVERS, zkAddresses);
    conf.put(Config.STORM_ZOOKEEPER_PORT, zkPort);
    // Disable the acker in Streaming
    conf.put(Config.TOPOLOGY_ACKER_EXECUTORS, 0);
//    conf.put(Config.STORM_SECURITY_AUTHENTICATION, AUTHENTICATION_NOSECURITY_VALUE);
//
//    if (isSecurity)
//    {
//        conf.put(Config.STORM_SECURITY_SASL_QOP, saslQop);
//        conf.put(Config.STORM_SECURITY_PRINCIPAL_INSTANCE, userPrincipalInstance);
//        conf.put(Config.STORM_SECURITY_AUTHENTICATION, AUTHENTICATION_SECURITY_VALUE);
//    }

    // Constants are defined inside StormConf, rather than referencing Storm's Config
    // object, to guarantee this also runs on the plain community edition.
    /**
     * We originally planned to drop this internal mapping of Storm configuration keys,
     * but Storm configuration values are compound structures that may contain Maps or
     * Lists, while Streaming only accepts string-valued properties, so type conversion
     * is impossible and this mapping approach is the only option.
     */
    if (haZKConnectionTimeOut > 0)
    {
        conf.put(STORM_ZOOKEEPER_CONNECTION_TIMEOUT, haZKConnectionTimeOut);
    }

    if (haZKSessionTimeOut > 0)
    {
        conf.put(STORM_ZOOKEEPER_SESSION_TIMEOUT, haZKSessionTimeOut);
    }

//    if (keyTabPath != null)
//    {
//        conf.put(Config.TOPOLOGY_KEYTAB_FILE, keyTabPath);
//    }
//
//    if (userPrincipal != null)
//    {
//        conf.put(Config.TOPOLOGY_KERBEROS_PRINCIPLE, userPrincipal);
//    }

    if (autoCredentials != null)
    {
        conf.put(Config.TOPOLOGY_AUTO_CREDENTIALS, autoCredentials);
    }

    return conf;
}
 
Developer ID: HuaweiBigData, Project: StreamCQL, Lines of code: 68, Source file: StormConf.java
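Note the ordering here: Utils.readStormConfig supplies the baseline, and every explicit put afterwards overrides the value loaded from storm.yaml, which is what lets the per-job Nimbus address and ZooKeeper settings win.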

Example 6: runOnStormCluster

import org.apache.storm.utils.Utils; // import the package/class the method depends on
private static Nimbus.Client runOnStormCluster(Config conf, StormTopology topology) throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
    Map<String, Object> clusterConf = Utils.readStormConfig();
    StormSubmitter.submitTopologyWithProgressBar(TOPO_NAME, conf, topology);
    return NimbusClient.getConfiguredClient(clusterConf).getClient();
}
 
Developer ID: hortonworks, Project: streamline, Lines of code: 6, Source file: WindowedQueryBolt_TestTopology.java
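Two maps are in play in Example 6: the topology-level conf travels with the submitted topology, while the clusterConf returned by Utils.readStormConfig only tells NimbusClient where and how to connect.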

Example 7: main

import org.apache.storm.utils.Utils; // import the package/class the method depends on
/** Reads file content from sourceDir using HdfsSpout; once a file is fully consumed, it is moved to sourceArchiveDir. */
public static void main(String[] args) throws Exception {
  // 0 - validate args
  if (args.length < 7) {
    System.err.println("Please check command line arguments.");
    System.err.println("Usage :");
    System.err.println(HdfsSpoutTopology.class.toString() + " topologyName hdfsUri fileFormat sourceDir sourceArchiveDir badDir spoutCount");
    System.err.println(" topologyName - topology name.");
    System.err.println(" hdfsUri - hdfs name node URI");
    System.err.println(" fileFormat - set to 'TEXT' for reading text files or 'SEQ' for sequence files.");
    System.err.println(" sourceDir - read files from this HDFS dir using HdfsSpout.");
    System.err.println(" sourceArchiveDir - after a file in sourceDir is read completely, it is moved to this HDFS location.");
    System.err.println(" badDir - files that cannot be read properly will be moved to this HDFS location.");
    System.err.println(" spoutCount - num of spout instances.");
    System.err.println();
    System.exit(-1);
  }

  // 1 - parse cmd line args
  String topologyName = args[0];
  String hdfsUri = args[1];
  String fileFormat = args[2];
  String sourceDir = args[3];
  String sourceArchiveDir = args[4];
  String badDir = args[5];
  int spoutNum = Integer.parseInt(args[6]);

  // 2 - create and configure spout and bolt
  ConstBolt bolt = new ConstBolt();

  HdfsSpout spout = new HdfsSpout().withOutputFields("line");
  Config conf = new Config();
  conf.put(Configs.SOURCE_DIR, sourceDir);
  conf.put(Configs.ARCHIVE_DIR, sourceArchiveDir);
  conf.put(Configs.BAD_DIR, badDir);
  conf.put(Configs.READER_TYPE, fileFormat);
  conf.put(Configs.HDFS_URI, hdfsUri);
  conf.setDebug(true);
  conf.setNumAckers(1);
  conf.setMaxTaskParallelism(1);

  // 3 - Create and configure topology
  conf.setNumWorkers(WORKER_NUM);
  conf.registerMetricsConsumer(LoggingMetricsConsumer.class);

  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout(SPOUT_ID, spout, spoutNum);
  builder.setBolt(BOLT_ID, bolt, 1).shuffleGrouping(SPOUT_ID);

  // 4 - submit topology, wait for a few min and terminate it
  Map clusterConf = Utils.readStormConfig();
  StormSubmitter.submitTopologyWithProgressBar(topologyName, conf, builder.createTopology());
  Nimbus.Client client = NimbusClient.getConfiguredClient(clusterConf).getClient();

  // 5 - Print metrics every 30 sec, kill topology after 20 min
  for (int i = 0; i < 40; i++) {
    Thread.sleep(30 * 1000);
    FastWordCountTopology.printMetrics(client, topologyName);
  }
  FastWordCountTopology.kill(client, topologyName);
}
 
Developer ID: ziyunhx, Project: storm-net-adapter, Lines of code: 64, Source file: HdfsSpoutTopology.java


Note: The org.apache.storm.utils.Utils.readStormConfig examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors, and copyright in the source code remains with the original authors. Consult each project's license before distributing or using the code; do not reproduce without permission.