

Java TopologyInfo.get_executors Method Code Examples

This article collects typical usage examples of the Java method backtype.storm.generated.TopologyInfo.get_executors. If you have been wondering what exactly TopologyInfo.get_executors does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of its declaring class, backtype.storm.generated.TopologyInfo.


Nine code examples of the TopologyInfo.get_executors method are shown below, sorted by popularity by default.
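Before diving into the examples, here is a minimal, self-contained sketch of the call pattern they all share: obtain a Nimbus.Client, fetch a TopologyInfo, and iterate over get_executors(). It assumes a Storm 0.9.x-era classpath where the backtype.storm.utils.NimbusClient and Utils helpers are available, plus a reachable Nimbus configured in storm.yaml; the class name ListExecutors is made up for illustration.

import backtype.storm.generated.ClusterSummary;
import backtype.storm.generated.ExecutorSummary;
import backtype.storm.generated.Nimbus;
import backtype.storm.generated.TopologyInfo;
import backtype.storm.generated.TopologySummary;
import backtype.storm.utils.NimbusClient;
import backtype.storm.utils.Utils;

import java.util.Map;

// Hypothetical driver class, not part of any of the projects quoted below.
public class ListExecutors {
    public static void main(String[] args) throws Exception {
        // Read storm.yaml from the classpath and connect to Nimbus.
        Map conf = Utils.readStormConfig();
        Nimbus.Client client = NimbusClient.getConfiguredClient(conf).getClient();

        // For every running topology, print where each executor is placed.
        ClusterSummary cluster = client.getClusterInfo();
        for (TopologySummary ts : cluster.get_topologies()) {
            TopologyInfo info = client.getTopologyInfo(ts.get_id());
            for (ExecutorSummary es : info.get_executors()) {
                System.out.println(ts.get_name() + ": " + es.get_component_id()
                        + " @ " + es.get_host() + ":" + es.get_port());
            }
        }
    }
}

Every example that follows is a variation on this loop: filtering executors by component id or host, reading their ExecutorInfo task ranges, or aggregating their ExecutorStats.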

Example 1: getTopologyTPS

import backtype.storm.generated.TopologyInfo; // import the class the method depends on
protected long getTopologyTPS(TopologySummary topology, Client client) throws NotAliveException, TException {
    long topologyTps = 0L;
    String topologyId = topology.get_id();
    if(topologyId.startsWith("ClusterMonitor")){
        return topologyTps;
    }
    TopologyInfo topologyInfo = client.getTopologyInfo(topologyId);
    if(topologyInfo == null){
        return topologyTps;
    }
    List<ExecutorSummary> executorSummaryList = topologyInfo.get_executors();
    for(ExecutorSummary executor : executorSummaryList){
        topologyTps += getComponentTPS(executor);
    }
    LOGGER.info("topology = " + topology.get_name() + ", tps = " + topologyTps);
    return topologyTps;
}
 
Developer: kkllwww007, Project: jstrom, Lines: 18, Source: ClusterInfoBolt.java

Example 2: getExecutorHost

import backtype.storm.generated.TopologyInfo; // import the class the method depends on
/**
 * Returns the host of the specified executor in the pipeline.
 * 
 * @return the host of the executor (or <b>null</b> if not found)
 */
public String getExecutorHost() {
    List<String> hosts = new ArrayList<String>();
    TopologyInfo topologyInfo = getTopologyInfo(pipelineName);
    if (topologyInfo != null) {
        List<ExecutorSummary> executors = topologyInfo.get_executors();
        for (int e = 0; e < executors.size(); e++) {
            ExecutorSummary executor = executors.get(e);
            String nodeName = executor.get_component_id();
            if (nodeName.equals(executorName)) {
                hosts.add(executor.get_host());
            }
        }
    }
    return shuffleHost(hosts);
}
 
Developer: QualiMaster, Project: Infrastructure, Lines: 21, Source: CollectingTopologyInfo.java

Example 3: collectExecutors

import backtype.storm.generated.TopologyInfo; // import the class the method depends on
/**
 * Collects a mapping of the actual executors.
 * 
 * @param topoInfo the dynamic Storm topology information
 * @return a mapping between the executor name and the executor information instance
 */
private static Map<String, ExecutorSummary> collectExecutors(TopologyInfo topoInfo) {
    Map<String, ExecutorSummary> result = new HashMap<String, ExecutorSummary>();
    List<ExecutorSummary> executors = topoInfo.get_executors();
    if (null != executors) {
        for (int e = 0; e < executors.size(); e++) {
            ExecutorSummary executor = executors.get(e);
            result.put(executor.get_component_id(), executor);
        }
    }
    return result;
}
 
Developer: QualiMaster, Project: Infrastructure, Lines: 18, Source: Utils.java

Example 4: addExecutors

import backtype.storm.generated.TopologyInfo; // import the class the method depends on
/**
 * Adds pseudo-executors for testing to <code>topoInfo</code>. Avoids adding already known executors twice.
 * 
 * @param topoInfo the information instance to be modified
 * @param executors the names of the executors
 */
private static void addExecutors(TopologyInfo topoInfo, List<String> executors) {
    Set<String> known = new HashSet<String>();
    if (topoInfo.get_executors_size() > 0) {
        for (ExecutorSummary e : topoInfo.get_executors()) {
            known.add(e.get_component_id()); 
        }
    }
    for (String n : executors) {
        if (!known.contains(n)) {
            topoInfo.add_to_executors(new ExecutorSummary(new ExecutorInfo(1, 1), n, "localhost", 1234, 10));
        }
    }
}
 
Developer: QualiMaster, Project: Infrastructure, Lines: 20, Source: ManualTopologyCreator.java

Example 5: taskComponentMapping

import backtype.storm.generated.TopologyInfo; // import the class the method depends on
/**
 * Maps executor tasks from <code>info</code>.
 * 
 * @param info the topology info containing the executor-to-task-id mapping
 * @return a task-id-to-component-id mapping
 */
public static Map<Integer, String> taskComponentMapping(TopologyInfo info) {
    Map<Integer, String> result = new HashMap<Integer, String>();
    for (ExecutorSummary executor : info.get_executors()) {
        ExecutorInfo eInfo = executor.get_executor_info();
        for (int t = eInfo.get_task_start(); t <= eInfo.get_task_end(); t++) {
            result.put(t, executor.get_component_id());
        }
    }
    return result;
}
 
Developer: QualiMaster, Project: Infrastructure, Lines: 17, Source: ZkUtils.java

Example 6: getUsedPorts

import backtype.storm.generated.TopologyInfo; // import the class the method depends on
/**
 * Returns the ports used by a topology.
 * 
 * @param topology the topology to return the ports for
 * @return the used ports
 */
public static Set<Integer> getUsedPorts(TopologyInfo topology) {
    Set<Integer> result = new HashSet<Integer>();
    List<ExecutorSummary> executors = topology.get_executors();
    for (int e = 0; e < executors.size(); e++) {
        result.add(executors.get(e).get_port());
    }
    return result;
}
 
Developer: QualiMaster, Project: Infrastructure, Lines: 15, Source: ThriftConnection.java

Example 7: getUsedPort

import backtype.storm.generated.TopologyInfo; // import the class the method depends on
/**
 * Returns the ports used by a topology on <code>host</code>.
 * 
 * @param topology the topology to return the ports for
 * @param host the host name
 * @return the used ports
 */
public static Set<Integer> getUsedPort(TopologyInfo topology, String host) {
    Set<Integer> result = new HashSet<Integer>();
    List<ExecutorSummary> executors = topology.get_executors();
    for (int e = 0; e < executors.size(); e++) {
        ExecutorSummary executor = executors.get(e);
        if (executor.get_host().equals(host)) {
            result.add(executor.get_port());
        }
    }
    return result;
}
 
Developer: QualiMaster, Project: Infrastructure, Lines: 19, Source: ThriftConnection.java

Example 8: aggregateTopology

import backtype.storm.generated.TopologyInfo; // import the class the method depends on
/**
 * Aggregates the values for the topology.
 * 
 * @param topology the topology information to be aggregated
 * @return the affected / modified pipeline system part, may be <b>null</b> if the pipeline / topology does 
 *     not yet exist 
 * @throws NotAliveException in case that the requested topology is not alive
 * @throws TException in case of problems accessing the remote topology info
 */
private PipelineSystemPart aggregateTopology(TopologyInfo topology) throws TException, NotAliveException {
    PipelineSystemPart part = null;
    INameMapping mapping = MonitoringManager.getNameMapping(pipeline);
    int executorStartWaitingTime = getExecutorStartWaitingTime();
    if (null != mapping) {
        part = preparePipelineAggregation(topology, mapping);
        List<ExecutorSummary> executors = topology.get_executors();
        PipelineStatistics pStat = new PipelineStatistics(part);
        List<String> uptime = new ArrayList<String>();
        List<String> eventsReceived = new ArrayList<String>();
        int executorRunningCount = 0; // first heuristic... uptime of executors - does not work in every case
        List<String> nonInternal = new ArrayList<String>(); // second heuristic... non-legacy pipelines via events
        int nonInternalRunningCount = 0;
        for (int e = 0; e < executors.size(); e++) {
            ExecutorSummary executor = executors.get(e);
            String nodeName = executor.get_component_id();
            if (executorStartWaitingTime > 0 && executor.get_uptime_secs() > executorStartWaitingTime) { 
                executorRunningCount++;
                uptime.add(nodeName);
            }
            boolean isInternal = Utils.isInternal(executor); 
            ExecutorStats stats = executor.get_stats();
            PipelineNodeSystemPart nodePart = check(SystemState.getNodePart(mapping, part, nodeName), isInternal);
            if (!isInternal) {
                nonInternal.add(nodeName);
                if (isUp(nodePart, executor)) {
                    nonInternalRunningCount++;
                    eventsReceived.add(nodeName);
                }
            }
            if (null != stats) {
                if (isInternal) {
                    nodeName = "_SYSTEM_"; // TODO check whether a special node is better
                }
                if (doThrift(executor, nodePart, isInternal)) { // non-thrift happens along the events
                    aggregateExecutor(executor, nodePart, isInternal);
                }
                if (!isInternal && nodePart.getParent() instanceof PipelineSystemPart) { // pipeline -> reduce load
                    sendSummaryEvent(nodePart, part.getName(), MonitoringManager.DEMO_MSG_PROCESSING_ELEMENT);
                }
            } // no stats... in particular if
            pStat.collect(nodePart);
        }
        debugExecutors(executors, mapping, part);
        
        boolean allInitialized = pStat.commit();
        sendSummaryEvent(part, null, MonitoringManager.DEMO_MSG_PIPELINE);
        boolean createdChanged = false;
        if ((PipelineLifecycleEvent.Status.UNKNOWN == part.getStatus() // shall not happen 
            || PipelineLifecycleEvent.Status.STARTING == part.getStatus())) {
            // consider pipeline creation finished as soon as all executors are running and ready to work
            LOGGER.info("Trying to elevate '" + part.getName() + "' to CREATED: uptime " + uptime + " " 
                + executors.size() + " " + executorRunningCount + " events expected " + nonInternal + " received " 
                + eventsReceived + " " + nonInternal.size() + " " + nonInternalRunningCount);
            if (executors.size() == executorRunningCount || nonInternal.size() == nonInternalRunningCount) {
                part.changeStatus(PipelineLifecycleEvent.Status.CREATED, true);
                createdChanged = true;
            }
        } 
        if (!createdChanged && PipelineLifecycleEvent.Status.CREATED == part.getStatus()) {
            logElevating(part, pStat);
            if (allInitialized && areSubpipelinesUp(part.getName())) {
                part.changeStatus(PipelineLifecycleEvent.Status.INITIALIZED, true);
            }
        }
    } else {
        LOGGER.error("no mapping for " + topology.get_name());
    }
    return part;
}
 
Developer: QualiMaster, Project: Infrastructure, Lines: 80, Source: ThriftMonitoringTask.java

Example 9: metrics

import backtype.storm.generated.TopologyInfo; // import the class the method depends on
public boolean metrics(Nimbus.Client client, int size, long now, MetricsState state, String message) throws Exception {
  ClusterSummary summary = client.getClusterInfo();
  long time = now - state.lastTime;
  state.lastTime = now;
  int numSupervisors = summary.get_supervisors_size();
  int totalSlots = 0;
  int totalUsedSlots = 0;
  for (SupervisorSummary sup: summary.get_supervisors()) {
    totalSlots += sup.get_num_workers();
    totalUsedSlots += sup.get_num_used_workers();
  }
  int slotsUsedDiff = totalUsedSlots - state.slotsUsed;
  state.slotsUsed = totalUsedSlots;

  int numTopologies = summary.get_topologies_size();
  long totalTransferred = 0;
  int totalExecutors = 0;
  int executorsWithMetrics = 0;
  int totalFailed = 0;
  for (TopologySummary ts: summary.get_topologies()) {
    String id = ts.get_id();
    TopologyInfo info = client.getTopologyInfo(id);
    for (ExecutorSummary es: info.get_executors()) {
      ExecutorStats stats = es.get_stats();
      totalExecutors++;
      if (stats != null) {
        if (stats.get_specific().is_set_spout()) {
          SpoutStats ss = stats.get_specific().get_spout();
          Map<String, Long> failedMap = ss.get_failed().get(":all-time");
          if (failedMap != null) {
            for (String key: failedMap.keySet()) {
              Long tmp = failedMap.get(key);
              if (tmp != null) {
                totalFailed += tmp;
              }
            }
          }
        }

        Map<String,Map<String,Long>> transferred = stats.get_transferred();
        if (transferred != null) {
          Map<String, Long> e2 = transferred.get(":all-time");
          if (e2 != null) {
            executorsWithMetrics++;
            //The SOL messages are always on the default stream, so just count those
            Long dflt = e2.get("default");
            if (dflt != null) {
              totalTransferred += dflt;
            }
          }
        }
      }
    }
  }
  long transferredDiff = totalTransferred - state.transferred;
  state.transferred = totalTransferred;
  double throughput = (transferredDiff == 0 || time == 0) ? 0.0 : (transferredDiff * size)/(1024.0 * 1024.0)/(time/1000.0);
  System.out.println(message+"\t"+numTopologies+"\t"+totalSlots+"\t"+totalUsedSlots+"\t"+totalExecutors+"\t"+executorsWithMetrics+"\t"+now+"\t"+time+"\t"+transferredDiff+"\t"+throughput+"\t"+totalFailed);
  if ("WAITING".equals(message)) {
    //System.err.println(" !("+totalUsedSlots+" > 0 && "+slotsUsedDiff+" == 0 && "+totalExecutors+" > 0 && "+executorsWithMetrics+" >= "+totalExecutors+")");
  }
  return !(totalUsedSlots > 0 && slotsUsedDiff == 0 && totalExecutors > 0 && executorsWithMetrics >= totalExecutors);
}
 
Developer: yahoo, Project: storm-perf-test, Lines: 64, Source: Main.java


Note: The backtype.storm.generated.TopologyInfo.get_executors examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Please consult each project's license before using or redistributing the code, and do not reproduce this article without permission.