

Java TopologyContext Class Code Examples

This article collects typical usage examples of the Java class org.apache.storm.task.TopologyContext. If you are wondering what the TopologyContext class is for, how to use it, or are simply looking for working examples, the curated samples below should help.


The TopologyContext class belongs to the org.apache.storm.task package. Fifteen code examples of the class are presented below, sorted by popularity by default.
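
Before diving into the individual examples, here is a minimal sketch of how a bolt typically consumes its TopologyContext inside prepare(): registering a metric and reading the task id, the two operations that recur throughout the examples below. The bolt class MetricsAwareBolt and the metric name "events" are hypothetical illustrations; only the Storm API calls (registerMetric, getThisTaskId) are taken from the examples themselves.

import java.util.Map;

import org.apache.storm.metric.api.MultiCountMetric;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Tuple;

/**
 * Hypothetical bolt illustrating the TopologyContext calls used throughout the examples below.
 */
public class MetricsAwareBolt extends BaseRichBolt {
    private OutputCollector collector;
    private transient MultiCountMetric eventCounter;
    private int taskId;

    @Override
    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
        // Read this task's id from the context (compare examples 2 and 12).
        this.taskId = context.getThisTaskId();
        // Register a named metric with a 10-second time bucket (compare examples 4 and 5).
        this.eventCounter = context.registerMetric("events", new MultiCountMetric(), 10);
    }

    @Override
    public void execute(Tuple tuple) {
        // Count every tuple seen by this task, scoped by task id, then ack it.
        eventCounter.scope("task-" + taskId).incr();
        collector.ack(tuple);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // This sketch emits nothing; it only consumes and counts.
    }
}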

Example 1: testOpen_useDefaults

import org.apache.storm.task.TopologyContext; // import the required package/class
/**
 * Validate that we set things up with sane defaults.
 */
@Test
public void testOpen_useDefaults() {
    // Create empty config
    final Map<String, Object> config = new HashMap<>();

    // Create mock TopologyContext
    final TopologyContext mockTopologyContext = mock(TopologyContext.class);

    // Create recorder and call open.
    final StormRecorder recorder = new StormRecorder();
    recorder.open(config, mockTopologyContext);

    // Validate we got called as expected.

    // Shouldn't have interacted with the taskId
    verify(mockTopologyContext, never()).getThisTaskIndex();

    // Should have registered 3 metrics.
    verify(mockTopologyContext, times(1)).registerMetric(eq("GAUGES"), any(MultiReducedMetric.class), eq(defaultTimeWindow));
    verify(mockTopologyContext, times(1)).registerMetric(eq("TIMERS"), any(MultiReducedMetric.class), eq(defaultTimeWindow));
    verify(mockTopologyContext, times(1)).registerMetric(eq("COUNTERS"), any(MultiReducedMetric.class), eq(defaultTimeWindow));

    assertEquals("Should have empty prefix", "", recorder.getMetricPrefix());
    assertTrue("Should have empty prefix", recorder.getMetricPrefix().isEmpty());
}
 
Developer: salesforce | Project: storm-dynamic-spout | Lines: 29 | Source: StormRecorderTest.java

Example 2: testOpen_taskIdPrefixEnabled

import org.apache.storm.task.TopologyContext; // import the required package/class
/**
 * Validate that you can enable taskId prefixing.
 */
@Test
public void testOpen_taskIdPrefixEnabled() {
    // Define taskId in mock
    final int taskId = 20;

    // Create empty config
    final Map<String, Object> config = new HashMap<>();
    config.put(SpoutConfig.METRICS_RECORDER_ENABLE_TASK_ID_PREFIX, true);

    // Create mock TopologyContext
    final TopologyContext mockTopologyContext = mock(TopologyContext.class);
    when(mockTopologyContext.getThisTaskIndex()).thenReturn(taskId);

    // Create recorder and call open.
    final StormRecorder recorder = new StormRecorder();
    recorder.open(config, mockTopologyContext);

    // Validate
    verify(mockTopologyContext, times(1)).getThisTaskIndex();
    assertEquals("Should have taskId prefix", "task-" + taskId, recorder.getMetricPrefix());
}
 
Developer: salesforce | Project: storm-dynamic-spout | Lines: 25 | Source: StormRecorderTest.java

Example 3: open

import org.apache.storm.task.TopologyContext; // import the required package/class
@Override
public void open(Map stormConf, TopologyContext context,
        SpoutOutputCollector collector) {

    partitionField = ConfUtils.getString(stormConf,
            ESStatusBucketFieldParamName, "_routing");

    bucketSortField = ConfUtils.getString(stormConf,
            ESStatusBucketSortFieldParamName, bucketSortField);

    totalSortField = ConfUtils.getString(stormConf,
            ESStatusGlobalSortFieldParamName);

    maxURLsPerBucket = ConfUtils.getInt(stormConf,
            ESStatusMaxURLsParamName, 1);
    maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
            10);

    super.open(stormConf, context, collector);
}
 
Developer: eorliac | Project: patent-crawler | Lines: 21 | Source: AggregationSpout.java

Example 4: prepare

import org.apache.storm.task.TopologyContext; // import the required package/class
@SuppressWarnings({ "unchecked", "rawtypes" })
@Override
public void prepare(Map conf, TopologyContext context,
        OutputCollector collector) {
    super.prepare(conf, context, collector);
    _collector = collector;

    indexName = ConfUtils.getString(conf, IndexerBolt.ESIndexNameParamName,
            "fetcher");
    docType = ConfUtils.getString(conf, IndexerBolt.ESDocTypeParamName,
            "doc");
    create = ConfUtils.getBoolean(conf, IndexerBolt.ESCreateParamName,
            false);

    try {
        connection = ElasticSearchConnection
                .getConnection(conf, ESBoltType);
    } catch (Exception e1) {
        LOG.error("Can't connect to ElasticSearch", e1);
        throw new RuntimeException(e1);
    }

    this.eventCounter = context.registerMetric("ElasticSearchIndexer",
            new MultiCountMetric(), 10);
}
 
Developer: eorliac | Project: patent-crawler | Lines: 26 | Source: IndexerBolt.java

Example 5: testOpen_customTimeWindowInt

import org.apache.storm.task.TopologyContext; // import the required package/class
/**
 * Validate that we accept a custom time window set from an int value.
 */
@Test
public void testOpen_customTimeWindowInt() {
    final int timeBucket = 30;

    // Create empty config
    final Map<String, Object> config = new HashMap<>();
    config.put(SpoutConfig.METRICS_RECORDER_TIME_BUCKET, timeBucket);

    // Create mock TopologyContext
    final TopologyContext mockTopologyContext = mock(TopologyContext.class);

    // Create recorder and call open.
    final StormRecorder recorder = new StormRecorder();
    recorder.open(config, mockTopologyContext);

    // Validate we got called as expected.

    // Shouldn't have interacted with the taskId
    verify(mockTopologyContext, never()).getThisTaskIndex();

    // Should have registered 3 metrics.
    verify(mockTopologyContext, times(1)).registerMetric(eq("GAUGES"), any(MultiReducedMetric.class), eq(timeBucket));
    verify(mockTopologyContext, times(1)).registerMetric(eq("TIMERS"), any(MultiReducedMetric.class), eq(timeBucket));
    verify(mockTopologyContext, times(1)).registerMetric(eq("COUNTERS"), any(MultiReducedMetric.class), eq(timeBucket));
}
 
Developer: salesforce | Project: storm-dynamic-spout | Lines: 29 | Source: StormRecorderTest.java

Example 6: prepare

import org.apache.storm.task.TopologyContext; // import the required package/class
@Override
public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
    LOG.info("Preparing bolt with configuration {}", this);
    //for backward compatibility.
    if (mapper == null) {
        LOG.info("Mapper not specified. Setting default mapper to {}", FieldNameBasedTupleToKafkaMapper.class.getSimpleName());
        this.mapper = new FieldNameBasedTupleToKafkaMapper<K,V>();
    }

    //for backward compatibility.
    if (topicSelector == null) {
        if (stormConf.containsKey(TOPIC)) {
            LOG.info("TopicSelector not specified. Using [{}] for topic [{}] specified in bolt configuration,",
                    DefaultTopicSelector.class.getSimpleName(), stormConf.get(TOPIC));
            this.topicSelector = new DefaultTopicSelector((String) stormConf.get(TOPIC));
        } else {
            throw new IllegalStateException("topic should be specified in bolt's configuration");
        }
    }

    producer = mkProducer(boltSpecifiedProperties);
    this.collector = collector;
}
 
Developer: telstra | Project: open-kilda | Lines: 24 | Source: KafkaBolt.java

Example 7: prepare

import org.apache.storm.task.TopologyContext; // import the required package/class
@Override
public void prepare(Map stormConf, TopologyContext context) {
    super.prepare(stormConf, context);
    results = new ArrayList<>();
    descOfOutputFileds = new ArrayList<>();
    inputvalueNameList = new ArrayList<>();

    // Fetch the field names of each row passed in from HavingBolt, e.g. user_id, sku_id, cate
    Map<String, Map<String, List<String>>> inputFields = context.getThisInputFields();
    for (Map<String, List<String>> streamFields : inputFields.values()) {
        for (List<String> fieldNames : streamFields.values()) {
            inputvalueNameList = fieldNames;
            for (String item : inputvalueNameList) {
                System.out.println(item);
            }
        }
    }
}
 
Developer: bigdataFlySQL | Project: SQLonStorm | Lines: 24 | Source: ProjectionBolt.java

Example 8: open

import org.apache.storm.task.TopologyContext; // import the required package/class
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    initialized = false;
    this.context = context;

    // Spout internals
    this.collector = collector;
    numUncommittedOffsets = 0;

    // Offset management
    firstPollOffsetStrategy = kafkaSpoutConfig.getFirstPollOffsetStrategy();
    // with AutoCommitMode, offsets would be committed periodically in the background by the Kafka consumer
    // consumerAutoCommitMode = kafkaSpoutConfig.isConsumerAutoCommitMode();
    // always set to false here
    consumerAutoCommitMode = false;

    // Retries management
    retryService = kafkaSpoutConfig.getRetryService();

    if (!consumerAutoCommitMode) {     // If it is auto commit, no need to commit offsets manually
        commitTimer = new Timer(TIMER_DELAY_MS, kafkaSpoutConfig.getOffsetsCommitPeriodMs(), TimeUnit.MILLISECONDS);
    }
    refreshSubscriptionTimer = new Timer(TIMER_DELAY_MS, kafkaSpoutConfig.getPartitionRefreshPeriodMs(), TimeUnit.MILLISECONDS);

    acked = new HashMap<>();
    emitted = new HashSet<>();
    waitingToEmit = Collections.emptyListIterator();

    LOG.info("Kafka Spout opened with the following configuration: {}", kafkaSpoutConfig);
}
 
Developer: Paleozoic | Project: storm_spring_boot_demo | Lines: 31 | Source: KafkaSpout.java

Example 9: prepare

import org.apache.storm.task.TopologyContext; // import the required package/class
@Override
public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
    super.prepare(stormConf, context, collector);
    this.collector = collector;
    // results = new ArrayList<>();
    Map<String, Map<String, List<String>>> inputFields = context.getThisInputFields();
    for (Map<String, List<String>> streamFields : inputFields.values()) {
        for (List<String> fieldNames : streamFields.values()) {
            inputValueNameList = fieldNames;
            for (String item : inputValueNameList) {
                System.out.println(item);
            }
        }
    }
}
 
Developer: bigdataFlySQL | Project: SQLonStorm | Lines: 20 | Source: JoinBolt.java

Example 10: invalidJsonForDiscoveryFilter

import org.apache.storm.task.TopologyContext; // import the required package/class
@Test
public void invalidJsonForDiscoveryFilter() throws CmdLineException, ConfigurationException {
    OFEventWFMTopology manager = new OFEventWFMTopology(makeLaunchEnvironment());
    TopologyConfig config = manager.getConfig();
    OFELinkBolt bolt = new OFELinkBolt(config);

    TopologyContext context = Mockito.mock(TopologyContext.class);

    Mockito.when(context.getComponentId(TASK_ID_BOLT))
            .thenReturn(COMPONENT_ID_SOURCE);
    Mockito.when(context.getComponentOutputFields(COMPONENT_ID_SOURCE, STREAM_ID_INPUT))
            .thenReturn(KafkaMessage.FORMAT);

    OutputCollectorMock outputDelegate = Mockito.spy(new OutputCollectorMock());
    OutputCollector output = new OutputCollector(outputDelegate);

    bolt.prepare(stormConfig(), context, output);
    bolt.initState(new InMemoryKeyValueState<>());

    Tuple tuple = new TupleImpl(context, new Values("{\"corrupted-json"), TASK_ID_BOLT, STREAM_ID_INPUT);
    bolt.doWork(tuple);

    Mockito.verify(outputDelegate).ack(tuple);
}
 
Developer: telstra | Project: open-kilda | Lines: 25 | Source: OFELinkBoltTest.java

Example 11: prepare

import org.apache.storm.task.TopologyContext; // import the required package/class
@Override
public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
    this.collector = collector;
    this.context = context;

    if (!initialized) {
        this.topologyId = (String) conf.get(StormConfigKey.TOPOLOGY_ID);
        this.datasource = (String) conf.get(StormConfigKey.DATASOURCE);
        this.zkConnect = (String) conf.get(StormConfigKey.ZKCONNECT);
        this.zkRoot = Utils.buildZKTopologyPath(topologyId);
        // Initialize configuration files
        try {
            PropertiesHolder.initialize(zkConnect, zkRoot);
            GlobalCache.initialize(datasource);
            handlerManager = new BoltHandlerManager(buildProvider());
            reloadBolt(null);
            logger.info(getClass().getName() + " Initialized!");
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
            throw new InitializationException(e);
        }
        initialized = true;
    }
}
 
Developer: BriData | Project: DBus | Lines: 25 | Source: DbusAppenderBolt.java

Example 12: prepare

import org.apache.storm.task.TopologyContext; // import the required package/class
public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
    this.collector = collector;
    this.context = context;

    if (!initialized) {
        this.topologyId = (String) conf.get(Constants.StormConfigKey.TOPOLOGY_ID);
        this.datasource = (String) conf.get(Constants.StormConfigKey.DATASOURCE);
        this.zkconnect = (String) conf.get(Constants.StormConfigKey.ZKCONNECT);
        this.zkRoot = Utils.buildZKTopologyPath(topologyId);
        try {
            PropertiesHolder.initialize(zkconnect, zkRoot);
            producer = createProducer(context.getThisTaskId());
            topicProvider = new DataOutputTopicProvider();
            GlobalCache.initialize(datasource);
            handlerManager = new BoltHandlerManager(buildProvider());
            initialized = true;
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
            throw new InitializationException(e);
        }
    }
}
 
Developer: BriData | Project: DBus | Lines: 23 | Source: DbusHeartBeatBolt.java

Example 13: prepare

import org.apache.storm.task.TopologyContext; // import the required package/class
public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
    this.collector = collector;
    this.topologyId = (String) conf.get(Constants.StormConfigKey.FULL_SPLITTER_TOPOLOGY_ID);
    this.isGlobal = this.topologyId.toLowerCase().contains(ZkTopoConfForFullPull.GLOBAL_FULLPULLER_TOPO_PREFIX);
    if (this.isGlobal) {
        this.zkMonitorRootNodePath = Constants.FULL_PULL_MONITOR_ROOT_GLOBAL;
    } else {
        this.zkMonitorRootNodePath = Constants.FULL_PULL_MONITOR_ROOT;
    }
    this.zkconnect = (String) conf.get(Constants.StormConfigKey.ZKCONNECT);
    this.zkTopoRoot = Constants.TOPOLOGY_ROOT + "/" + Constants.FULL_SPLITTING_PROPS_ROOT;
    if (!initialized) {
        // Initialize configuration files
        try {
            loadRunningConf(null);
        } catch (Exception e) {
            throw new InitializationException(e);
        }
        initialized = true;
    }
}
 
Developer: BriData | Project: DBus | Lines: 22 | Source: DataShardsSplittingBolt.java

Example 14: open

import org.apache.storm.task.TopologyContext; // import the required package/class
/**
 * Initialize the collector.
 */
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    this.collector = collector;
    this.zkConnect = (String) conf.get(Constants.StormConfigKey.ZKCONNECT);
    this.topologyId = (String) conf.get(Constants.StormConfigKey.FULL_SPLITTER_TOPOLOGY_ID);
    this.isGlobal = this.topologyId.toLowerCase().contains(ZkTopoConfForFullPull.GLOBAL_FULLPULLER_TOPO_PREFIX);
    if (this.isGlobal) {
        this.zkMonitorRootNodePath = Constants.FULL_PULL_MONITOR_ROOT_GLOBAL;
    } else {
        this.zkMonitorRootNodePath = Constants.FULL_PULL_MONITOR_ROOT;
    }
    LOG.info("topologyId:{} zkConnect:{} zkTopoRoot:{}", topologyId, zkConnect, zkTopoRoot);
    try {
        loadRunningConf(null);

        // Check for leftover pending pull tasks. If any exist, resolve them: send a resume message to the appender,
        // record it in ZK, and log the detected pending tasks to ease troubleshooting.
        // Remove already-resolved pending tasks from the pending queue to avoid endless reprocessing.
        FullPullHelper.updatePendingTasksTrackInfo(zkService, dsName, null,
                DataPullConstants.FULLPULL_PENDING_TASKS_OP_CRASHED_NOTIFY);
    } catch (Exception e) {
        LOG.error("Failed to initialize the spout", e);
        throw new InitializationException(e);
    }
    LOG.info("Topology {} is started!", topologyId);
}
 
Developer: BriData | Project: DBus | Lines: 29 | Source: DataShardsSplittingSpout.java

Example 15: open

import org.apache.storm.task.TopologyContext; // import the required package/class
/**
 * Initialize the collector.
 */
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    this.collector = collector;
    this.zkConnect = (String) conf.get(Constants.StormConfigKey.ZKCONNECT);
    this.topologyId = (String) conf.get(Constants.StormConfigKey.FULL_PULLER_TOPOLOGY_ID);

    this.isGlobal = this.topologyId.toLowerCase()
            .contains(ZkTopoConfForFullPull.GLOBAL_FULLPULLER_TOPO_PREFIX);
    if (this.isGlobal) {
        this.zkMonitorRootNodePath = Constants.FULL_PULL_MONITOR_ROOT_GLOBAL;
    } else {
        this.zkMonitorRootNodePath = Constants.FULL_PULL_MONITOR_ROOT;
    }

    try {
        loadRunningConf(null);
        // Check for leftover pending pull tasks. If any exist, resolve them: send a resume message to the appender,
        // record it in ZK, and log the detected pending tasks to ease troubleshooting.
        // Remove already-resolved pending tasks from the pending queue to avoid endless reprocessing.
        FullPullHelper.updatePendingTasksTrackInfo(zkService, dsName, null, DataPullConstants.FULLPULL_PENDING_TASKS_OP_CRASHED_NOTIFY);
    } catch (Exception e) {
        LOG.error("Failed to initialize the spout", e);
        throw new InitializationException(e);
    }
    LOG.info("Topology {} is started!", topologyId);
}
 
Developer: BriData | Project: DBus | Lines: 29 | Source: DataPullingSpout.java


Note: The org.apache.storm.task.TopologyContext class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.