

Java TopologyContext Class Code Examples

This article collects typical usage examples of the Java class org.apache.storm.task.TopologyContext. If you have been wondering what exactly the TopologyContext class is for, how to use it, or where to find usage examples, the curated class examples below may help.


The TopologyContext class belongs to the org.apache.storm.task package. Fifteen code examples of the class are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
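
Before the examples, here is a minimal, self-contained sketch of how a bolt typically uses a TopologyContext inside prepare(): it reads the task id that identifies this instance within the topology and registers a custom metric that Storm flushes on a fixed interval. This sketch is not taken from any of the projects cited below; the class name MetricsAwareBolt and the metric name tuple_count are illustrative assumptions.

import java.util.Map;

import org.apache.storm.metric.api.CountMetric;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Tuple;

public class MetricsAwareBolt extends BaseRichBolt {
    private transient CountMetric tupleCounter;
    private OutputCollector collector;
    private int taskId;

    @Override
    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
        // Identifies this task instance within the running topology.
        this.taskId = context.getThisTaskId();
        // Storm calls getValueAndReset() on the metric every 60 seconds
        // and hands the value to the registered metrics consumers.
        this.tupleCounter = context.registerMetric("tuple_count", new CountMetric(), 60);
    }

    @Override
    public void execute(Tuple input) {
        tupleCounter.incr();
        collector.ack(input);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // This bolt only counts tuples and emits nothing.
    }
}

Tests such as Examples 1, 2, and 5 below exercise exactly these entry points: they mock the TopologyContext with Mockito and verify interactions like registerMetric() and getThisTaskIndex().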

Example 1: testOpen_useDefaults

import org.apache.storm.task.TopologyContext; // import the required package/class
/**
 * Validate that we set things up with sane defaults.
 */
@Test
public void testOpen_useDefaults() {
    // Create empty config
    final Map<String, Object> config = new HashMap<>();

    // Create a mock TopologyContext
    final TopologyContext mockTopologyContext = mock(TopologyContext.class);

    // Create recorder and call open.
    final StormRecorder recorder = new StormRecorder();
    recorder.open(config, mockTopologyContext);

    // Validate we got called as expected.

    // Shouldn't have interacted with the taskId
    verify(mockTopologyContext, never()).getThisTaskIndex();

    // Should have registered 3 metrics.
    verify(mockTopologyContext, times(1)).registerMetric(eq("GAUGES"), any(MultiReducedMetric.class), eq(defaultTimeWindow));
    verify(mockTopologyContext, times(1)).registerMetric(eq("TIMERS"), any(MultiReducedMetric.class), eq(defaultTimeWindow));
    verify(mockTopologyContext, times(1)).registerMetric(eq("COUNTERS"), any(MultiReducedMetric.class), eq(defaultTimeWindow));

    assertEquals("Should have empty prefix", "", recorder.getMetricPrefix());
    assertTrue("Should have empty prefix", recorder.getMetricPrefix().isEmpty());
}
 
Developer: salesforce, Project: storm-dynamic-spout, Lines: 29, Source: StormRecorderTest.java

Example 2: testOpen_taskIdPrefixEnabled

import org.apache.storm.task.TopologyContext; // import the required package/class
/**
 * Validate that you can enable taskId prefixing.
 */
@Test
public void testOpen_taskIdPrefixEnabled() {
    // Define taskId in mock
    final int taskId = 20;

    // Create empty config
    final Map<String, Object> config = new HashMap<>();
    config.put(SpoutConfig.METRICS_RECORDER_ENABLE_TASK_ID_PREFIX, true);

    // Create a mock TopologyContext
    final TopologyContext mockTopologyContext = mock(TopologyContext.class);
    when(mockTopologyContext.getThisTaskIndex()).thenReturn(taskId);

    // Create recorder and call open.
    final StormRecorder recorder = new StormRecorder();
    recorder.open(config, mockTopologyContext);

    // Validate
    verify(mockTopologyContext, times(1)).getThisTaskIndex();
    assertEquals("Should have taskId prefix", "task-" + taskId, recorder.getMetricPrefix());
}
 
Developer: salesforce, Project: storm-dynamic-spout, Lines: 25, Source: StormRecorderTest.java

Example 3: open

import org.apache.storm.task.TopologyContext; // import the required package/class
@Override
public void open(Map stormConf, TopologyContext context,
        SpoutOutputCollector collector) {

    partitionField = ConfUtils.getString(stormConf,
            ESStatusBucketFieldParamName, "_routing");

    bucketSortField = ConfUtils.getString(stormConf,
            ESStatusBucketSortFieldParamName, bucketSortField);

    totalSortField = ConfUtils.getString(stormConf,
            ESStatusGlobalSortFieldParamName);

    maxURLsPerBucket = ConfUtils.getInt(stormConf,
            ESStatusMaxURLsParamName, 1);
    maxBucketNum = ConfUtils.getInt(stormConf, ESStatusMaxBucketParamName,
            10);

    super.open(stormConf, context, collector);
}
 
Developer: eorliac, Project: patent-crawler, Lines: 21, Source: AggregationSpout.java

Example 4: prepare

import org.apache.storm.task.TopologyContext; // import the required package/class
@SuppressWarnings({ "unchecked", "rawtypes" })
@Override
public void prepare(Map conf, TopologyContext context,
        OutputCollector collector) {
    super.prepare(conf, context, collector);
    _collector = collector;

    indexName = ConfUtils.getString(conf, IndexerBolt.ESIndexNameParamName,
            "fetcher");
    docType = ConfUtils.getString(conf, IndexerBolt.ESDocTypeParamName,
            "doc");
    create = ConfUtils.getBoolean(conf, IndexerBolt.ESCreateParamName,
            false);

    try {
        connection = ElasticSearchConnection
                .getConnection(conf, ESBoltType);
    } catch (Exception e1) {
        LOG.error("Can't connect to ElasticSearch", e1);
        throw new RuntimeException(e1);
    }

    this.eventCounter = context.registerMetric("ElasticSearchIndexer",
            new MultiCountMetric(), 10);
}
 
Developer: eorliac, Project: patent-crawler, Lines: 26, Source: IndexerBolt.java

Example 5: testOpen_customTimeWindowInt

import org.apache.storm.task.TopologyContext; // import the required package/class
/**
 * Validate that we accept a custom time window set from an int value.
 */
@Test
public void testOpen_customTimeWindowInt() {
    final int timeBucket = 30;

    // Create empty config
    final Map<String, Object> config = new HashMap<>();
    config.put(SpoutConfig.METRICS_RECORDER_TIME_BUCKET, timeBucket);

    // Create a mock TopologyContext
    final TopologyContext mockTopologyContext = mock(TopologyContext.class);

    // Create recorder and call open.
    final StormRecorder recorder = new StormRecorder();
    recorder.open(config, mockTopologyContext);

    // Validate we got called as expected.

    // Shouldn't have interacted with the taskId
    verify(mockTopologyContext, never()).getThisTaskIndex();

    // Should have registered 3 metrics.
    verify(mockTopologyContext, times(1)).registerMetric(eq("GAUGES"), any(MultiReducedMetric.class), eq(timeBucket));
    verify(mockTopologyContext, times(1)).registerMetric(eq("TIMERS"), any(MultiReducedMetric.class), eq(timeBucket));
    verify(mockTopologyContext, times(1)).registerMetric(eq("COUNTERS"), any(MultiReducedMetric.class), eq(timeBucket));
}
 
Developer: salesforce, Project: storm-dynamic-spout, Lines: 29, Source: StormRecorderTest.java

Example 6: prepare

import org.apache.storm.task.TopologyContext; // import the required package/class
@Override
public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
    LOG.info("Preparing bolt with configuration {}", this);
    //for backward compatibility.
    if (mapper == null) {
        LOG.info("Mapper not specified. Setting default mapper to {}", FieldNameBasedTupleToKafkaMapper.class.getSimpleName());
        this.mapper = new FieldNameBasedTupleToKafkaMapper<K,V>();
    }

    //for backward compatibility.
    if (topicSelector == null) {
        if (stormConf.containsKey(TOPIC)) {
            LOG.info("TopicSelector not specified. Using [{}] for topic [{}] specified in bolt configuration,",
                    DefaultTopicSelector.class.getSimpleName(), stormConf.get(TOPIC));
            this.topicSelector = new DefaultTopicSelector((String) stormConf.get(TOPIC));
        } else {
            throw new IllegalStateException("topic should be specified in bolt's configuration");
        }
    }

    producer = mkProducer(boltSpecifiedProperties);
    this.collector = collector;
}
 
Developer: telstra, Project: open-kilda, Lines: 24, Source: KafkaBolt.java

Example 7: prepare

import org.apache.storm.task.TopologyContext; // import the required package/class
@Override
public void prepare(Map stormConf, TopologyContext context) {
    super.prepare(stormConf, context);
    results = new ArrayList<>();
    descOfOutputFileds = new ArrayList<>();
    inputvalueNameList = new ArrayList<>();

    // Fetch the field names of each row passed in from HavingBolt, e.g. user_id, sku_id, cate
    Map<String, Map<String, List<String>>> inputFields = context.getThisInputFields();
    for (Map<String, List<String>> componentFields : inputFields.values()) {
        for (List<String> fieldNames : componentFields.values()) {
            inputvalueNameList = fieldNames;
            for (String item : inputvalueNameList) {
                System.out.println(item);
            }
        }
    }
}
 
Developer: bigdataFlySQL, Project: SQLonStorm, Lines: 24, Source: ProjectionBolt.java

Example 8: open

import org.apache.storm.task.TopologyContext; // import the required package/class
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    initialized = false;
    this.context = context;

    // Spout internals
    this.collector = collector;
    numUncommittedOffsets = 0;

    // Offset management
    firstPollOffsetStrategy = kafkaSpoutConfig.getFirstPollOffsetStrategy();
    // With auto-commit mode, offsets would be committed periodically in the background by the Kafka consumer:
    // consumerAutoCommitMode = kafkaSpoutConfig.isConsumerAutoCommitMode();
    // Always set to false here, so offsets are committed manually.
    consumerAutoCommitMode = false;

    // Retries management
    retryService = kafkaSpoutConfig.getRetryService();

    if (!consumerAutoCommitMode) {     // If it is auto commit, no need to commit offsets manually
        commitTimer = new Timer(TIMER_DELAY_MS, kafkaSpoutConfig.getOffsetsCommitPeriodMs(), TimeUnit.MILLISECONDS);
    }
    refreshSubscriptionTimer = new Timer(TIMER_DELAY_MS, kafkaSpoutConfig.getPartitionRefreshPeriodMs(), TimeUnit.MILLISECONDS);

    acked = new HashMap<>();
    emitted = new HashSet<>();
    waitingToEmit = Collections.emptyListIterator();

    LOG.info("Kafka Spout opened with the following configuration: {}", kafkaSpoutConfig);
}
 
Developer: Paleozoic, Project: storm_spring_boot_demo, Lines: 31, Source: KafkaSpout.java

Example 9: prepare

import org.apache.storm.task.TopologyContext; // import the required package/class
@Override
public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
    super.prepare(stormConf, context, collector);
    this.collector = collector;
    // results = new ArrayList<>();
    Map<String, Map<String, List<String>>> inputFields = context.getThisInputFields();
    for (Map<String, List<String>> componentFields : inputFields.values()) {
        for (List<String> fieldNames : componentFields.values()) {
            inputValueNameList = fieldNames;
            for (String item : inputValueNameList) {
                System.out.println(item);
            }
        }
    }
}
 
Developer: bigdataFlySQL, Project: SQLonStorm, Lines: 20, Source: JoinBolt.java

Example 10: invalidJsonForDiscoveryFilter

import org.apache.storm.task.TopologyContext; // import the required package/class
@Test
public void invalidJsonForDiscoveryFilter() throws CmdLineException, ConfigurationException {
    OFEventWFMTopology manager = new OFEventWFMTopology(makeLaunchEnvironment());
    TopologyConfig config = manager.getConfig();
    OFELinkBolt bolt = new OFELinkBolt(config);

    TopologyContext context = Mockito.mock(TopologyContext.class);

    Mockito.when(context.getComponentId(TASK_ID_BOLT))
            .thenReturn(COMPONENT_ID_SOURCE);
    Mockito.when(context.getComponentOutputFields(COMPONENT_ID_SOURCE, STREAM_ID_INPUT))
            .thenReturn(KafkaMessage.FORMAT);

    OutputCollectorMock outputDelegate = Mockito.spy(new OutputCollectorMock());
    OutputCollector output = new OutputCollector(outputDelegate);

    bolt.prepare(stormConfig(), context, output);
    bolt.initState(new InMemoryKeyValueState<>());

    Tuple tuple = new TupleImpl(context, new Values("{\"corrupted-json"), TASK_ID_BOLT, STREAM_ID_INPUT);
    bolt.doWork(tuple);

    Mockito.verify(outputDelegate).ack(tuple);
}
 
Developer: telstra, Project: open-kilda, Lines: 25, Source: OFELinkBoltTest.java

Example 11: prepare

import org.apache.storm.task.TopologyContext; // import the required package/class
@Override
public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
    this.collector = collector;
    this.context = context;

    if (!initialized) {
        this.topologyId = (String) conf.get(StormConfigKey.TOPOLOGY_ID);
        this.datasource = (String) conf.get(StormConfigKey.DATASOURCE);
        this.zkConnect = (String) conf.get(StormConfigKey.ZKCONNECT);
        this.zkRoot = Utils.buildZKTopologyPath(topologyId);
        // Initialize configuration files
        try {
            PropertiesHolder.initialize(zkConnect, zkRoot);
            GlobalCache.initialize(datasource);
            handlerManager = new BoltHandlerManager(buildProvider());
            reloadBolt(null);
            logger.info(getClass().getName() + " Initialized!");
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
            throw new InitializationException(e);
        }
        initialized = true;
    }
}
 
Developer: BriData, Project: DBus, Lines: 25, Source: DbusAppenderBolt.java

Example 12: prepare

import org.apache.storm.task.TopologyContext; // import the required package/class
public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
    this.collector = collector;
    this.context = context;

    if (!initialized) {
        this.topologyId = (String) conf.get(Constants.StormConfigKey.TOPOLOGY_ID);
        this.datasource = (String) conf.get(Constants.StormConfigKey.DATASOURCE);
        this.zkconnect = (String) conf.get(Constants.StormConfigKey.ZKCONNECT);
        this.zkRoot = Utils.buildZKTopologyPath(topologyId);
        try {
            PropertiesHolder.initialize(zkconnect, zkRoot);
            producer = createProducer(context.getThisTaskId());
            topicProvider = new DataOutputTopicProvider();
            GlobalCache.initialize(datasource);
            handlerManager = new BoltHandlerManager(buildProvider());
            initialized = true;
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
            throw new InitializationException(e);
        }
    }
}
 
Developer: BriData, Project: DBus, Lines: 23, Source: DbusHeartBeatBolt.java

Example 13: prepare

import org.apache.storm.task.TopologyContext; // import the required package/class
public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
    this.collector = collector;
    this.topologyId = (String) conf.get(Constants.StormConfigKey.FULL_SPLITTER_TOPOLOGY_ID);
    this.isGlobal = this.topologyId.toLowerCase().contains(ZkTopoConfForFullPull.GLOBAL_FULLPULLER_TOPO_PREFIX);
    if (this.isGlobal) {
        this.zkMonitorRootNodePath = Constants.FULL_PULL_MONITOR_ROOT_GLOBAL;
    } else {
        this.zkMonitorRootNodePath = Constants.FULL_PULL_MONITOR_ROOT;
    }
    this.zkconnect = (String) conf.get(Constants.StormConfigKey.ZKCONNECT);
    this.zkTopoRoot = Constants.TOPOLOGY_ROOT + "/" + Constants.FULL_SPLITTING_PROPS_ROOT;
    if (!initialized) {
        // Initialize configuration files
        try {
            loadRunningConf(null);
        } catch (Exception e) {
            throw new InitializationException(e);
        }
        initialized = true;
    }
}
 
Developer: BriData, Project: DBus, Lines: 22, Source: DataShardsSplittingBolt.java

Example 14: open

import org.apache.storm.task.TopologyContext; // import the required package/class
/**
 * Initialize the collector.
 */
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    this.collector = collector;
    this.zkConnect = (String) conf.get(Constants.StormConfigKey.ZKCONNECT);
    this.topologyId = (String) conf.get(Constants.StormConfigKey.FULL_SPLITTER_TOPOLOGY_ID);
    this.isGlobal = this.topologyId.toLowerCase().contains(ZkTopoConfForFullPull.GLOBAL_FULLPULLER_TOPO_PREFIX);
    if (this.isGlobal) {
        this.zkMonitorRootNodePath = Constants.FULL_PULL_MONITOR_ROOT_GLOBAL;
    } else {
        this.zkMonitorRootNodePath = Constants.FULL_PULL_MONITOR_ROOT;
    }
    LOG.info("topologyId:{} zkConnect:{} zkTopoRoot:{}", topologyId, zkConnect, zkTopoRoot);
    try {
        loadRunningConf(null);

        // Check for leftover pending pull tasks. If any exist, resolve them: send a resume message to the appender,
        // record it in ZooKeeper, and log the detected pending tasks to ease troubleshooting.
        // Tasks that have already been resolved are removed from the pending queue to avoid endless reprocessing.
        FullPullHelper.updatePendingTasksTrackInfo(zkService, dsName, null,
                DataPullConstants.FULLPULL_PENDING_TASKS_OP_CRASHED_NOTIFY);
    } catch (Exception e) {
        LOG.error(e.getMessage(), e);
        throw new InitializationException(e);
    }
    LOG.info("Topology {} is started!", topologyId);
}
 
Developer: BriData, Project: DBus, Lines: 29, Source: DataShardsSplittingSpout.java

Example 15: open

import org.apache.storm.task.TopologyContext; // import the required package/class
/**
 * Initialize the collectors.
 */
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    this.collector = collector;
    this.zkConnect = (String) conf.get(Constants.StormConfigKey.ZKCONNECT);
    this.topologyId = (String) conf.get(Constants.StormConfigKey.FULL_PULLER_TOPOLOGY_ID);

    this.isGlobal = this.topologyId.toLowerCase().contains(ZkTopoConfForFullPull.GLOBAL_FULLPULLER_TOPO_PREFIX);
    if (this.isGlobal) {
        this.zkMonitorRootNodePath = Constants.FULL_PULL_MONITOR_ROOT_GLOBAL;
    } else {
        this.zkMonitorRootNodePath = Constants.FULL_PULL_MONITOR_ROOT;
    }

    try {
        loadRunningConf(null);
        // Check for leftover pending pull tasks. If any exist, resolve them: send a resume message to the appender,
        // record it in ZooKeeper, and log the detected pending tasks to ease troubleshooting.
        // Tasks that have already been resolved are removed from the pending queue to avoid endless reprocessing.
        FullPullHelper.updatePendingTasksTrackInfo(zkService, dsName, null, DataPullConstants.FULLPULL_PENDING_TASKS_OP_CRASHED_NOTIFY);
    } catch (Exception e) {
        LOG.error(e.getMessage(), e);
        throw new InitializationException(e);
    }
    LOG.info("Topology {} is started!", topologyId);
}
 
Developer: BriData, Project: DBus, Lines: 29, Source: DataPullingSpout.java


Note: The org.apache.storm.task.TopologyContext class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.