Java TopologyContext Class Code Examples

This article collects and summarizes typical usage examples of the Java class backtype.storm.task.TopologyContext. If you have been wondering how the TopologyContext class is used in practice, or are looking for concrete examples of it, the curated class code examples below may help.


The TopologyContext class belongs to the backtype.storm.task package. A total of 15 code examples of the TopologyContext class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
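
Before diving into the individual examples, here is a minimal, hypothetical sketch (not taken from any of the projects below) of the pattern most of them share: a bolt receives the TopologyContext in prepare() and uses it to look up its component id and task id and to register a custom metric. The class name ContextAwareBolt and the metric name "processed" are illustrative assumptions.

import java.util.Map;

import backtype.storm.metric.api.CountMetric;
import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Tuple;

// Hypothetical sketch: illustrates the common uses of TopologyContext seen in the examples below.
public class ContextAwareBolt extends BaseRichBolt {
    private OutputCollector collector;
    private String taskLabel;
    private transient CountMetric processedMetric;

    @Override
    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
        // Component id and task id identify this particular bolt instance within the topology.
        this.taskLabel = context.getThisComponentId() + "-" + context.getThisTaskId();
        // Register a custom metric that Storm reports every 60 seconds.
        this.processedMetric = context.registerMetric("processed", new CountMetric(), 60);
    }

    @Override
    public void execute(Tuple tuple) {
        processedMetric.incr();
        collector.ack(tuple);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // This bolt emits nothing; it only consumes tuples.
    }
}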

Example 1: prepare

import backtype.storm.task.TopologyContext; // import the required package/class
@Override
public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
    //for backward compatibility.
    if(mapper == null) {
        this.mapper = new FieldNameBasedTupleToKafkaMapper<K,V>();
    }

    //for backward compatibility.
    if(topicSelector == null) {
        this.topicSelector = new DefaultTopicSelector((String) stormConf.get(TOPIC));
    }

    Map configMap = (Map) stormConf.get(KAFKA_BROKER_PROPERTIES);
    Properties properties = new Properties();
    properties.putAll(configMap);
    ProducerConfig config = new ProducerConfig(properties);
    producer = new Producer<K, V>(config);
    this.collector = collector;
}
 
Developer ID: redBorder, Project: rb-bi, Lines: 20, Source: KafkaBolt.java

Example 2: prepare

import backtype.storm.task.TopologyContext; // import the required package/class
@Override
public void prepare(Map map, Object conf, TopologyContext topologyContext, IErrorReporter iErrorReporter) {
    Map<String, Object> config = (Map<String, Object>) conf;
    RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3);
    client = CuratorFrameworkFactory.newClient(config.get("zookeeper").toString(), retryPolicy);
    client.start();


    try {
        if (client.checkExists().forPath("/consumers/rb-storm") == null) {
            client.create().creatingParentsIfNeeded().forPath("/consumers/rb-storm");
            System.out.println("Creating /consumers/rb-storm path ...");
        }
    } catch (Exception e) {
        e.printStackTrace();
    }

}
 
Developer ID: redBorder, Project: rb-bi, Lines: 19, Source: KafkaConsumerMonitorMetrics.java

Example 3: prepare

import backtype.storm.task.TopologyContext; // import the required package/class
@Override
public void prepare(Map stormConf, Object registrationArgument, TopologyContext context,
                    final IErrorReporter errorReporter) {

    LOG.info("Initializing the " + getClass().getCanonicalName());

    this.graphitePath = getConfiguredGraphitBasePath(stormConf);
    if (!stormConf.containsKey(CONF_MONITORING_GRAPHITE_SERVER)) {
        throw new RuntimeException("Missing graphite configuration. You need to specify the server and the port" +
                "under which the graphite server can be reached. Example: " + CONF_MONITORING_GRAPHITE_SERVER
                + "=graphite.yourdomain.com:2003");
    }
    this.graphiteConnection = (String) stormConf.get(CONF_MONITORING_GRAPHITE_SERVER);

    getGraphiteWriter();

    this.metricsToProcess = new HashMap<>();
    this.metricsToProcess.put(MonitoringMetricsCollectionHook.METRIC_COMPLETE_LATENCY, new AverageAggregator());
    this.metricsToProcess.put(MonitoringMetricsCollectionHook.METRIC_THROUGHPUT, new SumAggregator());
    this.metricsToProcess.put(MonitoringMetricsCollectionHook.METRIC_WORKER_CPU_LOAD, new SumAggregator());
    this.metricsToProcess.put(MonitoringMetricsCollectionHook.METRIC_WORKER_NETWORK_BYTES, new SumAggregator());
}
 
Developer ID: uzh, Project: storm-scheduler, Lines: 23, Source: MonitoringMetricsToGraphiteWriter.java

Example 4: prepare

import backtype.storm.task.TopologyContext; // import the required package/class
public void prepare(Map map, TopologyContext topologyContext, OutputCollector collector) {
    this.collector = collector;
    final Configuration hbConfig = HBaseConfiguration.create();
    
    Map<String, Object> conf = (Map<String, Object>)map.get(this.configKey);
    if(conf == null) {
        throw new IllegalArgumentException("HBase configuration not found using key '" + this.configKey + "'");
    }
    if(conf.get("hbase.rootdir") == null) {
        LOG.warn("No 'hbase.rootdir' value found in configuration! Using HBase defaults.");
    }
    for(String key : conf.keySet()) {
        hbConfig.set(key, String.valueOf(conf.get(key)));
    }

    this.hBaseClient = new HBaseClient(conf, hbConfig, tableName);
}
 
Developer ID: mengzhiyi, Project: storm-hbase-1.0.x, Lines: 18, Source: AbstractHBaseBolt.java

Example 5: prepare

import backtype.storm.task.TopologyContext; // import the required package/class
@Override
public void prepare(Map stormConf, TopologyContext context) {
    mapper = new ObjectMapper();
    this.configParams = (JSONObject) new JSONObject(this.config).get("params");
    try {
        this.configMap = mapper.readValue(this.config, new TypeReference<Map<String, Object>>() {});
        mapParams = (Map<String, Object>) configMap.get("params");
    } catch (IOException e) {
        e.printStackTrace();
    }

    field = getParam("field1", true);
    operator = getParam("operator", true);
    field2 = getParam("field2", true);
    entity = getParam("entity");
}
 
Developer ID: telefonicaid, Project: fiware-sinfonier, Lines: 18, Source: ConditionalFields.java

Example 6: prepare

import backtype.storm.task.TopologyContext; // import the required package/class
@Override
public void prepare(Map stormConf, TopologyContext context) {
    mapper = new ObjectMapper();
    this.configParams = (JSONObject) new JSONObject(this.config).get("params");
    try {
        this.configMap = mapper.readValue(this.config, new TypeReference<Map<String, Object>>() {});
        mapParams = (Map<String, Object>) configMap.get("params");
    } catch (IOException e) {
        e.printStackTrace();
    }

    field = getParam("field", true);
    operator = (String) getParam("operator", true);
    value = (String) getParam("value", true);
    entity = (String) getParam("value");

    if (operator.equals("RegexExpression")) {
        pattern = Pattern.compile(value, Pattern.DOTALL);
    }
}
 
Developer ID: telefonicaid, Project: fiware-sinfonier, Lines: 23, Source: Conditional.java

Example 7: open

import backtype.storm.task.TopologyContext; // import the required package/class
public void open(Map conf, TopologyContext ctx, SpoutOutputCollector collector) {
	this.collector = collector;
	this.history = new ProcessedHistory();
	this.subreddit = (String) conf.get("subreddit");
	
	try {
		this.subredditCommentsfeedURL = new URL((String)conf.get("feedURL"));
	} catch (MalformedURLException e) {
		throw new RuntimeException(e);
	}
	LOG.info("Spout subreddit:{} feedURL:{}", this.subreddit, this.subredditCommentsfeedURL);
	
	if (conf.containsKey("sentimentData")) {
		LOG.info("Spouts can also see sentimentData");
	}
}
 
Developer ID: pathbreak, Project: reddit-sentiment-storm, Lines: 17, Source: SubredditCommentsSpout.java

Example 8: prepare

import backtype.storm.task.TopologyContext; // import the required package/class
public void prepare(Map conf, TopologyContext ctx, OutputCollector collector) {
	
	this.collector = collector;
	this.myId = ctx.getThisComponentId() + "-" + ctx.getThisTaskId();
	
	this.summary = new Summary();
	
	this.publisher = new ZkPublisher();
	try {
		this.publisher.init(conf);
	} catch (Exception e) {
		throw new RuntimeException(e);
	}
	
	this.lastPublishedTimestamp = 0;
}
 
Developer ID: pathbreak, Project: reddit-sentiment-storm, Lines: 17, Source: SummarizerBolt.java

Example 9: open

import backtype.storm.task.TopologyContext; // import the required package/class
/**
 * Open file with stock tick data and read into List object.
 */
@Override
public void open(Map map,
                 TopologyContext context,
                 SpoutOutputCollector outputCollector) {
  this.outputCollector = outputCollector;

  try {
    ticks = IOUtils.readLines(
        ClassLoader.getSystemResourceAsStream("NASDAQ_daily_prices_A.csv"),
        Charset.defaultCharset().name());
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
 
Developer ID: amitchmca, Project: hadooparchitecturebook, Lines: 19, Source: StockTicksSpout.java

Example 10: open

import backtype.storm.task.TopologyContext; // import the required package/class
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
	try {
		_collector = collector;

		ru = new RedisUtil();
		redis = ru.getJedisInstance();

		SchedulerFactory schedulerFactory = new StdSchedulerFactory();
		Scheduler scheduler = schedulerFactory.getScheduler();
		_qManager = new QuartzManager();
		_qManager.setScheduler(scheduler);
		PlatformUtils.initRegisterProject(_qManager);
		scheduler.start();

		//init Hbase tables
		CreateTable.initHbaseTable();
	} catch (Exception ex) {
		logger.error("error:"+MySysLogger.formatException(ex));
		ex.printStackTrace();
	}
}
 
Developer ID: cutoutsy, Project: miner, Lines: 22, Source: BeginSpout.java

Example 11: open

import backtype.storm.task.TopologyContext; // import the required package/class
public void open(Map conf, TopologyContext context,
                 SpoutOutputCollector collector) {
    queue = new LinkedBlockingQueue<String>(1000);
    this.collector = collector;

    StatusListener listener = new StatusListener() {
        public void onStatus(Status status) {
            queue.offer(TwitterObjectFactory.getRawJSON(status));
        }

        public void onDeletionNotice(StatusDeletionNotice sdn) { }
        public void onTrackLimitationNotice(int i) { }
        public void onScrubGeo(long l, long l1) { }
        public void onStallWarning(StallWarning stallWarning) { }
        public void onException(Exception e) { }
    };

    ConfigurationBuilder cb = new ConfigurationBuilder();
    cb.setJSONStoreEnabled(true);

    TwitterStreamFactory factory = new TwitterStreamFactory(cb.build());
    twitterStream = factory.getInstance();
    twitterStream.addListener(listener);
    twitterStream.filter(new FilterQuery().language("en").track("trump"));
}
 
Developer ID: mayconbordin, Project: erad2016-streamprocessing, Lines: 26, Source: TwitterSpout.java

Example 12: open

import backtype.storm.task.TopologyContext; // import the required package/class
@Override
public void open(Map arg0, TopologyContext arg1, SpoutOutputCollector arg2) {
  /*
   * FileReader fileReader;
   * try {
   *     fileReader = new FileReader("C:/proj/Migration/apache-storm-0.9.4/apache-storm-0.9.4/logs/InPut.txt");
   *     bufferedReader = new BufferedReader(fileReader);
   *     _collector = arg2;
   * } catch (FileNotFoundException e) {
   *     // TODO Auto-generated catch block
   *     e.printStackTrace();
   * }
   */

  try {
    _collector = arg2;
    setUpConnection();
  } catch (JMSException e) {
    LOGGER.error(e);
  }

}
 
Developer ID: techysoul, Project: java, Lines: 18, Source: DeliveryCheckSpout.java

Example 13: open

import backtype.storm.task.TopologyContext; // import the required package/class
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
	//fileName = (String) conf.get("file");
	this.collector = collector;

	try {
		reader = new BufferedReader(new FileReader(fileName));
	} catch (Exception e) {
		throw new RuntimeException(e);
	}
}
 
Developer ID: PacktPublishing, Project: Practical-Real-time-Processing-and-Analytics, Lines: 12, Source: FileSpout.java

Example 14: TridentKafkaEmitter

import backtype.storm.task.TopologyContext; // import the required package/class
public TridentKafkaEmitter(Map conf, TopologyContext context, TridentKafkaConfig config, String topologyInstanceId) {
    _config = config;
    _topologyInstanceId = topologyInstanceId;
    _connections = new DynamicPartitionConnections(_config, KafkaUtils.makeBrokerReader(conf, _config));
    _topologyName = (String) conf.get(Config.TOPOLOGY_NAME);
    _kafkaOffsetMetric = new KafkaUtils.KafkaOffsetMetric(_config.topic, _connections);
    context.registerMetric("kafkaOffset", _kafkaOffsetMetric, _config.metricsTimeBucketSizeInSecs);
    _kafkaMeanFetchLatencyMetric = context.registerMetric("kafkaFetchAvg", new MeanReducer(), _config.metricsTimeBucketSizeInSecs);
    _kafkaMaxFetchLatencyMetric = context.registerMetric("kafkaFetchMax", new MaxMetric(), _config.metricsTimeBucketSizeInSecs);
}
 
Developer ID: redBorder, Project: rb-bi, Lines: 11, Source: TridentKafkaEmitter.java

Example 15: prepare

import backtype.storm.task.TopologyContext; // import the required package/class
@Override
public void prepare(Map stormConf,
                    Object registrationArgument,
                    TopologyContext context,
                    IErrorReporter errorReporter) {
    this.stormId = context.getStormId();
    this.extemptMetrics = new HashSet<>();

    this.extemptMetrics.add(SchedulingMetricsCollectionHook.METRIC_EMITTED_MESSAGES);
}
 
Developer ID: uzh, Project: storm-scheduler, Lines: 11, Source: LoggingMetricsConsumer.java


Note: The backtype.storm.task.TopologyContext class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For distribution and use, please follow the License of the corresponding project; do not republish without permission.