

Java IBatchSpout Class Code Examples

This article collects typical usage examples of the Java class storm.trident.spout.IBatchSpout. If you are wondering what IBatchSpout does, how to use it, or where to find real-world examples of it, the curated code below should help.


The IBatchSpout class belongs to the storm.trident.spout package. Six code examples of the class are shown below, ordered by popularity by default.
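
Before the examples, a quick look at the contract itself is helpful. In pre-2.0 Storm and JStorm, storm.trident.spout.IBatchSpout declares six methods; emitBatch can be asked to re-emit a previously failed batchId, so implementations typically cache a batch until it is acked. The comments below are added for orientation:

import java.io.Serializable;
import java.util.Map;

import backtype.storm.task.TopologyContext;
import backtype.storm.tuple.Fields;
import storm.trident.operation.TridentCollector;

public interface IBatchSpout extends Serializable {
    void open(Map conf, TopologyContext context);              // called once per task at startup
    void emitBatch(long batchId, TridentCollector collector);  // emit (or replay) the tuples of one batch
    void ack(long batchId);                                    // batch fully processed; replay state can be dropped
    void close();                                              // release resources at shutdown
    Map getComponentConfiguration();                           // per-component config, or null
    Fields getOutputFields();                                  // schema of the emitted tuples
}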

Example 1: getSpoutComponentConfig

import storm.trident.spout.IBatchSpout; // import the required package/class
// Return the per-component configuration of whichever spout interface the object implements.
private static Map getSpoutComponentConfig(Object spout) {
    if(spout instanceof IRichSpout) {
        return ((IRichSpout) spout).getComponentConfiguration();
    } else if (spout instanceof IBatchSpout) {
        return ((IBatchSpout) spout).getComponentConfiguration();
    } else {
        return ((ITridentSpout) spout).getComponentConfiguration();
    }
}
 
Developer: zhangjunfang, Project: jstorm-0.9.6.3-, Lines: 10, Source: TridentTopology.java
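
For context, the map read back here originates in the spout itself: a spout can override getComponentConfiguration() to supply per-component settings. A minimal, hypothetical fragment follows; the key chosen is just one common option, not something this project necessarily sets:

import java.util.HashMap;
import java.util.Map;

import backtype.storm.Config;

// Fragment of a hypothetical spout implementation: the settings returned here
// are what getSpoutComponentConfig(...) above reads back out.
@Override
public Map getComponentConfiguration() {
    Map<String, Object> conf = new HashMap<String, Object>();
    conf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 3); // bound in-flight batches for this spout only
    return conf;
}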

Example 2: buildTopology

import storm.trident.spout.IBatchSpout; // import the required package/class
@Override
public StormTopology buildTopology(Config topologyConf) throws Exception {
    IBatchSpout wordSpout = new FileBasedBatchSpout("words.txt", new Fields("word"), 10);

    TridentTopology topology = new TridentTopology();

    Stream wordsStream = topology.newStream("someWords", wordSpout);

    TridentKafkaStateFactory stateFactory = TridentConnectorUtil.getTridentKafkaStateFactory(TOPIC_NAME, kafkaBrokerlist, "word", "word", topologyConf);
    wordsStream.partitionPersist(stateFactory, new Fields("word"), new TridentKafkaUpdater(), new Fields()).parallelismHint(1);

    JmsStateFactory jmsStateFactory = TridentConnectorUtil.getJmsStateFactory(jmsConnectionString, JMS_QUEUE_NAME);
    wordsStream.partitionPersist(jmsStateFactory, new Fields("word"), new JmsUpdater(), new Fields()).parallelismHint(1);

    Stream kafkaStream = topology.newStream("kafkaTridentSpout", TridentConnectorUtil.getTridentKafkaEmitter(zkConnString, TOPIC_NAME, topologyConf)).parallelismHint(1);
    Stream jmsStream = topology.newStream("jmsTridentSpout", TridentConnectorUtil.getTridentJmsSpouts(jmsConnectionString, JMS_QUEUE_NAME, topologyConf, "words")).parallelismHint(1);

    kafkaStream = kafkaStream.global().each(new Fields("str"), new TridentWordCount(), new Fields("word", "count")).parallelismHint(1);
    jmsStream = jmsStream.global().each(new Fields("words"), new TridentWordCount(), new Fields("word", "count")).parallelismHint(1);

    HBaseStateFactory hBaseStateFactory = TridentConnectorUtil.getTridentHbaseFactory(hbaseUrl, TABLE_NAME, "word", COLUMN_FAMILY, Lists.newArrayList("word"),
            Lists.newArrayList("count"), topologyConf);
    TridentState tridentState = jmsStream.global().partitionPersist(hBaseStateFactory, new Fields("word", "count"), new HBaseUpdater(), new Fields()).parallelismHint(1);

    HdfsStateFactory tridentHdfsFactory = TridentConnectorUtil.getTridentHdfsFactory(hdfsUrl, HDFS_SRC_DIR, HDFS_ROTATION_DIR, "word", "count");
    kafkaStream.global().partitionPersist(tridentHdfsFactory, new Fields("word", "count"), new HdfsUpdater(), new Fields()).parallelismHint(1);

    CassandraStateFactory cassandraStateFactory = TridentConnectorUtil.getCassandraStateFactory(cassandraConnString, KEY_SPACE_NAME, "word", COLUMN_FAMILY, topologyConf);
    Map<String, Class> fieldToTypeMap = Maps.newHashMap();
    fieldToTypeMap.put("word", String.class);
    fieldToTypeMap.put("count", Long.class);
    SimpleCassandraTridentTupleMapper mapper = new SimpleCassandraTridentTupleMapper(KEY_SPACE_NAME, COLUMN_FAMILY, "word", fieldToTypeMap);
    kafkaStream.global().partitionPersist(cassandraStateFactory, new Fields("word", "count"),
            new CassandraUpdater(mapper), new Fields()).parallelismHint(1);
    return topology.build();
}
 
Developer: Parth-Brahmbhatt, Project: storm-smoke-test, Lines: 37, Source: WordCountTridentSmokeTest.java
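
The FileBasedBatchSpout used above is a helper from the storm-smoke-test project and is not reproduced on this page. A plausible minimal reconstruction, inferred only from its constructor arguments (file name, output fields, batch size), might look like the following; the project's real implementation may differ:

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import backtype.storm.task.TopologyContext;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;
import storm.trident.operation.TridentCollector;
import storm.trident.spout.IBatchSpout;

// Hypothetical reconstruction: read up to batchSize lines per batch and cache
// each batch by id so a failed batch can be replayed identically.
public class FileBasedBatchSpout implements IBatchSpout {
    private final String fileName;
    private final Fields outputFields;
    private final int batchSize;
    private final Map<Long, List<String>> batches = new HashMap<Long, List<String>>();
    private transient BufferedReader reader;

    public FileBasedBatchSpout(String fileName, Fields outputFields, int batchSize) {
        this.fileName = fileName;
        this.outputFields = outputFields;
        this.batchSize = batchSize;
    }

    @Override
    public void open(Map conf, TopologyContext context) {
        try {
            reader = new BufferedReader(new FileReader(fileName));
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public void emitBatch(long batchId, TridentCollector collector) {
        List<String> batch = batches.get(batchId);
        if (batch == null) {                        // first attempt: read fresh lines
            batch = new ArrayList<String>();
            try {
                String line;
                while (batch.size() < batchSize && (line = reader.readLine()) != null) {
                    batch.add(line);
                }
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
            batches.put(batchId, batch);            // remember for possible replay
        }
        for (String line : batch) {
            collector.emit(new Values(line));
        }
    }

    @Override
    public void ack(long batchId) {
        batches.remove(batchId);                    // batch is done; drop its replay state
    }

    @Override
    public void close() {
        try {
            if (reader != null) reader.close();
        } catch (IOException ignored) {
        }
    }

    @Override
    public Map getComponentConfiguration() {
        return null;
    }

    @Override
    public Fields getOutputFields() {
        return outputFields;
    }
}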

Example 3: newStream

import storm.trident.spout.IBatchSpout; // import the required package/class
// Dispatch to the newStream overload that matches the concrete spout type.
public Stream newStream(String txId, ITridentDataSource dataSource) {
    if (dataSource instanceof IBatchSpout) {
        return newStream(txId, (IBatchSpout) dataSource);
    } else if (dataSource instanceof ITridentSpout) {
        return newStream(txId, (ITridentSpout) dataSource);
    } else if (dataSource instanceof IPartitionedTridentSpout) {
        return newStream(txId, (IPartitionedTridentSpout) dataSource);
    } else if (dataSource instanceof IOpaquePartitionedTridentSpout) {
        return newStream(txId, (IOpaquePartitionedTridentSpout) dataSource);
    } else {
        throw new UnsupportedOperationException("Unsupported stream");
    }
}
 
Developer: alibaba, Project: jstorm, Lines: 14, Source: TridentTopology.java
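
This dispatch is what lets caller code hand any supported spout flavor to a single entry point. A short hypothetical usage fragment (the spout variables are placeholders for concrete implementations):

// Fragment: each call is routed to the matching overload by the instanceof chain above.
TridentTopology topology = new TridentTopology();
Stream batchStream = topology.newStream("batchWords", myBatchSpout);        // IBatchSpout
Stream txStream    = topology.newStream("txWords",    myTridentSpout);      // ITridentSpout
Stream partStream  = topology.newStream("partWords",  myPartitionedSpout);  // IPartitionedTridentSpout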

Example 4: setSpout

import storm.trident.spout.IBatchSpout; // import the required package/class
public SpoutDeclarer setSpout(String id, String streamName, String txStateId, IBatchSpout spout, Integer parallelism, String batchGroup) {
    return setSpout(id, streamName, txStateId, new BatchSpoutExecutor(spout), parallelism, batchGroup);
}
 
Developer: zhangjunfang, Project: jstorm-0.9.6.3-, Lines: 4, Source: TridentTopologyBuilder.java
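
Note the adapter at work in this overload: Trident does not execute an IBatchSpout directly. BatchSpoutExecutor wraps it so that it speaks the ITridentSpout coordinator/emitter protocol the runtime expects. The adapting step in isolation (myBatchSpout being a placeholder for any IBatchSpout instance):

// Fragment: adapt a batch-at-a-time spout to the transactional spout interface.
ITridentSpout adapted = new BatchSpoutExecutor(myBatchSpout);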

Example 5: newStream

import storm.trident.spout.IBatchSpout; // import the required package/class
public Stream newStream(String txId, IBatchSpout spout) {
    Node n = new SpoutNode(getUniqueStreamId(), spout.getOutputFields(), txId, spout, SpoutNode.SpoutType.BATCH);
    return addNode(n);
}
 
Developer: zhangjunfang, Project: jstorm-0.9.6.3-, Lines: 5, Source: TridentTopology.java
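
The SpoutNode built here takes its schema from spout.getOutputFields(), so whatever emitBatch later emits must line up with that declaration, field for field. A hypothetical fragment showing the pairing (field names invented):

// Fragment of a hypothetical IBatchSpout: declared fields and emitted values must match.
@Override
public Fields getOutputFields() {
    return new Fields("text", "actor");                   // schema the SpoutNode is built from
}

@Override
public void emitBatch(long batchId, TridentCollector collector) {
    collector.emit(new Values("hello trident", "pere"));  // one value per declared field, in order
}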

Example 6: basicPrimitives

import storm.trident.spout.IBatchSpout; // import the required package/class
public static StormTopology basicPrimitives(IBatchSpout spout) throws IOException {

        // A topology is a set of streams.
        // A stream is a DAG of Spouts and Bolts.
        // (In Storm there are Spouts (data producers) and Bolts (data processors).
        // Spouts create Tuples and Bolts manipulate them and possibly emit new ones.)

        // But in Trident we operate at a higher level.
        // Bolts are created and connected automatically out of higher-level constructs.
        // Also, Spouts are "batched".
        TridentTopology topology = new TridentTopology();

        // The "each" primitive allows us to apply either filters or functions to the stream
        // We always have to select the input fields.
        topology
                .newStream("filter", spout)
                .each(new Fields("actor"), new RegexFilter("pere"))
                .each(new Fields("text", "actor"), new Print());

        // Functions describe their output fields, which are always appended to the input fields.
        // As you can see, each() operations can be chained.
        topology
                .newStream("function", spout)
                .each(new Fields("text"), new ToUpperCase(), new Fields("uppercased_text"))
                .each(new Fields("text", "uppercased_text"), new Print());

        // You can prune unnecessary fields using "project"
        topology
                .newStream("projection", spout)
                .each(new Fields("text"), new ToUpperCase(), new Fields("uppercased_text"))
                .project(new Fields("uppercased_text"))
                .each(new Fields("uppercased_text"), new Print());

        // Stream can be parallelized with "parallelismHint"
        // Parallelism hint is applied downwards until a partitioning operation (we will see this later).
        // This topology creates 5 spouts and 5 bolts:
        // Let's debug that with TridentOperationContext.partitionIndex !
        topology
                .newStream("parallel", spout)
                .each(new Fields("actor"), new RegexFilter("pere"))
                .parallelismHint(5)
                .each(new Fields("text", "actor"), new Print());

        // You can perform aggregations by grouping the stream and then applying an aggregation
        // Note how each actor appears more than once. We are aggregating inside small batches (aka micro batches)
        // This is useful for pre-processing before storing the result to databases
        topology
                .newStream("aggregation", spout)
                .groupBy(new Fields("actor"))
                .aggregate(new Count(), new Fields("count"))
                .each(new Fields("actor", "count"), new Print())
        ;

        // In order to aggregate across batches, we need persistentAggregate.
        // This example increments a count in the DB, using the result of these micro batch aggregations
        // (here we are simply using a hash map for the "database").
        topology
                .newStream("persistentAggregation", spout)
                .groupBy(new Fields("actor"))
                .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
        ;

        return topology.build();
    }
 
Developer: eshioji, Project: trident-tutorial, Lines: 65, Source: Part01_BasicPrimitives.java
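
Example 6 leans on three small helpers from the trident-tutorial project (RegexFilter, ToUpperCase, Print) that are not reproduced above; Count is Trident's built-in storm.trident.operation.builtin.Count. Plausible minimal reconstructions of the three helpers follow; the project's actual code may differ:

import java.util.regex.Pattern;

import backtype.storm.tuple.Values;
import storm.trident.operation.BaseFilter;
import storm.trident.operation.BaseFunction;
import storm.trident.operation.TridentCollector;
import storm.trident.tuple.TridentTuple;

// Filter: keep only tuples whose first selected field matches the regex.
class RegexFilter extends BaseFilter {
    private final Pattern pattern;

    public RegexFilter(String regex) {
        this.pattern = Pattern.compile(regex);
    }

    @Override
    public boolean isKeep(TridentTuple tuple) {
        return pattern.matcher(tuple.getString(0)).matches();
    }
}

// Function: append an upper-cased copy of the first selected field
// (a function's output fields are always appended to the input fields).
class ToUpperCase extends BaseFunction {
    @Override
    public void execute(TridentTuple tuple, TridentCollector collector) {
        collector.emit(new Values(tuple.getString(0).toUpperCase()));
    }
}

// Debugging filter: print the selected fields and keep every tuple.
// (It must be a filter, since it is used with the two-argument each(Fields, Filter) overload.)
class Print extends BaseFilter {
    @Override
    public boolean isKeep(TridentTuple tuple) {
        System.out.println(tuple.toString());
        return true;
    }
}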


Note: The storm.trident.spout.IBatchSpout examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by the community; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not republish without permission.