

Java Stream.partitionPersist Method Code Examples

This article collects and summarizes typical usage examples of the Java method storm.trident.Stream.partitionPersist. If you are wondering what Stream.partitionPersist does, how to call it, or what real uses of it look like, the curated code examples below may help. You can also explore further usage examples of the enclosing class, storm.trident.Stream.
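
Before the individual examples, here is a minimal sketch of the two partitionPersist overloads that appear below: the three-argument form (StateFactory, input Fields, StateUpdater) and the four-argument form that additionally declares output fields. MyStateFactory and MyStateUpdater are hypothetical placeholders, not classes from the projects listed on this page; a possible implementation of such a pair is sketched after Example 1.

import backtype.storm.tuple.Fields;
import storm.trident.Stream;
import storm.trident.TridentState;

public class PartitionPersistSketch {

    // 3-argument form: persist the listed input fields; nothing is emitted downstream.
    public static TridentState persist(Stream inputStream) {
        return inputStream.partitionPersist(
                new MyStateFactory(),       // StateFactory: builds the backing State for each partition
                new Fields("key", "value"), // input fields handed to the StateUpdater
                new MyStateUpdater());      // StateUpdater: writes each batch of tuples into the State
    }

    // 4-argument form: additionally declares the fields the updater may emit, which become
    // available through the returned TridentState's newValuesStream().
    public static TridentState persistAndEmit(Stream inputStream) {
        return inputStream.partitionPersist(
                new MyStateFactory(),
                new Fields("key", "value"),
                new MyStateUpdater(),
                new Fields());              // empty here: the updater emits no new values
    }
}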


The following lists 8 code examples of the Stream.partitionPersist method, sorted by popularity by default.

Example 1: buildTopology

import storm.trident.Stream; // import the package/class that the method depends on
public static StormTopology buildTopology()
{
	TridentTopology topology = new TridentTopology();
	RandomWordSpout spout1 = new RandomWordSpout();

	Stream inputStream = topology.newStream("faltu", spout1); // the stream name "faltu" isn't used anywhere else.

	/**
	 * partitionPersist: the partitionPersist operation updates a source of state.
	 * It returns a TridentState object, which can then be used in stateQuery operations elsewhere in the topology.
	 * Args:
	 * StateFactory instance - the factory implements the makeState API and should return an instance of State.
	 * Fields list - the fields to be persisted; they must be present in the input stream.
	 * StateUpdater instance - the StateUpdater updates the underlying State.
	 */
	inputStream
	    .partitionPersist(new RedisStoreStateFactory(), new Fields("randomWord"), new RedisStoreStateUpdater());

	return topology.build();
}
 
Developer: BinitaBharati, Project: storm-trident-example, Lines: 22, Source: ExampleTopology.java
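
The Javadoc comment in Example 1 describes partitionPersist's three arguments: a StateFactory, the list of fields to persist, and a StateUpdater. For reference, below is a minimal, hypothetical in-memory implementation of such a factory/updater pair. InMemoryWordState, InMemoryWordStateFactory and InMemoryWordStateUpdater are illustrative names only and are unrelated to the RedisStoreStateFactory / RedisStoreStateUpdater classes used above.

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentLinkedQueue;

import backtype.storm.task.IMetricsContext;
import storm.trident.operation.TridentCollector;
import storm.trident.state.BaseStateUpdater;
import storm.trident.state.State;
import storm.trident.state.StateFactory;
import storm.trident.tuple.TridentTuple;

// Hypothetical in-memory State that simply collects the words handed to it.
class InMemoryWordState implements State {
    private final ConcurrentLinkedQueue<String> words = new ConcurrentLinkedQueue<String>();

    void add(String word) { words.add(word); }

    @Override
    public void beginCommit(Long txid) { /* no-op in this sketch */ }

    @Override
    public void commit(Long txid) { /* no-op in this sketch */ }
}

// StateFactory: invoked once per partition to build the backing State instance.
class InMemoryWordStateFactory implements StateFactory {
    @Override
    public State makeState(Map conf, IMetricsContext metrics, int partitionIndex, int numPartitions) {
        return new InMemoryWordState();
    }
}

// StateUpdater: receives each batch of tuples and writes it into the State.
class InMemoryWordStateUpdater extends BaseStateUpdater<InMemoryWordState> {
    @Override
    public void updateState(InMemoryWordState state, List<TridentTuple> tuples, TridentCollector collector) {
        for (TridentTuple tuple : tuples) {
            state.add(tuple.getStringByField("randomWord"));
        }
    }
}

With these classes, the call in Example 1 could equivalently be written as inputStream.partitionPersist(new InMemoryWordStateFactory(), new Fields("randomWord"), new InMemoryWordStateUpdater()).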

Example 2: buildTopology

import storm.trident.Stream; // import the package/class that the method depends on
public static StormTopology buildTopology() {
    LOG.info("Building topology.");
    TridentTopology topology = new TridentTopology();
    SalesSpout spout = new SalesSpout();
    Stream inputStream = topology.newStream("sales", spout);
    SalesMapper mapper = new SalesMapper();
    inputStream.partitionPersist(
            new CassandraCqlIncrementalStateFactory<String, Number>(new Sum(), mapper),
            new Fields("price", "state", "product"),
            new CassandraCqlIncrementalStateUpdater<String, Number>());
    return topology.build();
}
 
Developer: hpcc-systems, Project: storm-cassandra-cql, Lines: 13, Source: SalesTopology.java

Example 3: buildTopology

import storm.trident.Stream; // import the package/class that the method depends on
@SuppressWarnings({ "rawtypes", "unchecked" })
public static StormTopology buildTopology() {
    LOG.info("Building topology.");
    TridentTopology topology = new TridentTopology();
    SimpleUpdateSpout spout = new SimpleUpdateSpout();
    Stream inputStream = topology.newStream("test", spout);
    SimpleUpdateMapper mapper = new SimpleUpdateMapper();
    inputStream.partitionPersist(new CassandraCqlStateFactory(ConsistencyLevel.ONE), new Fields("test"), new CassandraCqlStateUpdater(mapper));
    // inputStream.each(new Fields("test"), new Debug());
    return topology.build();
}
 
Developer: hpcc-systems, Project: storm-cassandra-cql, Lines: 12, Source: SimpleUpdateTopology.java

Example 4: main

import storm.trident.Stream; // import the package/class that the method depends on
public static void main(String... args) throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {

    // start building the topology
    TridentTopology topology = new TridentTopology();

    // Kafka as an opaque Trident spout
    OpaqueTridentKafkaSpout spout = new OpaqueTridentKafkaSpoutBuilder(Conf.zookeeper, Conf.inputTopic).build();
    Stream stream = topology.newStream(kafkaSpout, spout);

    // mapping transaction messages to pairs: (person, amount)
    Stream atomicTransactions = stream.each(strF, Functions.mapToPersonAmount, personAmountF);

    // printing the (person, amount) tuples
    atomicTransactions.each(personAmountF, Functions.printlnFunction, emptyF);

    // aggregating transactions and mapping them to Kafka messages
    Stream transactionsGroupped = atomicTransactions.groupBy(personF)
            .persistentAggregate(new MemoryMapState.Factory(), amountF, new Sum(), sumF).newValuesStream()
            .each(personSumF, Functions.mapToKafkaMessage, keyMessageF);

    // Kafka as a sink -- producing to outputTopic
    TridentKafkaStateFactory stateFactory = new TridentKafkaStateFactory()
            .withKafkaTopicSelector(new DefaultTopicSelector(Conf.outputTopic))
            .withTridentTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper<String, String>(key, message));
    transactionsGroupped.partitionPersist(stateFactory, keyMessageF, new TridentKafkaUpdater(), emptyF);

    // submitting the topology to a local cluster
    new LocalCluster().submitTopology(kafkaAccountsTopology, topologyConfig(), topology.build());

    // wait a while, then run the Kafka producer
    Sleep.seconds(5);
    KafkaProduceExample.start(20);
}
 
Developer: dzikowski, Project: simple-kafka-storm-java, Lines: 35, Source: KafkaStormTridentExample.java

Example 5: buildTopology

import storm.trident.Stream; // import the package/class that the method depends on
public static StormTopology buildTopology(String hdfsUrl){
    FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence", "key"), 1000, new Values("the cow jumped over the moon", 1l),
            new Values("the man went to the store and bought some candy", 2l), new Values("four score and seven years ago", 3l),
            new Values("how many apples can you eat", 4l), new Values("to be or not to be the person", 5l));
    spout.setCycle(true);

    TridentTopology topology = new TridentTopology();
    Stream stream = topology.newStream("spout1", spout);

    Fields hdfsFields = new Fields("sentence", "key");

    FileNameFormat fileNameFormat = new DefaultFileNameFormat()
            .withPath("/trident")
            .withPrefix("trident")
            .withExtension(".txt");

    RecordFormat recordFormat = new DelimitedRecordFormat()
            .withFields(hdfsFields);

    FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB);

    HdfsState.Options options = new HdfsState.HdfsFileOptions()
            .withFileNameFormat(fileNameFormat)
            .withRecordFormat(recordFormat)
            .withRotationPolicy(rotationPolicy)
            .withFsUrl(hdfsUrl);

    StateFactory factory = new HdfsStateFactory().withOptions(options);

    TridentState state = stream
            .partitionPersist(factory, hdfsFields, new HdfsUpdater(), new Fields());

    return topology.build();
}
 
Developer: ptgoetz, Project: storm-hdfs, Lines: 35, Source: TridentFileTopology.java

Example 6: buildTopology

import storm.trident.Stream; // import the package/class that the method depends on
public static StormTopology buildTopology(String hdfsUrl){
    FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence", "key"), 1000, new Values("the cow jumped over the moon", 1l),
            new Values("the man went to the store and bought some candy", 2l), new Values("four score and seven years ago", 3l),
            new Values("how many apples can you eat", 4l), new Values("to be or not to be the person", 5l));
    spout.setCycle(true);

    TridentTopology topology = new TridentTopology();
    Stream stream = topology.newStream("spout1", spout);

    Fields hdfsFields = new Fields("sentence", "key");

    FileNameFormat fileNameFormat = new DefaultFileNameFormat()
            .withPath("/trident")
            .withPrefix("trident")
            .withExtension(".seq");

    FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB);

    HdfsState.Options seqOpts = new HdfsState.SequenceFileOptions()
            .withFileNameFormat(fileNameFormat)
            .withSequenceFormat(new DefaultSequenceFormat("key", "sentence"))
            .withRotationPolicy(rotationPolicy)
            .withFsUrl(hdfsUrl)
            .addRotationAction(new MoveFileAction().toDestination("/dest2/"));

    StateFactory factory = new HdfsStateFactory().withOptions(seqOpts);

    TridentState state = stream
            .partitionPersist(factory, hdfsFields, new HdfsUpdater(), new Fields());

    return topology.build();
}
 
Developer: ptgoetz, Project: storm-hdfs, Lines: 33, Source: TridentSequenceTopology.java

Example 7: buildTopology

import storm.trident.Stream; // import the package/class that the method depends on
public static StormTopology buildTopology(String hbaseRoot){
    Fields fields = new Fields("word", "count");
    FixedBatchSpout spout = new FixedBatchSpout(fields, 4,
            new Values("storm", 1),
            new Values("trident", 1),
            new Values("needs", 1),
            new Values("javadoc", 1)
    );
    spout.setCycle(true);

    TridentHBaseMapper tridentHBaseMapper = new SimpleTridentHBaseMapper()
            .withColumnFamily("cf")
            .withColumnFields(new Fields("word"))
            .withCounterFields(new Fields("count"))
            .withRowKeyField("word");

    HBaseValueMapper rowToStormValueMapper = new WordCountValueMapper();

    HBaseProjectionCriteria projectionCriteria = new HBaseProjectionCriteria();
    projectionCriteria.addColumn(new HBaseProjectionCriteria.ColumnMetaData("cf", "count"));

    HBaseState.Options options = new HBaseState.Options()
            .withConfigKey(hbaseRoot)
            .withDurability(Durability.SYNC_WAL)
            .withMapper(tridentHBaseMapper)
            .withProjectionCriteria(projectionCriteria)
            .withRowToStormValueMapper(rowToStormValueMapper)
            .withTableName("WordCount");

    StateFactory factory = new HBaseStateFactory(options);

    TridentTopology topology = new TridentTopology();
    Stream stream = topology.newStream("spout1", spout);

    stream.partitionPersist(factory, fields,  new HBaseUpdater(), new Fields());

    TridentState state = topology.newStaticState(factory);
    stream = stream.stateQuery(state, new Fields("word"), new HBaseQuery(), new Fields("columnName","columnValue"));
    stream.each(new Fields("word","columnValue"), new PrintFunction(), new Fields());
    return topology.build();
}
 
Developer: mengzhiyi, Project: storm-hbase-1.0.x, Lines: 42, Source: WordCountTrident.java

Example 8: main

import storm.trident.Stream; // import the package/class that the method depends on
public static void main(String[] args) {

    // topology definition
    Config conf = new Config();
    // conf.put(Config.TOPOLOGY_DEBUG, true);
    String entrada = "D:\\produban\\Logs\\bepxnxusrsp01\\vmwtbitarecol01\\sample_5500.txt";
    String salida = "d:\\parseado_proxy.log";
    if (args.length == 2) {
        entrada = args[0];
        salida = args[1];
    }

    SimpleFileStringSpout spout1 = new SimpleFileStringSpout(entrada, "linea");

    Fields hdfsFields = new Fields("eventTimeStamp", "timeTaken", "clientIP", "User", "Group", "Exception", "filterResult", "category",
            "referer", "responseCode", "action", "method", "contentType", "protocol", "requestDomain",
            "requestPort", "requestPath", "requestQuery", "requestURIExtension", "userAgent", "serverIP", "scBytes", "csBytes",
            "virusID", "destinationIP");
    // Fields hdfsFields = new Fields("resultado");

    FileNameFormat fileNameFormat = new DefaultFileNameFormat()
            .withPath("/user/cloudera/")
            .withPrefix("trident")
            .withExtension(".txt");

    RecordFormat recordFormat = new DelimitedRecordFormat()
            .withFields(hdfsFields).withFieldDelimiter("\001");

    FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(10.0f, FileSizeRotationPolicy.Units.MB);

    HdfsState.Options options = new HdfsState.HdfsFileOptions()
            .withFileNameFormat(fileNameFormat)
            .withRecordFormat(recordFormat)
            .withRotationPolicy(rotationPolicy)
            .withFsUrl("hdfs://192.168.182.129:8020");
    System.setProperty("HADOOP_USER_NAME", "cloudera"); // needed so the job does not try to log in as the user launching the program

    StateFactory factory = new HdfsStateFactory().withOptions(options);

    TridentTopology topology = new TridentTopology();
    Stream parseaLogs =
            topology.newStream("spout1", spout1)
                    .each(new Fields("linea"),
                          new ParseProxy(),
                          new Fields("eventTimeStamp", "timeTaken", "clientIP", "User", "Group", "Exception", "filterResult", "category",
                                     "referer", "responseCode", "action", "method", "contentType", "protocol", "requestDomain",
                                     "requestPort", "requestPath", "requestQuery", "requestURIExtension", "userAgent", "serverIP", "scBytes", "csBytes",
                                     "virusID", "destinationIP"));
    parseaLogs.partitionPersist(factory, hdfsFields, new HdfsUpdater(), new Fields());
    // .each(new Fields("eventTimeStamp","timeTaken","clientIP","User"), new Print("", salida));

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("basic_primitives", conf, topology.build());
}
 
Developer: Produban, Project: openbus, Lines: 57, Source: HDFSOutputTopology.java


Note: The storm.trident.Stream.partitionPersist method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors, and the copyright of the source code remains with the original authors; please consult each project's License before distributing or reusing the code. Do not reproduce this article without permission.