

Java CountSyncPolicy Class Code Examples

This article collects typical usage examples of the Java class org.apache.storm.hdfs.bolt.sync.CountSyncPolicy. If you are wondering what exactly CountSyncPolicy does, how to use it, or what example code looks like, the curated class examples below may help.


The CountSyncPolicy class belongs to the org.apache.storm.hdfs.bolt.sync package. 15 code examples of the class are shown below, sorted by popularity by default.
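Before the per-project examples, here is a minimal standalone sketch of the contract the class implements: SyncPolicy.mark(Tuple, long) is called once per written tuple and returns true when the configured count is reached, at which point HdfsBolt syncs the file and calls reset(). The class and method signatures come from storm-hdfs; the main() driver is illustrative only, and passing null for the Tuple is safe here because CountSyncPolicy ignores it (the Metron test in Example 8 does the same).

import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy;
import org.apache.storm.hdfs.bolt.sync.SyncPolicy;

public class CountSyncPolicyDemo {
    public static void main(String[] args) {
        // Ask for a sync after every 3 tuples.
        SyncPolicy policy = new CountSyncPolicy(3);
        for (long offset = 1; offset <= 6; offset++) {
            // CountSyncPolicy only counts calls; the tuple argument is unused.
            boolean shouldSync = policy.mark(null, offset);
            System.out.println("tuple " + offset + " -> sync? " + shouldSync);
            if (shouldSync) {
                // HdfsBolt resets the policy after each sync; do the same here.
                policy.reset();
            }
        }
        // Prints sync? true on tuples 3 and 6, false otherwise.
    }
}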

Example 1: createHdfsBolt

import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy; // import the required package/class
/**
 * Create bolt which will persist ticks to HDFS.
 */
private static HdfsBolt createHdfsBolt() {

  // Use "|" instead of "," for field delimiter:
  RecordFormat format = new DelimitedRecordFormat()
    .withFieldDelimiter("|");
  // Sync the filesystem after every 100 tuples:
  SyncPolicy syncPolicy = new CountSyncPolicy(100);

  // Rotate files when they reach 5MB:
  FileRotationPolicy rotationPolicy = 
    new FileSizeRotationPolicy(5.0f, Units.MB);

  // Write records to <user>/stock-ticks/ directory in HDFS:
  FileNameFormat fileNameFormat = new DefaultFileNameFormat()
    .withPath("stock-ticks/");

  HdfsBolt hdfsBolt = new HdfsBolt()
    .withFsUrl("hdfs://localhost:8020")
    .withFileNameFormat(fileNameFormat)
    .withRecordFormat(format)
    .withRotationPolicy(rotationPolicy)
    .withSyncPolicy(syncPolicy);

  return hdfsBolt;
}
 
Author: amitchmca, Project: hadooparchitecturebook, Lines: 29, Source: MovingAvgLocalTopologyRunner.java

Example 2: buildHdfsBolt

import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy; // import the required package/class
private static HdfsBolt buildHdfsBolt(String hdfsUrl, String prefix, Fields fields){
    // use " : " as the field delimiter
    RecordFormat format = new DelimitedRecordFormat()
            .withFieldDelimiter(" : ").withFields(fields);

    // sync the filesystem after every 1k tuples
    SyncPolicy syncPolicy = new CountSyncPolicy(1000);

    // rotate files when they reach 5MB
    FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);

    FileNameFormat fileNameFormat = new DefaultFileNameFormat()
            .withPath("/storm/").withPrefix(prefix).withExtension(".seq");

    HdfsBolt hdfsBolt = new HdfsBolt()
            .withFsUrl(hdfsUrl)
            .withFileNameFormat(fileNameFormat)
            .withRecordFormat(format)
            .withRotationPolicy(rotationPolicy)
            .withSyncPolicy(syncPolicy)
            .withRetryCount(5)
            .addRotationAction(new MoveStormToLogAction().withDestination("/log"));

    return hdfsBolt;
}
 
Author: wuzhongdehua, Project: fksm, Lines: 26, Source: KafkaTopology.java

Example 3: createHdfsBolt

import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy; // import the required package/class
private static HdfsBolt createHdfsBolt() {
    // use "|" instead of "," for field delimiter
    RecordFormat format = new DelimitedRecordFormat()
            .withFieldDelimiter("|");

    // sync the filesystem after every 1k tuples
    SyncPolicy syncPolicy = new CountSyncPolicy(1000);

    // rotate files when they reach 5MB
    FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB);

    FileNameFormat fileNameFormat = new DefaultFileNameFormat()
            .withPath(Properties.getString("sa.storm.hdfs_output_file"));

    return new HdfsBolt()
            .withFsUrl(Properties.getString("sa.storm.hdfs_url"))
            .withFileNameFormat(fileNameFormat)
            .withRecordFormat(format)
            .withRotationPolicy(rotationPolicy)
            .withSyncPolicy(syncPolicy);
}
 
Author: mayconbordin, Project: erad2016-streamprocessing, Lines: 22, Source: SentimentAnalysisTopology.java

Example 4: getHdfsBolt

import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy; // import the required package/class
public HdfsBolt getHdfsBolt() {

    LOG.info("HDFSBOLT: Configuring the HdfsBolt");

    // Define the RecordFormat, SyncPolicy, and FileNameFormat
    RecordFormat format = new DelimitedRecordFormat().withFieldDelimiter(fieldDelimiter);
    SyncPolicy syncPolicy = new CountSyncPolicy(syncCount);
    FileNameFormat fileNameFormat = new DefaultFileNameFormat().withPath(outputLocation);

    // Configure the Bolt
    return new HdfsBolt()
            .withFsUrl(hdfsDefaultFs)
            .withFileNameFormat(fileNameFormat)
            .withRecordFormat(format)
            .withRotationPolicy(fileRotationPolicy)
            .withSyncPolicy(syncPolicy);
}
 
Author: sakserv, Project: storm-kafka-hdfs-example, Lines: 19, Source: HdfsBoltConfigBuilder.java

Example 5: getHdfsBolt

import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy; // import the required package/class
public static HdfsBolt getHdfsBolt(String fsUrl, String srcDir, String rotationDir) {
    // sync the filesystem after every tuple
    SyncPolicy syncPolicy = new CountSyncPolicy(1);

    FileNameFormat fileNameFormat = new DefaultFileNameFormat()
            .withPath(srcDir)
            .withExtension(".txt");

    RecordFormat format = new DelimitedRecordFormat().withFieldDelimiter(",");
    FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(1f, FileSizeRotationPolicy.Units.KB);

    HdfsBolt bolt = new HdfsBolt()
            .withFsUrl(fsUrl)
            .withFileNameFormat(fileNameFormat)
            .withRecordFormat(format)
            .withSyncPolicy(syncPolicy)
            .withRotationPolicy(rotationPolicy)
            .addRotationAction(new MoveFileAction().toDestination(rotationDir));

    return bolt;
}
 
Author: Parth-Brahmbhatt, Project: storm-smoke-test, Lines: 22, Source: ConnectorUtil.java

Example 6: configureHdfsBolt

import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy; // import the required package/class
public static void configureHdfsBolt(TopologyBuilder builder, 
                                     String delimiter, 
                                     String outputPath, 
                                     String hdfsUri,
                                     String hdfsBoltName, 
                                     String spoutName,
                                     int parallelismHint,
                                     FileRotationPolicy rotationPolicy,
                                     int syncCount) {
    
    LOG.info("HDFSBOLT: Configuring the HdfsBolt");
    
    // Define the RecordFormat, SyncPolicy, and FileNameFormat
    RecordFormat format = new DelimitedRecordFormat().withFieldDelimiter(delimiter);
    SyncPolicy syncPolicy = new CountSyncPolicy(syncCount);
    FileNameFormat fileNameFormat = new DefaultFileNameFormat().withPath(outputPath);
    
    // Configure the Bolt
    HdfsBolt bolt = new HdfsBolt()
            .withFsUrl(hdfsUri)
            .withFileNameFormat(fileNameFormat)
            .withRecordFormat(format)
            .withRotationPolicy(rotationPolicy)
            .withSyncPolicy(syncPolicy);
    
    // Set the Bolt
    builder.setBolt(hdfsBoltName, bolt, parallelismHint).shuffleGrouping(spoutName);

}
 
Author: sakserv, Project: storm-topology-examples, Lines: 30, Source: ConfigureHdfsBolt.java

Example 7: init

import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy; // import the required package/class
@Override
public void init(Map stormConfig, TopologyContext topologyContext, WriterConfiguration configurations) {
  this.stormConfig = stormConfig;
  this.stellarProcessor = new StellarProcessor();
  this.fileNameFormat.prepare(stormConfig,topologyContext);
  if(syncPolicy != null) {
    //if the user has specified the sync policy, we don't want to override their wishes.
    syncPolicyCreator = new ClonedSyncPolicyCreator(syncPolicy);
  }
  else {
    //if the user has not, then we want to have the sync policy depend on the batch size.
    syncPolicyCreator = (source, config) -> new CountSyncPolicy(config == null ? 1 : config.getBatchSize(source));
  }
}
 
Author: apache, Project: metron, Lines: 15, Source: HdfsWriter.java

Example 8: testClonedPolicy

import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy; // import the required package/class
@Test
public void testClonedPolicy() {
  CountSyncPolicy basePolicy = new CountSyncPolicy(5);
  ClonedSyncPolicyCreator creator = new ClonedSyncPolicyCreator(basePolicy);
  //ensure cloned policy continues to work and adheres to the contract: mark on 5th call.
  SyncPolicy clonedPolicy = creator.create("blah", null);
  for (int i = 0; i < 4; ++i) {
    Assert.assertFalse(clonedPolicy.mark(null, i));
  }
  Assert.assertTrue(clonedPolicy.mark(null, 5));
  //reclone policy and ensure it adheres to the original contract.
  clonedPolicy = creator.create("blah", null);
  Assert.assertFalse(clonedPolicy.mark(null, 0));
}
 
Author: apache, Project: metron, Lines: 15, Source: ClonedSyncPolicyCreatorTest.java

Example 9: testSingleFileIfNoStreamClosed

import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy; // import the required package/class
@Test
@SuppressWarnings("unchecked")
public void testSingleFileIfNoStreamClosed() throws Exception {
  String function = "FORMAT('test-%s/%s', test.key, test.key)";
  WriterConfiguration config = buildWriterConfiguration(function);
  HdfsWriter writer = new HdfsWriter().withFileNameFormat(testFormat);
  writer.init(new HashMap<String, String>(), createTopologyContext(), config);

  JSONObject message = new JSONObject();
  message.put("test.key", "test.value");
  ArrayList<JSONObject> messages = new ArrayList<>();
  messages.add(message);
  ArrayList<Tuple> tuples = new ArrayList<>();

  CountSyncPolicy basePolicy = new CountSyncPolicy(5);
  ClonedSyncPolicyCreator creator = new ClonedSyncPolicyCreator(basePolicy);

  writer.write(SENSOR_NAME, config, tuples, messages);
  writer.write(SENSOR_NAME, config, tuples, messages);
  writer.close();

  File outputFolder = new File(folder.getAbsolutePath() + "/test-test.value/test.value/");

  // The message should show up twice, both in the same file
  ArrayList<String> expected = new ArrayList<>();
  expected.add(message.toJSONString());
  expected.add(message.toJSONString());

  // Assert both messages are in the same file, because the stream stayed open
  Assert.assertEquals(1, outputFolder.listFiles().length);
  for (File file : outputFolder.listFiles()) {
    List<String> lines = Files.readAllLines(file.toPath());
    // Two lines in the single file, one per write() call
    Assert.assertEquals(2, lines.size());
    Assert.assertEquals(expected, lines);
  }
}
 
Author: apache, Project: metron, Lines: 38, Source: HdfsWriterTest.java

Example 10: testHandleAttemptsRotateIfStreamClosed

import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy; // import the required package/class
@Test
@SuppressWarnings("unchecked")
public void testHandleAttemptsRotateIfStreamClosed() throws Exception {
  String function = "FORMAT('test-%s/%s', test.key, test.key)";
  WriterConfiguration config = buildWriterConfiguration(function);
  HdfsWriter writer = new HdfsWriter().withFileNameFormat(testFormat);
  writer.init(new HashMap<String, String>(), createTopologyContext(), config);

  JSONObject message = new JSONObject();
  message.put("test.key", "test.value");
  ArrayList<JSONObject> messages = new ArrayList<>();
  messages.add(message);
  ArrayList<Tuple> tuples = new ArrayList<>();

  CountSyncPolicy basePolicy = new CountSyncPolicy(5);
  ClonedSyncPolicyCreator creator = new ClonedSyncPolicyCreator(basePolicy);

  writer.write(SENSOR_NAME, config, tuples, messages);
  writer.getSourceHandler(SENSOR_NAME, "test-test.value/test.value", config).closeOutputFile();
  writer.getSourceHandler(SENSOR_NAME, "test-test.value/test.value", config).handle(message, SENSOR_NAME, config, creator);
  writer.close();

  File outputFolder = new File(folder.getAbsolutePath() + "/test-test.value/test.value/");

  // The message should show up twice, once in each file
  ArrayList<String> expected = new ArrayList<>();
  expected.add(message.toJSONString());

  // Assert this went into a new file because it actually rotated
  Assert.assertEquals(2, outputFolder.listFiles().length);
  for (File file : outputFolder.listFiles()) {
    List<String> lines = Files.readAllLines(file.toPath());
    // One line per file
    Assert.assertEquals(1, lines.size());
    Assert.assertEquals(expected, lines);
  }
}
 
Author: apache, Project: metron, Lines: 38, Source: HdfsWriterTest.java

Example 11: configureHdfsBolt

import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy; // import the required package/class
public static void configureHdfsBolt(TopologyBuilder builder, String delimiter, String outputPath, String hdfsUri) {
    RecordFormat format = new DelimitedRecordFormat().withFieldDelimiter(delimiter);
    SyncPolicy syncPolicy = new CountSyncPolicy(1000);
    //FileRotationPolicy rotationPolicy = new TimedRotationPolicy(300, TimedRotationPolicy.TimeUnit.SECONDS);
    FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(1, FileSizeRotationPolicy.Units.KB);
    FileNameFormat fileNameFormat = new DefaultFileNameFormat().withPath(outputPath);
    HdfsBolt bolt = new HdfsBolt()
            .withFsUrl(hdfsUri)
            .withFileNameFormat(fileNameFormat)
            .withRecordFormat(format)
            .withRotationPolicy(rotationPolicy)
            .withSyncPolicy(syncPolicy);
    builder.setBolt("hdfsbolt", bolt, 1).shuffleGrouping("kafkaspout");

}
 
Author: sakserv, Project: storm-kafka-hdfs-starter, Lines: 16, Source: ConfigureHdfsBolt.java

Example 12: WARCHdfsBolt

import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy; // import the required package/class
public WARCHdfsBolt() {
    super();
    FileSizeRotationPolicy rotpol = new FileSizeRotationPolicy(1.0f,
            Units.GB);
    withRecordFormat(new WARCRecordFormat());
    withRotationPolicy(rotpol);
    // dummy sync policy: sync every 10 tuples
    withSyncPolicy(new CountSyncPolicy(10));
    // default to the local filesystem
    withFsUrl("file:///");
}
 
Author: DigitalPebble, Project: storm-crawler, Lines: 12, Source: WARCHdfsBolt.java

Example 13: main

import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy; // import the required package/class
public static void main(String[] args) {
    try{
        String zkhost = "wxb-1:2181,wxb-2:2181,wxb-3:2181";
        String topic = "order";
        String groupId = "id";
        int spoutNum = 3;
        int boltNum = 1;
        ZkHosts zkHosts = new ZkHosts(zkhost); // ZooKeeper quorum used by Kafka
        SpoutConfig spoutConfig = new SpoutConfig(zkHosts, topic, "/order", groupId);  // zkRoot "/order", consumer group "id"
        spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
        KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);

        // HDFS bolt
        // use "|" instead of "," for field delimiter
        RecordFormat format = new DelimitedRecordFormat()
                .withFieldDelimiter("|");

        // sync the filesystem after every 1k tuples
        SyncPolicy syncPolicy = new CountSyncPolicy(1000);

        // rotate files when they reach 5MB
        FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB);
        // FileRotationPolicy rotationPolicy = new TimedRotationPolicy(1.0f, TimedRotationPolicy.TimeUnit.MINUTES);

        FileNameFormat fileNameFormat = new DefaultFileNameFormat()
                .withPath("/tmp/").withPrefix("order_").withExtension(".log");

        HdfsBolt hdfsBolt = new HdfsBolt()
                .withFsUrl("hdfs://wxb-1:8020")
                .withFileNameFormat(fileNameFormat)
                .withRecordFormat(format)
                .withRotationPolicy(rotationPolicy)
                .withSyncPolicy(syncPolicy);

        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("spout", kafkaSpout, spoutNum);
        builder.setBolt("check", new CheckOrderBolt(), boltNum).shuffleGrouping("spout");
        builder.setBolt("counter", new CounterBolt(),boltNum).shuffleGrouping("check");
        builder.setBolt("hdfs", hdfsBolt,boltNum).shuffleGrouping("counter");

        Config config = new Config();
        config.setDebug(true);

        if(args!=null && args.length > 0) {
            config.setNumWorkers(2);
            StormSubmitter.submitTopology(args[0], config, builder.createTopology());
        } else {
            config.setMaxTaskParallelism(2);

            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("Wordcount-Topology", config, builder.createTopology());

            Thread.sleep(500000);

            cluster.shutdown();
        }
    }catch (Exception e) {
        e.printStackTrace();
    }
}
 
Author: realxujiang, Project: storm-kafka-examples, Lines: 61, Source: HdfsTopology.java

Example 14: configureHDFSBolt

import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy; // import the required package/class
public void configureHDFSBolt(TopologyBuilder builder) {
  // Use pipe as record boundary

  String rootPath = topologyConfig.getProperty("hdfs.path");
  String prefix = topologyConfig.getProperty("hdfs.file.prefix");
  String fsUrl = topologyConfig.getProperty("hdfs.url");
  String sourceMetastoreUrl = topologyConfig.getProperty("hive.metastore.url");
  String hiveStagingTableName = topologyConfig.getProperty("hive.staging.table.name");
  String databaseName = topologyConfig.getProperty("hive.database.name");
  Float rotationTimeInMinutes = Float.valueOf(topologyConfig.getProperty("hdfs.file.rotation.time.minutes"));

  RecordFormat format = new DelimitedRecordFormat().withFieldDelimiter(",");

  //Synchronize data buffer with the filesystem every 1000 tuples
  SyncPolicy syncPolicy = new CountSyncPolicy(1000);

  // Rotate data files when they reach five MB
  //FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);

  //Rotate every X minutes
  FileTimeRotationPolicy rotationPolicy = new FileTimeRotationPolicy(rotationTimeInMinutes, FileTimeRotationPolicy
      .Units.MINUTES);

  //Hive Partition Action
  HiveTablePartitionAction hivePartitionAction = new HiveTablePartitionAction(sourceMetastoreUrl,
      hiveStagingTableName, databaseName, fsUrl);

  //MoveFileAction moveFileAction = new MoveFileAction().toDestination(rootPath + "/working");


  FileNameFormat fileNameFormat = new DefaultFileNameFormat()
      .withPath(rootPath + "/staging")
      .withPrefix(prefix);

  // Instantiate the HdfsBolt
  HdfsBolt hdfsBolt = new HdfsBolt()
      .withFsUrl(fsUrl)
      .withFileNameFormat(fileNameFormat)
      .withRecordFormat(format)
      .withRotationPolicy(rotationPolicy)
      .withSyncPolicy(syncPolicy)
      .addRotationAction(hivePartitionAction);

  int hdfsBoltCount = Integer.valueOf(topologyConfig.getProperty("hdfsbolt.thread.count"));
  builder.setBolt("hdfs_bolt", hdfsBolt, hdfsBoltCount).shuffleGrouping("kafkaSpout");
}
 
Author: DhruvKumar, Project: iot-masterclass, Lines: 47, Source: TruckEventProcessorKafkaTopology.java

Example 15: main

import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy; // import the required package/class
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {

		String zkIp = "localhost";

		String nimbusHost = "sandbox.hortonworks.com";

		String zookeeperHost = zkIp +":2181";

		ZkHosts zkHosts = new ZkHosts(zookeeperHost);
		List<String> zkServers = new ArrayList<String>();
		zkServers.add(zkIp);
		SpoutConfig kafkaConfig = new SpoutConfig(zkHosts, "spertus-weather-events", "/spertus-weather-events","test_id");
		kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
		kafkaConfig.startOffsetTime = kafka.api.OffsetRequest.EarliestTime();
		kafkaConfig.zkServers = zkServers;
		kafkaConfig.zkRoot = "/spertus-weather-events";
		kafkaConfig.zkPort = 2181;
		kafkaConfig.forceFromStart = true;
		KafkaSpout kafkaSpout = new KafkaSpout(kafkaConfig);

		TopologyBuilder builder = new TopologyBuilder();

		HdfsBolt hdfsBolt = new HdfsBolt().withFsUrl("hdfs://sandbox.hortonworks.com:8020")
				.withFileNameFormat(new DefaultFileNameFormat().withPath("/tmp/test"))
				.withRecordFormat(new DelimitedRecordFormat().withFieldDelimiter("|"))
				.withSyncPolicy(new CountSyncPolicy(10))
				.withRotationPolicy(new FileSizeRotationPolicy(5.0f, Units.MB));
		builder.setSpout("raw-weather-events", kafkaSpout, 1);
		builder.setBolt("filter-airports", new FilterAirportsBolt(), 1).shuffleGrouping("raw-weather-events");
		//        builder.setBolt("test-bolt", new TestBolt(), 1).shuffleGrouping("raw-weather-events");
		//        builder.setBolt("hdfs-bolt", hdfsBolt, 1).shuffleGrouping("raw-weather-events");


		Map conf = new HashMap();
		conf.put(backtype.storm.Config.TOPOLOGY_WORKERS, 4);
		conf.put(backtype.storm.Config.TOPOLOGY_DEBUG, true);
		if (args != null && args.length > 0) {
			StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
		}   else {
			LocalCluster cluster = new LocalCluster();
			cluster.submitTopology("weather-topology", conf, builder.createTopology());
		}
	}
 
Author: mspertus, Project: Big-Data-tutorial, Lines: 44, Source: WeatherTopology.java


Note: The org.apache.storm.hdfs.bolt.sync.CountSyncPolicy examples in this article were collected from open-source projects hosted on platforms such as GitHub and MSDocs. The snippets were selected from code contributed to those projects, and copyright remains with the original authors; consult each project's license before using or redistributing the code. Do not republish without permission.