This article collects and summarizes typical usage examples of the Java class org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy. If you have been wondering what FileSizeRotationPolicy does, how it is used, or where to find examples of it, the curated class code examples below should help.
FileSizeRotationPolicy belongs to the org.apache.storm.hdfs.bolt.rotation package. Eleven code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
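For orientation, here is a minimal sketch of the pattern the examples share (the NameNode URL, output path, delimiter, and sync count are placeholders rather than values from any particular example): a FileSizeRotationPolicy is built from a size and a FileSizeRotationPolicy.Units value and passed to HdfsBolt.withRotationPolicy(...), alongside a record format, a file name format, and a sync policy.
// Minimal sketch: rotate output files once they reach roughly 5 MB (placeholder URL and path).
FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB);
HdfsBolt bolt = new HdfsBolt()
    .withFsUrl("hdfs://localhost:8020")
    .withFileNameFormat(new DefaultFileNameFormat().withPath("/tmp/"))
    .withRecordFormat(new DelimitedRecordFormat().withFieldDelimiter("|"))
    .withRotationPolicy(rotationPolicy)
    .withSyncPolicy(new CountSyncPolicy(1000));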
Example 1: createHdfsBolt
import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy; // import the required package/class
/**
* Creates a bolt that persists stock ticks to HDFS.
*/
private static HdfsBolt createHdfsBolt() {
// Use "|" instead of "," for field delimiter:
RecordFormat format = new DelimitedRecordFormat()
.withFieldDelimiter("|");
// sync the filesystem after every 100 tuples:
SyncPolicy syncPolicy = new CountSyncPolicy(100);
// Rotate files when they reach 5MB:
FileRotationPolicy rotationPolicy =
new FileSizeRotationPolicy(5.0f, Units.MB);
// Write records to <user>/stock-ticks/ directory in HDFS:
FileNameFormat fileNameFormat = new DefaultFileNameFormat()
.withPath("stock-ticks/");
HdfsBolt hdfsBolt = new HdfsBolt()
.withFsUrl("hdfs://localhost:8020")
.withFileNameFormat(fileNameFormat)
.withRecordFormat(format)
.withRotationPolicy(rotationPolicy)
.withSyncPolicy(syncPolicy);
return hdfsBolt;
}
Example 2: buildHdfsBolt
import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy; // import the required package/class
private static HdfsBolt buildHdfsBolt(String hdfsUrl, String prefix, Fields fields) {
// use " : " as the field delimiter and write only the given fields
RecordFormat format = new DelimitedRecordFormat()
.withFieldDelimiter(" : ").withFields(fields);
// sync the filesystem after every 1k tuples
SyncPolicy syncPolicy = new CountSyncPolicy(1000);
// rotate files when they reach 5MB
FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);
FileNameFormat fileNameFormat = new DefaultFileNameFormat()
.withPath("/storm/").withPrefix(prefix).withExtension(".seq");
HdfsBolt hdfsBolt = new HdfsBolt()
.withFsUrl(hdfsUrl)
.withFileNameFormat(fileNameFormat)
.withRecordFormat(format)
.withRotationPolicy(rotationPolicy)
.withSyncPolicy(syncPolicy)
.withRetryCount(5)
.addRotationAction(new MoveStormToLogAction().withDestination("/log"));
return hdfsBolt;
}
Example 3: createHdfsBolt
import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy; // import the required package/class
private static HdfsBolt createHdfsBolt() {
// use "|" instead of "," for field delimiter
RecordFormat format = new DelimitedRecordFormat()
.withFieldDelimiter("|");
// sync the filesystem after every 1k tuples
SyncPolicy syncPolicy = new CountSyncPolicy(1000);
// rotate files when they reach 5MB
FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB);
FileNameFormat fileNameFormat = new DefaultFileNameFormat()
.withPath(Properties.getString("sa.storm.hdfs_output_file"));
return new HdfsBolt()
.withFsUrl(Properties.getString("sa.storm.hdfs_url"))
.withFileNameFormat(fileNameFormat)
.withRecordFormat(format)
.withRotationPolicy(rotationPolicy)
.withSyncPolicy(syncPolicy);
}
Example 4: getSizeBasedFileRotationPolicy
import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy; // import the required package/class
private static FileRotationPolicy getSizeBasedFileRotationPolicy(String unitsConfigured, int sizeConfigured) {
FileSizeRotationPolicy.Units units;
if (unitsConfigured.toUpperCase().equals("KB")) {
units = FileSizeRotationPolicy.Units.KB;
} else if (unitsConfigured.toUpperCase().equals("MB")) {
units = FileSizeRotationPolicy.Units.MB;
} else if (unitsConfigured.toUpperCase().equals("GB")) {
units = FileSizeRotationPolicy.Units.GB;
} else if (unitsConfigured.toUpperCase().equals("TB")) {
units = FileSizeRotationPolicy.Units.TB;
} else {
units = FileSizeRotationPolicy.Units.MB;
}
return new FileSizeRotationPolicy(sizeConfigured, units);
}
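A brief usage note for the helper above: it maps a unit string read from configuration onto the FileSizeRotationPolicy.Units enum, falling back to MB for unrecognized values. A hypothetical call (the arguments are illustrative, not taken from the original project) would look like:
// Rotate files once they reach 128 MB.
FileRotationPolicy rotationPolicy = getSizeBasedFileRotationPolicy("MB", 128);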
Example 5: getHdfsBolt
import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy; // import the required package/class
public static HdfsBolt getHdfsBolt(String fsUrl, String srcDir, String rotationDir) {
// sync the filesystem after every tuple
SyncPolicy syncPolicy = new CountSyncPolicy(1);
FileNameFormat fileNameFormat = new DefaultFileNameFormat()
.withPath(srcDir)
.withExtension(".txt");
RecordFormat format = new DelimitedRecordFormat().withFieldDelimiter(",");
FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(1f, FileSizeRotationPolicy.Units.KB);
HdfsBolt bolt = new HdfsBolt()
.withFsUrl(fsUrl)
.withFileNameFormat(fileNameFormat)
.withRecordFormat(format)
.withSyncPolicy(syncPolicy)
.withRotationPolicy(rotationPolicy)
.addRotationAction(new MoveFileAction().toDestination(rotationDir));
return bolt;
}
Example 6: configureHdfsBolt
import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy; // import the required package/class
public static void configureHdfsBolt(TopologyBuilder builder, String delimiter, String outputPath, String hdfsUri) {
RecordFormat format = new DelimitedRecordFormat().withFieldDelimiter(delimiter);
SyncPolicy syncPolicy = new CountSyncPolicy(1000);
//FileRotationPolicy rotationPolicy = new TimedRotationPolicy(300, TimedRotationPolicy.TimeUnit.SECONDS);
FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(1, FileSizeRotationPolicy.Units.KB);
FileNameFormat fileNameFormat = new DefaultFileNameFormat().withPath(outputPath);
HdfsBolt bolt = new HdfsBolt()
.withFsUrl(hdfsUri)
.withFileNameFormat(fileNameFormat)
.withRecordFormat(format)
.withRotationPolicy(rotationPolicy)
.withSyncPolicy(syncPolicy);
builder.setBolt("hdfsbolt", bolt, 1).shuffleGrouping("kafkaspout");
}
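A hedged usage sketch for the helper above (the spout configuration and HDFS URI are placeholders): configureHdfsBolt assumes the TopologyBuilder already contains a spout registered under the id "kafkaspout", because the HDFS bolt it adds subscribes to that component with a shuffle grouping.
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("kafkaspout", new KafkaSpout(spoutConfig), 1); // spoutConfig is assumed to be defined elsewhere
configureHdfsBolt(builder, "|", "/storm/output/", "hdfs://namenode:8020");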
Example 7: WARCHdfsBolt
import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy; // import the required package/class
public WARCHdfsBolt() {
super();
FileSizeRotationPolicy rotpol = new FileSizeRotationPolicy(1.0f,
Units.GB);
withRecordFormat(new WARCRecordFormat());
withRotationPolicy(rotpol);
// dummy sync policy
withSyncPolicy(new CountSyncPolicy(10));
// default local filesystem
withFsUrl("file:///");
}
Example 8: main
import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy; // import the required package/class
public static void main(String[] args) {
try{
String zkhost = "wxb-1:2181,wxb-2:2181,wxb-3:2181";
String topic = "order";
String groupId = "id";
int spoutNum = 3;
int boltNum = 1;
ZkHosts zkHosts = new ZkHosts(zkhost); // the ZooKeeper ensemble used by Kafka
SpoutConfig spoutConfig = new SpoutConfig(zkHosts, topic, "/order", groupId); // create /order /id
spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
// HDFS bolt
// use "|" instead of "," for field delimiter
RecordFormat format = new DelimitedRecordFormat()
.withFieldDelimiter("|");
// sync the filesystem after every 1k tuples
SyncPolicy syncPolicy = new CountSyncPolicy(1000);
// rotate files when they reach 5MB
FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB);
// FileRotationPolicy rotationPolicy = new TimedRotationPolicy(1.0f, TimedRotationPolicy.TimeUnit.MINUTES);
FileNameFormat fileNameFormat = new DefaultFileNameFormat()
.withPath("/tmp/").withPrefix("order_").withExtension(".log");
HdfsBolt hdfsBolt = new HdfsBolt()
.withFsUrl("hdfs://wxb-1:8020")
.withFileNameFormat(fileNameFormat)
.withRecordFormat(format)
.withRotationPolicy(rotationPolicy)
.withSyncPolicy(syncPolicy);
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("spout", kafkaSpout, spoutNum);
builder.setBolt("check", new CheckOrderBolt(), boltNum).shuffleGrouping("spout");
builder.setBolt("counter", new CounterBolt(),boltNum).shuffleGrouping("check");
builder.setBolt("hdfs", hdfsBolt,boltNum).shuffleGrouping("counter");
Config config = new Config();
config.setDebug(true);
if(args!=null && args.length > 0) {
config.setNumWorkers(2);
StormSubmitter.submitTopology(args[0], config, builder.createTopology());
} else {
config.setMaxTaskParallelism(2);
LocalCluster cluster = new LocalCluster();
cluster.submitTopology("Wordcount-Topology", config, builder.createTopology());
Thread.sleep(500000);
cluster.shutdown();
}
}catch (Exception e) {
e.printStackTrace();
}
}
Example 9: main
import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy; // import the required package/class
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
String zkIp = "localhost";
String nimbusHost = "sandbox.hortonworks.com";
String zookeeperHost = zkIp +":2181";
ZkHosts zkHosts = new ZkHosts(zookeeperHost);
List<String> zkServers = new ArrayList<String>();
zkServers.add(zkIp);
SpoutConfig kafkaConfig = new SpoutConfig(zkHosts, "spertus-weather-events", "/spertus-weather-events","test_id");
kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
kafkaConfig.startOffsetTime = kafka.api.OffsetRequest.EarliestTime();
kafkaConfig.zkServers = zkServers;
kafkaConfig.zkRoot = "/spertus-weather-events";
kafkaConfig.zkPort = 2181;
kafkaConfig.forceFromStart = true;
KafkaSpout kafkaSpout = new KafkaSpout(kafkaConfig);
TopologyBuilder builder = new TopologyBuilder();
HdfsBolt hdfsBolt = new HdfsBolt().withFsUrl("hdfs://sandbox.hortonworks.com:8020")
.withFileNameFormat(new DefaultFileNameFormat().withPath("/tmp/test"))
.withRecordFormat(new DelimitedRecordFormat().withFieldDelimiter("|"))
.withSyncPolicy(new CountSyncPolicy(10))
.withRotationPolicy(new FileSizeRotationPolicy(5.0f, Units.MB));
builder.setSpout("raw-weather-events", kafkaSpout, 1);
builder.setBolt("filter-airports", new FilterAirportsBolt(), 1).shuffleGrouping("raw-weather-events");
// builder.setBolt("test-bolt", new TestBolt(), 1).shuffleGrouping("raw-weather-events");
// builder.setBolt("hdfs-bolt", hdfsBolt, 1).shuffleGrouping("raw-weather-events");
Map conf = new HashMap();
conf.put(backtype.storm.Config.TOPOLOGY_WORKERS, 4);
conf.put(backtype.storm.Config.TOPOLOGY_DEBUG, true);
if (args != null && args.length > 0) {
StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
} else {
LocalCluster cluster = new LocalCluster();
cluster.submitTopology("weather-topology", conf, builder.createTopology());
}
}
Example 10: initializeHDFSBolt
import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy; // import the required package/class
private boolean initializeHDFSBolt(String topology_name, String name) {
try {
String messageUpstreamComponent = messageComponents
.get(messageComponents.size() - 1);
System.out.println("[OpenSOC] ------" + name
+ " is initializing from " + messageUpstreamComponent);
RecordFormat format = new DelimitedRecordFormat()
.withFieldDelimiter(
config.getString("bolt.hdfs.field.delimiter")
.toString()).withFields(
new Fields("message"));
// sync the filesystem after a configurable number of tuples (bolt.hdfs.batch.size)
SyncPolicy syncPolicy = new CountSyncPolicy(Integer.valueOf(config
.getString("bolt.hdfs.batch.size").toString()));
// rotate files when they reach a configured size (bolt.hdfs.file.rotation.size.in.mb)
FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(
Float.valueOf(config.getString(
"bolt.hdfs.file.rotation.size.in.mb").toString()),
Units.MB);
FileNameFormat fileNameFormat = new DefaultFileNameFormat()
.withPath(config.getString("bolt.hdfs.wip.file.path")
.toString());
// Post rotate action
MoveFileAction moveFileAction = (new MoveFileAction())
.toDestination(config.getString(
"bolt.hdfs.finished.file.path").toString());
HdfsBolt hdfsBolt = new HdfsBolt()
.withFsUrl(
config.getString("bolt.hdfs.file.system.url")
.toString())
.withFileNameFormat(fileNameFormat)
.withRecordFormat(format)
.withRotationPolicy(rotationPolicy)
.withSyncPolicy(syncPolicy)
.addRotationAction(moveFileAction);
if (config.getString("bolt.hdfs.compression.codec.class") != null) {
hdfsBolt.withCompressionCodec(config.getString(
"bolt.hdfs.compression.codec.class").toString());
}
builder.setBolt(name, hdfsBolt,
config.getInt("bolt.hdfs.parallelism.hint"))
.shuffleGrouping(messageUpstreamComponent, "message")
.setNumTasks(config.getInt("bolt.hdfs.num.tasks"));
} catch (Exception e) {
e.printStackTrace();
System.exit(0);
}
return true;
}
Example 11: main
import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy; // import the required package/class
public static void main(String[] args) throws Exception {
Config config = new Config();
config.setNumWorkers(1);
SentenceSpout spout = new SentenceSpout();
// sync the filesystem after every 1k tuples
SyncPolicy syncPolicy = new CountSyncPolicy(1000);
// rotate files when they reach 5MB
FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);
FileNameFormat fileNameFormat = new DefaultFileNameFormat()
.withPath("/source/")
.withExtension(".seq");
// create sequence format instance.
DefaultSequenceFormat format = new DefaultSequenceFormat("timestamp", "sentence");
SequenceFileBolt bolt = new SequenceFileBolt()
.withFsUrl(args[0])
.withFileNameFormat(fileNameFormat)
.withSequenceFormat(format)
.withRotationPolicy(rotationPolicy)
.withSyncPolicy(syncPolicy)
.withCompressionType(SequenceFile.CompressionType.RECORD)
.withCompressionCodec("deflate")
.addRotationAction(new MoveFileAction().toDestination("/dest/"));
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout(SENTENCE_SPOUT_ID, spout, 1);
// SentenceSpout --> SequenceFileBolt
builder.setBolt(BOLT_ID, bolt, 4)
.shuffleGrouping(SENTENCE_SPOUT_ID);
if (args.length == 1) {
LocalCluster cluster = new LocalCluster();
cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());
waitForSeconds(120);
cluster.killTopology(TOPOLOGY_NAME);
cluster.shutdown();
System.exit(0);
} else if(args.length == 2) {
StormSubmitter.submitTopology(args[1], config, builder.createTopology());
}
}