This article collects typical usage examples of the Java class org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy. If you are unsure what FileRotationPolicy is for, or how to use it in your own code, the curated examples below should help.
The FileRotationPolicy class belongs to the org.apache.storm.hdfs.bolt.rotation package. Fifteen code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
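Before diving into the examples, here is a minimal sketch of the two concrete implementations of FileRotationPolicy that appear throughout this page: size-based and time-based rotation. The 5 MB and 10 minute thresholds are arbitrary illustrations, not recommended values.
import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy;
import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy;
import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy.Units;
import org.apache.storm.hdfs.bolt.rotation.TimedRotationPolicy;

// Rotate the output file once it reaches 5 MB ...
FileRotationPolicy bySize = new FileSizeRotationPolicy(5.0f, Units.MB);
// ... or rotate it every 10 minutes, depending on which policy the HdfsBolt is given.
FileRotationPolicy byTime = new TimedRotationPolicy(10.0f, TimedRotationPolicy.TimeUnit.MINUTES);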
Example 1: createHdfsBolt
import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; // import the required package/class
/**
 * Create bolt which will persist ticks to HDFS.
 */
private static HdfsBolt createHdfsBolt() {
    // Use "|" instead of "," as the field delimiter:
    RecordFormat format = new DelimitedRecordFormat()
            .withFieldDelimiter("|");
    // Sync the filesystem after every 100 tuples:
    SyncPolicy syncPolicy = new CountSyncPolicy(100);
    // Rotate files when they reach 5MB:
    FileRotationPolicy rotationPolicy =
            new FileSizeRotationPolicy(5.0f, Units.MB);
    // Write records to the <user>/stock-ticks/ directory in HDFS:
    FileNameFormat fileNameFormat = new DefaultFileNameFormat()
            .withPath("stock-ticks/");
    HdfsBolt hdfsBolt = new HdfsBolt()
            .withFsUrl("hdfs://localhost:8020")
            .withFileNameFormat(fileNameFormat)
            .withRecordFormat(format)
            .withRotationPolicy(rotationPolicy)
            .withSyncPolicy(syncPolicy);
    return hdfsBolt;
}
Example 2: buildHdfsBolt
import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; // import the required package/class
private static HdfsBolt buildHdfsBolt(String hdfsUrl, String prefix, Fields fields) {
    // Use " : " as the field delimiter and write only the configured fields
    RecordFormat format = new DelimitedRecordFormat()
            .withFieldDelimiter(" : ").withFields(fields);
    // Sync the filesystem after every 1k tuples
    SyncPolicy syncPolicy = new CountSyncPolicy(1000);
    // Rotate files when they reach 5MB
    FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);
    FileNameFormat fileNameFormat = new DefaultFileNameFormat()
            .withPath("/storm/").withPrefix(prefix).withExtension(".seq");
    HdfsBolt hdfsBolt = new HdfsBolt()
            .withFsUrl(hdfsUrl)
            .withFileNameFormat(fileNameFormat)
            .withRecordFormat(format)
            .withRotationPolicy(rotationPolicy)
            .withSyncPolicy(syncPolicy)
            .withRetryCount(5)
            .addRotationAction(new MoveStormToLogAction().withDestination("/log"));
    return hdfsBolt;
}
Example 3: createHdfsBolt
import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; // import the required package/class
private static HdfsBolt createHdfsBolt() {
    // Use "|" instead of "," as the field delimiter
    RecordFormat format = new DelimitedRecordFormat()
            .withFieldDelimiter("|");
    // Sync the filesystem after every 1k tuples
    SyncPolicy syncPolicy = new CountSyncPolicy(1000);
    // Rotate files when they reach 5MB
    FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB);
    FileNameFormat fileNameFormat = new DefaultFileNameFormat()
            .withPath(Properties.getString("sa.storm.hdfs_output_file"));
    return new HdfsBolt()
            .withFsUrl(Properties.getString("sa.storm.hdfs_url"))
            .withFileNameFormat(fileNameFormat)
            .withRecordFormat(format)
            .withRotationPolicy(rotationPolicy)
            .withSyncPolicy(syncPolicy);
}
Example 4: getTimeBasedFileRotationPolicy
import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; // import the required package/class
public static FileRotationPolicy getTimeBasedFileRotationPolicy(String rotationUnits, int rotationCount) {
    // Map the configured unit string onto TimedRotationPolicy.TimeUnit,
    // falling back to MINUTES for unrecognized values
    TimedRotationPolicy.TimeUnit units;
    if (rotationUnits.toUpperCase().equals("SECONDS")) {
        units = TimedRotationPolicy.TimeUnit.SECONDS;
    } else if (rotationUnits.toUpperCase().equals("MINUTES")) {
        units = TimedRotationPolicy.TimeUnit.MINUTES;
    } else if (rotationUnits.toUpperCase().equals("HOURS")) {
        units = TimedRotationPolicy.TimeUnit.HOURS;
    } else if (rotationUnits.toUpperCase().equals("DAYS")) {
        units = TimedRotationPolicy.TimeUnit.DAYS;
    } else {
        units = TimedRotationPolicy.TimeUnit.MINUTES;
    }
    return new TimedRotationPolicy(rotationCount, units);
}
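The if/else chains in Example 4 (and Example 7 below) could arguably be collapsed using the enum's own valueOf. A minimal hedged sketch, in the same fragment style as the examples on this page and assuming the same TimedRotationPolicy API:
import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy;
import org.apache.storm.hdfs.bolt.rotation.TimedRotationPolicy;

public static FileRotationPolicy timeBasedPolicy(String rotationUnits, int rotationCount) {
    TimedRotationPolicy.TimeUnit units;
    try {
        // valueOf() throws IllegalArgumentException for names it does not recognize
        units = TimedRotationPolicy.TimeUnit.valueOf(rotationUnits.toUpperCase());
    } catch (IllegalArgumentException e) {
        units = TimedRotationPolicy.TimeUnit.MINUTES; // same fallback as the original helper
    }
    return new TimedRotationPolicy(rotationCount, units);
}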
Example 5: getHdfsBoltConfigBuilder
import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; // import the required package/class
/**
 * Returns the HdfsBoltConfigBuilder ready for use.
 * @return HdfsBoltConfigBuilder used to construct the HDFS bolt configuration
 */
private HdfsBoltConfigBuilder getHdfsBoltConfigBuilder() {
    FileRotationPolicy fileRotationPolicy = HdfsBoltConfigBuilder.getTimeBasedFileRotationPolicy(
            propertyParser.getProperty(ConfigVars.STORM_HDFS_BOLT_ROTATION_POLICY_UNITS_KEY),
            Integer.parseInt(propertyParser.getProperty(ConfigVars.STORM_HDFS_BOLT_ROTATION_POLICY_COUNT_KEY)));
    return new HdfsBoltConfigBuilder.Builder()
            .setFieldDelimiter(propertyParser.getProperty(ConfigVars.STORM_HDFS_BOLT_FIELD_DELIMITER_KEY))
            .setOutputLocation(propertyParser.getProperty(ConfigVars.STORM_HDFS_BOLT_OUTPUT_LOCATION_KEY))
            .setHdfsDefaultFs(propertyParser.getProperty(ConfigVars.HDFS_DEFAULT_FS_KEY))
            .setSyncCount(Integer.parseInt(propertyParser.getProperty(ConfigVars.STORM_HDFS_BOLT_SYNC_COUNT_KEY)))
            .setBoltParallelism(Integer.parseInt(
                    propertyParser.getProperty(ConfigVars.STORM_HDFS_BOLT_PARALLELISM_KEY)))
            .setBoltName(propertyParser.getProperty(ConfigVars.STORM_HDFS_BOLT_NAME_KEY))
            .setFileRotationPolicy(fileRotationPolicy)
            .build();
}
Example 6: getSizeBasedFileRotationPolicy
import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; // import the required package/class
private static FileRotationPolicy getSizeBasedFileRotationPolicy(String unitsConfigured, int sizeConfigured) {
    // Map the configured unit string onto FileSizeRotationPolicy.Units,
    // falling back to MB for unrecognized values
    FileSizeRotationPolicy.Units units;
    if (unitsConfigured.toUpperCase().equals("KB")) {
        units = FileSizeRotationPolicy.Units.KB;
    } else if (unitsConfigured.toUpperCase().equals("MB")) {
        units = FileSizeRotationPolicy.Units.MB;
    } else if (unitsConfigured.toUpperCase().equals("GB")) {
        units = FileSizeRotationPolicy.Units.GB;
    } else if (unitsConfigured.toUpperCase().equals("TB")) {
        units = FileSizeRotationPolicy.Units.TB;
    } else {
        units = FileSizeRotationPolicy.Units.MB;
    }
    return new FileSizeRotationPolicy(sizeConfigured, units);
}
Example 7: getTimeBasedFileRotationPolicy
import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; // import the required package/class
private static FileRotationPolicy getTimeBasedFileRotationPolicy(String unitsConfigured, int timeConfigured) {
    // Map the configured unit string onto TimedRotationPolicy.TimeUnit,
    // falling back to MINUTES for unrecognized values
    TimedRotationPolicy.TimeUnit units;
    if (unitsConfigured.toUpperCase().equals("SECONDS")) {
        units = TimedRotationPolicy.TimeUnit.SECONDS;
    } else if (unitsConfigured.toUpperCase().equals("MINUTES")) {
        units = TimedRotationPolicy.TimeUnit.MINUTES;
    } else if (unitsConfigured.toUpperCase().equals("HOURS")) {
        units = TimedRotationPolicy.TimeUnit.HOURS;
    } else if (unitsConfigured.toUpperCase().equals("DAYS")) {
        units = TimedRotationPolicy.TimeUnit.DAYS;
    } else {
        units = TimedRotationPolicy.TimeUnit.MINUTES;
    }
    return new TimedRotationPolicy(timeConfigured, units);
}
Example 8: getHdfsBolt
import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; // import the required package/class
public static HdfsBolt getHdfsBolt(String fsUrl, String srcDir, String rotationDir) {
    // Sync the filesystem after every tuple
    SyncPolicy syncPolicy = new CountSyncPolicy(1);
    FileNameFormat fileNameFormat = new DefaultFileNameFormat()
            .withPath(srcDir)
            .withExtension(".txt");
    RecordFormat format = new DelimitedRecordFormat().withFieldDelimiter(",");
    // Rotate files when they reach 1KB, then move them to the rotation directory
    FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(1f, FileSizeRotationPolicy.Units.KB);
    HdfsBolt bolt = new HdfsBolt()
            .withFsUrl(fsUrl)
            .withFileNameFormat(fileNameFormat)
            .withRecordFormat(format)
            .withSyncPolicy(syncPolicy)
            .withRotationPolicy(rotationPolicy)
            .addRotationAction(new MoveFileAction().toDestination(rotationDir));
    return bolt;
}
Example 9: configureHdfsBolt
import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; // import the required package/class
public static void configureHdfsBolt(TopologyBuilder builder,
                                     String delimiter,
                                     String outputPath,
                                     String hdfsUri,
                                     String hdfsBoltName,
                                     String spoutName,
                                     int parallelismHint,
                                     FileRotationPolicy rotationPolicy,
                                     int syncCount) {
    LOG.info("HDFSBOLT: Configuring the HdfsBolt");
    // Define the RecordFormat, SyncPolicy, and FileNameFormat
    RecordFormat format = new DelimitedRecordFormat().withFieldDelimiter(delimiter);
    SyncPolicy syncPolicy = new CountSyncPolicy(syncCount);
    FileNameFormat fileNameFormat = new DefaultFileNameFormat().withPath(outputPath);
    // Configure the bolt
    HdfsBolt bolt = new HdfsBolt()
            .withFsUrl(hdfsUri)
            .withFileNameFormat(fileNameFormat)
            .withRecordFormat(format)
            .withRotationPolicy(rotationPolicy)
            .withSyncPolicy(syncPolicy);
    // Wire the bolt into the topology, reading from the given spout
    builder.setBolt(hdfsBoltName, bolt, parallelismHint).shuffleGrouping(spoutName);
}
Example 10: configureFileRotationPolicy
import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; // import the required package/class
public static FileRotationPolicy configureFileRotationPolicy(String propsFileName) throws IOException {
    // Parse the properties file
    PropertyParser propertyParser = new PropertyParser();
    propertyParser.parsePropsFile(propsFileName);
    // Read the config values that determine which FileRotationPolicy is enabled
    boolean useTimeBasedFileRotationPolicy =
            Boolean.parseBoolean(propertyParser.getProperty(
                    ConfigVars.HDFS_BOLT_USE_TIME_BASED_FILEROTATIONPOLICY_KEY));
    boolean useSizeBasedFileRotationPolicy =
            Boolean.parseBoolean(propertyParser.getProperty(
                    ConfigVars.HDFS_BOLT_USE_SIZE_BASED_FILEROTATIONPOLICY_KEY));
    // Guard against both size- and time-based policies being enabled
    if (useSizeBasedFileRotationPolicy && useTimeBasedFileRotationPolicy) {
        LOG.error("ERROR: You cannot use both time and size based rotation policies");
        LOG.error("ERROR: Validate that either hdfs.bolt.use.time.based.filerotationpolicy" +
                " or hdfs.bolt.use.size.based.filerotationpolicy is false");
        throw new IllegalArgumentException("ERROR: You cannot use both time and size based rotation policies");
    }
    // Return the appropriate FileRotationPolicy based on the config
    if (useSizeBasedFileRotationPolicy) {
        return getSizeBasedFileRotationPolicy(
                propertyParser.getProperty(ConfigVars.HDFS_BOLT_USE_SIZE_BASED_FILEROTATIONPOLICY_UNIT_KEY),
                Integer.parseInt(propertyParser.getProperty(
                        ConfigVars.HDFS_BOLT_USE_SIZE_BASED_FILEROTATIONPOLICY_SIZE_KEY)));
    } else {
        return getTimeBasedFileRotationPolicy(
                propertyParser.getProperty(ConfigVars.HDFS_BOLT_USE_TIME_BASED_FILEROTATIONPOLICY_UNIT_KEY),
                Integer.parseInt(propertyParser.getProperty(
                        ConfigVars.HDFS_BOLT_USE_TIME_BASED_FILEROTATIONPOLICY_DURATION_KEY)));
    }
}
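A minimal hedged sketch of how the property-driven helpers in Examples 9 and 10 might be combined in a topology driver, assuming both methods are accessible from the same class; the properties file name, HDFS paths, and component names below are illustrative placeholders, not values from the original project:
// Hedged usage sketch combining configureFileRotationPolicy (Example 10)
// and configureHdfsBolt (Example 9).
public static void buildTopology() throws IOException {
    TopologyBuilder builder = new TopologyBuilder();
    // ... spout registration omitted ...
    FileRotationPolicy rotationPolicy = configureFileRotationPolicy("topology.properties");
    configureHdfsBolt(builder,
            "|",                     // field delimiter
            "/storm/output/",        // HDFS output path
            "hdfs://localhost:8020", // HDFS URI
            "hdfsBolt",              // bolt name
            "kafkaSpout",            // upstream spout name
            1,                       // parallelism hint
            rotationPolicy,
            1000);                   // sync count (tuples per fsync)
}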
Example 11: SourceHandler
import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; // import the required package/class
public SourceHandler(List<RotationAction> rotationActions,
                     FileRotationPolicy rotationPolicy,
                     SyncPolicy syncPolicy,
                     FileNameFormat fileNameFormat,
                     SourceHandlerCallback cleanupCallback) throws IOException {
    this.rotationActions = rotationActions;
    this.rotationPolicy = rotationPolicy;
    this.syncPolicy = syncPolicy;
    this.fileNameFormat = fileNameFormat;
    this.cleanupCallback = cleanupCallback;
    initialize();
}
Example 12: configureHdfsBolt
import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; // import the required package/class
public static void configureHdfsBolt(TopologyBuilder builder, String delimiter, String outputPath, String hdfsUri) {
    RecordFormat format = new DelimitedRecordFormat().withFieldDelimiter(delimiter);
    // Sync the filesystem after every 1k tuples
    SyncPolicy syncPolicy = new CountSyncPolicy(1000);
    // Alternative: rotate every 300 seconds instead of by size
    //FileRotationPolicy rotationPolicy = new TimedRotationPolicy(300, TimedRotationPolicy.TimeUnit.SECONDS);
    // Rotate files when they reach 1KB
    FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(1, FileSizeRotationPolicy.Units.KB);
    FileNameFormat fileNameFormat = new DefaultFileNameFormat().withPath(outputPath);
    HdfsBolt bolt = new HdfsBolt()
            .withFsUrl(hdfsUri)
            .withFileNameFormat(fileNameFormat)
            .withRecordFormat(format)
            .withRotationPolicy(rotationPolicy)
            .withSyncPolicy(syncPolicy);
    builder.setBolt("hdfsbolt", bolt, 1).shuffleGrouping("kafkaspout");
}
Example 13: main
import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; // import the required package/class
public static void main(String[] args) {
    try {
        String zkhost = "wxb-1:2181,wxb-2:2181,wxb-3:2181";
        String topic = "order";
        String groupId = "id";
        int spoutNum = 3;
        int boltNum = 1;
        ZkHosts zkHosts = new ZkHosts(zkhost); // ZooKeeper ensemble used by Kafka
        SpoutConfig spoutConfig = new SpoutConfig(zkHosts, topic, "/order", groupId); // creates /order /id
        spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
        KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
        // HDFS bolt
        // Use "|" instead of "," as the field delimiter
        RecordFormat format = new DelimitedRecordFormat()
                .withFieldDelimiter("|");
        // Sync the filesystem after every 1k tuples
        SyncPolicy syncPolicy = new CountSyncPolicy(1000);
        // Rotate files when they reach 5MB
        FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB);
        // Alternative: rotate every minute instead of by size
        // FileRotationPolicy rotationPolicy = new TimedRotationPolicy(1.0f, TimedRotationPolicy.TimeUnit.MINUTES);
        FileNameFormat fileNameFormat = new DefaultFileNameFormat()
                .withPath("/tmp/").withPrefix("order_").withExtension(".log");
        HdfsBolt hdfsBolt = new HdfsBolt()
                .withFsUrl("hdfs://wxb-1:8020")
                .withFileNameFormat(fileNameFormat)
                .withRecordFormat(format)
                .withRotationPolicy(rotationPolicy)
                .withSyncPolicy(syncPolicy);
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("spout", kafkaSpout, spoutNum);
        builder.setBolt("check", new CheckOrderBolt(), boltNum).shuffleGrouping("spout");
        builder.setBolt("counter", new CounterBolt(), boltNum).shuffleGrouping("check");
        builder.setBolt("hdfs", hdfsBolt, boltNum).shuffleGrouping("counter");
        Config config = new Config();
        config.setDebug(true);
        if (args != null && args.length > 0) {
            // Submit to a real cluster when a topology name is supplied
            config.setNumWorkers(2);
            StormSubmitter.submitTopology(args[0], config, builder.createTopology());
        } else {
            // Otherwise run in a local cluster for a while, then shut down
            config.setMaxTaskParallelism(2);
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("Wordcount-Topology", config, builder.createTopology());
            Thread.sleep(500000);
            cluster.shutdown();
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 14: getFileRotationPolicy
import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; // import the required package/class
public FileRotationPolicy getFileRotationPolicy() {
    return fileRotationPolicy;
}
Example 15: setFileRotationPolicy
import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy; // import the required package/class
public Builder setFileRotationPolicy(FileRotationPolicy fileRotationPolicy) {
    this.fileRotationPolicy = fileRotationPolicy;
    return this;
}
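Examples 14 and 15 are fragments of a builder-style configuration class like the one consumed in Example 5. A hedged skeleton of how they presumably fit together; everything beyond the two fragments themselves is an assumption for illustration, not the project's actual code:
// Hedged skeleton only: the field and method names mirror the fragments above,
// the rest of the class body is assumed.
public class HdfsBoltConfigBuilder {
    private final FileRotationPolicy fileRotationPolicy;

    private HdfsBoltConfigBuilder(Builder builder) {
        this.fileRotationPolicy = builder.fileRotationPolicy;
    }

    public FileRotationPolicy getFileRotationPolicy() {
        return fileRotationPolicy;
    }

    public static class Builder {
        private FileRotationPolicy fileRotationPolicy;

        public Builder setFileRotationPolicy(FileRotationPolicy fileRotationPolicy) {
            this.fileRotationPolicy = fileRotationPolicy;
            return this;
        }

        public HdfsBoltConfigBuilder build() {
            return new HdfsBoltConfigBuilder(this);
        }
    }
}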