本文整理汇总了Java中org.apache.flume.Context.getInteger方法的典型用法代码示例。如果您正苦于以下问题:Java Context.getInteger方法的具体用法?Java Context.getInteger怎么用?Java Context.getInteger使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.flume.Context
的用法示例。
在下文中一共展示了Context.getInteger方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: buildBulkProcessor
import org.apache.flume.Context; //导入方法依赖的package包/类
/**
 * Reads the ElasticSearch bulk-processing settings from the Flume context
 * and builds the {@link BulkProcessor} bound to the given client.
 *
 * @param context Flume configuration supplying the ES bulk parameters
 * @param client  transport client the processor flushes bulk requests to
 * @return a fully configured BulkProcessor
 */
public BulkProcessor buildBulkProcessor(Context context, TransportClient client) {
  // Count-based flush threshold: number of actions buffered per bulk request.
  bulkActions = context.getInteger(ES_BULK_ACTIONS, DEFAULT_ES_BULK_ACTIONS);
  bulkProcessorName = context.getString(ES_BULK_PROCESSOR_NAME, DEFAULT_ES_BULK_PROCESSOR_NAME);
  // Size-based flush threshold; the unit is configured separately.
  bulkSize = Util.getByteSizeValue(context.getInteger(ES_BULK_SIZE),
      context.getString(ES_BULK_SIZE_UNIT));
  // Number of concurrent in-flight bulk requests allowed.
  concurrentRequest = context.getInteger(ES_CONCURRENT_REQUEST, DEFAULT_ES_CONCURRENT_REQUEST);
  // Time-based flush interval.
  flushIntervalTime = Util.getTimeValue(context.getString(ES_FLUSH_INTERVAL_TIME),
      DEFAULT_ES_FLUSH_INTERVAL_TIME);
  // Backoff policy used when ES rejects bulk requests.
  backoffPolicyTimeInterval = context.getString(ES_BACKOFF_POLICY_TIME_INTERVAL,
      DEFAULT_ES_BACKOFF_POLICY_START_DELAY);
  backoffPolicyRetries = context.getInteger(ES_BACKOFF_POLICY_RETRIES,
      DEFAULT_ES_BACKOFF_POLICY_RETRIES);
  return build(client);
}
示例2: configure
import org.apache.flume.Context; //导入方法依赖的package包/类
/**
 * Configures the Avro data-file writer: applies the sync interval and the
 * requested compression codec. An unknown codec name only disables
 * compression (with a warning) instead of failing configuration.
 */
@Override
public void configure(Context context) {
  final int syncBytes = context.getInteger(SYNC_INTERVAL_BYTES, DEFAULT_SYNC_INTERVAL_BYTES);
  final String codecName = context.getString(COMPRESSION_CODEC, DEFAULT_COMPRESSION_CODEC);

  writer = new ReflectDatumWriter<T>(getSchema());
  dataFileWriter = new DataFileWriter<T>(writer);
  dataFileWriter.setSyncInterval(syncBytes);

  try {
    dataFileWriter.setCodec(CodecFactory.fromString(codecName));
  } catch (AvroRuntimeException e) {
    // Best-effort: keep writing uncompressed rather than aborting configuration.
    logger.warn("Unable to instantiate avro codec with name (" + codecName
        + "). Compression disabled. Exception follows.", e);
  }
}
示例3: configure
import org.apache.flume.Context; //导入方法依赖的package包/类
/**
 * Configures the netcat source. Hostname and port are mandatory and
 * validated up front; ack-per-event, maximum line length and source
 * encoding fall back to their defaults when absent.
 */
@Override
public void configure(Context context) {
  final String hostKey = NetcatSourceConfigurationConstants.CONFIG_HOSTNAME;
  final String portKey = NetcatSourceConfigurationConstants.CONFIG_PORT;
  final String ackKey = NetcatSourceConfigurationConstants.CONFIG_ACKEVENT;

  // Fail fast when either mandatory parameter is missing.
  Configurables.ensureRequiredNonNull(context, hostKey, portKey);

  hostName = context.getString(hostKey);
  port = context.getInteger(portKey);
  ackEveryEvent = context.getBoolean(ackKey, true);
  maxLineLength = context.getInteger(
      NetcatSourceConfigurationConstants.CONFIG_MAX_LINE_LENGTH,
      NetcatSourceConfigurationConstants.DEFAULT_MAX_LINE_LENGTH);
  sourceEncoding = context.getString(
      NetcatSourceConfigurationConstants.CONFIG_SOURCE_ENCODING,
      NetcatSourceConfigurationConstants.DEFAULT_ENCODING);
}
示例4: configure
import org.apache.flume.Context; //导入方法依赖的package包/类
/**
 * Wires up the Twitter stream with OAuth credentials from the context,
 * registers this object as the status listener, and prepares the Avro
 * writer plus the batching limits.
 */
@Override
public void configure(Context context) {
  // OAuth credentials; no defaults, expected to be present in the agent config.
  final String oauthConsumerKey = context.getString("consumerKey");
  final String oauthConsumerSecret = context.getString("consumerSecret");
  final String oauthAccessToken = context.getString("accessToken");
  final String oauthAccessTokenSecret = context.getString("accessTokenSecret");

  twitterStream = new TwitterStreamFactory().getInstance();
  twitterStream.setOAuthConsumer(oauthConsumerKey, oauthConsumerSecret);
  twitterStream.setOAuthAccessToken(
      new AccessToken(oauthAccessToken, oauthAccessTokenSecret));
  twitterStream.addListener(this);

  avroSchema = createAvroSchema();
  dataFileWriter = new DataFileWriter<GenericRecord>(
      new GenericDatumWriter<GenericRecord>(avroSchema));

  // Batch limits keep the current field values as defaults when unconfigured.
  maxBatchSize = context.getInteger("maxBatchSize", maxBatchSize);
  maxBatchDurationMillis = context.getInteger("maxBatchDurationMillis", maxBatchDurationMillis);
}
示例5: configure
import org.apache.flume.Context; //导入方法依赖的package包/类
/**
 * Reads HDFS writer tuning options from the Flume context: an optional
 * minimum number of block replicas and the retry schedule used when
 * closing HDFS files.
 */
@Override
public void configure(Context context) {
// Optional; null means "not configured" and skips validation entirely.
configuredMinReplicas = context.getInteger("hdfs.minBlockReplicas");
if (configuredMinReplicas != null) {
Preconditions.checkArgument(configuredMinReplicas >= 0,
"hdfs.minBlockReplicas must be greater than or equal to 0");
}
// "hdfs.closeTries" counts total attempts, so retries = tries - 1.
numberOfCloseRetries = context.getInteger("hdfs.closeTries", 1) - 1;
// NOTE(review): the spacing below is only computed when there is MORE than
// one retry; with exactly one retry the field keeps its prior value.
// Confirm whether '> 1' (vs '> 0') is intentional.
if (numberOfCloseRetries > 1) {
try {
// Reuse the RPC call timeout as the total time budget for close retries.
timeBetweenCloseRetries = context.getLong("hdfs.callTimeout", 10000L);
} catch (NumberFormatException e) {
logger.warn("hdfs.callTimeout can not be parsed to a long: " +
context.getLong("hdfs.callTimeout"));
}
// Spread the budget across the retries, but never drop below one second.
timeBetweenCloseRetries = Math.max(timeBetweenCloseRetries / numberOfCloseRetries, 1000);
}
}
示例6: configure
import org.apache.flume.Context; //导入方法依赖的package包/类
/**
 * Reads HDFS writer options from the Flume context: an optional minimum
 * number of block replicas, plus the retry count and spacing used when
 * closing HDFS files.
 */
@Override
public void configure(Context context) {
  // Optional; null means "not configured" and skips the range check.
  configuredMinReplicas = context.getInteger("hdfs.minBlockReplicas");
  if (configuredMinReplicas != null) {
    Preconditions.checkArgument(configuredMinReplicas >= 0,
        "hdfs.minBlockReplicas must be greater than or equal to 0");
  }

  // "hdfs.closeTries" counts total attempts, so retries = tries - 1.
  numberOfCloseRetries = context.getInteger("hdfs.closeTries", 1) - 1;

  if (numberOfCloseRetries > 1) {
    try {
      // Reuse the RPC call timeout as the total budget for close retries.
      timeBetweenCloseRetries = context.getLong("hdfs.callTimeout", 10000L);
    } catch (NumberFormatException e) {
      logger.warn("hdfs.callTimeout can not be parsed to a long: "
          + context.getLong("hdfs.callTimeout"));
    }
    // Spread the budget across the retries, but never below one second.
    timeBetweenCloseRetries = Math.max(timeBetweenCloseRetries / numberOfCloseRetries, 1000);
  }
}
示例7: configure
import org.apache.flume.Context; //导入方法依赖的package包/类
/**
 * Loads file-tailing source settings from the Flume context into the shared
 * property map, and (re)configures log4j from the current conf directory.
 */
public void configure(Context context) {
batchUpperLimit = context.getInteger("batchUpperLimit",1);
// (Re)configure log4j from the properties file next to the agent config.
PropertyConfigurator.configure(PropertyUtil.getCurrentConfPath() + "log4j.properties");
log.info(PropertyUtil.getCurrentConfPath() + "log4j.properties");
prop.put("fileRootDir", context.getString("fileRootDir",""));
// Number of events to commit per batch.
prop.put("batchUpperLimit", context.getString("batchUpperLimit","1"));
// Root directory where snapshot (position) files are stored.
prop.put("backupFileDirPath", context.getString("backupFileDirPath", ""));
// // Directory/file layout: 0 = log4j-rolled file, 1 = non-log4j-rolled file,
// // 2 = date dir + log4j-rolled file, 3 = date dir + non-log4j-rolled file.
// prop.put("rollType", context.getString("rollType",""));
// Date directory.
// prop.put("dateDir", context.getString("dateDir",""));
// Prefix of the live file name (the file currently being written).
prop.put("filePrefix", context.getString("filePrefix",""));
// File name suffix.
prop.put("fileSuffix", context.getString("fileSuffix",""));
// File character set.
prop.put("charset", context.getString("charset", "UTF-8"));
// Read buffer size in bytes. (The original comment duplicated "charset" here.)
prop.put("bufferSize", context.getString("bufferSize", "4096"));
// Regex that matched file names must satisfy.
prop.put("regexFileName", context.getString("regexFileName", ".*"));
// Interval in milliseconds between cleanup passes.
prop.put("clearTimeInterval", context.getString("clearTimeInterval", "3600000"));
}
示例8: LineDeserializer
import org.apache.flume.Context; //导入方法依赖的package包/类
LineDeserializer(Context context, ResettableInputStream in) {
this.in = in;
this.outputCharset = Charset.forName(
context.getString(OUT_CHARSET_KEY, CHARSET_DFLT));
this.maxLineLength = context.getInteger(MAXLINE_KEY, MAXLINE_DFLT);
this.isOpen = true;
}
示例9: configure
import org.apache.flume.Context; //导入方法依赖的package包/类
/**
 * Configures the batch size and the logging cadence, validating that both
 * are strictly positive.
 */
@Override
public void configure(Context context) {
  batchSize = context.getInteger("batchSize", DFLT_BATCH_SIZE);
  logger.debug(this.getName() + " " + "batch size set to " + batchSize);
  Preconditions.checkArgument(batchSize > 0, "Batch size must be > 0");

  logEveryNEvents = context.getInteger("logEveryNEvents", DFLT_LOG_EVERY_N_EVENTS);
  logger.debug(this.getName() + " " + "log event N events set to " + logEveryNEvents);
  Preconditions.checkArgument(logEveryNEvents > 0, "logEveryNEvents must be > 0");
}
示例10: doConfigure
import org.apache.flume.Context; //导入方法依赖的package包/类
/**
 * Read parameters from context
 * <li>-maxTotalEvents = type long that defines the total number of Events to be sent
 * <li>-maxSuccessfulEvents = type long that defines the number of successful Events
 * <li>-size = type int that defines the number of bytes in each Event
 * <li>-batchSize = type int that defines the number of Events being sent in one batch
 */
@Override
protected void doConfigure(Context context) throws FlumeException {
  // -1 means "unlimited" for both event caps.
  maxTotalEvents = context.getLong("maxTotalEvents", -1L);
  maxSuccessfulEvents = context.getLong("maxSuccessfulEvents", -1L);

  // Events submitted per batch; defaults to one-at-a-time.
  batchSize = context.getInteger("batchSize", 1);

  // Pre-generate the payload of the configured size once, up front.
  final int eventSize = context.getInteger("size", 500);
  prepEventData(eventSize);
}
示例11: configure
import org.apache.flume.Context; //导入方法依赖的package包/类
/**
 * Configures the Ganglia poller: poll frequency (seconds, default 60), a
 * mandatory host list, and whether the remote side speaks Ganglia 3.
 *
 * @throws ConfigurationException if the hosts list is missing or empty
 */
@Override
public void configure(Context context) {
  this.pollFrequency = context.getInteger(this.CONF_POLL_FREQUENCY, 60);

  final String hostList = context.getString(this.CONF_HOSTS);
  if (hostList == null || hostList.isEmpty()) {
    throw new ConfigurationException("Hosts list cannot be empty.");
  }
  this.hosts = this.getHostsFromString(hostList);

  this.isGanglia3 = context.getBoolean(this.CONF_ISGANGLIA3, false);
}
示例12: configure
import org.apache.flume.Context; //导入方法依赖的package包/类
/**
 * Reads the Avro writer settings: sync interval (bytes), compression codec
 * name, and an optional static schema URL.
 */
@Override
public void configure(Context context) {
  // All three fall back to their compile-time defaults when unset.
  syncIntervalBytes = context.getInteger(SYNC_INTERVAL_BYTES, DEFAULT_SYNC_INTERVAL_BYTES);
  compressionCodec = context.getString(COMPRESSION_CODEC, DEFAULT_COMPRESSION_CODEC);
  staticSchemaURL = context.getString(STATIC_SCHEMA_URL, DEFAULT_STATIC_SCHEMA_URL);
}
示例13: BlobDeserializer
import org.apache.flume.Context; //导入方法依赖的package包/类
/**
 * Creates a BLOB deserializer over the given resettable stream.
 *
 * @param context Flume configuration supplying the maximum blob length
 * @param in      stream to read the blob from
 * @throws ConfigurationException if the configured maximum is not positive
 */
protected BlobDeserializer(Context context, ResettableInputStream in) {
  this.in = in;
  this.maxBlobLength = context.getInteger(MAX_BLOB_LENGTH_KEY, MAX_BLOB_LENGTH_DEFAULT);
  // Reject zero/negative limits up front — a non-positive cap would never read anything.
  if (this.maxBlobLength <= 0) {
    throw new ConfigurationException("Configuration parameter " + MAX_BLOB_LENGTH_KEY
        + " must be greater than zero: " + maxBlobLength);
  }
  this.isOpen = true;
}
示例14: configure
import org.apache.flume.Context; //导入方法依赖的package包/类
/**
 * Configures the regex-based HBase serializer: compiles the row-matching
 * pattern, resolves the payload charset, splits the configured column list,
 * and validates the optional row-key column.
 *
 * @throws IllegalArgumentException if the row-key index is out of range or
 *         does not point at the ROW_KEY_NAME column
 */
@Override
public void configure(Context context) {
  String regex = context.getString(REGEX_CONFIG, REGEX_DEFAULT);
  regexIgnoreCase = context.getBoolean(IGNORE_CASE_CONFIG,
      INGORE_CASE_DEFAULT);
  depositHeaders = context.getBoolean(DEPOSIT_HEADERS_CONFIG,
      DEPOSIT_HEADERS_DEFAULT);
  // Combine Pattern flags with bitwise OR, not arithmetic '+': '+' only
  // happened to work because the two flags are distinct bits.
  inputPattern = Pattern.compile(regex, Pattern.DOTALL
      | (regexIgnoreCase ? Pattern.CASE_INSENSITIVE : 0));
  charset = Charset.forName(context.getString(CHARSET_CONFIG,
      CHARSET_DEFAULT));

  // Column names arrive as a single comma-separated string.
  String colNameStr = context.getString(COL_NAME_CONFIG, COLUMN_NAME_DEFAULT);
  String[] columnNames = colNameStr.split(",");
  for (String s : columnNames) {
    colNames.add(s.getBytes(charset));
  }

  // Row key is optional; -1 (the default) means "no column is the row key".
  rowKeyIndex = context.getInteger(ROW_KEY_INDEX_CONFIG, -1);
  // If a row key is configured, it must index an existing column named ROW_KEY_NAME.
  if (rowKeyIndex >= 0) {
    if (rowKeyIndex >= columnNames.length) {
      throw new IllegalArgumentException(ROW_KEY_INDEX_CONFIG + " must be " +
          "less than num columns " + columnNames.length);
    }
    if (!ROW_KEY_NAME.equalsIgnoreCase(columnNames[rowKeyIndex])) {
      throw new IllegalArgumentException("Column at " + rowKeyIndex + " must be "
          + ROW_KEY_NAME + " and is " + columnNames[rowKeyIndex]);
    }
  }
}
示例15: configure
import org.apache.flume.Context; //导入方法依赖的package包/类
/**
 * Configures the JDBC sink: initializes the connection pool, resolves the
 * SQL dialect and batch size, and chooses a query generator. When no SQL
 * template is configured, table metadata is read from a live connection to
 * build a mapping-based generator instead.
 */
@SuppressWarnings( "deprecation" )
@Override
public void configure(Context context) {
// Initialize the DBCP connection pool from the Flume context.
ConnectionManager.instance.initialize( context );
this.batchsize = context.getInteger(CONF_BATCH_SIZE, DEFAULT_BATCH_SIZE);
// Dialect name is matched case-insensitively against the SQLDialect enum.
this.sqlDialect = SQLDialect.valueOf(context.getString(CONF_SQL_DIALECT).toUpperCase(Locale.ENGLISH));
final String sql = context.getString(CONF_SQL);
if (sql == null) {
Connection connection = null;
try {
// No SQL template: map the target table's metadata to build the query.
connection = ConnectionManager.instance.getConnection();
final DSLContext create = DSL.using(connection, sqlDialect);
this.queryGenerator = new MappingQueryGenerator(create, context.getString(CONF_TABLE));
} catch (SQLException ex) {
throw new JDBCSinkException(ex);
} finally {
// Always return the metadata connection to the pool.
JDBCUtils.safeClose( connection );
}
} else {
// Explicit SQL template takes precedence over table mapping.
this.queryGenerator = new TemplateQueryGenerator(sqlDialect, sql);
}
this.sinkCounter = new SinkCounter(this.getName());
}