This article collects typical usage examples of the Java method org.apache.flume.Context.getLong. If you are unsure what Context.getLong does, how to call it, or what it is typically used for, the curated method examples below may help; you can also read further about the enclosing class, org.apache.flume.Context.
Six code examples of Context.getLong are shown below, ordered by popularity by default.
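Before the examples, here is a minimal self-contained sketch of the method's contract (the keys below are made up for illustration and do not come from any of the projects listed): a Context stores every value as a String, getLong parses it with Long.parseLong, the one-argument overload returns null when the key is absent, the two-argument overload falls back to the supplied default, and a non-numeric value throws a NumberFormatException in either case.

import org.apache.flume.Context;

public class GetLongSketch {
  public static void main(String[] args) {
    Context context = new Context();
    context.put("timeout.ms", "30000");        // values are kept as Strings
    context.put("bad.value", "not-a-number");

    Long timeout = context.getLong("timeout.ms");        // 30000 (parsed from the String)
    Long retries = context.getLong("retry.count", 3L);    // 3 (key absent, default returned)
    Long missing = context.getLong("missing.key");        // null (key absent, no default)

    try {
      context.getLong("bad.value", 0L);                   // present but not numeric
    } catch (NumberFormatException e) {
      System.out.println("non-numeric values fail at parse time");
    }

    System.out.println(timeout + " " + retries + " " + missing);
  }
}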
Example 1: configure
import org.apache.flume.Context; // import the package/class the method depends on
@Override
public void configure(Context context) {
  configuredMinReplicas = context.getInteger("hdfs.minBlockReplicas");
  if (configuredMinReplicas != null) {
    Preconditions.checkArgument(configuredMinReplicas >= 0,
        "hdfs.minBlockReplicas must be greater than or equal to 0");
  }
  numberOfCloseRetries = context.getInteger("hdfs.closeTries", 1) - 1;
  if (numberOfCloseRetries > 1) {
    try {
      timeBetweenCloseRetries = context.getLong("hdfs.callTimeout", 10000L);
    } catch (NumberFormatException e) {
      // Log the raw String here: calling getLong again would rethrow the
      // same NumberFormatException before the warning could be written.
      logger.warn("hdfs.callTimeout can not be parsed to a long: " +
          context.getString("hdfs.callTimeout"));
    }
    timeBetweenCloseRetries = Math.max(timeBetweenCloseRetries / numberOfCloseRetries, 1000);
  }
}
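To make the close-retry arithmetic concrete, here is a small self-contained sketch (not taken from the project above; the class name is invented for illustration) that builds a Context programmatically with the same keys. With hdfs.closeTries = 5 the code computes 4 close retries, and the interval between them becomes max(10000 / 4, 1000) = 2500 ms.

import org.apache.flume.Context;

public class CloseRetrySketch {
  public static void main(String[] args) {
    Context context = new Context();
    context.put("hdfs.closeTries", "5");
    context.put("hdfs.callTimeout", "10000");

    // Same arithmetic as the configure() method above
    int numberOfCloseRetries = context.getInteger("hdfs.closeTries", 1) - 1;                  // 4
    long timeBetweenCloseRetries = context.getLong("hdfs.callTimeout", 10000L);               // 10000
    timeBetweenCloseRetries = Math.max(timeBetweenCloseRetries / numberOfCloseRetries, 1000); // 2500

    System.out.println(numberOfCloseRetries + " close retries, " + timeBetweenCloseRetries + " ms apart");
  }
}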
Example 2: configure
import org.apache.flume.Context; // import the package/class the method depends on
@Override
public void configure(Context context) {
  Long timeOut = context.getLong("maxTimeOut");
  if (timeOut != null) {
    maxTimeOut = timeOut;
  }
}
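The null check here is only needed because no default is passed. A hedged equivalent (assuming maxTimeOut is a long or Long field that already holds the desired fallback value) is to pass the current value as the default:

@Override
public void configure(Context context) {
  // Equivalent to the null check above: keep the current maxTimeOut
  // when the "maxTimeOut" property is not set.
  maxTimeOut = context.getLong("maxTimeOut", maxTimeOut);
}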
Example 3: doConfigure
import org.apache.flume.Context; // import the package/class the method depends on
/**
 * Read parameters from the context:
 * <li>maxTotalEvents - long, the total number of events to be sent
 * <li>maxSuccessfulEvents - long, the number of successful events
 * <li>size - int, the number of bytes in each event
 * <li>batchSize - int, the number of events sent in one batch
 */
@Override
protected void doConfigure(Context context) throws FlumeException {
  /* Limit on the total number of events. */
  maxTotalEvents = context.getLong("maxTotalEvents", -1L);
  /* Limit on the total number of successful events. */
  maxSuccessfulEvents = context.getLong("maxSuccessfulEvents", -1L);
  /* Maximum number of events in a batch submission. */
  batchSize = context.getInteger("batchSize", 1);
  /* Size of the events to be generated. */
  int size = context.getInteger("size", 500);
  prepEventData(size);
}
Example 4: configure
import org.apache.flume.Context; // import the package/class the method depends on
@Override
public synchronized void configure(Context context) {
  super.configure(context);
  backoffSleepIncrement =
      context.getLong(PollableSourceConstants.BACKOFF_SLEEP_INCREMENT,
          PollableSourceConstants.DEFAULT_BACKOFF_SLEEP_INCREMENT);
  maxBackoffSleep = context.getLong(PollableSourceConstants.MAX_BACKOFF_SLEEP,
      PollableSourceConstants.DEFAULT_MAX_BACKOFF_SLEEP);
}
Example 5: configure
import org.apache.flume.Context; // import the package/class the method depends on
@Override
public void configure(Context context) {
  this.context = context;

  String principal = context.getString(AUTH_PRINCIPAL);
  String keytab = context.getString(AUTH_KEYTAB);
  String effectiveUser = context.getString(AUTH_PROXY_USER);
  this.privilegedExecutor = FlumeAuthenticationUtil.getAuthenticator(
      principal, keytab).proxyAs(effectiveUser);

  // Get the dataset URI and name from the context
  String datasetURI = context.getString(CONFIG_KITE_DATASET_URI);
  if (datasetURI != null) {
    this.datasetUri = URI.create(datasetURI);
    this.datasetName = uriToName(datasetUri);
  } else {
    String repositoryURI = context.getString(CONFIG_KITE_REPO_URI);
    Preconditions.checkNotNull(repositoryURI, "No dataset configured. Setting "
        + CONFIG_KITE_DATASET_URI + " is required.");
    this.datasetName = context.getString(CONFIG_KITE_DATASET_NAME);
    Preconditions.checkNotNull(datasetName, "No dataset configured. Setting "
        + CONFIG_KITE_DATASET_URI + " is required.");
    String namespace = context.getString(CONFIG_KITE_DATASET_NAMESPACE,
        DEFAULT_NAMESPACE);
    this.datasetUri = new URIBuilder(repositoryURI, namespace, datasetName)
        .build();
  }
  this.setName(datasetUri.toString());

  if (context.getBoolean(CONFIG_SYNCABLE_SYNC_ON_BATCH,
      DEFAULT_SYNCABLE_SYNC_ON_BATCH)) {
    Preconditions.checkArgument(
        context.getBoolean(CONFIG_FLUSHABLE_COMMIT_ON_BATCH,
            DEFAULT_FLUSHABLE_COMMIT_ON_BATCH), "Configuration error: "
        + CONFIG_FLUSHABLE_COMMIT_ON_BATCH + " must be set to true when "
        + CONFIG_SYNCABLE_SYNC_ON_BATCH + " is set to true.");
  }

  // Create the configured failure policy
  this.failurePolicy = FAILURE_POLICY_FACTORY.newPolicy(context);

  // Other configuration
  this.batchSize = context.getLong(CONFIG_KITE_BATCH_SIZE,
      DEFAULT_BATCH_SIZE);
  this.rollIntervalSeconds = context.getInteger(CONFIG_KITE_ROLL_INTERVAL,
      DEFAULT_ROLL_INTERVAL);

  this.counter = new SinkCounter(datasetName);
}
Example 6: configure
import org.apache.flume.Context; // import the package/class the method depends on
@Override
public void configure(Context ctx) {
  // Can remove in the next release
  translateOldProps(ctx);

  topicStr = ctx.getString(TOPIC_CONFIG);
  if (topicStr == null || topicStr.isEmpty()) {
    topicStr = DEFAULT_TOPIC;
    logger.info("Topic was not specified. Using {} as the topic.", topicStr);
  }
  topic.set(topicStr);

  groupId = ctx.getString(KAFKA_CONSUMER_PREFIX + ConsumerConfig.GROUP_ID_CONFIG);
  if (groupId == null || groupId.isEmpty()) {
    groupId = DEFAULT_GROUP_ID;
    logger.info("Group ID was not specified. Using {} as the group id.", groupId);
  }

  String bootStrapServers = ctx.getString(BOOTSTRAP_SERVERS_CONFIG);
  if (bootStrapServers == null || bootStrapServers.isEmpty()) {
    throw new ConfigurationException("Bootstrap Servers must be specified");
  }

  setProducerProps(ctx, bootStrapServers);
  setConsumerProps(ctx, bootStrapServers);

  parseAsFlumeEvent = ctx.getBoolean(PARSE_AS_FLUME_EVENT, DEFAULT_PARSE_AS_FLUME_EVENT);
  pollTimeout = ctx.getLong(POLL_TIMEOUT, DEFAULT_POLL_TIMEOUT);
  staticPartitionId = ctx.getInteger(STATIC_PARTITION_CONF);
  partitionHeader = ctx.getString(PARTITION_HEADER_NAME);
  migrateZookeeperOffsets = ctx.getBoolean(MIGRATE_ZOOKEEPER_OFFSETS,
      DEFAULT_MIGRATE_ZOOKEEPER_OFFSETS);
  zookeeperConnect = ctx.getString(ZOOKEEPER_CONNECT_FLUME_KEY);

  if (logger.isDebugEnabled() && LogPrivacyUtil.allowLogPrintConfig()) {
    logger.debug("Kafka properties: {}", ctx);
  }

  if (counter == null) {
    counter = new KafkaChannelCounter(getName());
  }
}