This article collects typical usage examples of the Java method io.airlift.units.Duration.valueOf. If you are wondering what Duration.valueOf does, how to call it, or what real-world uses look like, the curated examples below may help. They also serve as usage examples for the enclosing class, io.airlift.units.Duration.
The following sections show 4 code examples of the Duration.valueOf method, sorted by popularity by default.
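Before the examples, here is a minimal standalone sketch (not taken from the examples below; the variable names are illustrative) of what Duration.valueOf does: it parses a human-readable duration string consisting of a number and a time-unit suffix into an io.airlift.units.Duration.

import io.airlift.units.Duration;
import java.util.concurrent.TimeUnit;

// Parse duration strings such as "30s" or "1m" into airlift Durations.
Duration timeout = Duration.valueOf("30s");
long millis = timeout.toMillis();                     // 30000
double seconds = timeout.getValue(TimeUnit.SECONDS);  // 30.0
// A string without a valid number and unit suffix makes valueOf throw an IllegalArgumentException.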
Example 1: initialize
import io.airlift.units.Duration; // import the package/class this method depends on
@Override
public void initialize(URI uri, Configuration conf)
        throws IOException
{
    requireNonNull(uri, "uri is null");
    requireNonNull(conf, "conf is null");

    super.initialize(uri, conf);
    setConf(conf);

    this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
    this.workingDirectory = new Path("/").makeQualified(this.uri, new Path("/"));

    HiveClientConfig defaults = new HiveClientConfig();
    this.stagingDirectory = new File(conf.get(S3_STAGING_DIRECTORY, defaults.getS3StagingDirectory().toString()));
    this.maxAttempts = conf.getInt(S3_MAX_CLIENT_RETRIES, defaults.getS3MaxClientRetries()) + 1;
    // Duration.valueOf parses duration strings taken from the Hadoop config or from the defaults' toString() form
    this.maxBackoffTime = Duration.valueOf(conf.get(S3_MAX_BACKOFF_TIME, defaults.getS3MaxBackoffTime().toString()));
    this.maxRetryTime = Duration.valueOf(conf.get(S3_MAX_RETRY_TIME, defaults.getS3MaxRetryTime().toString()));
    int maxErrorRetries = conf.getInt(S3_MAX_ERROR_RETRIES, defaults.getS3MaxErrorRetries());
    boolean sslEnabled = conf.getBoolean(S3_SSL_ENABLED, defaults.isS3SslEnabled());
    Duration connectTimeout = Duration.valueOf(conf.get(S3_CONNECT_TIMEOUT, defaults.getS3ConnectTimeout().toString()));
    Duration socketTimeout = Duration.valueOf(conf.get(S3_SOCKET_TIMEOUT, defaults.getS3SocketTimeout().toString()));
    int maxConnections = conf.getInt(S3_MAX_CONNECTIONS, defaults.getS3MaxConnections());
    long minFileSize = conf.getLong(S3_MULTIPART_MIN_FILE_SIZE, defaults.getS3MultipartMinFileSize().toBytes());
    long minPartSize = conf.getLong(S3_MULTIPART_MIN_PART_SIZE, defaults.getS3MultipartMinPartSize().toBytes());
    this.useInstanceCredentials = conf.getBoolean(S3_USE_INSTANCE_CREDENTIALS, defaults.isS3UseInstanceCredentials());
    this.pinS3ClientToCurrentRegion = conf.getBoolean(S3_PIN_CLIENT_TO_CURRENT_REGION, defaults.isPinS3ClientToCurrentRegion());
    this.sseEnabled = conf.getBoolean(S3_SSE_ENABLED, defaults.isS3SseEnabled());

    ClientConfiguration configuration = new ClientConfiguration()
            .withMaxErrorRetry(maxErrorRetries)
            .withProtocol(sslEnabled ? Protocol.HTTPS : Protocol.HTTP)
            .withConnectionTimeout(Ints.checkedCast(connectTimeout.toMillis()))
            .withSocketTimeout(Ints.checkedCast(socketTimeout.toMillis()))
            .withMaxConnections(maxConnections);

    this.s3 = createAmazonS3Client(uri, conf, configuration);

    transferConfig.setMultipartUploadThreshold(minFileSize);
    transferConfig.setMinimumUploadPartSize(minPartSize);
}
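The recurring idiom in Example 1 is to read a property from the Hadoop Configuration as a string, fall back to the default value's toString() form, and parse the result with Duration.valueOf. A minimal sketch of that idiom, with a hypothetical property key and default value:

import io.airlift.units.Duration;
import org.apache.hadoop.conf.Configuration;

Configuration conf = new Configuration();
// Use "10s" when the property is not set, then parse the resulting string into a Duration.
Duration maxBackoff = Duration.valueOf(conf.get("example.s3.max-backoff-time", "10s"));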
Example 2: setKafkaConnectTimeout
import io.airlift.units.Duration; // import the package/class this method depends on
@Config("kafka.connect-timeout")
public KafkaConnectorConfig setKafkaConnectTimeout(String kafkaConnectTimeout)
{
    this.kafkaConnectTimeout = Duration.valueOf(kafkaConnectTimeout);
    return this;
}
Example 3: setRedisConnectTimeout
import io.airlift.units.Duration; // import the package/class this method depends on
@Config("redis.connect-timeout")
public RedisConnectorConfig setRedisConnectTimeout(String redisConnectTimeout)
{
    this.redisConnectTimeout = Duration.valueOf(redisConnectTimeout);
    return this;
}
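Examples 2 and 3 use the same airlift @Config idiom: the setter receives the raw configuration string and parses it eagerly with Duration.valueOf, so a malformed value fails as soon as the configuration is bound. A minimal, self-contained config class in that style (the class name and property key are made up for illustration):

import io.airlift.configuration.Config;
import io.airlift.units.Duration;

public class ExampleConnectorConfig
{
    private Duration connectTimeout = Duration.valueOf("10s");

    public Duration getConnectTimeout()
    {
        return connectTimeout;
    }

    @Config("example.connect-timeout")
    public ExampleConnectorConfig setConnectTimeout(String connectTimeout)
    {
        // Parsing here means an invalid duration string is rejected at configuration-binding time.
        this.connectTimeout = Duration.valueOf(connectTimeout);
        return this;
    }
}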
Example 4: setup
import io.airlift.units.Duration; // import the package/class this method depends on
protected final void setup(String host, int port, String databaseName, String timeZoneId, String connectorName, int maxOutstandingSplits, int maxThreads)
{
    setupHive(connectorName, databaseName, timeZoneId);

    HiveClientConfig hiveClientConfig = new HiveClientConfig();
    hiveClientConfig.setTimeZone(timeZoneId);
    String proxy = System.getProperty("hive.metastore.thrift.client.socks-proxy");
    if (proxy != null) {
        hiveClientConfig.setMetastoreSocksProxy(HostAndPort.fromString(proxy));
    }

    HiveCluster hiveCluster = new TestingHiveCluster(hiveClientConfig, host, port);
    // Duration.valueOf turns the duration strings "1m" and "15s" into Durations for the metastore cache
    metastoreClient = new CachingHiveMetastore(hiveCluster, executor, Duration.valueOf("1m"), Duration.valueOf("15s"));
    HiveConnectorId connectorId = new HiveConnectorId(connectorName);
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationUpdater(hiveClientConfig));

    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hiveClientConfig);
    locationService = new HiveLocationService(metastoreClient, hdfsEnvironment);
    TypeManager typeManager = new TypeRegistry();
    JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
    metadata = new HiveMetadata(
            connectorId,
            metastoreClient,
            hdfsEnvironment,
            new HivePartitionManager(connectorId, hiveClientConfig),
            timeZone,
            10,
            true,
            true,
            true,
            true,
            true,
            typeManager,
            locationService,
            partitionUpdateCodec,
            newFixedThreadPool(2));
    splitManager = new HiveSplitManager(
            connectorId,
            metastoreClient,
            new NamenodeStats(),
            hdfsEnvironment,
            new HadoopDirectoryLister(),
            newDirectExecutorService(),
            maxOutstandingSplits,
            hiveClientConfig.getMinPartitionBatchSize(),
            hiveClientConfig.getMaxPartitionBatchSize(),
            hiveClientConfig.getMaxSplitSize(),
            hiveClientConfig.getMaxInitialSplitSize(),
            hiveClientConfig.getMaxInitialSplits(),
            false);
    pageSinkProvider = new HivePageSinkProvider(hdfsEnvironment, metastoreClient, new GroupByHashPageIndexerFactory(), typeManager, new HiveClientConfig(), locationService, partitionUpdateCodec);
    pageSourceProvider = new HivePageSourceProvider(hiveClientConfig, hdfsEnvironment, DEFAULT_HIVE_RECORD_CURSOR_PROVIDER, DEFAULT_HIVE_DATA_STREAM_FACTORIES, TYPE_MANAGER);
}
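In Example 4, Duration.valueOf("1m") and Duration.valueOf("15s") supply two cache-related durations (a TTL and a refresh interval) to CachingHiveMetastore. As a rough illustration only, and not the actual CachingHiveMetastore code, such Durations could drive a Guava cache like this:

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import io.airlift.units.Duration;
import java.util.concurrent.TimeUnit;

Duration cacheTtl = Duration.valueOf("1m");
Duration refreshInterval = Duration.valueOf("15s");
CacheLoader<String, String> loader = CacheLoader.from(key -> "loaded:" + key); // trivial loader for illustration
LoadingCache<String, String> cache = CacheBuilder.newBuilder()
        .expireAfterWrite(cacheTtl.toMillis(), TimeUnit.MILLISECONDS)
        .refreshAfterWrite(refreshInterval.toMillis(), TimeUnit.MILLISECONDS)
        .build(loader);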