Java Duration.valueOf Method Code Examples

This article collects typical usage examples of the Duration.valueOf method from io.airlift.units.Duration in Java. If you are unsure what Duration.valueOf does or how to call it, the curated examples below should help; they are also a good starting point for exploring the rest of the io.airlift.units.Duration API.


The following 4 code examples of Duration.valueOf are shown below, ordered by popularity.
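Before the examples, here is a minimal standalone sketch of the method itself (assuming the standard io.airlift.units API; the commented output values are illustrative): valueOf parses a string consisting of a number followed by a time-unit suffix into a Duration.

import io.airlift.units.Duration;
import java.util.concurrent.TimeUnit;

public class DurationValueOfDemo
{
    public static void main(String[] args)
    {
        // Parse "number + unit suffix" into a Duration.
        Duration timeout = Duration.valueOf("30s");

        System.out.println(timeout.toMillis());                 // 30000
        System.out.println(timeout.getValue(TimeUnit.SECONDS)); // 30.0
    }
}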

Example 1: initialize

import io.airlift.units.Duration; // import the package/class this method depends on
@Override
public void initialize(URI uri, Configuration conf)
        throws IOException
{
    requireNonNull(uri, "uri is null");
    requireNonNull(conf, "conf is null");
    super.initialize(uri, conf);
    setConf(conf);

    this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
    this.workingDirectory = new Path("/").makeQualified(this.uri, new Path("/"));

    // Read settings from the Hadoop Configuration, falling back to the
    // HiveClientConfig defaults; Duration defaults round-trip via toString()/valueOf().
    HiveClientConfig defaults = new HiveClientConfig();
    this.stagingDirectory = new File(conf.get(S3_STAGING_DIRECTORY, defaults.getS3StagingDirectory().toString()));
    this.maxAttempts = conf.getInt(S3_MAX_CLIENT_RETRIES, defaults.getS3MaxClientRetries()) + 1;
    this.maxBackoffTime = Duration.valueOf(conf.get(S3_MAX_BACKOFF_TIME, defaults.getS3MaxBackoffTime().toString()));
    this.maxRetryTime = Duration.valueOf(conf.get(S3_MAX_RETRY_TIME, defaults.getS3MaxRetryTime().toString()));
    int maxErrorRetries = conf.getInt(S3_MAX_ERROR_RETRIES, defaults.getS3MaxErrorRetries());
    boolean sslEnabled = conf.getBoolean(S3_SSL_ENABLED, defaults.isS3SslEnabled());
    Duration connectTimeout = Duration.valueOf(conf.get(S3_CONNECT_TIMEOUT, defaults.getS3ConnectTimeout().toString()));
    Duration socketTimeout = Duration.valueOf(conf.get(S3_SOCKET_TIMEOUT, defaults.getS3SocketTimeout().toString()));
    int maxConnections = conf.getInt(S3_MAX_CONNECTIONS, defaults.getS3MaxConnections());
    long minFileSize = conf.getLong(S3_MULTIPART_MIN_FILE_SIZE, defaults.getS3MultipartMinFileSize().toBytes());
    long minPartSize = conf.getLong(S3_MULTIPART_MIN_PART_SIZE, defaults.getS3MultipartMinPartSize().toBytes());
    this.useInstanceCredentials = conf.getBoolean(S3_USE_INSTANCE_CREDENTIALS, defaults.isS3UseInstanceCredentials());
    this.pinS3ClientToCurrentRegion = conf.getBoolean(S3_PIN_CLIENT_TO_CURRENT_REGION, defaults.isPinS3ClientToCurrentRegion());
    this.sseEnabled = conf.getBoolean(S3_SSE_ENABLED, defaults.isS3SseEnabled());

    // Convert the parsed durations into the millisecond timeouts the AWS SDK expects.
    ClientConfiguration configuration = new ClientConfiguration()
            .withMaxErrorRetry(maxErrorRetries)
            .withProtocol(sslEnabled ? Protocol.HTTPS : Protocol.HTTP)
            .withConnectionTimeout(Ints.checkedCast(connectTimeout.toMillis()))
            .withSocketTimeout(Ints.checkedCast(socketTimeout.toMillis()))
            .withMaxConnections(maxConnections);

    this.s3 = createAmazonS3Client(uri, conf, configuration);

    transferConfig.setMultipartUploadThreshold(minFileSize);
    transferConfig.setMinimumUploadPartSize(minPartSize);
}
 
Author: y-lan, Project: presto, Lines of code: 41, Source: PrestoS3FileSystem.java
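One detail worth noting in Example 1: every default Duration is serialized with toString() and parsed back with Duration.valueOf. A minimal sketch of that round trip (assuming standard airlift behavior, where toString() emits a form like "10.00s" that valueOf accepts):

import io.airlift.units.Duration;
import java.util.concurrent.TimeUnit;

static void roundTripSketch()
{
    Duration defaultBackoff = new Duration(10, TimeUnit.SECONDS);
    String asString = defaultBackoff.toString();   // e.g. "10.00s"
    Duration parsed = Duration.valueOf(asString);  // back to ten seconds
    // parsed represents the same length of time as defaultBackoff,
    // so a default survives conf.get(key, defaultValue) unchanged.
}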

Example 2: setKafkaConnectTimeout

import io.airlift.units.Duration; // import the package/class this method depends on
@Config("kafka.connect-timeout")
public KafkaConnectorConfig setKafkaConnectTimeout(String kafkaConnectTimeout)
{
    this.kafkaConnectTimeout = Duration.valueOf(kafkaConnectTimeout);
    return this;
}
 
Author: y-lan, Project: presto, Lines of code: 7, Source: KafkaConnectorConfig.java
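Examples 2 and 3 (the latter follows below) use the same airlift configuration idiom: a @Config-annotated setter receives the raw property string and converts it with Duration.valueOf. A minimal hypothetical config class in that style (the class name, property key, and default are invented for illustration):

import io.airlift.configuration.Config;
import io.airlift.units.Duration;

public class ExampleConnectorConfig
{
    // Default applied when the property is not set.
    private Duration connectTimeout = Duration.valueOf("10s");

    @Config("example.connect-timeout")
    public ExampleConnectorConfig setConnectTimeout(String connectTimeout)
    {
        this.connectTimeout = Duration.valueOf(connectTimeout);
        return this;
    }

    public Duration getConnectTimeout()
    {
        return connectTimeout;
    }
}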

Example 3: setRedisConnectTimeout

import io.airlift.units.Duration; // import the package/class this method depends on
@Config("redis.connect-timeout")
public RedisConnectorConfig setRedisConnectTimeout(String redisConnectTimeout)
{
    this.redisConnectTimeout = Duration.valueOf(redisConnectTimeout);
    return this;
}
 
Author: y-lan, Project: presto, Lines of code: 7, Source: RedisConnectorConfig.java

Example 4: setup

import io.airlift.units.Duration; // import the package/class this method depends on
protected final void setup(String host, int port, String databaseName, String timeZoneId, String connectorName, int maxOutstandingSplits, int maxThreads)
{
    setupHive(connectorName, databaseName, timeZoneId);

    HiveClientConfig hiveClientConfig = new HiveClientConfig();
    hiveClientConfig.setTimeZone(timeZoneId);
    String proxy = System.getProperty("hive.metastore.thrift.client.socks-proxy");
    if (proxy != null) {
        hiveClientConfig.setMetastoreSocksProxy(HostAndPort.fromString(proxy));
    }

    HiveCluster hiveCluster = new TestingHiveCluster(hiveClientConfig, host, port);
    // The metastore cache settings are given as duration strings ("1m", "15s").
    metastoreClient = new CachingHiveMetastore(hiveCluster, executor, Duration.valueOf("1m"), Duration.valueOf("15s"));
    HiveConnectorId connectorId = new HiveConnectorId(connectorName);
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationUpdater(hiveClientConfig));

    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hiveClientConfig);
    locationService = new HiveLocationService(metastoreClient, hdfsEnvironment);
    TypeManager typeManager = new TypeRegistry();
    JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
    metadata = new HiveMetadata(
            connectorId,
            metastoreClient,
            hdfsEnvironment,
            new HivePartitionManager(connectorId, hiveClientConfig),
            timeZone,
            10,
            true,
            true,
            true,
            true,
            true,
            typeManager,
            locationService,
            partitionUpdateCodec,
            newFixedThreadPool(2));
    splitManager = new HiveSplitManager(
            connectorId,
            metastoreClient,
            new NamenodeStats(),
            hdfsEnvironment,
            new HadoopDirectoryLister(),
            newDirectExecutorService(),
            maxOutstandingSplits,
            hiveClientConfig.getMinPartitionBatchSize(),
            hiveClientConfig.getMaxPartitionBatchSize(),
            hiveClientConfig.getMaxSplitSize(),
            hiveClientConfig.getMaxInitialSplitSize(),
            hiveClientConfig.getMaxInitialSplits(),
            false
    );
    pageSinkProvider = new HivePageSinkProvider(hdfsEnvironment, metastoreClient, new GroupByHashPageIndexerFactory(), typeManager, new HiveClientConfig(), locationService, partitionUpdateCodec);
    pageSourceProvider = new HivePageSourceProvider(hiveClientConfig, hdfsEnvironment, DEFAULT_HIVE_RECORD_CURSOR_PROVIDER, DEFAULT_HIVE_DATA_STREAM_FACTORIES, TYPE_MANAGER);
}
 
Author: y-lan, Project: presto, Lines of code: 55, Source: AbstractTestHiveClient.java
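Example 4 passes literal duration strings ("1m", "15s") for the metastore cache settings. For reference, a sketch of the unit suffixes Duration.valueOf accepts (assuming the standard airlift parser, which recognizes ns, us, ms, s, m, h, and d):

import io.airlift.units.Duration;

// A number followed by one of the supported unit suffixes.
Duration nanos   = Duration.valueOf("500ns"); // nanoseconds
Duration micros  = Duration.valueOf("500us"); // microseconds
Duration millis  = Duration.valueOf("250ms"); // milliseconds
Duration seconds = Duration.valueOf("15s");   // seconds
Duration minutes = Duration.valueOf("1m");    // minutes
Duration hours   = Duration.valueOf("2h");    // hours
Duration days    = Duration.valueOf("7d");    // days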


Note: the io.airlift.units.Duration.valueOf examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets are taken from community open-source projects, and copyright remains with the original authors; consult each project's License before distributing or using the code. Do not reproduce without permission.