

Java ThreadPool.schedule Method Code Examples

This article collects typical usage examples of the Java method org.elasticsearch.threadpool.ThreadPool.schedule. If you are wondering what ThreadPool.schedule does, how to call it, or what real invocations look like, the hand-picked examples below should help. You can also explore further usage examples of the enclosing class, org.elasticsearch.threadpool.ThreadPool.


A total of 9 code examples of the ThreadPool.schedule method are shown below, ordered by popularity by default.
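
Before the individual examples, here is a minimal self-contained sketch of the call pattern they all share: schedule a Runnable once, after a given delay, on a named executor, and keep the returned future so the task can be cancelled. It assumes the pre-7.0 signature schedule(TimeValue delay, String executor, Runnable command) used throughout this page (newer Elasticsearch versions reorder the parameters and return a Scheduler.ScheduledCancellable instead of a ScheduledFuture); the class and method names below are invented for illustration.

import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.concurrent.ScheduledFuture;

public class ScheduleSketch {
    private final ThreadPool threadPool;          // normally injected by the node or client
    private volatile ScheduledFuture<?> future;

    ScheduleSketch(ThreadPool threadPool) {
        this.threadPool = threadPool;
    }

    void start() {
        // run doWork() once, 5 seconds from now, on the GENERIC executor
        future = threadPool.schedule(TimeValue.timeValueSeconds(5), ThreadPool.Names.GENERIC, this::doWork);
    }

    void stop() {
        // the returned future lets the caller cancel the task before it fires
        if (future != null) {
            future.cancel(false);
        }
    }

    private void doWork() {
        // the deferred work goes here
    }
}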

Example 1: TransportClientNodesService

import org.elasticsearch.threadpool.ThreadPool; // import the package/class this method depends on
TransportClientNodesService(Settings settings, TransportService transportService,
                                   ThreadPool threadPool, TransportClient.HostFailureListener hostFailureListener) {
    super(settings);
    this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);
    this.transportService = transportService;
    this.threadPool = threadPool;
    this.minCompatibilityVersion = Version.CURRENT.minimumCompatibilityVersion();

    this.nodesSamplerInterval = TransportClient.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL.get(this.settings);
    this.pingTimeout = TransportClient.CLIENT_TRANSPORT_PING_TIMEOUT.get(this.settings).millis();
    this.ignoreClusterName = TransportClient.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME.get(this.settings);

    if (logger.isDebugEnabled()) {
        logger.debug("node_sampler_interval[{}]", nodesSamplerInterval);
    }

    if (TransportClient.CLIENT_TRANSPORT_SNIFF.get(this.settings)) {
        this.nodesSampler = new SniffNodesSampler();
    } else {
        this.nodesSampler = new SimpleNodeSampler();
    }
    this.hostFailureListener = hostFailureListener;
    this.nodesSamplerFuture = threadPool.schedule(nodesSamplerInterval, ThreadPool.Names.GENERIC, new ScheduledNodeSampler());
}
 
Developer: justor, Project: elasticsearch_my, Lines of code: 25, Source: TransportClientNodesService.java

Example 2: AckCountDownListener

import org.elasticsearch.threadpool.ThreadPool; // import the package/class this method depends on
AckCountDownListener(AckedClusterStateTaskListener ackedTaskListener, long clusterStateVersion, DiscoveryNodes nodes,
                     ThreadPool threadPool) {
    this.ackedTaskListener = ackedTaskListener;
    this.clusterStateVersion = clusterStateVersion;
    this.nodes = nodes;
    int countDown = 0;
    for (DiscoveryNode node : nodes) {
        if (ackedTaskListener.mustAck(node)) {
            countDown++;
        }
    }
    //we always wait for at least 1 node (the master)
    countDown = Math.max(1, countDown);
    logger.trace("expecting {} acknowledgements for cluster_state update (version: {})", countDown, clusterStateVersion);
    this.countDown = new CountDown(countDown);
    this.ackTimeoutCallback = threadPool.schedule(ackedTaskListener.ackTimeout(), ThreadPool.Names.GENERIC, () -> onTimeout());
}
 
Developer: justor, Project: elasticsearch_my, Lines of code: 18, Source: ClusterService.java
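
Examples 2 and 7 use schedule for a timeout rather than for periodic work: the task only matters if the expected acknowledgements have not all arrived within ackTimeout(), and the field that receives the return value, ackTimeoutCallback, is what makes that timeout cancellable. The hypothetical helper below (not the actual ClusterService code) shows the same idiom in isolation, again assuming the pre-7.0 API.

import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.concurrent.ScheduledFuture;

class AckTimeoutSketch {
    private final CountDown countDown;
    private final ScheduledFuture<?> timeout;

    AckTimeoutSketch(ThreadPool threadPool, int expectedAcks, TimeValue ackTimeout) {
        this.countDown = new CountDown(Math.max(1, expectedAcks));   // always wait for at least one ack
        this.timeout = threadPool.schedule(ackTimeout, ThreadPool.Names.GENERIC, this::onTimeout);
    }

    void onAck() {
        if (countDown.countDown()) {   // true once the last expected ack arrives
            timeout.cancel(false);     // the pending timeout task is no longer needed
            onAllAcked();
        }
    }

    void onTimeout()  { /* report which acknowledgements never arrived */ }
    void onAllAcked() { /* report success */ }
}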

Example 3: DelayedPrepareBulkRequest

import org.elasticsearch.threadpool.ThreadPool; // import the package/class this method depends on
DelayedPrepareBulkRequest(ThreadPool threadPool, float requestsPerSecond, TimeValue delay, AbstractRunnable command) {
    this.threadPool = threadPool;
    this.requestsPerSecond = requestsPerSecond;
    this.command = command;
    this.future = threadPool.schedule(delay, ThreadPool.Names.GENERIC, new AbstractRunnable() {
        @Override
        protected void doRun() throws Exception {
            throttledNanos.addAndGet(delay.nanos());
            command.run();
        }

        @Override
        public void onFailure(Exception e) {
            command.onFailure(e);
        }
    });
}
 
Developer: justor, Project: elasticsearch_my, Lines of code: 18, Source: WorkingBulkByScrollTask.java

Example 4: TranslogService

import org.elasticsearch.threadpool.ThreadPool; // import the package/class this method depends on
@Inject
public TranslogService(ShardId shardId, IndexSettingsService indexSettingsService, ThreadPool threadPool, IndexShard indexShard) {
    super(shardId, indexSettingsService.getSettings());
    this.threadPool = threadPool;
    this.indexSettingsService = indexSettingsService;
    this.indexShard = indexShard;
    this.flushThresholdOperations = indexSettings.getAsInt(INDEX_TRANSLOG_FLUSH_THRESHOLD_OPS, indexSettings.getAsInt("index.translog.flush_threshold", 50000));
    this.flushThresholdSize = indexSettings.getAsBytesSize(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(100, ByteSizeUnit.MB));
    this.flushThresholdPeriod = indexSettings.getAsTime(INDEX_TRANSLOG_FLUSH_THRESHOLD_PERIOD, TimeValue.timeValueMinutes(10));
    this.interval = indexSettings.getAsTime(INDEX_TRANSLOG_FLUSH_INTERVAL, timeValueMillis(5000));
    this.disableFlush = indexSettings.getAsBoolean(INDEX_TRANSLOG_DISABLE_FLUSH, false);
    logger.debug("interval [{}], flush_threshold_ops [{}], flush_threshold_size [{}], flush_threshold_period [{}]", interval, flushThresholdOperations, flushThresholdSize, flushThresholdPeriod);

    this.future = threadPool.schedule(interval, ThreadPool.Names.SAME, new TranslogBasedFlush());

    indexSettingsService.addListener(applySettings);
}
 
Developer: baidu, Project: Elasticsearch, Lines of code: 18, Source: TranslogService.java

Example 5: run

import org.elasticsearch.threadpool.ThreadPool; // import the package/class this method depends on
@Override
public void run() {
    // don't re-schedule if it's closed, we are done
    if (closed.get()) {
        return;
    }
    final ThreadPool threadPool = config.getThreadPool();
    if (syncNeeded()) {
        threadPool.executor(ThreadPool.Names.FLUSH).execute(new Runnable() {
            @Override
            public void run() {
                try {
                    sync();
                } catch (Exception e) {
                    logger.warn("failed to sync translog", e);
                }
                if (closed.get() == false) {
                    syncScheduler = threadPool.schedule(config.getSyncInterval(), ThreadPool.Names.SAME, Sync.this);
                }
            }
        });
    } else {
        syncScheduler = threadPool.schedule(config.getSyncInterval(), ThreadPool.Names.SAME, Sync.this);
    }
}
 
Developer: baidu, Project: Elasticsearch, Lines of code: 26, Source: Translog.java
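
ThreadPool.schedule runs its task exactly once, so recurring jobs such as the Sync runnable above (and the cache cleaners in examples 8 and 9 below) re-schedule themselves at the end of each run and stop once a closed flag is set. A minimal, hypothetical version of that idiom, with invented names and the same pre-7.0 API assumed as elsewhere on this page:

import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicBoolean;

class PeriodicTaskSketch implements Runnable {
    private final ThreadPool threadPool;
    private final TimeValue interval;
    private final AtomicBoolean closed = new AtomicBoolean(false);
    private volatile ScheduledFuture<?> next;

    PeriodicTaskSketch(ThreadPool threadPool, TimeValue interval) {
        this.threadPool = threadPool;
        this.interval = interval;
    }

    void start() {
        next = threadPool.schedule(interval, ThreadPool.Names.SAME, this);
    }

    @Override
    public void run() {
        if (closed.get()) {
            return;                    // stop the chain once closed
        }
        doWork();
        // schedule the next round only after this one has finished
        next = threadPool.schedule(interval, ThreadPool.Names.SAME, this);
    }

    void close() {
        closed.set(true);
        if (next != null) {
            next.cancel(false);        // also drop any run that is already queued
        }
    }

    void doWork() { /* the actual periodic work */ }
}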

Example 6: TransportClientNodesService

import org.elasticsearch.threadpool.ThreadPool; // import the package/class this method depends on
@Inject
public TransportClientNodesService(Settings settings, ClusterName clusterName, TransportService transportService,
                                   ThreadPool threadPool, Headers headers, Version version) {
    super(settings);
    this.clusterName = clusterName;
    this.transportService = transportService;
    this.threadPool = threadPool;
    this.minCompatibilityVersion = version.minimumCompatibilityVersion();
    this.headers = headers;

    this.nodesSamplerInterval = this.settings.getAsTime("client.transport.nodes_sampler_interval", timeValueSeconds(5));
    this.pingTimeout = this.settings.getAsTime("client.transport.ping_timeout", timeValueSeconds(5)).millis();
    this.ignoreClusterName = this.settings.getAsBoolean("client.transport.ignore_cluster_name", false);

    if (logger.isDebugEnabled()) {
        logger.debug("node_sampler_interval[" + nodesSamplerInterval + "]");
    }

    if (this.settings.getAsBoolean("client.transport.sniff", false)) {
        this.nodesSampler = new SniffNodesSampler();
    } else {
        this.nodesSampler = new SimpleNodeSampler();
    }
    this.nodesSamplerFuture = threadPool.schedule(nodesSamplerInterval, ThreadPool.Names.GENERIC, new ScheduledNodeSampler());
}
 
Developer: baidu, Project: Elasticsearch, Lines of code: 26, Source: TransportClientNodesService.java

Example 7: AckCountDownListener

import org.elasticsearch.threadpool.ThreadPool; // import the package/class this method depends on
AckCountDownListener(AckedClusterStateTaskListener ackedTaskListener, long clusterStateVersion, DiscoveryNodes nodes, ThreadPool threadPool) {
    this.ackedTaskListener = ackedTaskListener;
    this.clusterStateVersion = clusterStateVersion;
    this.nodes = nodes;
    int countDown = 0;
    for (DiscoveryNode node : nodes) {
        if (ackedTaskListener.mustAck(node)) {
            countDown++;
        }
    }
    //we always wait for at least 1 node (the master)
    countDown = Math.max(1, countDown);
    logger.trace("expecting {} acknowledgements for cluster_state update (version: {})", countDown, clusterStateVersion);
    this.countDown = new CountDown(countDown);
    this.ackTimeoutCallback = threadPool.schedule(ackedTaskListener.ackTimeout(), ThreadPool.Names.GENERIC, new Runnable() {
        @Override
        public void run() {
            onTimeout();
        }
    });
}
 
Developer: baidu, Project: Elasticsearch, Lines of code: 22, Source: InternalClusterService.java

Example 8: IndicesFieldDataCache

import org.elasticsearch.threadpool.ThreadPool; // import the package/class this method depends on
@Inject
public IndicesFieldDataCache(Settings settings, IndicesFieldDataCacheListener indicesFieldDataCacheListener, ThreadPool threadPool) {
    super(settings);
    this.threadPool = threadPool;
    this.indicesFieldDataCacheListener = indicesFieldDataCacheListener;
    final String size = settings.get(INDICES_FIELDDATA_CACHE_SIZE_KEY, "-1");
    final long sizeInBytes = settings.getAsMemory(INDICES_FIELDDATA_CACHE_SIZE_KEY, "-1").bytes();
    CacheBuilder<Key, Accountable> cacheBuilder = CacheBuilder.newBuilder()
            .removalListener(this);
    if (sizeInBytes > 0) {
        cacheBuilder.maximumWeight(sizeInBytes).weigher(new FieldDataWeigher());
    }
    // defaults to 4, but this is a busy map for all indices, increase it a bit by default
    final int concurrencyLevel =  settings.getAsInt(FIELDDATA_CACHE_CONCURRENCY_LEVEL, 16);
    if (concurrencyLevel <= 0) {
        throw new IllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel);
    }
    cacheBuilder.concurrencyLevel(concurrencyLevel);

    logger.debug("using size [{}] [{}]", size, new ByteSizeValue(sizeInBytes));
    cache = cacheBuilder.build();

    this.cleanInterval = settings.getAsTime(FIELDDATA_CLEAN_INTERVAL_SETTING, TimeValue.timeValueMinutes(1));
    // Start thread that will manage cleaning the field data cache periodically
    threadPool.schedule(this.cleanInterval, ThreadPool.Names.SAME,
            new FieldDataCacheCleaner(this.cache, this.logger, this.threadPool, this.cleanInterval));
}
 
Developer: baidu, Project: Elasticsearch, Lines of code: 28, Source: IndicesFieldDataCache.java

Example 9: IndicesRequestCache

import org.elasticsearch.threadpool.ThreadPool; // import the package/class this method depends on
@Inject
public IndicesRequestCache(Settings settings, ClusterService clusterService, ThreadPool threadPool) {
    super(settings);
    this.clusterService = clusterService;
    this.threadPool = threadPool;
    this.cleanInterval = settings.getAsTime(INDICES_CACHE_REQUEST_CLEAN_INTERVAL, TimeValue.timeValueSeconds(60));

    String size = settings.get(INDICES_CACHE_QUERY_SIZE);
    if (size == null) {
        size = settings.get(DEPRECATED_INDICES_CACHE_QUERY_SIZE);
        if (size != null) {
            deprecationLogger.deprecated("The [" + DEPRECATED_INDICES_CACHE_QUERY_SIZE
                    + "] settings is now deprecated, use [" + INDICES_CACHE_QUERY_SIZE + "] instead");
        }
    }
    if (size == null) {
        // this cache can be very small yet still be very effective
        size = "1%";
    }
    this.size = size;

    this.expire = settings.getAsTime(INDICES_CACHE_QUERY_EXPIRE, null);
    // defaults to 4, but this is a busy map for all indices, increase it a bit by default
    this.concurrencyLevel =  settings.getAsInt(INDICES_CACHE_QUERY_CONCURRENCY_LEVEL, 16);
    if (concurrencyLevel <= 0) {
        throw new IllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel);
    }
    buildCache();

    this.reaper = new Reaper();
    threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, reaper);
}
 
Developer: baidu, Project: Elasticsearch, Lines of code: 33, Source: IndicesRequestCache.java


Note: The org.elasticsearch.threadpool.ThreadPool.schedule examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with the original authors; please consult each project's license before redistributing or using the code. Do not republish without permission.