This article collects typical usage examples of the Java class org.elasticsearch.common.util.concurrent.EsExecutors. If you are wondering what EsExecutors is for, or how to use it, the examples selected here should help.
The EsExecutors class belongs to the org.elasticsearch.common.util.concurrent package. Fifteen code examples of the class follow, sorted by popularity by default.
Example 1: setUp
import org.elasticsearch.common.util.concurrent.EsExecutors; // import the dependent package/class
@Before
@Override
public void setUp() throws Exception {
    super.setUp();
    final ExecutorService directExecutor = EsExecutors.newDirectExecutorService();
    threadPool = new TestThreadPool(getTestName()) {
        @Override
        public ExecutorService executor(String name) {
            return directExecutor;
        }

        @Override
        public ScheduledFuture<?> schedule(TimeValue delay, String name, Runnable command) {
            command.run();
            return null;
        }
    };
    retries = 0;
    searchRequest = new SearchRequest();
    searchRequest.scroll(timeValueMinutes(5));
    // note: the trailing size(123) overrides the earlier size(10)
    searchRequest.source(new SearchSourceBuilder().size(10).version(true).sort("_doc").size(123));
    retriesAllowed = 0;
}
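The key trick in this test is EsExecutors.newDirectExecutorService(): it returns an ExecutorService that runs every task synchronously on the calling thread, so work "scheduled" through the overridden thread pool completes before any assertion runs. A minimal sketch of that behavior (not from the original test):

import org.elasticsearch.common.util.concurrent.EsExecutors;

import java.util.concurrent.ExecutorService;

public class DirectExecutorSketch {
    public static void main(String[] args) {
        ExecutorService direct = EsExecutors.newDirectExecutorService();
        // execute() returns only after the task has run, on this very thread
        direct.execute(() -> System.out.println("ran on " + Thread.currentThread().getName()));
        System.out.println("printed after the task, showing synchronous execution");
    }
}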
Example 2: BulkProcessor
import org.elasticsearch.common.util.concurrent.EsExecutors; // import the dependent package/class
BulkProcessor(Client client, BackoffPolicy backoffPolicy, Listener listener, @Nullable String name,
              int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) {
    this.bulkActions = bulkActions;
    this.bulkSize = bulkSize.getBytes();
    this.bulkRequest = new BulkRequest();
    this.bulkRequestHandler = (concurrentRequests == 0) ?
            BulkRequestHandler.syncHandler(client, backoffPolicy, listener) :
            BulkRequestHandler.asyncHandler(client, backoffPolicy, listener, concurrentRequests);
    if (flushInterval != null) {
        this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1,
                EsExecutors.daemonThreadFactory(client.settings(), (name != null ? "[" + name + "]" : "") + "bulk_processor"));
        this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
        this.scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
        this.scheduledFuture = this.scheduler.scheduleWithFixedDelay(new Flush(), flushInterval.millis(),
                flushInterval.millis(), TimeUnit.MILLISECONDS);
    } else {
        this.scheduler = null;
        this.scheduledFuture = null;
    }
}
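The flush scheduler above gets its threads from EsExecutors.daemonThreadFactory, which produces daemon threads (they will not keep the JVM alive) named from the node settings plus the given prefix. A minimal sketch, assuming a jar where this (Settings, String) overload exists as used above:

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsExecutors;

import java.util.concurrent.ThreadFactory;

public class DaemonFactorySketch {
    public static void main(String[] args) {
        ThreadFactory factory = EsExecutors.daemonThreadFactory(Settings.EMPTY, "bulk_processor");
        Thread t = factory.newThread(() -> {});
        // prints something like "elasticsearch[bulk_processor][T#1] daemon=true"
        System.out.println(t.getName() + " daemon=" + t.isDaemon());
    }
}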
Example 3: testIndexingThreadPoolsMaxSize
import org.elasticsearch.common.util.concurrent.EsExecutors; // import the dependent package/class
public void testIndexingThreadPoolsMaxSize() throws InterruptedException {
    final String name = randomFrom(Names.BULK, Names.INDEX);
    final int maxSize = 1 + EsExecutors.numberOfProcessors(Settings.EMPTY);
    final int tooBig = randomIntBetween(1 + maxSize, Integer.MAX_VALUE);

    // try to create a too big thread pool
    final IllegalArgumentException initial =
        expectThrows(
            IllegalArgumentException.class,
            () -> {
                ThreadPool tp = null;
                try {
                    tp = new ThreadPool(Settings.builder()
                            .put("node.name", "testIndexingThreadPoolsMaxSize")
                            .put("thread_pool." + name + ".size", tooBig)
                            .build());
                } finally {
                    terminateThreadPoolIfNeeded(tp);
                }
            });

    assertThat(
        initial,
        hasToString(containsString(
            "Failed to parse value [" + tooBig + "] for setting [thread_pool." + name + ".size] must be ")));
}
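The maxSize bound in this test comes from EsExecutors.numberOfProcessors(Settings), which defaults to the JVM's available processor count unless overridden via settings (as Example 7 does). A minimal sketch, not part of the test itself:

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsExecutors;

public class ProcessorCountSketch {
    public static void main(String[] args) {
        // defaults to the number of available processors (some versions cap this value)
        int processors = EsExecutors.numberOfProcessors(Settings.EMPTY);
        // a fixed pool of size processors + 1 still parses; anything larger is rejected,
        // which is exactly what the test above asserts
        System.out.println("processors = " + processors + ", largest accepted size = " + (1 + processors));
    }
}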
Example 4: createComponents
import org.elasticsearch.common.util.concurrent.EsExecutors; // import the dependent package/class
@Override
public Collection<Object> createComponents(
        Client client,
        ClusterService clusterService,
        ThreadPool threadPool,
        ResourceWatcherService resourceWatcherService,
        ScriptService scriptService,
        NamedXContentRegistry xContentRegistry) {
    final int concurrentConnects = UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING.get(settings);
    final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(settings, "[file_based_discovery_resolve]");
    fileBasedDiscoveryExecutorService = EsExecutors.newScaling(
            "file_based_discovery_resolve",
            0,
            concurrentConnects,
            60,
            TimeUnit.SECONDS,
            threadFactory,
            threadPool.getThreadContext());
    return Collections.emptyList();
}
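newScaling builds an executor that starts at min threads and grows up to max under load, retiring idle threads after the keep-alive; here min is 0, so the pool costs nothing while discovery is idle. A minimal sketch of the same seven-argument overload (older 1.x-era releases used a shorter five-argument form without the name and ThreadContext, as in Example 9 below):

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.ThreadContext;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

public class ScalingExecutorSketch {
    public static void main(String[] args) {
        ExecutorService pool = EsExecutors.newScaling(
                "resolve_sketch",                  // name, shows up in thread names
                0,                                 // min: shrink to zero threads when idle
                4,                                 // max: cap on concurrent workers
                60, TimeUnit.SECONDS,              // keep-alive before an idle thread dies
                EsExecutors.daemonThreadFactory(Settings.EMPTY, "resolve_sketch"),
                new ThreadContext(Settings.EMPTY));
        pool.execute(() -> System.out.println("resolved on " + Thread.currentThread().getName()));
        pool.shutdown();
    }
}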
Example 5: doStart
import org.elasticsearch.common.util.concurrent.EsExecutors; // import the dependent package/class
@Override
protected void doStart() {
    add(localNodeMasterListeners);
    add(taskManager);
    this.clusterState = ClusterState.builder(clusterState).blocks(initialBlocks).build();
    this.updateTasksExecutor = EsExecutors.newSinglePrioritizing(UPDATE_THREAD_NAME, daemonThreadFactory(settings, UPDATE_THREAD_NAME));
    this.reconnectToNodes = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, new ReconnectToNodes());
    Map<String, String> nodeAttributes = discoveryNodeService.buildAttributes();
    // note, we rely on the fact that it's a new id each time we start, see FD and "kill -9" handling
    final String nodeId = DiscoveryService.generateNodeId(settings);
    final TransportAddress publishAddress = transportService.boundAddress().publishAddress();
    DiscoveryNode localNode = new DiscoveryNode(settings.get("name"), nodeId, publishAddress, nodeAttributes, version);
    DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder().put(localNode).localNodeId(localNode.id());
    this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).blocks(initialBlocks).build();
    this.transportService.setLocalNode(localNode);
}
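newSinglePrioritizing creates a single-threaded executor whose queue is ordered by Priority rather than FIFO, which is how urgent cluster-state update tasks jump ahead of routine ones. A hedged sketch against the same 2.x-era two-argument overload used above; plain Runnables are treated as NORMAL priority:

import org.elasticsearch.common.Priority;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;
import org.elasticsearch.common.util.concurrent.PrioritizedRunnable;

public class PrioritizingSketch {
    public static void main(String[] args) {
        PrioritizedEsThreadPoolExecutor executor = EsExecutors.newSinglePrioritizing(
                "update_sketch", EsExecutors.daemonThreadFactory(Settings.EMPTY, "update_sketch"));
        executor.execute(new PrioritizedRunnable(Priority.URGENT) {
            @Override
            public void run() {
                System.out.println("URGENT tasks are dequeued before NORMAL ones");
            }
        });
        executor.shutdown();
    }
}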
Example 6: BulkProcessor
import org.elasticsearch.common.util.concurrent.EsExecutors; // import the dependent package/class
BulkProcessor(Client client, BackoffPolicy backoffPolicy, Listener listener, @Nullable String name,
              int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) {
    this.bulkActions = bulkActions;
    this.bulkSize = bulkSize.bytes();
    this.bulkRequest = new BulkRequest();
    this.bulkRequestHandler = (concurrentRequests == 0) ?
            BulkRequestHandler.syncHandler(client, backoffPolicy, listener) :
            BulkRequestHandler.asyncHandler(client, backoffPolicy, listener, concurrentRequests);
    if (flushInterval != null) {
        this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1,
                EsExecutors.daemonThreadFactory(client.settings(), (name != null ? "[" + name + "]" : "") + "bulk_processor"));
        this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
        this.scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
        this.scheduledFuture = this.scheduler.scheduleWithFixedDelay(new Flush(), flushInterval.millis(),
                flushInterval.millis(), TimeUnit.MILLISECONDS);
    } else {
        this.scheduler = null;
        this.scheduledFuture = null;
    }
}
Example 7: newNode
import org.elasticsearch.common.util.concurrent.EsExecutors; // import the dependent package/class
@Bean(destroyMethod = "close")
Node newNode() throws NodeValidationException {
    final Path tempDir = createTempDir().toPath();
    final Settings settings = Settings.builder()
            .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), new ClusterName("single-node-cluster" + System.nanoTime()))
            .put(Environment.PATH_HOME_SETTING.getKey(), tempDir)
            .put(Environment.PATH_REPO_SETTING.getKey(), tempDir.resolve("repo"))
            .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), createTempDir().getParent())
            .put("node.name", "single-node")
            .put("script.inline", "true")
            .put("script.stored", "true")
            .put(ScriptService.SCRIPT_MAX_COMPILATIONS_PER_MINUTE.getKey(), 1000)
            .put(EsExecutors.PROCESSORS_SETTING.getKey(), 1)
            .put(NetworkModule.HTTP_ENABLED.getKey(), false)
            .put("discovery.type", "zen")
            .put("transport.type", "local")
            .put(Node.NODE_DATA_SETTING.getKey(), true)
            .put(NODE_ID_SEED_SETTING.getKey(), System.nanoTime())
            .build();
    return new Node(settings).start(); // NOSONAR
}
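The EsExecutors.PROCESSORS_SETTING line is the relevant one for this article: setting processors to 1 makes every automatically sized thread pool behave as if the host had a single core, keeping this embedded test node lightweight. A minimal sketch of the effect:

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsExecutors;

public class ProcessorsOverrideSketch {
    public static void main(String[] args) {
        Settings oneCore = Settings.builder()
                .put(EsExecutors.PROCESSORS_SETTING.getKey(), 1)
                .build();
        // thread pools sized from these settings act as if the machine had one core
        System.out.println(EsExecutors.numberOfProcessors(oneCore)); // prints 1
    }
}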
Example 8: HttpBulkProcessor
import org.elasticsearch.common.util.concurrent.EsExecutors; // import the dependent package/class
HttpBulkProcessor(Settings settings, ElasticsearchClient client, Listener listener, @Nullable String name,
                  int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) {
    this.client = client;
    this.listener = listener;
    this.concurrentRequests = concurrentRequests;
    this.bulkActions = bulkActions;
    this.bulkSize = bulkSize.bytes();
    this.semaphore = new Semaphore(concurrentRequests);
    this.bulkRequest = new BulkRequest();
    if (flushInterval != null) {
        this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1,
                EsExecutors.daemonThreadFactory(settings, (name != null ? "[" + name + "]" : "") + "bulk_processor"));
        this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
        this.scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
        this.scheduledFuture = this.scheduler.scheduleWithFixedDelay(new Flush(), flushInterval.millis(),
                flushInterval.millis(), TimeUnit.MILLISECONDS);
    } else {
        this.scheduler = null;
        this.scheduledFuture = null;
    }
}
Example 9: GridFsRepository
import org.elasticsearch.common.util.concurrent.EsExecutors; // import the dependent package/class
/**
 * Constructs a new BlobStoreRepository
 * @param name repository name
 * @param repositorySettings repository settings
 * @param indexShardRepository an instance of IndexShardRepository
 * @param gridFsService an instance of GridFsService
 */
@Inject
protected GridFsRepository(RepositoryName name, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository, GridFsService gridFsService) {
    super(name.getName(), repositorySettings, indexShardRepository);
    String database = repositorySettings.settings().get("database", componentSettings.get("database"));
    if (database == null) {
        throw new RepositoryException(name.name(), "No database defined for GridFS repository");
    }
    String bucket = repositorySettings.settings().get("bucket", "fs");
    String host = repositorySettings.settings().get("gridfs_host", "localhost");
    int port = repositorySettings.settings().getAsInt("gridfs_port", 27017);
    String username = repositorySettings.settings().get("gridfs_username");
    String password = repositorySettings.settings().get("gridfs_password");
    int concurrentStreams = repositorySettings.settings().getAsInt("concurrent_streams", componentSettings.getAsInt("concurrent_streams", 5));
    ExecutorService concurrentStreamPool = EsExecutors.newScaling(1, concurrentStreams, 5, TimeUnit.SECONDS,
            EsExecutors.daemonThreadFactory(settings, "[gridfs_stream]"));
    blobStore = new GridFsBlobStore(settings, gridFsService.mongoDB(host, port, database, username, password), bucket, concurrentStreamPool);
    this.chunkSize = repositorySettings.settings().getAsBytesSize("chunk_size", componentSettings.getAsBytesSize("chunk_size", null));
    this.compress = repositorySettings.settings().getAsBoolean("compress", componentSettings.getAsBoolean("compress", true));
    this.basePath = BlobPath.cleanPath();
}
Example 10: start
import org.elasticsearch.common.util.concurrent.EsExecutors; // import the dependent package/class
@Override
public void start() {
    logger.info("Starting NVD XML stream ...");
    createIndex(indexName);
    this.bulkProcessor = createBulkProcessor();
    // create one thread per NVD XML stream to fetch
    threads = new ArrayList<Thread>(nvdEntries.size());
    int threadNumber = 0;
    for (NvdEntry nvdEntry : nvdEntries) {
        Thread thread = EsExecutors.daemonThreadFactory(
                settings.globalSettings(), "nvd_fetcher" + threadNumber++).newThread(
                new NvdParser(nvdEntry));
        threads.add(thread);
        thread.start();
    }
}
Example 11: start
import org.elasticsearch.common.util.concurrent.EsExecutors; // import the dependent package/class
@Override
public void start() {
    feeder.setClient(client);
    feeder.setRiverState(new RiverState(riverName.getName(), riverName.getType())
            .setEnabled(true)
            .setStarted(new Date()));
    PutRiverStateRequestBuilder putRiverStateRequestBuilder = new PutRiverStateRequestBuilder(client.admin().cluster())
            .setRiverName(riverName.getName())
            .setRiverType(riverName.getType())
            .setRiverState(feeder.getRiverState());
    PutRiverStateResponse putRiverStateResponse = putRiverStateRequestBuilder.execute().actionGet();
    logger.info("saving river state at start: {} -> {}", feeder.getRiverState(), putRiverStateResponse.isAcknowledged());
    this.riverThread = EsExecutors.daemonThreadFactory(settings.globalSettings(),
            "river(" + riverName().getType() + "/" + riverName().getName() + ")")
            .newThread(feeder);
    feeder.schedule(riverThread);
}
Example 12: start
import org.elasticsearch.common.util.concurrent.EsExecutors; // import the dependent package/class
@Override
public void start() {
    try {
        logger.info("creating kafka river: zookeeper = {}, broker = {}, broker_port = {}, message_handler_factory_class = {}",
                riverConfig.zookeeper, riverConfig.brokerHost, riverConfig.brokerPort, riverConfig.factoryClass);
        logger.info("part = {}, topic = {}", riverConfig.partition, riverConfig.topic);
        logger.info("bulkSize = {}, bulkTimeout = {}", riverConfig.bulkSize, riverConfig.bulkTimeout);
        KafkaRiverWorker worker = new KafkaRiverWorker(this.createMessageHandler(client, riverConfig), riverConfig, client);
        thread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "kafka_river").newThread(worker);
        thread.start();
    } catch (Exception e) {
        logger.error("Unexpected Error occurred", e);
        throw new RuntimeException(e);
    }
}
Example 13: HttpBulkProcessor
import org.elasticsearch.common.util.concurrent.EsExecutors; // import the dependent package/class
HttpBulkProcessor(Client client, Listener listener, @Nullable String name, int concurrentRequests,
                  int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) {
    this.client = client;
    this.listener = listener;
    this.concurrentRequests = concurrentRequests;
    this.bulkActions = bulkActions;
    this.bulkSize = bulkSize.bytes();
    this.semaphore = new Semaphore(concurrentRequests);
    this.bulkRequest = new BulkRequest();
    if (flushInterval != null) {
        this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1,
                EsExecutors.daemonThreadFactory(client.settings(), (name != null ? "[" + name + "]" : "") + "bulk_processor"));
        this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
        this.scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
        this.scheduledFuture = this.scheduler.scheduleWithFixedDelay(new Flush(), flushInterval.millis(),
                flushInterval.millis(), TimeUnit.MILLISECONDS);
    } else {
        this.scheduler = null;
        this.scheduledFuture = null;
    }
}
Example 14: BulkProcessor
import org.elasticsearch.common.util.concurrent.EsExecutors; // import the dependent package/class
BulkProcessor(Client client, Listener listener, @Nullable String name, int concurrentRequests,
              int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) {
    this.bulkActions = bulkActions;
    this.bulkSize = bulkSize.bytes();
    this.bulkRequest = new BulkRequest();
    this.bulkRequestHandler = concurrentRequests == 0 ?
            new SyncBulkRequestHandler(client, listener) :
            new AsyncBulkRequestHandler(client, listener, concurrentRequests);
    if (flushInterval != null) {
        this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1,
                EsExecutors.daemonThreadFactory(client.settings(), (name != null ? "[" + name + "]" : "") + "bulk_processor"));
        this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
        this.scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
        this.scheduledFuture = this.scheduler.scheduleWithFixedDelay(new Flush(), flushInterval.millis(),
                flushInterval.millis(), TimeUnit.MILLISECONDS);
    } else {
        this.scheduler = null;
        this.scheduledFuture = null;
    }
}
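All of the BulkProcessor variants above pair this constructor with a close path that stops the flush scheduler. A hedged sketch of that counterpart (a hypothetical helper, not taken from any of the classes above): cancel the periodic flush, then shut the scheduler down; the two shutdown policies set in the constructors ensure no stale periodic tasks fire afterwards.

import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;

final class FlushSchedulerSketch {
    static void close(ScheduledThreadPoolExecutor scheduler, ScheduledFuture<?> scheduledFuture) {
        if (scheduledFuture != null) {
            scheduledFuture.cancel(false); // let an in-flight flush finish
        }
        if (scheduler != null) {
            scheduler.shutdown(); // daemon threads; pending periodic tasks are dropped
        }
    }
}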
Example 15: start
import org.elasticsearch.common.util.concurrent.EsExecutors; // import the dependent package/class
@Override
public void start() {
    logger.info("starting Jolokia river: hosts [{}], uri [{}], strategy [{}], index [{}]/[{}]",
            riverSetting.getHosts(), riverSetting.getUrl(), strategy, indexName, typeName);
    try {
        riverFlow.startDate(new Date());
        riverMouth.createIndexIfNotExists(indexSettings, typeMapping);
    } catch (Exception e) {
        if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) {
            riverFlow.startDate(null);
            // that's fine, continue
        } else if (ExceptionsHelper.unwrapCause(e) instanceof ClusterBlockException) {
            // not recovered yet; start indexing and hope we recover by the first bulk
        } else {
            logger.warn("failed to create index [{}], disabling Jolokia river...", e, indexName);
            return;
        }
    }
    thread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "Jolokia river [" + riverName.name() + '/' + strategy + ']')
            .newThread(riverFlow);
    thread.start();
}