This article collects typical usage examples of the Java method org.elasticsearch.threadpool.ThreadPool.executor. If you are wondering what ThreadPool.executor does, how to call it, or what real-world usages look like, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.elasticsearch.threadpool.ThreadPool.
The following shows 10 code examples of the ThreadPool.executor method, sorted by popularity by default.
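Before the examples, here is a minimal self-contained sketch of the basic pattern, assuming an Elasticsearch version where new ThreadPool(Settings) and ThreadPool.terminate(...) are available, as used in Example 7 below: ThreadPool.executor(String) looks up the executor registered under one of the built-in pool names in ThreadPool.Names, and work is submitted to it like to any plain java.util.concurrent.Executor (the return type is Executor in older releases and ExecutorService in newer ones). The class and node name below are illustrative only.

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;

public class ThreadPoolExecutorDemo {
    public static void main(String[] args) {
        // node.name is required when building a standalone ThreadPool (compare Example 7 below).
        Settings settings = Settings.builder().put("node.name", "demo").build();
        ThreadPool threadPool = new ThreadPool(settings);
        try {
            // Look up the executor backing the GENERIC pool and hand it a task.
            Executor generic = threadPool.executor(ThreadPool.Names.GENERIC);
            generic.execute(() -> System.out.println("running on " + Thread.currentThread().getName()));
        } finally {
            // Shut the pool down, waiting briefly for in-flight tasks to finish.
            ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
        }
    }
}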
Example 1: ReferenceInfos
import org.elasticsearch.threadpool.ThreadPool; // import the package/class that the method depends on
@Inject
public ReferenceInfos(Map<String, SchemaInfo> builtInSchemas,
                      ClusterService clusterService,
                      IndexNameExpressionResolver indexNameExpressionResolver,
                      ThreadPool threadPool,
                      Provider<TransportPutIndexTemplateAction> transportPutIndexTemplateAction,
                      Functions functions) {
    this.clusterService = clusterService;
    this.indexNameExpressionResolver = indexNameExpressionResolver;
    this.transportPutIndexTemplateAction = transportPutIndexTemplateAction;
    this.functions = functions;
    this.executorService = (ExecutorService) threadPool.executor(ThreadPool.Names.SUGGEST);
    schemas.putAll(builtInSchemas);
    schemas.remove(BlobSchemaInfo.NAME); // remove blob schema name
    this.builtInSchemas = builtInSchemas;
    clusterService.add(this);
}
Example 2: IndexWarmer
import org.elasticsearch.threadpool.ThreadPool; // import the package/class that the method depends on
IndexWarmer(Settings settings, ThreadPool threadPool, Listener... listeners) {
    super(settings);
    ArrayList<Listener> list = new ArrayList<>();
    final Executor executor = threadPool.executor(ThreadPool.Names.WARMER);
    list.add(new FieldDataWarmer(executor));
    for (Listener listener : listeners) {
        list.add(listener);
    }
    this.listeners = Collections.unmodifiableList(list);
}
Example 3: getLuceneIndexCollector
import org.elasticsearch.threadpool.ThreadPool; // import the package/class that the method depends on
private CrateCollector getLuceneIndexCollector(ThreadPool threadPool,
                                               final RoutedCollectPhase collectPhase,
                                               final ShardProjectorChain projectorChain,
                                               final JobCollectContext jobCollectContext) throws Exception {
    SharedShardContext sharedShardContext = jobCollectContext.sharedShardContexts().getOrCreateContext(shardId);
    Engine.Searcher searcher = sharedShardContext.searcher();
    IndexShard indexShard = sharedShardContext.indexShard();
    CrateSearchContext searchContext = null;
    try {
        searchContext = searchContextFactory.createContext(
            sharedShardContext.readerId(),
            indexShard,
            searcher,
            collectPhase.whereClause()
        );
        jobCollectContext.addSearchContext(sharedShardContext.readerId(), searchContext);
        CollectInputSymbolVisitor.Context docCtx = docInputSymbolVisitor.extractImplementations(collectPhase);
        Executor executor = threadPool.executor(ThreadPool.Names.SEARCH);
        return new CrateDocCollector(
            searchContext,
            executor,
            Symbols.containsColumn(collectPhase.toCollect(), DocSysColumns.SCORE),
            jobCollectContext.queryPhaseRamAccountingContext(),
            projectorChain.newShardDownstreamProjector(projectorVisitor),
            docCtx.topLevelInputs(),
            docCtx.docLevelExpressions()
        );
    } catch (Throwable t) {
        if (searchContext == null) {
            searcher.close();
        } else {
            searchContext.close(); // will close searcher too
        }
        throw t;
    }
}
Example 4: DocSchemaInfo
import org.elasticsearch.threadpool.ThreadPool; // import the package/class that the method depends on
/**
 * DocSchemaInfo constructor for the default (doc) schema.
 */
@Inject
public DocSchemaInfo(ClusterService clusterService,
                     ThreadPool threadPool,
                     Provider<TransportPutIndexTemplateAction> transportPutIndexTemplateAction,
                     IndexNameExpressionResolver indexNameExpressionResolver,
                     Functions functions) {
    this(Schemas.DEFAULT_SCHEMA_NAME,
        clusterService,
        indexNameExpressionResolver,
        (ExecutorService) threadPool.executor(ThreadPool.Names.SUGGEST),
        transportPutIndexTemplateAction, functions,
        Predicates.and(Predicates.notNull(), DOC_SCHEMA_TABLES_FILTER),
        AS_IS_FUNCTION);
}
Example 5: BitSetProducerWarmer
import org.elasticsearch.threadpool.ThreadPool; // import the package/class that the method depends on
BitSetProducerWarmer(ThreadPool threadPool) {
    this.executor = threadPool.executor(ThreadPool.Names.WARMER);
}
Example 6: forkConnect
import org.elasticsearch.threadpool.ThreadPool; // import the package/class that the method depends on
private void forkConnect(final Collection<ActionListener<Void>> toNotify) {
    ThreadPool threadPool = transportService.getThreadPool();
    ExecutorService executor = threadPool.executor(ThreadPool.Names.MANAGEMENT);
    executor.submit(new AbstractRunnable() {
        @Override
        public void onFailure(Exception e) {
            synchronized (queue) {
                running.release();
            }
            try {
                ActionListener.onFailure(toNotify, e);
            } finally {
                maybeConnect();
            }
        }

        @Override
        protected void doRun() throws Exception {
            ActionListener<Void> listener = ActionListener.wrap((x) -> {
                synchronized (queue) {
                    running.release();
                }
                try {
                    ActionListener.onResponse(toNotify, x);
                } finally {
                    maybeConnect();
                }
            }, (e) -> {
                synchronized (queue) {
                    running.release();
                }
                try {
                    ActionListener.onFailure(toNotify, e);
                } finally {
                    maybeConnect();
                }
            });
            collectRemoteNodes(seedNodes.iterator(), transportService, listener);
        }
    });
}
Example 7: testBatchExecute
import org.elasticsearch.threadpool.ThreadPool; // import the package/class that the method depends on
public void testBatchExecute() throws Exception {
    // Initialize dependencies of TransportMultiSearchAction
    Settings settings = Settings.builder()
        .put("node.name", TransportMultiSearchActionTests.class.getSimpleName())
        .build();
    ActionFilters actionFilters = mock(ActionFilters.class);
    when(actionFilters.filters()).thenReturn(new ActionFilter[0]);
    ThreadPool threadPool = new ThreadPool(settings);
    TaskManager taskManager = mock(TaskManager.class);
    TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
        boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null) {
        @Override
        public TaskManager getTaskManager() {
            return taskManager;
        }
    };
    ClusterService clusterService = mock(ClusterService.class);
    when(clusterService.state()).thenReturn(ClusterState.builder(new ClusterName("test")).build());
    IndexNameExpressionResolver resolver = new IndexNameExpressionResolver(Settings.EMPTY);
    // Keep track of the number of concurrent searches started by the multi search api,
    // and if there are more searches than is allowed create an error and remember that.
    int maxAllowedConcurrentSearches = scaledRandomIntBetween(1, 16);
    AtomicInteger counter = new AtomicInteger();
    AtomicReference<AssertionError> errorHolder = new AtomicReference<>();
    // randomize whether or not requests are executed asynchronously
    final List<String> threadPoolNames = Arrays.asList(ThreadPool.Names.GENERIC, ThreadPool.Names.SAME);
    Randomness.shuffle(threadPoolNames);
    final ExecutorService commonExecutor = threadPool.executor(threadPoolNames.get(0));
    final ExecutorService rarelyExecutor = threadPool.executor(threadPoolNames.get(1));
    final Set<SearchRequest> requests = Collections.newSetFromMap(Collections.synchronizedMap(new IdentityHashMap<>()));
    TransportAction<SearchRequest, SearchResponse> searchAction = new TransportAction<SearchRequest, SearchResponse>
        (Settings.EMPTY, "action", threadPool, actionFilters, resolver, taskManager) {
        @Override
        protected void doExecute(SearchRequest request, ActionListener<SearchResponse> listener) {
            requests.add(request);
            int currentConcurrentSearches = counter.incrementAndGet();
            if (currentConcurrentSearches > maxAllowedConcurrentSearches) {
                errorHolder.set(new AssertionError("Current concurrent search [" + currentConcurrentSearches +
                    "] is higher than is allowed [" + maxAllowedConcurrentSearches + "]"));
            }
            final ExecutorService executorService = rarely() ? rarelyExecutor : commonExecutor;
            executorService.execute(() -> {
                counter.decrementAndGet();
                listener.onResponse(new SearchResponse());
            });
        }
    };
    TransportMultiSearchAction action =
        new TransportMultiSearchAction(threadPool, actionFilters, transportService, clusterService, searchAction, resolver, 10);
    // Execute the multi search api and fail if we find an error after executing:
    try {
        /*
         * Allow for a large number of search requests in a single batch as previous implementations could stack overflow if the
         * number of requests in a single batch was large
         */
        int numSearchRequests = scaledRandomIntBetween(1, 8192);
        MultiSearchRequest multiSearchRequest = new MultiSearchRequest();
        multiSearchRequest.maxConcurrentSearchRequests(maxAllowedConcurrentSearches);
        for (int i = 0; i < numSearchRequests; i++) {
            multiSearchRequest.add(new SearchRequest());
        }
        MultiSearchResponse response = action.execute(multiSearchRequest).actionGet();
        assertThat(response.getResponses().length, equalTo(numSearchRequests));
        assertThat(requests.size(), equalTo(numSearchRequests));
        assertThat(errorHolder.get(), nullValue());
    } finally {
        assertTrue(ESTestCase.terminate(threadPool));
    }
}
Example 8: NodeThreadPoolExpression
import org.elasticsearch.threadpool.ThreadPool; // import the package/class that the method depends on
public NodeThreadPoolExpression(ThreadPool threadPool, String name) {
    this.threadPoolExecutor = (ThreadPoolExecutor) threadPool.executor(name);
    this.name = new BytesRef(name);
    addChildImplementations();
}
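The cast in Example 8 works because most named pools are backed by a java.util.concurrent.ThreadPoolExecutor, which exposes standard JDK metrics; the SAME pool, which runs tasks on the calling thread, is a notable exception, so the cast assumes a queued pool. A minimal sketch of the kind of follow-up usage this enables (the helper class and its names are hypothetical, not part of the example):

import java.util.concurrent.ThreadPoolExecutor;

final class ThreadPoolStats {
    // Summarise a pool previously obtained via (ThreadPoolExecutor) threadPool.executor(name).
    static String summarize(String name, ThreadPoolExecutor executor) {
        return name
            + " active=" + executor.getActiveCount()            // threads currently running tasks
            + " queue=" + executor.getQueue().size()            // tasks waiting to run
            + " completed=" + executor.getCompletedTaskCount()  // tasks finished so far
            + " largestPool=" + executor.getLargestPoolSize();  // peak thread count
    }
}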
Example 9: internalWarm
import org.elasticsearch.threadpool.ThreadPool; // import the package/class that the method depends on
public TerminationHandle internalWarm(final IndexShard indexShard, final IndexMetaData indexMetaData, final IndicesWarmer.WarmerContext warmerContext, ThreadPool threadPool, final boolean top) {
    IndexWarmersMetaData custom = indexMetaData.custom(IndexWarmersMetaData.TYPE);
    if (custom == null) {
        return TerminationHandle.NO_WAIT;
    }
    final Executor executor = threadPool.executor(executor());
    final CountDownLatch latch = new CountDownLatch(custom.entries().size());
    for (final IndexWarmersMetaData.Entry entry : custom.entries()) {
        executor.execute(new Runnable() {
            @Override
            public void run() {
                SearchContext context = null;
                try {
                    long now = System.nanoTime();
                    ShardSearchRequest request = new ShardSearchLocalRequest(indexShard.shardId(), indexMetaData.getNumberOfShards(),
                        SearchType.QUERY_THEN_FETCH, entry.source(), entry.types(), entry.requestCache());
                    context = createContext(request, warmerContext.searcher());
                    // if we use sort, we need to do query to sort on it and load relevant field data
                    // if not, we might as well set size=0 (and cache if needed)
                    if (context.sort() == null) {
                        context.size(0);
                    }
                    boolean canCache = indicesQueryCache.canCache(request, context);
                    // early terminate when we can cache, since we can only do proper caching on top level searcher
                    // also, if we can't cache, and its top, we don't need to execute it, since we already did when its not top
                    if (canCache != top) {
                        return;
                    }
                    loadOrExecuteQueryPhase(request, context, queryPhase);
                    long took = System.nanoTime() - now;
                    if (indexShard.warmerService().logger().isTraceEnabled()) {
                        indexShard.warmerService().logger().trace("warmed [{}], took [{}]", entry.name(), TimeValue.timeValueNanos(took));
                    }
                } catch (Throwable t) {
                    indexShard.warmerService().logger().warn("warmer [{}] failed", t, entry.name());
                } finally {
                    try {
                        if (context != null) {
                            freeContext(context.id());
                            cleanContext(context);
                        }
                    } finally {
                        latch.countDown();
                    }
                }
            }
        });
    }
    return new TerminationHandle() {
        @Override
        public void awaitTermination() throws InterruptedException {
            latch.await();
        }
    };
}
Example 10: warmNewReaders
import org.elasticsearch.threadpool.ThreadPool; // import the package/class that the method depends on
@Override
public IndicesWarmer.TerminationHandle warmNewReaders(final IndexShard indexShard, IndexMetaData indexMetaData, IndicesWarmer.WarmerContext context, ThreadPool threadPool) {
    if (index.getName().equals(context.shardId().getIndex()) == false) {
        // this is from a different index
        return TerminationHandle.NO_WAIT;
    }
    if (!loadRandomAccessFiltersEagerly) {
        return TerminationHandle.NO_WAIT;
    }
    boolean hasNested = false;
    final Set<Query> warmUp = new HashSet<>();
    final MapperService mapperService = indexShard.mapperService();
    for (DocumentMapper docMapper : mapperService.docMappers(false)) {
        if (docMapper.hasNestedObjects()) {
            hasNested = true;
            for (ObjectMapper objectMapper : docMapper.objectMappers().values()) {
                if (objectMapper.nested().isNested()) {
                    ObjectMapper parentObjectMapper = docMapper.findParentObjectMapper(objectMapper);
                    if (parentObjectMapper != null && parentObjectMapper.nested().isNested()) {
                        warmUp.add(parentObjectMapper.nestedTypeFilter());
                    }
                }
            }
        }
    }
    if (hasNested) {
        warmUp.add(Queries.newNonNestedFilter());
    }
    final Executor executor = threadPool.executor(executor());
    final CountDownLatch latch = new CountDownLatch(context.searcher().reader().leaves().size() * warmUp.size());
    for (final LeafReaderContext ctx : context.searcher().reader().leaves()) {
        for (final Query filterToWarm : warmUp) {
            executor.execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        final long start = System.nanoTime();
                        getAndLoadIfNotPresent(filterToWarm, ctx);
                        if (indexShard.warmerService().logger().isTraceEnabled()) {
                            indexShard.warmerService().logger().trace("warmed bitset for [{}], took [{}]", filterToWarm, TimeValue.timeValueNanos(System.nanoTime() - start));
                        }
                    } catch (Throwable t) {
                        indexShard.warmerService().logger().warn("failed to load bitset for [{}]", t, filterToWarm);
                    } finally {
                        latch.countDown();
                    }
                }
            });
        }
    }
    return new TerminationHandle() {
        @Override
        public void awaitTermination() throws InterruptedException {
            latch.await();
        }
    };
}