This article collects typical usage examples of the Java class org.elasticsearch.common.util.concurrent.AbstractRunnable. If you are wondering what AbstractRunnable is for, how to use it, or what real code that uses it looks like, the curated examples below should help.
The AbstractRunnable class belongs to the org.elasticsearch.common.util.concurrent package. Fifteen code examples of the class are shown below, sorted by popularity by default.
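Before the examples, here is a minimal sketch of the shape that all of them share: an anonymous AbstractRunnable overrides doRun() for the actual work and onFailure(...) for error handling, and may additionally override onRejection(...), onAfter() or isForceExecution(). The sketch assumes the Elasticsearch 5.x-style callbacks that take Exception (older versions, as in Examples 7 to 9, take Throwable); the plain-JDK executor, class name, and printed messages are illustrative placeholders, not part of Elasticsearch.
import org.elasticsearch.common.util.concurrent.AbstractRunnable;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class AbstractRunnableSketch {
    public static void main(String[] args) {
        // Illustrative plain-JDK executor; the examples below submit to Elasticsearch's ThreadPool instead.
        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.execute(new AbstractRunnable() {
            @Override
            protected void doRun() throws Exception {
                // The actual work; an exception thrown here is routed to onFailure.
                System.out.println("doing the work");
            }

            @Override
            public void onFailure(Exception e) {
                // Called when doRun() throws.
                System.err.println("task failed: " + e);
            }

            @Override
            public void onRejection(Exception e) {
                // Invoked by Elasticsearch's thread pools when they reject the task
                // (Example 6 triggers it directly); a plain JDK executor will not call it.
                System.err.println("task rejected: " + e);
            }

            @Override
            public void onAfter() {
                // Runs after the task finishes, whether it succeeded or failed (see Example 12).
                executor.shutdown();
            }
        });
    }
}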
Example 1: loopRead
import org.elasticsearch.common.util.concurrent.AbstractRunnable; // import the required package/class
public void loopRead(Executor executor) {
    executor.execute(new AbstractRunnable() {
        @Override
        public void onFailure(Exception e) {
            if (isOpen.get()) {
                try {
                    onException(MockChannel.this, e);
                } catch (Exception ex) {
                    logger.warn("failed on handling exception", ex);
                    IOUtils.closeWhileHandlingException(MockChannel.this); // pure paranoia
                }
            }
        }
        @Override
        protected void doRun() throws Exception {
            StreamInput input = new InputStreamStreamInput(new BufferedInputStream(activeChannel.getInputStream()));
            // There is a (slim) chance that we get interrupted right after a loop iteration, so check explicitly
            while (isOpen.get() && !Thread.currentThread().isInterrupted()) {
                cancellableThreads.executeIO(() -> readMessage(MockChannel.this, input));
            }
        }
    });
}
Example 2: executeIndexRequest
import org.elasticsearch.common.util.concurrent.AbstractRunnable; // import the required package/class
public void executeIndexRequest(IndexRequest request, Consumer<Exception> failureHandler, Consumer<Boolean> completionHandler) {
    Pipeline pipeline = getPipeline(request.getPipeline());
    threadPool.executor(ThreadPool.Names.INDEX).execute(new AbstractRunnable() {
        @Override
        public void onFailure(Exception e) {
            failureHandler.accept(e);
        }
        @Override
        protected void doRun() throws Exception {
            innerExecute(request, pipeline);
            completionHandler.accept(true);
        }
    });
}
Example 3: schedule
import org.elasticsearch.common.util.concurrent.AbstractRunnable; // import the required package/class
public void schedule() {
    future = threadPool.schedule(nextDelay, ThreadPool.Names.SAME, new AbstractRunnable() {
        @Override
        protected void doRun() throws Exception {
            if (cancelScheduling.get()) {
                return;
            }
            clusterService.submitStateUpdateTask(CLUSTER_UPDATE_TASK_SOURCE, DelayedRerouteTask.this);
        }
        @Override
        public void onFailure(Exception e) {
            logger.warn("failed to submit schedule/execute reroute post unassigned shard", e);
            removeIfSameTask(DelayedRerouteTask.this);
        }
    });
}
Example 4: onResponse
import org.elasticsearch.common.util.concurrent.AbstractRunnable; // import the required package/class
@Override
public void onResponse(final Response response) {
    threadPool.executor(executor).execute(new AbstractRunnable() {
        @Override
        public boolean isForceExecution() {
            return forceExecution;
        }
        @Override
        protected void doRun() throws Exception {
            listener.onResponse(response);
        }
        @Override
        public void onFailure(Exception e) {
            listener.onFailure(e);
        }
    });
}
Example 5: DelayedPrepareBulkRequest
import org.elasticsearch.common.util.concurrent.AbstractRunnable; // import the required package/class
DelayedPrepareBulkRequest(ThreadPool threadPool, float requestsPerSecond, TimeValue delay, AbstractRunnable command) {
    this.threadPool = threadPool;
    this.requestsPerSecond = requestsPerSecond;
    this.command = command;
    this.future = threadPool.schedule(delay, ThreadPool.Names.GENERIC, new AbstractRunnable() {
        @Override
        protected void doRun() throws Exception {
            throttledNanos.addAndGet(delay.nanos());
            command.run();
        }
        @Override
        public void onFailure(Exception e) {
            command.onFailure(e);
        }
    });
}
Example 6: testThreadPoolRejectionsAbortRequest
import org.elasticsearch.common.util.concurrent.AbstractRunnable; // import the required package/class
/**
 * Mimics a ThreadPool rejecting execution of the task.
 */
public void testThreadPoolRejectionsAbortRequest() throws Exception {
    testTask.rethrottle(1);
    setupClient(new TestThreadPool(getTestName()) {
        @Override
        public ScheduledFuture<?> schedule(TimeValue delay, String name, Runnable command) {
            // While we're here we can check that the sleep made it through
            assertThat(delay.nanos(), greaterThan(0L));
            assertThat(delay.seconds(), lessThanOrEqualTo(10L));
            ((AbstractRunnable) command).onRejection(new EsRejectedExecutionException("test"));
            return null;
        }
    });
    ScrollableHitSource.Response response = new ScrollableHitSource.Response(false, emptyList(), 0, emptyList(), null);
    simulateScrollResponse(new DummyAsyncBulkByScrollAction(), timeValueNanos(System.nanoTime()), 10, response);
    ExecutionException e = expectThrows(ExecutionException.class, () -> listener.get());
    assertThat(e.getMessage(), equalTo("EsRejectedExecutionException[test]"));
    assertThat(client.scrollsCleared, contains(scrollId));
    // When the task is rejected we don't increment the throttled timer
    assertEquals(timeValueMillis(0), testTask.getStatus().getThrottled());
}
Example 7: handleMergeException
import org.elasticsearch.common.util.concurrent.AbstractRunnable; // import the required package/class
@Override
protected void handleMergeException(final Directory dir, final Throwable exc) {
    logger.error("failed to merge", exc);
    if (config().getMergeSchedulerConfig().isNotifyOnMergeFailure()) {
        engineConfig.getThreadPool().generic().execute(new AbstractRunnable() {
            @Override
            public void onFailure(Throwable t) {
                logger.debug("merge failure action rejected", t);
            }
            @Override
            protected void doRun() throws Exception {
                MergePolicy.MergeException e = new MergePolicy.MergeException(exc, dir);
                failEngine("merge failed", e);
            }
        });
    }
}
Example 8: nodeIndexDeleted
import org.elasticsearch.common.util.concurrent.AbstractRunnable; // import the required package/class
public void nodeIndexDeleted(final ClusterState clusterState, final String index, final Settings indexSettings, final String nodeId) {
    final DiscoveryNodes nodes = clusterState.nodes();
    transportService.sendRequest(clusterState.nodes().masterNode(),
            INDEX_DELETED_ACTION_NAME, new NodeIndexDeletedMessage(index, nodeId), EmptyTransportResponseHandler.INSTANCE_SAME);
    if (nodes.localNode().isDataNode() == false) {
        logger.trace("[{}] not acking store deletion (not a data node)", index);
        return;
    }
    threadPool.generic().execute(new AbstractRunnable() {
        @Override
        public void onFailure(Throwable t) {
            logger.warn("[{}] failed to ack index store deleted for index", t, index);
        }
        @Override
        protected void doRun() throws Exception {
            lockIndexAndAck(index, nodes, nodeId, clusterState, indexSettings);
        }
    });
}
Example 9: doExecute
import org.elasticsearch.common.util.concurrent.AbstractRunnable; // import the required package/class
@Override
protected void doExecute(final RenderSearchTemplateRequest request, final ActionListener<RenderSearchTemplateResponse> listener) {
    threadPool.generic().execute(new AbstractRunnable() {
        @Override
        public void onFailure(Throwable t) {
            listener.onFailure(t);
        }
        @Override
        protected void doRun() throws Exception {
            ExecutableScript executable = scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, request, Collections.<String, String>emptyMap());
            BytesReference processedTemplate = (BytesReference) executable.run();
            RenderSearchTemplateResponse response = new RenderSearchTemplateResponse();
            response.source(processedTemplate);
            listener.onResponse(response);
        }
    });
}
Example 10: writeIndexingBufferAsync
import org.elasticsearch.common.util.concurrent.AbstractRunnable; // import the required package/class
/** ask this shard to refresh, in the background, to free up heap */
protected void writeIndexingBufferAsync(IndexShard shard) {
    threadPool.executor(ThreadPool.Names.REFRESH).execute(new AbstractRunnable() {
        @Override
        public void doRun() {
            shard.writeIndexingBuffer();
        }
        @Override
        public void onFailure(Exception e) {
            logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to write indexing buffer for shard [{}]; ignoring", shard.shardId()), e);
        }
    });
}
Example 11: afterMerge
import org.elasticsearch.common.util.concurrent.AbstractRunnable; // import the required package/class
@Override
public synchronized void afterMerge(OnGoingMerge merge) {
    int maxNumMerges = mergeScheduler.getMaxMergeCount();
    if (numMergesInFlight.decrementAndGet() < maxNumMerges) {
        if (isThrottling.getAndSet(false)) {
            logger.info("stop throttling indexing: numMergesInFlight={}, maxNumMerges={}", numMergesInFlight, maxNumMerges);
            deactivateThrottling();
        }
    }
    if (indexWriter.hasPendingMerges() == false && System.nanoTime() - lastWriteNanos >= engineConfig.getFlushMergesAfter().nanos()) {
        // NEVER do this on a merge thread since we acquire some locks blocking here and if we concurrently rollback the writer
        // we deadlock on engine#close for instance.
        engineConfig.getThreadPool().executor(ThreadPool.Names.FLUSH).execute(new AbstractRunnable() {
            @Override
            public void onFailure(Exception e) {
                if (isClosed.get() == false) {
                    logger.warn("failed to flush after merge has finished");
                }
            }
            @Override
            protected void doRun() throws Exception {
                // if we have no pending merges and we are supposed to flush once merges have finished
                // we try to renew a sync commit which is the case when we are having a big merge after we
                // are inactive. If that didn't work we go and do a real flush which is ok since it only doesn't work
                // if we either have records in the translog or if we don't have a sync ID at all...
                // maybe even more important, we flush after all merges finish and we are inactive indexing-wise to
                // free up transient disk usage of the (presumably biggish) segments that were just merged
                if (tryRenewSyncCommit() == false) {
                    flush();
                }
            }
        });
    }
}
Example 12: maybeFlush
import org.elasticsearch.common.util.concurrent.AbstractRunnable; // import the required package/class
/**
 * Schedules a flush if needed but won't schedule more than one flush concurrently. The flush will be executed on the
 * Flush thread-pool asynchronously.
 *
 * @return <code>true</code> if a new flush is scheduled otherwise <code>false</code>.
 */
public boolean maybeFlush() {
    if (shouldFlush()) {
        if (asyncFlushRunning.compareAndSet(false, true)) { // we can't use a lock here since we "release" in a different thread
            if (shouldFlush() == false) {
                // we have to check again since otherwise there is a race when a thread passes
                // the first shouldFlush() check next to another thread which flushes fast enough
                // to finish before the current thread could flip the asyncFlushRunning flag.
                // in that situation we have an extra unexpected flush.
                asyncFlushRunning.compareAndSet(true, false);
            } else {
                logger.debug("submitting async flush request");
                final AbstractRunnable abstractRunnable = new AbstractRunnable() {
                    @Override
                    public void onFailure(Exception e) {
                        if (state != IndexShardState.CLOSED) {
                            logger.warn("failed to flush index", e);
                        }
                    }
                    @Override
                    protected void doRun() throws Exception {
                        flush(new FlushRequest());
                    }
                    @Override
                    public void onAfter() {
                        asyncFlushRunning.compareAndSet(true, false);
                        maybeFlush(); // fire a flush up again if we have filled up the limits such that shouldFlush() returns true
                    }
                };
                threadPool.executor(ThreadPool.Names.FLUSH).execute(abstractRunnable);
                return true;
            }
        }
    }
    return false;
}
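The guard used above (claim an AtomicBoolean with compareAndSet before scheduling, release it in onAfter(), then re-check) is a general single-flight pattern: at most one asynchronous flush is in flight at a time. A stripped-down sketch of the same idea with plain JDK types only; the class and method names here are illustrative, not taken from the Elasticsearch source above:
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;

// Illustrative single-flight scheduler: at most one background flush runs at a time.
class SingleFlightFlusher {
    private final AtomicBoolean running = new AtomicBoolean(false);
    private final ExecutorService executor = Executors.newSingleThreadExecutor();

    /** Returns true if a new flush was scheduled, false if one is already pending. */
    boolean maybeFlush(Runnable flush) {
        if (running.compareAndSet(false, true)) {
            executor.execute(() -> {
                try {
                    flush.run();
                } finally {
                    // Mirrors onAfter(): always release the flag, even if the flush failed,
                    // so the next caller can schedule a flush again.
                    running.compareAndSet(true, false);
                }
            });
            return true;
        }
        return false;
    }
}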
Example 13: performStateRecovery
import org.elasticsearch.common.util.concurrent.AbstractRunnable; // import the required package/class
private void performStateRecovery(boolean enforceRecoverAfterTime, String reason) {
    final Gateway.GatewayStateRecoveredListener recoveryListener = new GatewayRecoveryListener();
    if (enforceRecoverAfterTime && recoverAfterTime != null) {
        if (scheduledRecovery.compareAndSet(false, true)) {
            logger.info("delaying initial state recovery for [{}]. {}", recoverAfterTime, reason);
            threadPool.schedule(recoverAfterTime, ThreadPool.Names.GENERIC, () -> {
                if (recovered.compareAndSet(false, true)) {
                    logger.info("recover_after_time [{}] elapsed. performing state recovery...", recoverAfterTime);
                    gateway.performStateRecovery(recoveryListener);
                }
            });
        }
    } else {
        if (recovered.compareAndSet(false, true)) {
            threadPool.generic().execute(new AbstractRunnable() {
                @Override
                public void onFailure(Exception e) {
                    logger.warn("Recovery failed", e);
                    // we reset `recovered` in the listener don't reset it here otherwise there might be a race
                    // that resets it to false while a new recover is already running?
                    recoveryListener.onFailure("state recovery failed: " + e.getMessage());
                }
                @Override
                protected void doRun() throws Exception {
                    gateway.performStateRecovery(recoveryListener);
                }
            });
        }
    }
}
Example 14: getRunningTaskFromNode
import org.elasticsearch.common.util.concurrent.AbstractRunnable; // import the required package/class
/**
 * Executed on the node that should be running the task to find and return the running task. Falls back to
 * {@link #getFinishedTaskFromIndex(Task, GetTaskRequest, ActionListener)} if the task isn't still running.
 */
void getRunningTaskFromNode(Task thisTask, GetTaskRequest request, ActionListener<GetTaskResponse> listener) {
    Task runningTask = taskManager.getTask(request.getTaskId().getId());
    if (runningTask == null) {
        // Task isn't running, go look in the task index
        getFinishedTaskFromIndex(thisTask, request, listener);
    } else {
        if (request.getWaitForCompletion()) {
            // Shift to the generic thread pool and let it wait for the task to complete so we don't block any important threads.
            threadPool.generic().execute(new AbstractRunnable() {
                @Override
                protected void doRun() throws Exception {
                    taskManager.waitForTaskCompletion(runningTask, waitForCompletionTimeout(request.getTimeout()));
                    waitedForCompletion(thisTask, request, runningTask.taskInfo(clusterService.localNode().getId(), true), listener);
                }
                @Override
                public void onFailure(Exception e) {
                    listener.onFailure(e);
                }
            });
        } else {
            TaskInfo info = runningTask.taskInfo(clusterService.localNode().getId(), true);
            listener.onResponse(new GetTaskResponse(new TaskResult(false, info)));
        }
    }
}
Example 15: delayPrepareBulkRequest
import org.elasticsearch.common.util.concurrent.AbstractRunnable; // import the required package/class
/**
 * Schedule prepareBulkRequestRunnable to run after some delay. This is where throttling plugs into reindexing so the request can be
 * rescheduled over and over again.
 */
void delayPrepareBulkRequest(ThreadPool threadPool, TimeValue lastBatchStartTime, int lastBatchSize,
        AbstractRunnable prepareBulkRequestRunnable) {
    // Synchronize so we are less likely to schedule the same request twice.
    synchronized (delayedPrepareBulkRequestReference) {
        TimeValue delay = throttleWaitTime(lastBatchStartTime, lastBatchSize);
        delayedPrepareBulkRequestReference.set(new DelayedPrepareBulkRequest(threadPool, getRequestsPerSecond(),
                delay, new RunOnce(prepareBulkRequestRunnable)));
    }
}