This article collects typical usage examples of the Java class org.elasticsearch.action.admin.indices.flush.FlushRequest. If you are wondering what the FlushRequest class is for, how to use it, or where to find real-world examples, the curated class examples below may help.
The FlushRequest class belongs to the org.elasticsearch.action.admin.indices.flush package. 14 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
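Before diving into the examples, here is the call pattern they all share: build a FlushRequest for one or more indices (or for all indices, via the no-argument constructor), optionally tune force and wait_if_ongoing, and execute it through the indices admin client. The sketch below is illustrative only; the client variable and the "my-index" index name are assumptions, not taken from any example:

import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
import org.elasticsearch.client.Client;

public final class FlushRequestSketch {
    // Minimal sketch: synchronously flush a single index.
    // Assumes "client" is an already-connected Client and "my-index" exists.
    public static FlushResponse flushIndex(Client client) {
        FlushRequest request = new FlushRequest("my-index")
                .force(false)         // don't force a commit when nothing new needs committing
                .waitIfOngoing(true); // block, rather than fail, if another flush is already running
        return client.admin().indices().flush(request).actionGet();
    }
}

Several of the test examples below call flush(new FlushRequest()) with no index names, which flushes every index in the cluster.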
Example 1: handleRequest

import org.elasticsearch.action.admin.indices.flush.FlushRequest; // import the required package/class

@Override
public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) {
    FlushRequest flushRequest = new FlushRequest(Strings.splitStringByCommaToArray(request.param("index")));
    flushRequest.indicesOptions(IndicesOptions.fromRequest(request, flushRequest.indicesOptions()));
    flushRequest.force(request.paramAsBoolean("force", flushRequest.force()));
    flushRequest.waitIfOngoing(request.paramAsBoolean("wait_if_ongoing", flushRequest.waitIfOngoing()));
    client.admin().indices().flush(flushRequest, new RestBuilderListener<FlushResponse>(channel) {
        @Override
        public RestResponse buildResponse(FlushResponse response, XContentBuilder builder) throws Exception {
            builder.startObject();
            buildBroadcastShardsHeader(builder, request, response);
            builder.endObject();
            return new BytesRestResponse(OK, builder);
        }
    });
}
Example 2: flush

import org.elasticsearch.action.admin.indices.flush.FlushRequest; // import the required package/class

public Engine.CommitId flush(FlushRequest request) throws ElasticsearchException {
    boolean waitIfOngoing = request.waitIfOngoing();
    boolean force = request.force();
    if (logger.isTraceEnabled()) {
        logger.trace("flush with {}", request);
    }
    // we allow flushes while recovering, since we allow operations to happen
    // while recovering, and we want to keep the translog at bay (up to deletes, which
    // we don't gc). Yet, we don't use flush internally to clear deletes and flush the index writer,
    // since we use #writeIndexingBuffer for that now.
    verifyNotClosed();
    Engine engine = getEngine();
    if (engine.isRecovering()) {
        throw new IllegalIndexShardStateException(shardId(), state, "flush is only allowed if the engine is not recovering" +
            " from translog");
    }
    long time = System.nanoTime();
    Engine.CommitId commitId = engine.flush(force, waitIfOngoing);
    flushMetric.inc(System.nanoTime() - time);
    return commitId;
}
Example 3: prepareRequest

import org.elasticsearch.action.admin.indices.flush.FlushRequest; // import the required package/class

@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
    FlushRequest flushRequest = new FlushRequest(Strings.splitStringByCommaToArray(request.param("index")));
    flushRequest.indicesOptions(IndicesOptions.fromRequest(request, flushRequest.indicesOptions()));
    flushRequest.force(request.paramAsBoolean("force", flushRequest.force()));
    flushRequest.waitIfOngoing(request.paramAsBoolean("wait_if_ongoing", flushRequest.waitIfOngoing()));
    return channel -> client.admin().indices().flush(flushRequest, new RestBuilderListener<FlushResponse>(channel) {
        @Override
        public RestResponse buildResponse(FlushResponse response, XContentBuilder builder) throws Exception {
            builder.startObject();
            buildBroadcastShardsHeader(builder, request, response);
            builder.endObject();
            return new BytesRestResponse(OK, builder);
        }
    });
}
Example 4: flush

import org.elasticsearch.action.admin.indices.flush.FlushRequest; // import the required package/class

public Engine.CommitId flush(FlushRequest request) throws ElasticsearchException {
    boolean waitIfOngoing = request.waitIfOngoing();
    boolean force = request.force();
    if (logger.isTraceEnabled()) {
        logger.trace("flush with {}", request);
    }
    // we allow flushes while recovering, since we allow operations to happen
    // while recovering, and we want to keep the translog at bay (up to deletes, which
    // we don't gc).
    verifyStartedOrRecovering();
    long time = System.nanoTime();
    Engine.CommitId commitId = engine().flush(force, waitIfOngoing);
    flushMetric.inc(System.nanoTime() - time);
    return commitId;
}
Example 5: writeDependencyLinks

import org.elasticsearch.action.admin.indices.flush.FlushRequest; // import the required package/class

@VisibleForTesting void writeDependencyLinks(List<DependencyLink> links, long timestampMillis) {
    long midnight = Util.midnightUTC(timestampMillis);
    TransportClient client = ((NativeClient) storage.client()).client;
    BulkRequestBuilder request = client.prepareBulk();
    for (DependencyLink link : links) {
        request.add(client.prepareIndex(
            storage.indexNameFormatter.indexNameForTimestamp(midnight),
            ElasticsearchConstants.DEPENDENCY_LINK)
            .setId(link.parent + "|" + link.child) // unique constraint
            .setSource(
                "parent", link.parent,
                "child", link.child,
                "callCount", link.callCount));
    }
    request.execute().actionGet();
    client.admin().indices().flush(new FlushRequest()).actionGet();
}
Example 6: insertDocuments

import org.elasticsearch.action.admin.indices.flush.FlushRequest; // import the required package/class

private void insertDocuments() throws IOException {
    SimpleInsertAction action = new SimpleInsertAction(new DocumentSenderFactory());
    Properties inputProps = new Properties();
    inputProps.put(CommandPropsConstants.INDEX_OPT, INDEX_NAME);
    inputProps.put(CommandPropsConstants.TYPE_OPT, INDEX_TYPE);
    inputProps.put(InsertProperties.DOCS, String.valueOf(TOTAL_DOCS));
    Properties resourceProps = ResourceUtils.asProperties("default.properties");
    DefaultProperties defaults = new DefaultProperties(inputProps, resourceProps);
    InsertProperties insProperties = new InsertProperties(defaults);
    String workloadAsText = ResourceUtils.asString("workloads/config02.json");
    DocumentFactory<String> factory = action.getFactory(insProperties, new StringReader(workloadAsText));
    DocumentSender sender = new DocumentSenderImpl(client);
    sender.send(factory, insProperties);
    client.admin().indices().flush(new FlushRequest(INDEX_NAME)).actionGet();
}
Example 7: initCluster

import org.elasticsearch.action.admin.indices.flush.FlushRequest; // import the required package/class

@BeforeClass
public void initCluster() throws IOException {
    client = getClient();
    CreateIndexRequest indexRequest = new CreateIndexRequest(INDEX_NAME);
    assertTrue(client.admin().indices().create(indexRequest).actionGet().isAcknowledged());
    String mapping = ResourceUtils.asString("mapping_request.json");
    PutMappingRequestBuilder builder = new PutMappingRequestBuilder(client, PutMappingAction.INSTANCE);
    PutMappingRequest request = builder.setIndices(INDEX_NAME).setType(INDEX_TYPE).setSource(mapping).request();
    assertTrue(client.admin().indices().putMapping(request).actionGet().isAcknowledged());
    String doc01 = ResourceUtils.asString("documents/doc01.json");
    String doc02 = ResourceUtils.asString("documents/doc02.json");
    IndexRequestBuilder indexBuilder = new IndexRequestBuilder(client, IndexAction.INSTANCE, INDEX_NAME).setType(INDEX_TYPE);
    assertTrue(client.index(indexBuilder.setId("1").setSource(doc01).request()).actionGet().isCreated());
    assertTrue(client.index(indexBuilder.setId("2").setSource(doc02).request()).actionGet().isCreated());
    client.admin().indices().flush(new FlushRequest(INDEX_NAME)).actionGet();
    defaultProperties = new DefaultProperties("default.properties");
}
Example 8: performPreSyncedFlush

import org.elasticsearch.action.admin.indices.flush.FlushRequest; // import the required package/class

private PreSyncedFlushResponse performPreSyncedFlush(PreShardSyncedFlushRequest request) {
    IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id());
    FlushRequest flushRequest = new FlushRequest().force(false).waitIfOngoing(true);
    logger.trace("{} performing pre sync flush", request.shardId());
    Engine.CommitId commitId = indexShard.flush(flushRequest);
    logger.trace("{} pre sync flush done. commit id {}", request.shardId(), commitId);
    return new PreSyncedFlushResponse(commitId);
}
Example 9: maybeFlush

import org.elasticsearch.action.admin.indices.flush.FlushRequest; // import the required package/class

/**
 * Schedules a flush if needed, but won't schedule more than one flush concurrently. The flush is executed on the
 * flush thread pool asynchronously.
 *
 * @return <code>true</code> if a new flush was scheduled, otherwise <code>false</code>
 */
public boolean maybeFlush() {
    if (shouldFlush()) {
        if (asyncFlushRunning.compareAndSet(false, true)) { // we can't use a lock here since we "release" in a different thread
            if (shouldFlush() == false) {
                // we have to check again, since otherwise there is a race when a thread passes
                // the first shouldFlush() check next to another thread which flushes fast enough
                // to finish before the current thread could flip the asyncFlushRunning flag.
                // in that situation we have an extra unexpected flush.
                asyncFlushRunning.compareAndSet(true, false);
            } else {
                logger.debug("submitting async flush request");
                final AbstractRunnable abstractRunnable = new AbstractRunnable() {
                    @Override
                    public void onFailure(Exception e) {
                        if (state != IndexShardState.CLOSED) {
                            logger.warn("failed to flush index", e);
                        }
                    }

                    @Override
                    protected void doRun() throws Exception {
                        flush(new FlushRequest());
                    }

                    @Override
                    public void onAfter() {
                        asyncFlushRunning.compareAndSet(true, false);
                        maybeFlush(); // fire up another flush if we have filled up the limits such that shouldFlush() returns true
                    }
                };
                threadPool.executor(ThreadPool.Names.FLUSH).execute(abstractRunnable);
                return true;
            }
        }
    }
    return false;
}
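The compareAndSet logic in Example 9 is a lock-free "single flight" guard: at most one asynchronous flush may be in flight at a time, and the flush condition is re-checked after winning the flag so that a flush just completed by another thread isn't redone needlessly. The sketch below isolates that pattern in a self-contained class; SingleFlightFlusher and its shouldFlush/doFlush placeholders are hypothetical names for illustration, not Elasticsearch API:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;

class SingleFlightFlusher {
    private final AtomicBoolean running = new AtomicBoolean(false);
    private final ExecutorService pool = Executors.newSingleThreadExecutor();

    /** Returns true if a new background flush was scheduled. */
    boolean maybeFlush() {
        if (shouldFlush() && running.compareAndSet(false, true)) {
            // Re-check after winning the flag: another thread may have finished
            // a flush in the meantime, making this run unnecessary.
            if (!shouldFlush()) {
                running.set(false);
                return false;
            }
            pool.execute(() -> {
                try {
                    doFlush(); // placeholder for the actual flush work
                } finally {
                    running.set(false);
                    maybeFlush(); // re-arm if the threshold was crossed again while flushing
                }
            });
            return true;
        }
        return false;
    }

    private boolean shouldFlush() { return false; } // placeholder condition
    private void doFlush() {}                       // placeholder work
}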
Example 10: assertImmediateResponse

import org.elasticsearch.action.admin.indices.flush.FlushRequest; // import the required package/class

public FlushResponse assertImmediateResponse(String index, TransportFlushAction flushAction) throws InterruptedException, ExecutionException {
    Date beginDate = new Date();
    FlushResponse flushResponse = flushAction.execute(new FlushRequest(index)).get();
    Date endDate = new Date();
    long maxTime = 500;
    assertThat("this should not take longer than " + maxTime + " ms. The request hangs somewhere",
        endDate.getTime() - beginDate.getTime(), lessThanOrEqualTo(maxTime));
    return flushResponse;
}
Example 11: performPreSyncedFlush

import org.elasticsearch.action.admin.indices.flush.FlushRequest; // import the required package/class

private PreSyncedFlushResponse performPreSyncedFlush(PreShardSyncedFlushRequest request) {
    IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).shardSafe(request.shardId().id());
    FlushRequest flushRequest = new FlushRequest().force(false).waitIfOngoing(true);
    logger.trace("{} performing pre sync flush", request.shardId());
    Engine.CommitId commitId = indexShard.flush(flushRequest);
    logger.trace("{} pre sync flush done. commit id {}", request.shardId(), commitId);
    return new PreSyncedFlushResponse(commitId);
}
Example 12: testSearchByMetadata

import org.elasticsearch.action.admin.indices.flush.FlushRequest; // import the required package/class

@Test
public void testSearchByMetadata() throws InterruptedException {
    try {
        Long testUserTagId = Double.doubleToLongBits(Math.random()) % 10000;
        String testUser = Long.toHexString(Double.doubleToLongBits(Math.random()));
        String responsible = "responsible";
        DataTagCacheObject tag = new DataTagCacheObject(testUserTagId);
        tag.getMetadata().getMetadata().put(responsible, testUser);
        tagDocumentListener.onConfigurationEvent(tag, ConfigConstants.Action.CREATE);

        Long tag1234Id = Double.doubleToLongBits(Math.random()) % 10000;
        String value1234 = "1234";
        tag = new DataTagCacheObject(tag1234Id);
        String key1234 = "1234";
        tag.getMetadata().getMetadata().put(key1234, value1234);
        tagDocumentListener.onConfigurationEvent(tag, ConfigConstants.Action.CREATE);

        client.getClient().admin().indices().flush(new FlushRequest()).actionGet();
        Thread.sleep(10000);

        ElasticsearchService service = new ElasticsearchService(properties);
        assertEquals("There should be 2 tags, one for responsible and one for 1234", 2, service.getDistinctMetadataKeys().size());
        Collection<Long> tagsForResponsibleUser = service.findByMetadata(responsible, testUser);
        assertEquals("There should be one tag with responsible user set to requested value", 1, tagsForResponsibleUser.size());
        assertEquals(testUserTagId, tagsForResponsibleUser.stream().findFirst().get());
        Collection<Long> tags1234 = service.findByMetadata(key1234, value1234);
        assertEquals("There should be one tag with 1234 parameter set to requested value", 1, tags1234.size());
        assertEquals(tag1234Id, tags1234.stream().findFirst().get());
    } catch (Exception e) {
        e.printStackTrace();
        throw e;
    }
}
Example 13: testSearchByNameAndMetadata

import org.elasticsearch.action.admin.indices.flush.FlushRequest; // import the required package/class

@Test
public void testSearchByNameAndMetadata() throws InterruptedException {
    try {
        Long testUserTagId = Double.doubleToLongBits(Math.random()) % 10000;
        String testUser = Long.toHexString(Double.doubleToLongBits(Math.random()));
        String metadataKey = "metadataKey";
        DataTagCacheObject tag = new DataTagCacheObject(testUserTagId);
        String tagname = "tagname";
        tag.setName(tagname);
        tag.getMetadata().getMetadata().put(metadataKey, testUser);
        tagDocumentListener.onConfigurationEvent(tag, ConfigConstants.Action.CREATE);

        tag = new DataTagCacheObject(Double.doubleToLongBits(Math.random()) % 10000);
        tag.setName(tagname);
        tag.getMetadata().getMetadata().put(metadataKey, "some other metadata value");
        tagDocumentListener.onConfigurationEvent(tag, ConfigConstants.Action.CREATE);

        tag = new DataTagCacheObject(Double.doubleToLongBits(Math.random()) % 10000);
        tag.setName("other_tagname");
        tag.getMetadata().getMetadata().put(metadataKey, testUser);
        tagDocumentListener.onConfigurationEvent(tag, ConfigConstants.Action.CREATE);

        client.getClient().admin().indices().flush(new FlushRequest()).actionGet();
        Thread.sleep(10000);

        ElasticsearchService service = new ElasticsearchService(properties);
        Collection<Long> tagsForResponsibleUser = service.findTagsByNameAndMetadata(tagname, metadataKey, testUser);
        assertEquals("There should be one tag with given name and metadata", 1, tagsForResponsibleUser.size());
        assertEquals(testUserTagId, tagsForResponsibleUser.stream().findFirst().get());
    } catch (Exception e) {
        e.printStackTrace();
        throw e;
    }
}
Example 14: initCluster

import org.elasticsearch.action.admin.indices.flush.FlushRequest; // import the required package/class

@BeforeClass
public void initCluster() throws IOException {
    client = getClient();
    CreateIndexRequest indexRequest = new CreateIndexRequest(INDEX_NAME);
    assertTrue(client.admin().indices().create(indexRequest).actionGet().isAcknowledged());
    client.admin().indices().flush(new FlushRequest(INDEX_NAME)).actionGet();
    insertDocuments();
}