This article compiles typical usage examples of the Java class org.elasticsearch.common.unit.TimeValue. If you are unsure what TimeValue is used for or how to use it, the curated class code examples below may help.
The TimeValue class belongs to the org.elasticsearch.common.unit package. Fifteen code examples of the class are shown below, ordered by popularity.
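Before turning to the individual examples, here is a minimal, self-contained sketch (not taken from any of the projects quoted below; the class name TimeValueBasics and the setting name "my_setting" are placeholders) of the most common TimeValue operations: constructing a value, parsing a human-readable string, and converting it back to plain numbers.

import java.util.concurrent.TimeUnit;

import org.elasticsearch.common.unit.TimeValue;

public class TimeValueBasics {
    public static void main(String[] args) {
        // Factory methods and constructors for fixed durations
        TimeValue thirtySeconds = TimeValue.timeValueSeconds(30);
        TimeValue scrollKeepAlive = new TimeValue(60000);              // plain milliseconds
        TimeValue shardTimeout = new TimeValue(30, TimeUnit.SECONDS);  // explicit unit

        // Parsing a human-readable value such as "1m" or "30s"; the setting name in the
        // last argument is only used to build the error message when parsing fails
        TimeValue parsed = TimeValue.parseTimeValue("1m", thirtySeconds, "my_setting");

        // Converting back to plain numbers or a human-readable string
        System.out.println(parsed.millis());  // 60000
        System.out.println(parsed.nanos());   // 60000000000
        System.out.println(parsed);           // e.g. "1m"
    }
}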
Example 1: deleteByQuery
import org.elasticsearch.common.unit.TimeValue; // import the required package/class
/**
 * Delete documents using a query. Check what would be deleted first with a normal search query!
 * Elasticsearch once provided a native prepareDeleteByQuery method, but this was removed
 * in later versions. Instead, there is a plugin which iterates over search results,
 * see https://www.elastic.co/guide/en/elasticsearch/plugins/current/plugins-delete-by-query.html
 * We simulate the same behaviour here without needing that plugin.
 *
 * @param indexName the index to delete from
 * @param q the query selecting the documents to delete
 * @return the number of deleted documents
 */
public int deleteByQuery(String indexName, final QueryBuilder q) {
    Map<String, String> ids = new TreeMap<>();
    SearchResponse response = elasticsearchClient.prepareSearch(indexName).setSearchType(SearchType.QUERY_THEN_FETCH)
            .setScroll(new TimeValue(60000)).setQuery(q).setSize(100).execute().actionGet();
    while (true) {
        // accumulate the ids here; don't delete them right away so the deletes
        // cannot interfere with the scroll
        for (SearchHit hit : response.getHits().getHits()) {
            ids.put(hit.getId(), hit.getType());
        }
        response = elasticsearchClient.prepareSearchScroll(response.getScrollId()).setScroll(new TimeValue(600000))
                .execute().actionGet();
        // termination
        if (response.getHits().getHits().length == 0) {
            break;
        }
    }
    return deleteBulk(indexName, ids);
}
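Two small notes on this example, given as a hedged sketch rather than project code: the method works with any QueryBuilder (the index and field names below are placeholders), and the server-side scroll context can be released explicitly with prepareClearScroll once the loop finishes, instead of waiting for the keep-alive to expire.

// Hypothetical call site; "messages" and "user" are placeholder names.
int deleted = deleteByQuery("messages", QueryBuilders.termQuery("user", "alice"));

// Optional addition right after the while-loop inside deleteByQuery:
// free the scroll context instead of letting the 10-minute keep-alive run out.
elasticsearchClient.prepareClearScroll()
        .addScrollId(response.getScrollId())
        .execute().actionGet();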
Example 2: updateMappingOnMaster
import org.elasticsearch.common.unit.TimeValue; // import the required package/class
public void updateMappingOnMaster(String index, String type, Mapping mappingUpdate, final TimeValue timeout, final MappingUpdateListener listener) {
    final PutMappingRequestBuilder request = updateMappingRequest(index, type, mappingUpdate, timeout);
    if (listener == null) {
        request.execute();
    } else {
        final ActionListener<PutMappingResponse> actionListener = new ActionListener<PutMappingResponse>() {
            @Override
            public void onResponse(PutMappingResponse response) {
                if (response.isAcknowledged()) {
                    listener.onMappingUpdate();
                } else {
                    listener.onFailure(new TimeoutException("Failed to acknowledge the mapping response within [" + timeout + "]"));
                }
            }

            @Override
            public void onFailure(Throwable e) {
                listener.onFailure(e);
            }
        };
        request.execute(actionListener);
    }
}
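A hedged sketch of how such a method might be invoked; the index and type names are placeholders, mappingUpdate is assumed to be an existing Mapping instance, and the listener shape is inferred from the calls made in the example above.

// Hypothetical invocation with a 30 second acknowledgement timeout.
updateMappingOnMaster("my-index", "my-type", mappingUpdate, TimeValue.timeValueSeconds(30),
        new MappingUpdateListener() {
            @Override
            public void onMappingUpdate() {
                logger.info("mapping update for [my-index]/[my-type] acknowledged");
            }

            @Override
            public void onFailure(Throwable t) {
                logger.warn("mapping update failed", t);
            }
        });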
Example 3: testThatBulkProcessorCountIsCorrect
import org.elasticsearch.common.unit.TimeValue; // import the required package/class
public void testThatBulkProcessorCountIsCorrect() throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    BulkProcessorTestListener listener = new BulkProcessorTestListener(latch);
    int numDocs = randomIntBetween(10, 100);

    try (BulkProcessor processor = BulkProcessor.builder(client(), listener).setName("foo")
            // let's make sure that the bulk action limit trips, one single execution will index all the documents
            .setConcurrentRequests(randomIntBetween(0, 1)).setBulkActions(numDocs)
            .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB))
            .build()) {
        MultiGetRequestBuilder multiGetRequestBuilder = indexDocs(client(), processor, numDocs);
        latch.await();

        assertThat(listener.beforeCounts.get(), equalTo(1));
        assertThat(listener.afterCounts.get(), equalTo(1));
        assertThat(listener.bulkFailures.size(), equalTo(0));
        assertResponseItems(listener.bulkItems, numDocs);
        assertMultiGetResponse(multiGetRequestBuilder.get(), numDocs);
    }
}
Example 4: testBulkProcessorFlush
import org.elasticsearch.common.unit.TimeValue; // import the required package/class
public void testBulkProcessorFlush() throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    BulkProcessorTestListener listener = new BulkProcessorTestListener(latch);
    int numDocs = randomIntBetween(10, 100);

    try (BulkProcessor processor = BulkProcessor.builder(client(), listener).setName("foo")
            // let's make sure that this bulk won't be automatically flushed
            .setConcurrentRequests(randomIntBetween(0, 10)).setBulkActions(numDocs + randomIntBetween(1, 100))
            .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)).build()) {
        MultiGetRequestBuilder multiGetRequestBuilder = indexDocs(client(), processor, numDocs);
        assertThat(latch.await(randomInt(500), TimeUnit.MILLISECONDS), equalTo(false));
        // we really need an explicit flush as none of the bulk thresholds was reached
        processor.flush();
        latch.await();

        assertThat(listener.beforeCounts.get(), equalTo(1));
        assertThat(listener.afterCounts.get(), equalTo(1));
        assertThat(listener.bulkFailures.size(), equalTo(0));
        assertResponseItems(listener.bulkItems, numDocs);
        assertMultiGetResponse(multiGetRequestBuilder.get(), numDocs);
    }
}
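Both tests deliberately set the flush interval to 24 hours so that time-based flushing never kicks in. Outside of tests, the interval is usually a short TimeValue so buffered requests still go out when the action and size thresholds are not reached. A minimal sketch of such a configuration, assuming `client` is an existing org.elasticsearch.client.Client and the listener below is only a logging placeholder:

BulkProcessor bulkProcessor = BulkProcessor.builder(client, new BulkProcessor.Listener() {
    @Override
    public void beforeBulk(long executionId, BulkRequest request) {
        // called just before the bulk request is sent
    }

    @Override
    public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
        // called after a bulk completed; response.hasFailures() flags per-item errors
    }

    @Override
    public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
        // called when the whole bulk failed, e.g. the cluster was unreachable
    }
})
        .setBulkActions(1000)                                // flush after 1000 actions ...
        .setBulkSize(new ByteSizeValue(5, ByteSizeUnit.MB))  // ... or 5 MB of data ...
        .setFlushInterval(TimeValue.timeValueSeconds(5))     // ... or every 5 seconds, whichever comes first
        .setConcurrentRequests(1)
        .build();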
Example 5: scheduleTimeout
import org.elasticsearch.common.unit.TimeValue; // import the required package/class
public void scheduleTimeout(ScheduledExecutorService timer, final Runnable timeoutCallback, TimeValue timeValue) {
    synchronized (this) {
        if (timeoutFuture != null) {
            throw new IllegalStateException("scheduleTimeout may only be called once");
        }
        if (started == false) {
            timeoutFuture = timer.schedule(new Runnable() {
                @Override
                public void run() {
                    if (remove(TieBreakingPrioritizedRunnable.this)) {
                        runAndClean(timeoutCallback);
                    }
                }
            }, timeValue.nanos(), TimeUnit.NANOSECONDS);
        }
    }
}
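The TimeValue-specific part of this example is the conversion to a plain (duration, TimeUnit) pair for java.util.concurrent scheduling. A stripped-down, self-contained sketch of the same idea:

ScheduledExecutorService timer = Executors.newSingleThreadScheduledExecutor();
TimeValue timeout = TimeValue.timeValueMillis(500);

// ScheduledExecutorService knows nothing about TimeValue, so the value is handed
// over as nanoseconds together with TimeUnit.NANOSECONDS.
ScheduledFuture<?> timeoutFuture = timer.schedule(
        () -> System.out.println("timed out after " + timeout),
        timeout.nanos(), TimeUnit.NANOSECONDS);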
Example 6: clusterHealth
import org.elasticsearch.common.unit.TimeValue; // import the required package/class
private ClusterHealthResponse clusterHealth(ClusterHealthRequest request, ClusterState clusterState, int numberOfPendingTasks, int numberOfInFlightFetch,
                                            TimeValue pendingTaskTimeInQueue) {
    if (logger.isTraceEnabled()) {
        logger.trace("Calculating health based on state version [{}]", clusterState.version());
    }

    String[] concreteIndices;
    try {
        concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request);
    } catch (IndexNotFoundException e) {
        // one of the specified indices is not there - treat it as RED.
        ClusterHealthResponse response = new ClusterHealthResponse(clusterState.getClusterName().value(), Strings.EMPTY_ARRAY, clusterState,
                numberOfPendingTasks, numberOfInFlightFetch, UnassignedInfo.getNumberOfDelayedUnassigned(clusterState),
                pendingTaskTimeInQueue);
        response.setStatus(ClusterHealthStatus.RED);
        return response;
    }

    return new ClusterHealthResponse(clusterState.getClusterName().value(), concreteIndices, clusterState, numberOfPendingTasks,
            numberOfInFlightFetch, UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), pendingTaskTimeInQueue);
}
Example 7: handleTransportDisconnect
import org.elasticsearch.common.unit.TimeValue; // import the required package/class
@Override
protected void handleTransportDisconnect(DiscoveryNode node) {
    synchronized (masterNodeMutex) {
        if (!node.equals(this.masterNode)) {
            return;
        }
        if (connectOnNetworkDisconnect) {
            try {
                transportService.connectToNode(node);
                // if all is well, make sure we restart the pinger
                if (masterPinger != null) {
                    masterPinger.stop();
                }
                this.masterPinger = new MasterPinger();
                // schedule with a 0 time value so the pinger runs on the thread pool rather than inline
                threadPool.schedule(TimeValue.timeValueMillis(0), ThreadPool.Names.SAME, masterPinger);
            } catch (Exception e) {
                logger.trace("[master] [{}] transport disconnected (with verified connect)", masterNode);
                notifyMasterFailure(masterNode, "transport disconnected (with verified connect)");
            }
        } else {
            logger.trace("[master] [{}] transport disconnected", node);
            notifyMasterFailure(node, "transport disconnected");
        }
    }
}
Example 8: testDefaultRecoverAfterTime
import org.elasticsearch.common.unit.TimeValue; // import the required package/class
public void testDefaultRecoverAfterTime() throws IOException {
    // check that the default is not set
    GatewayService service = createService(Settings.builder());
    assertNull(service.recoverAfterTime());

    // ensure default is set when setting expected_nodes
    service = createService(Settings.builder().put("gateway.expected_nodes", 1));
    assertThat(service.recoverAfterTime(), Matchers.equalTo(GatewayService.DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET));

    // ensure default is set when setting expected_data_nodes
    service = createService(Settings.builder().put("gateway.expected_data_nodes", 1));
    assertThat(service.recoverAfterTime(), Matchers.equalTo(GatewayService.DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET));

    // ensure default is set when setting expected_master_nodes
    service = createService(Settings.builder().put("gateway.expected_master_nodes", 1));
    assertThat(service.recoverAfterTime(), Matchers.equalTo(GatewayService.DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET));

    // ensure an explicit recover_after_time setting overrides the default
    TimeValue timeValue = TimeValue.timeValueHours(3);
    service = createService(Settings.builder().put("gateway.expected_nodes", 1).put("gateway.recover_after_time", timeValue.toString()));
    assertThat(service.recoverAfterTime().millis(), Matchers.equalTo(timeValue.millis()));
}
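The same string-to-TimeValue round trip works for any time-typed setting through the generic Settings API. A short hedged sketch (the key is the real gateway setting, the values are arbitrary):

Settings settings = Settings.builder()
        .put("gateway.recover_after_time", "5m")   // human-readable time string
        .build();

// getAsTime parses the string and falls back to the given default when the key is absent.
TimeValue recoverAfter = settings.getAsTime("gateway.recover_after_time", TimeValue.timeValueMinutes(10));
assert recoverAfter.millis() == TimeValue.timeValueMinutes(5).millis();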
Example 9: testNumericSort
import org.elasticsearch.common.unit.TimeValue; // import the required package/class
public void testNumericSort() throws Exception {
    int numShards = setupIndex(true);
    SearchResponse sr = client().prepareSearch("test")
            .setQuery(matchAllQuery())
            .setSize(0)
            .get();
    int numDocs = (int) sr.getHits().getTotalHits();
    assertThat(numDocs, equalTo(NUM_DOCS));

    int max = randomIntBetween(2, numShards * 3);
    for (String field : new String[]{"_uid", "random_int", "static_int"}) {
        int fetchSize = randomIntBetween(10, 100);
        SearchRequestBuilder request = client().prepareSearch("test")
                .setQuery(matchAllQuery())
                .setScroll(new Scroll(TimeValue.timeValueSeconds(10)))
                .addSort(SortBuilders.fieldSort("random_int"))
                .setSize(fetchSize);
        assertSearchSlicesWithScroll(request, field, max);
    }
}
Example 10: IndicesStore
import org.elasticsearch.common.unit.TimeValue; // import the required package/class
@Inject
public IndicesStore(Settings settings, NodeSettingsService nodeSettingsService, IndicesService indicesService,
                    ClusterService clusterService, TransportService transportService) {
    super(settings);
    this.nodeSettingsService = nodeSettingsService;
    this.indicesService = indicesService;
    this.clusterService = clusterService;
    this.transportService = transportService;
    transportService.registerRequestHandler(ACTION_SHARD_EXISTS, ShardActiveRequest.class, ThreadPool.Names.SAME, new ShardActiveRequestHandler());

    // we don't limit by default (we default to CMS's auto throttle instead):
    this.rateLimitingType = settings.get("indices.store.throttle.type", DEFAULT_RATE_LIMITING_TYPE);
    rateLimiting.setType(rateLimitingType);
    this.rateLimitingThrottle = settings.getAsBytesSize("indices.store.throttle.max_bytes_per_sec", DEFAULT_INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC);
    rateLimiting.setMaxRate(rateLimitingThrottle);

    this.deleteShardTimeout = settings.getAsTime(INDICES_STORE_DELETE_SHARD_TIMEOUT, new TimeValue(30, TimeUnit.SECONDS));

    logger.debug("using indices.store.throttle.type [{}], with index.store.throttle.max_bytes_per_sec [{}]", rateLimitingType, rateLimitingThrottle);

    nodeSettingsService.addListener(applySettings);
    clusterService.addLast(this);
}
Example 11: createSearchContext
import org.elasticsearch.common.unit.TimeValue; // import the required package/class
public DefaultSearchContext createSearchContext(ShardSearchRequest request, TimeValue timeout, @Nullable Engine.Searcher searcher)
        throws IOException {
    IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
    IndexShard indexShard = indexService.getShard(request.shardId().getId());
    SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().getId(), indexShard.shardId());
    Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher;

    final DefaultSearchContext searchContext = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget,
            engineSearcher, indexService, indexShard, bigArrays, threadPool.estimatedTimeInMillisCounter(), timeout, fetchPhase);
    boolean success = false;
    try {
        // we clone the query shard context here just for rewriting, otherwise we might end up
        // with incorrect state since we are using now() or script services during rewrite and
        // when normalizing / evaluating templates etc.
        request.rewrite(new QueryShardContext(searchContext.getQueryShardContext()));
        assert searchContext.getQueryShardContext().isCachable();
        success = true;
    } finally {
        if (success == false) {
            IOUtils.closeWhileHandlingException(searchContext);
        }
    }
    return searchContext;
}
Example 12: iterator
import org.elasticsearch.common.unit.TimeValue; // import the required package/class
@Override
public Iterator<TimeValue> iterator() {
    return new Iterator<TimeValue>() {
        @Override
        public boolean hasNext() {
            return false;
        }

        @Override
        public TimeValue next() {
            throw new NoSuchElementException("No backoff");
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    };
}
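This iterator is effectively a "no backoff" policy: it never yields a delay. For contrast, here is a hedged sketch of the opposite case, an iterator that returns the same TimeValue a fixed number of times, which is roughly how a constant backoff looks when consumed:

// Sketch: "delay" is handed out "maxRetries" times, then hasNext() turns false.
final TimeValue delay = TimeValue.timeValueMillis(50);
final int maxRetries = 3;

Iterator<TimeValue> constantBackoff = new Iterator<TimeValue>() {
    private int attempts = 0;

    @Override
    public boolean hasNext() {
        return attempts < maxRetries;
    }

    @Override
    public TimeValue next() {
        if (!hasNext()) {
            throw new NoSuchElementException("No more retries");
        }
        attempts++;
        return delay;
    }
};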
Example 13: buildFromContent
import org.elasticsearch.common.unit.TimeValue; // import the required package/class
public static void buildFromContent(BytesReference content, SearchScrollRequest searchScrollRequest) {
    try (XContentParser parser = XContentHelper.createParser(content)) {
        if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
            throw new IllegalArgumentException("Malformed content, must start with an object");
        } else {
            XContentParser.Token token;
            String currentFieldName = null;
            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                if (token == XContentParser.Token.FIELD_NAME) {
                    currentFieldName = parser.currentName();
                } else if ("scroll_id".equals(currentFieldName) && token == XContentParser.Token.VALUE_STRING) {
                    searchScrollRequest.scrollId(parser.text());
                } else if ("scroll".equals(currentFieldName) && token == XContentParser.Token.VALUE_STRING) {
                    searchScrollRequest.scroll(new Scroll(TimeValue.parseTimeValue(parser.text(), null, "scroll")));
                } else {
                    throw new IllegalArgumentException("Unknown parameter [" + currentFieldName + "] in request body or parameter is of the wrong type [" + token + "]");
                }
            }
        }
    } catch (IOException e) {
        throw new IllegalArgumentException("Failed to parse request body", e);
    }
}
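A hedged, stand-alone illustration of the scroll field this parser handles; the scroll_id shown in the comment is a placeholder.

// Example request body:
// { "scroll_id": "<scroll id from the previous search response>", "scroll": "1m" }

// The "scroll" string becomes a TimeValue exactly as in the method above; passing null
// as the default simply means there is no fallback when the value is absent.
TimeValue keepAlive = TimeValue.parseTimeValue("1m", null, "scroll");
assert keepAlive.millis() == 60000L;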
Example 14: testGCDeletesSetting
import org.elasticsearch.common.unit.TimeValue; // import the required package/class
public void testGCDeletesSetting() {
    TimeValue gcDeleteSetting = new TimeValue(Math.abs(randomInt()), TimeUnit.MILLISECONDS);
    IndexMetaData metaData = newIndexMeta("index", Settings.builder()
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(), gcDeleteSetting.getStringRep())
            .build());
    IndexSettings settings = new IndexSettings(metaData, Settings.EMPTY);
    assertEquals(TimeValue.parseTimeValue(gcDeleteSetting.getStringRep(), new TimeValue(1, TimeUnit.DAYS),
            IndexSettings.INDEX_GC_DELETES_SETTING.getKey()).getMillis(), settings.getGcDeletesInMillis());

    TimeValue newGCDeleteSetting = new TimeValue(Math.abs(randomInt()), TimeUnit.MILLISECONDS);
    settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(),
            newGCDeleteSetting.getStringRep()).build()));
    assertEquals(TimeValue.parseTimeValue(newGCDeleteSetting.getStringRep(), new TimeValue(1, TimeUnit.DAYS),
            IndexSettings.INDEX_GC_DELETES_SETTING.getKey()).getMillis(), settings.getGcDeletesInMillis());

    settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(),
            randomBoolean() ? -1 : new TimeValue(-1, TimeUnit.MILLISECONDS)).build()));
    assertEquals(-1, settings.getGcDeletesInMillis());
}
Example 15: getSessions
import org.elasticsearch.common.unit.TimeValue; // import the required package/class
/**
 * getSessions: Get sessions from logs
 *
 * @param props
 *          the Mudrod configuration
 * @param es
 *          the Elasticsearch driver
 * @param logIndex
 *          a log index name
 * @return list of session names
 */
protected List<String> getSessions(Properties props, ESDriver es, String logIndex) {
    String cleanupPrefix = props.getProperty(MudrodConstants.CLEANUP_TYPE_PREFIX);
    String sessionStatPrefix = props.getProperty(MudrodConstants.SESSION_STATS_PREFIX);

    List<String> sessions = new ArrayList<>();
    SearchResponse scrollResp = es.getClient().prepareSearch(logIndex).setTypes(sessionStatPrefix)
            .setScroll(new TimeValue(60000)).setQuery(QueryBuilders.matchAllQuery()).setSize(100)
            .execute().actionGet();
    while (true) {
        for (SearchHit hit : scrollResp.getHits().getHits()) {
            Map<String, Object> session = hit.getSource();
            String sessionID = (String) session.get("SessionID");
            sessions.add(sessionID + "," + logIndex + "," + cleanupPrefix);
        }

        scrollResp = es.getClient().prepareSearchScroll(scrollResp.getScrollId()).setScroll(new TimeValue(600000))
                .execute().actionGet();
        if (scrollResp.getHits().getHits().length == 0) {
            break;
        }
    }
    return sessions;
}