本文整理汇总了Java中org.elasticsearch.common.settings.Settings.getAsInt方法的典型用法代码示例。如果您正苦于以下问题:Java Settings.getAsInt方法的具体用法?Java Settings.getAsInt怎么用?Java Settings.getAsInt使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.elasticsearch.common.settings.Settings
的用法示例。
在下文中一共展示了Settings.getAsInt方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: StandardAnalyzerProvider
import org.elasticsearch.common.settings.Settings; //导入方法依赖的package包/类
/**
 * Provider that builds a {@link StandardAnalyzer} for an index.
 * Stop words are resolved from the "stopwords" setting via
 * {@code Analysis.parseStopWords}; the fallback stop-word set depends on the
 * version the index was created with, and "max_token_length" (defaulting to
 * Lucene's {@code DEFAULT_MAX_TOKEN_LENGTH}) caps individual token size.
 */
public StandardAnalyzerProvider(Index index, Settings indexSettings, Environment env, String name, Settings settings) {
    super(index, indexSettings, name, settings);
    this.esVersion = Version.indexCreated(indexSettings);
    // Indices created on 1.0.0-Beta1 or later default to no stop words;
    // older indices keep the classic English stop-word list.
    final CharArraySet versionDefaultStops = esVersion.onOrAfter(Version.V_1_0_0_Beta1)
            ? CharArraySet.EMPTY_SET
            : StopAnalyzer.ENGLISH_STOP_WORDS_SET;
    final CharArraySet stopSet = Analysis.parseStopWords(env, settings, versionDefaultStops);
    final int tokenLengthLimit = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
    standardAnalyzer = new StandardAnalyzer(stopSet);
    standardAnalyzer.setVersion(version);
    standardAnalyzer.setMaxTokenLength(tokenLengthLimit);
}
示例2: NodesFailureDetectionService
import org.elasticsearch.common.settings.Settings; //导入方法依赖的package包/类
/**
 * Service that detects failed nodes by periodic pinging.
 * Reads ping interval/timeout/retry-count from settings (defaults: 1s
 * interval, 5s timeout, 3 retries), wires in the cluster collaborators,
 * and registers a handler for incoming ping requests.
 */
public NodesFailureDetectionService(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterName clusterName, ClusterService clusterService,
RoutingService routingService, JoinClusterAction joinClusterAction, ClusterStateOpLog clusterStateOpLog) {
super(settings);
// Ping cadence and failure thresholds, overridable via settings.
this.pingInterval = settings.getAsTime(SETTING_PING_INTERVAL, timeValueSeconds(1));
this.pingTimeout = settings.getAsTime(SETTING_PING_TIMEOUT, timeValueSeconds(5));
this.pingRetryCount = settings.getAsInt(SETTING_PING_RETRIES, 3);
this.threadPool = threadPool;
this.transportService = transportService;
this.clusterName = clusterName;
this.clusterService = clusterService;
this.routingService = routingService;
this.joinClusterAction = joinClusterAction;
this.clusterStateOpLog = clusterStateOpLog;
this.localNode = clusterService.localNode();
logger.debug("[node ] uses ping_interval [{}], ping_timeout [{}], ping_retries [{}]", pingInterval, pingTimeout, pingRetryCount);
// Register last, after all fields are initialized, so an early ping
// never observes a partially-constructed service.
transportService.registerRequestHandler(PING_ACTION_NAME, PingRequest.class, ThreadPool.Names.SAME, new PingRequestHandler());
}
示例3: ElectMasterService
import org.elasticsearch.common.settings.Settings; //导入方法依赖的package包/类
@Inject
public ElectMasterService(Settings settings, Version version) {
super(settings);
this.minMasterVersion = version.minimumCompatibilityVersion();
this.minimumMasterNodes = settings.getAsInt(DISCOVERY_ZEN_MINIMUM_MASTER_NODES,
DEFAULT_DISCOVERY_ZEN_MINIMUM_MASTER_NODES);
logger.debug("using minimum_master_nodes [{}]", minimumMasterNodes);
}
示例4: AbstractIndexWriterProjection
import org.elasticsearch.common.settings.Settings; //导入方法依赖的package包/类
/**
 * Base projection for index-writing operators.
 * Captures the target table/partition, key columns, and the "bulk_size"
 * batch size (must be positive; defaults to {@code BULK_SIZE_DEFAULT}).
 *
 * @throws IllegalArgumentException if the configured bulk size is not &gt; 0
 */
protected AbstractIndexWriterProjection(TableIdent tableIdent,
                                        @Nullable String partitionIdent,
                                        List<ColumnIdent> primaryKeys,
                                        @Nullable ColumnIdent clusteredByColumn,
                                        Settings settings,
                                        boolean autoCreateIndices) {
    this.tableIdent = tableIdent;
    this.partitionIdent = partitionIdent;
    this.primaryKeys = primaryKeys;
    this.clusteredByColumn = clusteredByColumn;
    this.autoCreateIndices = autoCreateIndices;
    this.bulkActions = settings.getAsInt(BULK_SIZE, BULK_SIZE_DEFAULT);
    // Guard clause equivalent to Preconditions.checkArgument: same exception
    // type and message, spelled out explicitly.
    if (!(bulkActions > 0)) {
        throw new IllegalArgumentException("\"bulk_size\" must be greater than 0.");
    }
}
示例5: visitCreateTenant
import org.elasticsearch.common.settings.Settings; //导入方法依赖的package包/类
/**
 * Analyzes a CREATE TENANT statement.
 * Only the root user may create tenants; the tenant's password, instance
 * count (default from {@code TenantSettings.NUMBER_OF_INSTANCES}), and
 * instance list are taken from the statement's properties.
 *
 * @throws NoPermissionException if the current user is not root
 */
@Override
public CreateTenantAnalyzedStatement visitCreateTenant(CreateTenant node, Analysis context) {
    // Add SQL Authentication
    UserProperty currentOperateUser = context.parameterContext().userProperty();
    if (!currentOperateUser.getUsernameWithoutTenant().equalsIgnoreCase(UserProperty.ROOT_NAME)) {
        // Fixed grammar in the user-facing message ("have" -> "has").
        throw new NoPermissionException(RestStatus.FORBIDDEN.getStatus(), "only root has permission to create tenant");
    }
    Settings settings = GenericPropertiesConverter.settingsFromProperties(
            node.properties(), context.parameterContext(), SETTINGS).build();
    CreateTenantAnalyzedStatement statement = new CreateTenantAnalyzedStatement(node.name(),
            settings.get(TenantSettings.SUPERUSER_PASSWORD.name()),
            settings.getAsInt(TenantSettings.NUMBER_OF_INSTANCES.name(), TenantSettings.NUMBER_OF_INSTANCES.defaultValue()),
            settings.get(TenantSettings.INSTANCE_LIST.name()));
    return statement;
}
示例6: JoinClusterAction
import org.elasticsearch.common.settings.Settings; //导入方法依赖的package包/类
/**
 * Action that joins this node to the cluster.
 * Ping timeout defaults to 3s; join timeout defaults to 20x the ping
 * timeout; joins are retried up to 3 times with a 100ms delay between
 * attempts. Registers the join-request handler on the transport service.
 */
public JoinClusterAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, RoutingService routingService) {
super(settings);
// NOTE(review): this line reads `this.settings` while the rest of the
// constructor uses the `settings` parameter — presumably the same object
// (assigned by super(settings)); confirm against the base class.
this.pingTimeout = this.settings.getAsTime(SETTING_PING_TIMEOUT, timeValueSeconds(3));
this.joinTimeout = settings.getAsTime(SETTING_JOIN_TIMEOUT, TimeValue.timeValueMillis(this.pingTimeout.millis() * 20));
this.joinRetryAttempts = settings.getAsInt(SETTING_JOIN_RETRY_ATTEMPTS, 3);
this.joinRetryDelay = settings.getAsTime(SETTING_JOIN_RETRY_DELAY, TimeValue.timeValueMillis(100));
this.transportService = transportService;
this.clusterService = clusterService;
this.threadPool = threadPool;
this.routingService = routingService;
// Register last so the handler never sees a partially-constructed action.
transportService.registerRequestHandler(JOIN_ACTION_NAME, JoinRequest.class, ThreadPool.Names.GENERIC, new JoinRequestRequestHandler());
}
示例7: StandardAnalyzerProvider
import org.elasticsearch.common.settings.Settings; //导入方法依赖的package包/类
/**
 * Provider that builds a {@link StandardAnalyzer} for an index
 * ({@code IndexSettings}-based variant). Stop words default to the empty set
 * (no version-dependent fallback here) and "max_token_length" caps token
 * size, defaulting to Lucene's {@code DEFAULT_MAX_TOKEN_LENGTH}.
 */
public StandardAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
    super(indexSettings, name, settings);
    final CharArraySet stopSet = Analysis.parseStopWords(
            env, indexSettings.getIndexVersionCreated(), settings, CharArraySet.EMPTY_SET);
    final int tokenLengthLimit = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
    standardAnalyzer = new StandardAnalyzer(stopSet);
    standardAnalyzer.setVersion(version);
    standardAnalyzer.setMaxTokenLength(tokenLengthLimit);
}
示例8: ShingleTokenFilterFactory
import org.elasticsearch.common.settings.Settings; //导入方法依赖的package包/类
/**
 * Factory for the shingle token filter.
 * Reads min/max shingle sizes, unigram-output flags, and the token/filler
 * separators from settings, falling back to Lucene's {@code ShingleFilter}
 * defaults, then builds the internal shingle {@code Factory}.
 */
public ShingleTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
    super(indexSettings, name, settings);
    // Primitive locals: getAsInt / getAsBooleanLenientForPreEs6Indices are
    // called with non-null defaults and so never yield null — the boxed
    // Integer/Boolean locals in the original were needless.
    int maxShingleSize = settings.getAsInt("max_shingle_size", ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE);
    int minShingleSize = settings.getAsInt("min_shingle_size", ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE);
    boolean outputUnigrams = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "output_unigrams", true, deprecationLogger);
    boolean outputUnigramsIfNoShingles = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "output_unigrams_if_no_shingles", false, deprecationLogger);
    String tokenSeparator = settings.get("token_separator", ShingleFilter.DEFAULT_TOKEN_SEPARATOR);
    String fillerToken = settings.get("filler_token", ShingleFilter.DEFAULT_FILLER_TOKEN);
    // NOTE(review): min > max is not validated here — presumably rejected
    // downstream; confirm before relying on it.
    factory = new Factory("shingle", minShingleSize, maxShingleSize, outputUnigrams, outputUnigramsIfNoShingles, tokenSeparator, fillerToken);
}
示例9: MergePolicyConfig
import org.elasticsearch.common.settings.Settings; //导入方法依赖的package包/类
/**
 * Configures the tiered merge policy from index settings.
 * Every knob falls back to its DEFAULT_* constant; merges can be disabled
 * entirely via {@code INDEX_MERGE_ENABLED} (tests only — a warning is
 * logged if so). All parsed values are applied to {@code mergePolicy}.
 */
public MergePolicyConfig(ESLogger logger, Settings indexSettings) {
this.logger = logger;
this.noCFSRatio = parseNoCFSRatio(indexSettings.get(INDEX_COMPOUND_FORMAT, Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO)));
double forceMergeDeletesPctAllowed = indexSettings.getAsDouble("index.merge.policy.expunge_deletes_allowed", DEFAULT_EXPUNGE_DELETES_ALLOWED); // percentage
ByteSizeValue floorSegment = indexSettings.getAsBytesSize("index.merge.policy.floor_segment", DEFAULT_FLOOR_SEGMENT);
int maxMergeAtOnce = indexSettings.getAsInt("index.merge.policy.max_merge_at_once", DEFAULT_MAX_MERGE_AT_ONCE);
int maxMergeAtOnceExplicit = indexSettings.getAsInt("index.merge.policy.max_merge_at_once_explicit", DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT);
// TODO is this really a good default number for max_merge_segment, what happens for large indices, won't they end up with many segments?
ByteSizeValue maxMergedSegment = indexSettings.getAsBytesSize("index.merge.policy.max_merged_segment", DEFAULT_MAX_MERGED_SEGMENT);
double segmentsPerTier = indexSettings.getAsDouble("index.merge.policy.segments_per_tier", DEFAULT_SEGMENTS_PER_TIER);
double reclaimDeletesWeight = indexSettings.getAsDouble("index.merge.policy.reclaim_deletes_weight", DEFAULT_RECLAIM_DELETES_WEIGHT);
this.mergesEnabled = indexSettings.getAsBoolean(INDEX_MERGE_ENABLED, true);
if (mergesEnabled == false) {
logger.warn("[{}] is set to false, this should only be used in tests and can cause serious problems in production environments", INDEX_MERGE_ENABLED);
}
// Keep max_merge_at_once consistent with segments_per_tier (it must not
// exceed it, or merging can thrash).
maxMergeAtOnce = adjustMaxMergeAtOnceIfNeeded(maxMergeAtOnce, segmentsPerTier);
mergePolicy.setNoCFSRatio(noCFSRatio);
mergePolicy.setForceMergeDeletesPctAllowed(forceMergeDeletesPctAllowed);
mergePolicy.setFloorSegmentMB(floorSegment.mbFrac());
mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce);
mergePolicy.setMaxMergeAtOnceExplicit(maxMergeAtOnceExplicit);
mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.mbFrac());
mergePolicy.setSegmentsPerTier(segmentsPerTier);
mergePolicy.setReclaimDeletesWeight(reclaimDeletesWeight);
logger.debug("using [tiered] merge mergePolicy with expunge_deletes_allowed[{}], floor_segment[{}], max_merge_at_once[{}], max_merge_at_once_explicit[{}], max_merged_segment[{}], segments_per_tier[{}], reclaim_deletes_weight[{}]",
forceMergeDeletesPctAllowed, floorSegment, maxMergeAtOnce, maxMergeAtOnceExplicit, maxMergedSegment, segmentsPerTier, reclaimDeletesWeight);
}
示例10: PatternTokenizerFactory
import org.elasticsearch.common.settings.Settings; //导入方法依赖的package包/类
/**
 * Factory for the pattern tokenizer.
 * The "pattern" setting defaults to {@code \W+} (non-word characters);
 * "flags" optionally supplies regex flags and "group" selects the capture
 * group to emit (-1 = split on the pattern).
 */
public PatternTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
    super(indexSettings, name, settings);
    // Because a non-null default is supplied, settings.get(...) can never
    // return null here; the original's null-check-and-throw branch was
    // unreachable dead code and has been removed.
    String sPattern = settings.get("pattern", "\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/);
    this.pattern = Regex.compile(sPattern, settings.get("flags"));
    this.group = settings.getAsInt("group", -1);
}
示例11: TruncateTokenFilterFactory
import org.elasticsearch.common.settings.Settings; //导入方法依赖的package包/类
/**
 * Factory for the truncate token filter.
 * Requires a positive "length" setting; the -1 default deliberately fails
 * validation so an absent setting is rejected.
 *
 * @throws IllegalArgumentException if "length" is absent or not &gt; 0
 */
public TruncateTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
    super(indexSettings, name, settings);
    this.length = settings.getAsInt("length", -1);
    if (length <= 0) {
        // Message corrected: the old text ("must be provided") was misleading
        // when the user explicitly configured a zero or negative length.
        throw new IllegalArgumentException("length parameter must be provided and greater than 0");
    }
}
示例12: onRefreshSettings
import org.elasticsearch.common.settings.Settings; //导入方法依赖的package包/类
/**
 * Dynamic-settings callback: picks up a changed cluster-wide
 * shards-per-node limit, preferring the refreshed value, then the node's
 * original setting, then {@code DEFAULT_SHARD_LIMIT}. Logs and applies the
 * new limit only when it actually changed.
 */
@Override
public void onRefreshSettings(Settings settings) {
    // Resolve as a primitive int: the fallback chain ends in a non-null
    // default so unboxing is safe, and an int local guarantees the
    // comparison below is by value (a boxed Integer compared with != can
    // silently compare references instead).
    int newClusterLimit = settings.getAsInt(CLUSTER_TOTAL_SHARDS_PER_NODE,
            ShardsLimitAllocationDecider.this.settings.getAsInt(CLUSTER_TOTAL_SHARDS_PER_NODE, DEFAULT_SHARD_LIMIT));
    if (newClusterLimit != clusterShardLimit) {
        logger.info("updating [{}] from [{}] to [{}]", CLUSTER_TOTAL_SHARDS_PER_NODE,
                ShardsLimitAllocationDecider.this.clusterShardLimit, newClusterLimit);
        ShardsLimitAllocationDecider.this.clusterShardLimit = newClusterLimit;
    }
}
示例13: UAX29URLEmailTokenizerFactory
import org.elasticsearch.common.settings.Settings; //导入方法依赖的package包/类
@Inject
public UAX29URLEmailTokenizerFactory(Index index, IndexSettingsService indexSettingsService, @Assisted String name, @Assisted Settings settings) {
super(index, indexSettingsService.getSettings(), name, settings);
maxTokenLength = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
}
示例14: StandardTokenizerFactory
import org.elasticsearch.common.settings.Settings; //导入方法依赖的package包/类
public StandardTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
maxTokenLength = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
}
示例15: ShardsLimitAllocationDecider
import org.elasticsearch.common.settings.Settings; //导入方法依赖的package包/类
@Inject
public ShardsLimitAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) {
super(settings);
this.clusterShardLimit = settings.getAsInt(CLUSTER_TOTAL_SHARDS_PER_NODE, DEFAULT_SHARD_LIMIT);
nodeSettingsService.addListener(new ApplySettings());
}