This article collects and summarizes typical usage examples of the Java class org.apache.lucene.search.Explanation. If you are unsure how the Explanation class is used in practice, or are looking for concrete examples of it, the curated code samples below may help.
The Explanation class belongs to the org.apache.lucene.search package. A total of 15 code examples of the Explanation class are shown below, sorted by popularity by default.
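Before the examples, here is a minimal, self-contained sketch of how an Explanation is typically obtained from an IndexSearcher and inspected. It assumes a Lucene 6.x-era API (consistent with the snippets below), where RAMDirectory is still available and Explanation.getValue() returns a float; the indexed text, the field name "body", and the class name ExplanationDemo are illustrative only.
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.RAMDirectory;

public class ExplanationDemo {
    public static void main(String[] args) throws Exception {
        RAMDirectory dir = new RAMDirectory();
        // Index a single document so there is something to explain.
        try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            Document doc = new Document();
            doc.add(new TextField("body", "lucene explanation example", Field.Store.NO));
            writer.addDocument(doc);
        }
        try (DirectoryReader reader = DirectoryReader.open(dir)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            // Ask the searcher why document 0 got its score for this query.
            Explanation explanation = searcher.explain(new TermQuery(new Term("body", "lucene")), 0);
            System.out.println(explanation.isMatch());        // did the document match?
            System.out.println(explanation.getValue());       // the score contribution
            System.out.println(explanation.getDescription()); // human-readable description
            // Explanations form a tree: each node may carry nested details.
            for (Explanation detail : explanation.getDetails()) {
                System.out.println("  " + detail);
            }
        }
    }
}
Note that most of the examples below build explanations with the immutable factory methods Explanation.match(...) and Explanation.noMatch(...) introduced in Lucene 5, while Example 6 uses the older mutable API (new Explanation(), setValue, addDetail) from earlier Lucene versions.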
Example 1: duelRun
import org.apache.lucene.search.Explanation; // import the required package/class
private void duelRun(PercolateQuery.QueryStore queryStore, MemoryIndex memoryIndex, IndexSearcher shardSearcher) throws IOException {
    boolean requireScore = randomBoolean();
    IndexSearcher percolateSearcher = memoryIndex.createSearcher();
    Query percolateQuery = fieldType.percolateQuery("type", queryStore, new BytesArray("{}"), percolateSearcher);
    Query query = requireScore ? percolateQuery : new ConstantScoreQuery(percolateQuery);
    TopDocs topDocs = shardSearcher.search(query, 10);
    Query controlQuery = new ControlQuery(memoryIndex, queryStore);
    controlQuery = requireScore ? controlQuery : new ConstantScoreQuery(controlQuery);
    TopDocs controlTopDocs = shardSearcher.search(controlQuery, 10);
    assertThat(topDocs.totalHits, equalTo(controlTopDocs.totalHits));
    assertThat(topDocs.scoreDocs.length, equalTo(controlTopDocs.scoreDocs.length));
    for (int j = 0; j < topDocs.scoreDocs.length; j++) {
        assertThat(topDocs.scoreDocs[j].doc, equalTo(controlTopDocs.scoreDocs[j].doc));
        assertThat(topDocs.scoreDocs[j].score, equalTo(controlTopDocs.scoreDocs[j].score));
        if (requireScore) {
            Explanation explain1 = shardSearcher.explain(query, topDocs.scoreDocs[j].doc);
            Explanation explain2 = shardSearcher.explain(controlQuery, controlTopDocs.scoreDocs[j].doc);
            assertThat(explain1.isMatch(), equalTo(explain2.isMatch()));
            assertThat(explain1.getValue(), equalTo(explain2.getValue()));
        }
    }
}
Example 2: hitExecute
import org.apache.lucene.search.Explanation; // import the required package/class
@Override
public void hitExecute(SearchContext context, HitContext hitContext) {
    if (context.explain() == false) {
        return;
    }
    try {
        final int topLevelDocId = hitContext.hit().docId();
        Explanation explanation = context.searcher().explain(context.query(), topLevelDocId);
        for (RescoreSearchContext rescore : context.rescore()) {
            explanation = rescore.rescorer().explain(topLevelDocId, context, rescore, explanation);
        }
        // we use the top level doc id, since we work with the top level searcher
        hitContext.hit().explanation(explanation);
    } catch (IOException e) {
        throw new FetchPhaseExecutionException(context, "Failed to explain doc [" + hitContext.hit().getType() + "#"
                + hitContext.hit().getId() + "]", e);
    } finally {
        context.clearReleasables(SearchContext.Lifetime.COLLECTION);
    }
}
Example 3: explain
import org.apache.lucene.search.Explanation; // import the required package/class
@Override
public Explanation explain(LeafReaderContext context, int doc,
        float finalScore, List<Explanation> featureExplanations) {
    String modelDescription = "";
    for (int layer = 0; layer < weightMatrices.size(); layer++) {
        float[][] weightMatrix = weightMatrices.get(layer);
        int numRows = weightMatrix.length;
        int numCols = weightMatrix[0].length;
        if (layer == 0) {
            modelDescription += String.format("Input has %1$d features.", numCols - 1);
        } else {
            modelDescription += String.format("%nHidden layer #%1$d has %2$d units.", layer, numCols);
        }
    }
    return Explanation.match(finalScore, modelDescription);
}
Example 4: assertAllSearchWorks
import org.apache.lucene.search.Explanation; // import the required package/class
void assertAllSearchWorks(String indexName) {
    logger.info("--> testing _all search");
    SearchResponse searchRsp = client().prepareSearch(indexName).get();
    ElasticsearchAssertions.assertNoFailures(searchRsp);
    assertThat(searchRsp.getHits().getTotalHits(), greaterThanOrEqualTo(1L));
    SearchHit bestHit = searchRsp.getHits().getAt(0);
    // Make sure there are payloads and they are taken into account for the score
    // the 'string' field has a boost of 4 in the mappings so it should get a payload boost
    String stringValue = (String) bestHit.getSourceAsMap().get("string");
    assertNotNull(stringValue);
    Explanation explanation = client().prepareExplain(indexName, bestHit.getType(), bestHit.getId())
            .setQuery(QueryBuilders.matchQuery("_all", stringValue)).get().getExplanation();
    assertTrue("Could not find payload boost in explanation\n" + explanation, findPayloadBoostInExplanation(explanation));
    // Make sure the query can run on the whole index
    searchRsp = client().prepareSearch(indexName).setQuery(QueryBuilders.matchQuery("_all", stringValue)).setExplain(true).get();
    ElasticsearchAssertions.assertNoFailures(searchRsp);
    assertThat(searchRsp.getHits().getTotalHits(), greaterThanOrEqualTo(1L));
}
Example 5: explainTFNorm
import org.apache.lucene.search.Explanation; // import the required package/class
private Explanation explainTFNorm(int doc, Explanation freq, BM25StatsFixed stats, NumericDocValues norms) {
    List<Explanation> subs = new ArrayList<>();
    subs.add(freq);
    subs.add(Explanation.match(k1, "parameter k1"));
    if (norms == null) {
        subs.add(Explanation.match(0, "parameter b (norms omitted for field)"));
        return Explanation.match(
                (freq.getValue() * (k1 + 1)) / (freq.getValue() + k1),
                "tfNorm, computed from:", subs);
    } else {
        float doclen = norms.get(doc);
        subs.add(Explanation.match(b, "parameter b"));
        subs.add(Explanation.match(stats.avgdl, "avgFieldLength"));
        subs.add(Explanation.match(doclen, "fieldLength"));
        return Explanation.match(
                (freq.getValue() * (k1 + 1)) / (freq.getValue() + k1 * (1 - b + b * doclen/stats.avgdl)),
                "tfNorm, computed from:", subs);
    }
}
Author: sebastian-hofstaetter, Project: ir-generalized-translation-models, Lines: 20, Source: BM25SimilarityLossless.java
Example 6: explain
import org.apache.lucene.search.Explanation; // import the required package/class
@Override
protected void explain(Explanation expl, BasicStats stats, int doc,
        float freq, float docLen) {
    if (stats.getTotalBoost() != 1.0f) {
        expl.addDetail(new Explanation(stats.getTotalBoost(), "boost"));
    }
    expl.addDetail(new Explanation(mu, "mu"));
    Explanation weightExpl = new Explanation();
    weightExpl.setValue((float)Math.log(1 + freq /
            (mu * ((LMStats)stats).getCollectionProbability())));
    weightExpl.setDescription("term weight");
    expl.addDetail(weightExpl);
    expl.addDetail(new Explanation(
            (float)Math.log(mu / (docLen + mu)), "document norm"));
    super.explain(expl, stats, doc, freq, docLen);
}
Example 7: hitExecute
import org.apache.lucene.search.Explanation; // import the required package/class
@Override
public void hitExecute(SearchContext context, HitContext hitContext) {
    try {
        final int topLevelDocId = hitContext.hit().docId();
        Explanation explanation = context.searcher().explain(context.query(), topLevelDocId);
        for (RescoreSearchContext rescore : context.rescore()) {
            explanation = rescore.rescorer().explain(topLevelDocId, context, rescore, explanation);
        }
        // we use the top level doc id, since we work with the top level searcher
        hitContext.hit().explanation(explanation);
    } catch (IOException e) {
        throw new FetchPhaseExecutionException(context, "Failed to explain doc [" + hitContext.hit().type() + "#" + hitContext.hit().id() + "]", e);
    } finally {
        context.clearReleasables(SearchContext.Lifetime.COLLECTION);
    }
}
Example 8: explain
import org.apache.lucene.search.Explanation; // import the required package/class
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
    Explanation subQueryExpl = subQueryWeight.explain(context, doc);
    if (!subQueryExpl.isMatch()) {
        return subQueryExpl;
    }
    Explanation expl;
    if (function != null) {
        Explanation functionExplanation = function.getLeafScoreFunction(context).explainScore(doc, subQueryExpl);
        expl = combineFunction.explain(subQueryExpl, functionExplanation, maxBoost);
    } else {
        expl = subQueryExpl;
    }
    if (minScore != null && minScore > expl.getValue()) {
        expl = Explanation.noMatch("Score value is too low, expected at least " + minScore + " but got " + expl.getValue(), expl);
    }
    return expl;
}
Example 9: explain
import org.apache.lucene.search.Explanation; // import the required package/class
@Override
public Explanation explain(Query query, int doc) throws IOException {
    if (aggregatedDfs != null) {
        // dfs data is needed to explain the score
        return super.explain(createNormalizedWeight(query, true), doc);
    }
    return in.explain(query, doc);
}
Example 10: explain
import org.apache.lucene.search.Explanation; // import the required package/class
@Override
public Explanation explain(int topLevelDocId, SearchContext context, RescoreSearchContext rescoreContext,
        Explanation sourceExplanation) throws IOException {
    QueryRescoreContext rescore = (QueryRescoreContext) rescoreContext;
    ContextIndexSearcher searcher = context.searcher();
    if (sourceExplanation == null) {
        // this should not happen but just in case
        return Explanation.noMatch("nothing matched");
    }
    // TODO: this isn't right? I.e., we are incorrectly pretending all first pass hits were rescored? If the requested docID was
    // beyond the top rescoreContext.window() in the first pass hits, we don't rescore it now?
    Explanation rescoreExplain = searcher.explain(rescore.query(), topLevelDocId);
    float primaryWeight = rescore.queryWeight();
    Explanation prim;
    if (sourceExplanation.isMatch()) {
        prim = Explanation.match(
                sourceExplanation.getValue() * primaryWeight,
                "product of:", sourceExplanation, Explanation.match(primaryWeight, "primaryWeight"));
    } else {
        prim = Explanation.noMatch("First pass did not match", sourceExplanation);
    }
    // NOTE: we don't use Lucene's Rescorer.explain because we want to insert our own description with which ScoreMode was used. Maybe
    // we should add QueryRescorer.explainCombine to Lucene?
    if (rescoreExplain != null && rescoreExplain.isMatch()) {
        float secondaryWeight = rescore.rescoreQueryWeight();
        Explanation sec = Explanation.match(
                rescoreExplain.getValue() * secondaryWeight,
                "product of:",
                rescoreExplain, Explanation.match(secondaryWeight, "secondaryWeight"));
        QueryRescoreMode scoreMode = rescore.scoreMode();
        return Explanation.match(
                scoreMode.combine(prim.getValue(), sec.getValue()),
                scoreMode + " of:",
                prim, sec);
    } else {
        return prim;
    }
}
Example 11: parseExplanation
import org.apache.lucene.search.Explanation; // import the required package/class
private static Explanation parseExplanation(XContentParser parser) throws IOException {
    ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation);
    XContentParser.Token token;
    Float value = null;
    String description = null;
    List<Explanation> details = new ArrayList<>();
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, () -> parser.getTokenLocation());
        String currentFieldName = parser.currentName();
        token = parser.nextToken();
        if (Fields.VALUE.equals(currentFieldName)) {
            value = parser.floatValue();
        } else if (Fields.DESCRIPTION.equals(currentFieldName)) {
            description = parser.textOrNull();
        } else if (Fields.DETAILS.equals(currentFieldName)) {
            ensureExpectedToken(XContentParser.Token.START_ARRAY, token, () -> parser.getTokenLocation());
            while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                details.add(parseExplanation(parser));
            }
        } else {
            throwUnknownField(currentFieldName, parser.getTokenLocation());
        }
    }
    if (value == null) {
        throw new ParsingException(parser.getTokenLocation(), "missing explanation value");
    }
    if (description == null) {
        throw new ParsingException(parser.getTokenLocation(), "missing explanation description");
    }
    return Explanation.match(value, description, details);
}
Example 12: buildExplanation
import org.apache.lucene.search.Explanation; // import the required package/class
private void buildExplanation(XContentBuilder builder, Explanation explanation) throws IOException {
    builder.startObject();
    builder.field(Fields.VALUE, explanation.getValue());
    builder.field(Fields.DESCRIPTION, explanation.getDescription());
    Explanation[] innerExps = explanation.getDetails();
    if (innerExps != null) {
        builder.startArray(Fields.DETAILS);
        for (Explanation exp : innerExps) {
            buildExplanation(builder, exp);
        }
        builder.endArray();
    }
    builder.endObject();
}
Example 13: readExplanation
import org.apache.lucene.search.Explanation; // import the required package/class
public static Explanation readExplanation(StreamInput in) throws IOException {
    boolean match = in.readBoolean();
    String description = in.readString();
    final Explanation[] subExplanations = new Explanation[in.readVInt()];
    for (int i = 0; i < subExplanations.length; ++i) {
        subExplanations[i] = readExplanation(in);
    }
    if (match) {
        return Explanation.match(in.readFloat(), description, subExplanations);
    } else {
        return Explanation.noMatch(description, subExplanations);
    }
}
Example 14: writeExplanation
import org.apache.lucene.search.Explanation; // import the required package/class
public static void writeExplanation(StreamOutput out, Explanation explanation) throws IOException {
    out.writeBoolean(explanation.isMatch());
    out.writeString(explanation.getDescription());
    Explanation[] subExplanations = explanation.getDetails();
    out.writeVInt(subExplanations.length);
    for (Explanation subExp : subExplanations) {
        writeExplanation(out, subExp);
    }
    if (explanation.isMatch()) {
        out.writeFloat(explanation.getValue());
    }
}
Example 15: shardOperation
import org.apache.lucene.search.Explanation; // import the required package/class
@Override
protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId) throws IOException {
    ShardSearchLocalRequest shardSearchLocalRequest = new ShardSearchLocalRequest(shardId,
            new String[]{request.type()}, request.nowInMillis, request.filteringAlias());
    SearchContext context = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT, null);
    Term uidTerm = new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(request.type(), request.id()));
    Engine.GetResult result = null;
    try {
        result = context.indexShard().get(new Engine.Get(false, uidTerm));
        if (!result.exists()) {
            return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false);
        }
        context.parsedQuery(context.getQueryShardContext().toQuery(request.query()));
        context.preProcess(true);
        int topLevelDocId = result.docIdAndVersion().docId + result.docIdAndVersion().context.docBase;
        Explanation explanation = context.searcher().explain(context.query(), topLevelDocId);
        for (RescoreSearchContext ctx : context.rescore()) {
            Rescorer rescorer = ctx.rescorer();
            explanation = rescorer.explain(topLevelDocId, context, ctx, explanation);
        }
        if (request.storedFields() != null || (request.fetchSourceContext() != null && request.fetchSourceContext().fetchSource())) {
            // Advantage is that we're not opening a second searcher to retrieve the _source. Also
            // because we are working in the same searcher in engineGetResult we can be sure that a
            // doc isn't deleted between the initial get and this call.
            GetResult getResult = context.indexShard().getService().get(result, request.id(), request.type(), request.storedFields(), request.fetchSourceContext());
            return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation, getResult);
        } else {
            return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation);
        }
    } catch (IOException e) {
        throw new ElasticsearchException("Could not explain", e);
    } finally {
        Releasables.close(result, context);
    }
}