This article collects typical usage examples of the Java method org.apache.solr.response.SolrQueryResponse.add. If you are wondering what SolrQueryResponse.add does, how it is used, or where to find examples of it, the curated code samples below may help. You can also explore further usage examples of the enclosing class org.apache.solr.response.SolrQueryResponse.
A total of 15 code examples of SolrQueryResponse.add are shown below, sorted by popularity by default.
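Before the examples, a minimal sketch of the basic pattern may be useful. The class name, method name, and response keys below (ResponseAddSketch, fillResponse, "echoedQuery", "details") are illustrative assumptions and do not come from any of the projects quoted in the examples; the sketch only shows that each call to SolrQueryResponse.add(String, Object) appends a named section which Solr's response writers then serialize (JSON, XML, javabin, ...) for the client.

import org.apache.solr.common.util.NamedList;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.SolrQueryResponse;

// Illustrative sketch (not taken from the examples below): every rsp.add(name, value)
// call appends one named section to the response.
public class ResponseAddSketch {

    static void fillResponse(SolrQueryRequest req, SolrQueryResponse rsp) {
        // a simple scalar section, echoing a request parameter
        rsp.add("echoedQuery", req.getParams().get("q"));

        // a nested section built from a NamedList
        NamedList<Object> details = new NamedList<>();
        details.add("timestamp", System.currentTimeMillis());
        details.add("handler", "ResponseAddSketch");   // hypothetical label
        rsp.add("details", details);
    }
}

The examples that follow use the same pattern to attach matches, facet counts, error messages, and whole NamedList sub-trees to the response.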
Example 1: getMatchesFromQuery
import org.apache.solr.response.SolrQueryResponse; // import the package/class this method depends on
private RFResult getMatchesFromQuery(SolrQueryResponse rsp, SolrParams params, int flags, String q, Query query, Query userQuery, SortSpec sortSpec, List<Query> targetFqFilters, List<Query> rfFqFilters, SolrIndexSearcher searcher, RFHelper rfHelper, int start, int rows) throws IOException, SyntaxError {
    boolean includeMatch = params.getBool(RFParams.MATCH_INCLUDE, true);
    int matchOffset = params.getInt(RFParams.MATCH_OFFSET, 0);

    // Find the base match
    DocList match = searcher.getDocList(query, targetFqFilters, null, matchOffset, 10000, flags); // only get the first one...
    if (match.matches() == 0 && userQuery == null) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                String.format("RelevancyFeedback was unable to find any documents matching the query: '%s'.", q));
    }
    if (includeMatch) {
        rsp.add("match", match);
    }

    // This is an iterator, but we only handle the first match
    DocIterator iterator = match.iterator();
    if (iterator.hasNext() || userQuery != null) {
        // do a RelevancyFeedback query for each document in results
        return rfHelper.getMatchesFromDocs(iterator, start, rows, rfFqFilters, flags, sortSpec.getSort(), userQuery);
    }
    return null;
}
Example 2: handleRandomSearch
import org.apache.solr.response.SolrQueryResponse; // import the package/class this method depends on
/**
 * Returns a random set of documents from the index. Mainly for testing purposes.
 *
 * @param req
 * @param rsp
 * @throws IOException
 */
private void handleRandomSearch(SolrQueryRequest req, SolrQueryResponse rsp) throws IOException {
    SolrIndexSearcher searcher = req.getSearcher();
    Query query = new MatchAllDocsQuery();
    DocList docList = searcher.getDocList(query, getFilterQueries(req), Sort.RELEVANCE, 0, numberOfCandidateResults, 0);
    int paramRows = Math.min(req.getParams().getInt("rows", defaultNumberOfResults), docList.size());
    if (docList.size() < 1) {
        rsp.add("Error", "No documents in index");
    } else {
        LinkedList list = new LinkedList();
        while (list.size() < paramRows) {
            DocList auxList = docList.subset((int) (Math.random() * docList.size()), 1);
            Document doc = null;
            for (DocIterator it = auxList.iterator(); it.hasNext(); ) {
                doc = searcher.doc(it.nextDoc());
            }
            if (!list.contains(doc)) {
                list.add(doc);
            }
        }
        rsp.addResponse(list);
    }
}
Example 3: doCompact
import org.apache.solr.response.SolrQueryResponse; // import the package/class this method depends on
private void doCompact(LazyDocumentLoader documentLoader,
                       LazyCompactor compactor,
                       SolrQueryResponse rsp,
                       String tsId,
                       String q) throws IOException, SyntaxError {
    Query query = depProvider.parser(q).getQuery();
    Iterable<Document> docs = documentLoader.load(query, SORT);
    Iterable<CompactionResult> compactionResults = compactor.compact(docs);

    List<Document> docsToDelete = new LinkedList<>();
    List<SolrInputDocument> docsToAdd = new LinkedList<>();

    compactionResults.forEach(it -> {
        docsToDelete.addAll(it.getInputDocuments());
        docsToAdd.addAll(it.getOutputDocuments());
    });

    depProvider.solrUpdateService().add(docsToAdd);
    depProvider.solrUpdateService().delete(docsToDelete);

    rsp.add("timeseries " + tsId + " oldNumDocs:", docsToDelete.size());
    rsp.add("timeseries " + tsId + " newNumDocs:", docsToAdd.size());
}
Example 4: olderDocumentsExists
import org.apache.solr.response.SolrQueryResponse; // import the package/class this method depends on
/**
 * Searches the index to check whether older documents exist and updates the Solr query response.
 *
 * @param req - the solr query request information
 * @param rsp - the solr query response information
 * @return true if the hit count is greater than zero, otherwise false
 * @throws SyntaxError, IOException if bad things happen
 */
private boolean olderDocumentsExists(String queryString, SolrQueryRequest req, SolrQueryResponse rsp) throws SyntaxError, IOException {
    String defType = req.getParams().get(QueryParsing.DEFTYPE, QParserPlugin.DEFAULT_QTYPE);

    QParser queryParser = QParser.getParser(queryString, defType, req);
    Query query = queryParser.getQuery();

    TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
    req.getSearcher().search(query, totalHitCountCollector);

    rsp.add("query", String.format("%s:[* TO NOW-%s]", queryField, timeSeriesAge));
    rsp.add("queryTechnical", queryString);
    rsp.add("removedDocuments", totalHitCountCollector.getTotalHits());

    return totalHitCountCollector.getTotalHits() != 0;
}
Example 5: doGet
import org.apache.solr.response.SolrQueryResponse; // import the package/class this method depends on
@Override
public void doGet(BaseSolrResource endpoint, String childId) {
    SolrQueryResponse response = endpoint.getSolrResponse();
    if (childId != null) {
        boolean ignoreCase = getIgnoreCase();
        String key = applyCaseSetting(ignoreCase, childId);

        // if ignoreCase==true, then we get the mappings using the lower-cased key
        // and then return a union of all case-sensitive keys, if false, then
        // we only return the mappings for the exact case requested
        CasePreservedSynonymMappings cpsm = synonymMappings.get(key);
        Set<String> mappings = (cpsm != null) ? cpsm.getMappings(ignoreCase, childId) : null;
        if (mappings == null)
            throw new SolrException(ErrorCode.NOT_FOUND,
                    String.format(Locale.ROOT, "%s not found in %s", childId, getResourceId()));

        response.add(childId, mappings);
    } else {
        response.add(SYNONYM_MAPPINGS, buildMapToStore(getStoredView()));
    }
}
Example 6: getMoreLikeTheseFromQuery
import org.apache.solr.response.SolrQueryResponse; // import the package/class this method depends on
private MLTResult getMoreLikeTheseFromQuery(SolrQueryResponse rsp, SolrParams params, int flags, String q, Query query, SortSpec sortSpec, List<Query> targetFqFilters, List<Query> mltFqFilters, SolrIndexSearcher searcher, MoreLikeThisHelper mlt, int start, int rows) throws IOException, SyntaxError {
    boolean includeMatch = params.getBool(MoreLikeThisParams.MATCH_INCLUDE, true);
    int matchOffset = params.getInt(MoreLikeThisParams.MATCH_OFFSET, 0);

    // Find the base match
    DocList match = searcher.getDocList(query, targetFqFilters, null, matchOffset, 10000, flags); // only get the first one...
    if (match.matches() == 0) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                String.format("MoreLikeThis was unable to find any documents matching the query: '%s'.", q));
    }
    if (includeMatch) {
        rsp.add("match", match);
    }

    // This is an iterator, but we only handle the first match
    DocIterator iterator = match.iterator();
    if (iterator.hasNext()) {
        // do a MoreLikeThis query for each document in results
        return mlt.getMoreLikeTheseFromDocs(iterator, start, rows, mltFqFilters, flags, sortSpec.getSort());
    }
    return null;
}
Example 7: expandQueryAndReExecute
import org.apache.solr.response.SolrQueryResponse; // import the package/class this method depends on
private MLTResult expandQueryAndReExecute(SolrQueryResponse rsp, SolrParams params, int maxDocumentsToMatch, int flags, String q, Query seedQuery, SortSpec sortSpec, List<Query> targetFqFilters, List<Query> mltFqFilters, SolrIndexSearcher searcher, UnsupervisedFeedbackHelper uff, int start, int rows) throws IOException, SyntaxError {
    boolean includeMatch = params.getBool(UnsupervisedFeedbackParams.MATCH_INCLUDE, true);
    int matchOffset = params.getInt(UnsupervisedFeedbackParams.MATCH_OFFSET, 0);

    // Find the base match
    DocList match = searcher.getDocList(seedQuery, targetFqFilters, null, matchOffset, maxDocumentsToMatch, flags); // only get the first one...
    if (match.matches() == 0) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                String.format("Unsupervised feedback handler was unable to find any documents matching the seed query: '%s'.", q));
    }
    if (includeMatch) {
        rsp.add("match", match);
    }

    // This is an iterator, but we only handle the first match
    DocIterator iterator = match.iterator();
    MLTResult mltResult = null;
    if (iterator.hasNext()) {
        // do a MoreLikeThis query for each document in results
        mltResult = uff.expandQueryAndReExecute(iterator, seedQuery, start, rows, mltFqFilters, flags, sortSpec.getSort());
    }
    return mltResult;
}
Example 8: addFacet
import org.apache.solr.response.SolrQueryResponse; // import the package/class this method depends on
private void addFacet(SolrQueryRequest req, SolrQueryResponse rsp, SolrParams params, DocListAndSet rfDocs) {
    if (rfDocs.docSet == null) {
        rsp.add("facet_counts", null);
    } else {
        FacetComponent fct = new FacetComponent();
        rsp.add("facet_counts", fct.getFacetCounts(new SimpleFacets(req, rfDocs.docSet, params)));
    }
}
Example 9: handleRequestBody
import org.apache.solr.response.SolrQueryResponse; // import the package/class this method depends on
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
    rsp.setHttpCaching(false);
    final SolrParams solrParams = req.getParams();

    String dicPath = solrParams.get("dicPath");
    Dictionary dict = Utils.getDict(dicPath, loader);

    NamedList<Object> result = new NamedList<Object>();
    result.add("dicPath", dict.getDicPath().toURI());

    boolean check = solrParams.getBool("check", false);   // only checks whether the dictionary file has changed
    // tries to reload the dictionary; when this parameter is given, the check parameter can be omitted
    boolean reload = solrParams.getBool("reload", false);
    check |= reload;

    boolean changed = false;
    boolean reloaded = false;
    if (check) {
        changed = dict.wordsFileIsChange();
        result.add("changed", changed);
    }
    if (changed && reload) {
        reloaded = dict.reload();
        result.add("reloaded", reloaded);
    }

    rsp.add("result", result);
}
Example 10: addComponentResponsesToResult
import org.apache.solr.response.SolrQueryResponse; // import the package/class this method depends on
public static void addComponentResponsesToResult(SolrQueryResponse rsp, SolrQueryResponse newRsp, String prefix) {
    // copy the standard component sections of the inner response into the outer response under a prefix
    rsp.add(prefix + "_response", newRsp.getValues().get("response"));
    rsp.add(prefix + "_grouped", newRsp.getValues().get("grouped"));
    rsp.add(prefix + "_facet_counts", newRsp.getValues().get("facet_counts"));
    rsp.add(prefix + "_terms", newRsp.getValues().get("terms"));
    rsp.add(prefix + "_termVectors", newRsp.getValues().get("termVectors"));
    rsp.add(prefix + "_highlighting", newRsp.getValues().get("highlighting"));
    rsp.add(prefix + "_stats", newRsp.getValues().get("stats"));
}
Example 11: handleRequestBody
import org.apache.solr.response.SolrQueryResponse; // import the package/class this method depends on
@Override
public void handleRequestBody(SolrQueryRequest solrReq, SolrQueryResponse solrRsp)
        throws Exception {
    KnowledgeGraphRequest request = parsePost(solrReq);
    new RequestValidator(solrReq, request).validate();

    ParameterSet parameterSet = new ParameterSet(solrReq.getParams(), defaults, invariants);
    NodeContext context = new NodeContext(request, solrReq, parameterSet);
    RequestTreeRecurser recurser = new RequestTreeRecurser(context);

    KnowledgeGraphResponse response = new KnowledgeGraphResponse();
    response.data = recurser.score();

    solrRsp.add("relatednessResponse", response);
}
Example 12: parseResponse
import org.apache.solr.response.SolrQueryResponse; // import the package/class this method depends on
@Test
public void parseResponse() {
    SolrQueryResponse resp = new SolrQueryResponse();
    SimpleOrderedMap<Object> root = new SimpleOrderedMap<>();
    SimpleOrderedMap<Object> queryFacet = new SimpleOrderedMap<>();
    SimpleOrderedMap<Object> fieldFacet = new SimpleOrderedMap<>();
    LinkedList<Object> buckets = new LinkedList<>();
    SimpleOrderedMap<Object> bucket1 = new SimpleOrderedMap<>();
    SimpleOrderedMap<Object> bucket2 = new SimpleOrderedMap<>();

    bucket1.add("val", "testValue1");
    bucket1.add("count", 1234);
    bucket2.add("val", "testValue2");
    bucket2.add("count", 4321);
    buckets.add(bucket1);
    buckets.add(bucket2);
    fieldFacet.add("buckets", buckets);
    queryFacet.add(AggregationWaitable.FIELD_FACET_NAME, fieldFacet);
    root.add(AggregationWaitable.QUERY_FACET_NAME, queryFacet);
    resp.add("facets", root);

    LinkedList<ResponseValue> expected = new LinkedList<>();
    expected.add(new ResponseValue("testValue1", 1234));
    expected.add(new ResponseValue("testValue2", 4321));

    AggregationWaitable target = new AggregationWaitable(context, adapter, "query", "testfield", 0, 0);
    Deencapsulation.invoke(target, "parseResponse", resp);

    Assert.assertEquals(expected.size(), target.buckets.size());
    for (int i = 0; i < target.buckets.size(); ++i) {
        Assert.assertEquals(buckets.get(i), target.buckets.get(i));
    }
}
Example 13: handleRequestBody
import org.apache.solr.response.SolrQueryResponse; // import the package/class this method depends on
@Override
@SuppressWarnings("PMD.SignatureDeclareThrowsException")
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
    String joinKey = req.getParams().get(JOIN_KEY);
    String fq = req.getParams().get(FQ);
    int ppc = req.getParams().getInt(POINTS_PER_CHUNK, 10000);
    int pageSize = req.getParams().getInt(PAGE_SIZE, 100);

    depProvider.init(req, rsp);

    LazyCompactor compactor = depProvider.compactor(ppc, req.getSearcher().getSchema());
    LazyDocumentLoader documentLoader = depProvider.documentLoader(pageSize, req.getSearcher());

    if (isBlank(joinKey) && isBlank(fq)) {
        LOGGER.error("Neither join key nor filter query given.");
        rsp.add("error",
                join("", "Neither join key nor filter query given.",
                        "Get help at https://chronix.gitbooks.io/chronix/content/document_compaction.html."));
        return;
    }

    //no join key => compact documents matching fq
    if (isBlank(joinKey)) {
        compact(documentLoader, compactor, rsp, fq, fq);
        depProvider.solrUpdateService().commit();
        return;
    }

    //determine time series identified by joinKey
    SolrFacetService facetService = depProvider.solrFacetService();
    Query filterQuery = isBlank(fq) ? new MatchAllDocsQuery() : depProvider.parser(fq).getQuery();
    List<NamedList<Object>> pivotResult = facetService.pivot(joinKey, filterQuery);

    //compact each time series' constituting documents
    facetService.toTimeSeriesIds(pivotResult)
            .parallelStream()
            .forEach(tsId -> compact(documentLoader, compactor, rsp, tsId.toString(), and(tsId.toQuery(), fq)));

    depProvider.solrUpdateService().commit();
}
Example 14: testZeroTries
import org.apache.solr.response.SolrQueryResponse; // import the package/class this method depends on
@Test
public void testZeroTries() throws Exception {
    SolrCore core = h.getCore();
    SearchComponent speller = core.getSearchComponent("spellcheck");
    assertTrue("speller is null and it shouldn't be", speller != null);

    ModifiableSolrParams params = new ModifiableSolrParams();
    params.add(SpellCheckComponent.COMPONENT_NAME, "true");
    params.add(SpellCheckComponent.SPELLCHECK_BUILD, "true");
    params.add(SpellCheckComponent.SPELLCHECK_COUNT, "10");
    params.add(SpellCheckComponent.SPELLCHECK_COLLATE, "true");
    params.add(SpellCheckComponent.SPELLCHECK_MAX_COLLATION_TRIES, "0");
    params.add(SpellCheckComponent.SPELLCHECK_MAX_COLLATIONS, "2");
    params.add(CommonParams.Q, "lowerfilt:(+fauth)");

    SolrRequestHandler handler = core.getRequestHandler("spellCheckCompRH");
    SolrQueryResponse rsp = new SolrQueryResponse();
    rsp.add("responseHeader", new SimpleOrderedMap());
    SolrQueryRequest req = new LocalSolrQueryRequest(core, params);
    handler.handleRequest(req, rsp);
    req.close();

    NamedList values = rsp.getValues();
    NamedList spellCheck = (NamedList) values.get("spellcheck");
    NamedList suggestions = (NamedList) spellCheck.get("suggestions");
    List<String> collations = suggestions.getAll("collation");
    assertTrue(collations.size() == 2);
}
Example 15: handleRequestBody
import org.apache.solr.response.SolrQueryResponse; // import the package/class this method depends on
@SuppressWarnings("PMD.SignatureDeclareThrowsException")
@Override
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
    super.handleRequestBody(req, rsp);
    // Add empty 'errors' field, otherwise the KairosDB client crashes.
    rsp.add("errors", new String[0]);
}