本文整理汇总了Java中org.apache.lucene.search.Hits.doc方法的典型用法代码示例。如果您正苦于以下问题:Java Hits.doc方法的具体用法?Java Hits.doc怎么用?Java Hits.doc使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.lucene.search.Hits的用法示例。
在下文中一共展示了Hits.doc方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: addField
import org.apache.lucene.search.Hits; //导入方法依赖的package包/类
/**
 * Adds a stored, untokenized field with the given prefix/value to the record
 * identified by {@code recID}.
 *
 * @param recID  unique record id (matched against the "ID" term)
 * @param prefix name of the field to add
 * @param value  value of the field to add
 * @return true on success; false when the record is missing/ambiguous or on I/O error
 */
public boolean addField(String recID, String prefix, String value){
    Searcher searcher = null;
    try {
        searcher = new IndexSearcher(indexPath);
        Query q = new TermQuery(new Term("ID", recID));
        Hits hits = searcher.search(q);
        // exactly one matching record is required
        if ((hits == null) || (hits.length() != 1)) {
            return false;
        }
        Document doc = hits.doc(0);
        IndexWriter iw = getIndexWriter();
        Field f = new Field(prefix, value, Field.Store.YES, Field.Index.UN_TOKENIZED, Field.TermVector.NO);
        doc.add(f);
        iw.updateDocument(new Term("ID", recID), doc);
        // BUG FIX: the writer was never closed here (the identical sibling addField
        // does close it), leaving changes unflushed and the index write lock held.
        iw.close();
    } catch (IOException ex) {
        log.fatal(ex);
        return false;
    } finally {
        // BUG FIX: the searcher was leaked on every call; close it best-effort.
        if (searcher != null) {
            try {
                searcher.close();
            } catch (IOException ignored) {
                // closing is best effort; the operation itself already succeeded/failed
            }
        }
    }
    return true;
}
示例2: addField
import org.apache.lucene.search.Hits; //导入方法依赖的package包/类
/**
 * Adds a stored, untokenized field with the given prefix/value to the record
 * identified by {@code recID}.
 *
 * @param recID  unique record id (matched against the "ID" term)
 * @param prefix name of the field to add
 * @param value  value of the field to add
 * @return true on success; false when the record is missing/ambiguous or on I/O error
 */
public boolean addField(String recID, String prefix, String value){
    Searcher searcher = null;
    try {
        searcher = new IndexSearcher(indexPath);
        Query q = new TermQuery(new Term("ID", recID));
        Hits hits = searcher.search(q);
        // exactly one matching record is required
        if ((hits == null) || (hits.length() != 1)) {
            return false;
        }
        Document doc = hits.doc(0);
        IndexWriter iw = getIndexWriter();
        Field f = new Field(prefix, value, Field.Store.YES, Field.Index.UN_TOKENIZED, Field.TermVector.NO);
        doc.add(f);
        iw.updateDocument(new Term("ID", recID), doc);
        iw.close();
    } catch (IOException ex) {
        log.fatal(ex);
        return false;
    } finally {
        // BUG FIX: the searcher was never closed (leaked on every call).
        if (searcher != null) {
            try {
                searcher.close();
            } catch (IOException ignored) {
                // closing is best effort
            }
        }
    }
    return true;
}
示例3: deleteField
import org.apache.lucene.search.Hits; //导入方法依赖的package包/类
/**
 * Removes one field occurrence (matching {@code prefix} with exactly
 * {@code value}) from the record identified by {@code recID}; all other
 * occurrences of the field are re-added and the document is rewritten.
 *
 * @param recID  unique record id (matched against the "ID" term)
 * @param prefix name of the field to delete from
 * @param value  exact field value to remove
 */
public void deleteField(String recID, String prefix, String value){
    Searcher searcher = null;
    try {
        searcher = new IndexSearcher(indexPath);
        Query q = new TermQuery(new Term("ID", recID));
        Hits hits = searcher.search(q);
        // exactly one matching record is required
        if ((hits == null) || (hits.length() != 1)) {
            log.fatal("greska pri brisanju polja. Zapis: " + recID);
            return;
        }
        Document doc = hits.doc(0);
        Field[] fields = doc.getFields(prefix);
        IndexWriter iw = getIndexWriter();
        // drop all occurrences, then re-add every one except the targeted value
        doc.removeFields(prefix);
        for (int i = 0; i < fields.length; i++) {
            if (!fields[i].stringValue().equals(value)) {
                doc.add(fields[i]);
            }
        }
        iw.updateDocument(new Term("ID", recID), doc);
        iw.close();
    } catch (IOException ex) {
        log.fatal(ex);
    } finally {
        // BUG FIX: the searcher was never closed (leaked on every call).
        if (searcher != null) {
            try {
                searcher.close();
            } catch (IOException ignored) {
                // closing is best effort
            }
        }
    }
}
示例4: verify
import org.apache.lucene.search.Hits; //导入方法依赖的package包/类
/**
 * Asserts that a term query for content:"apache" on the given directory
 * returns exactly {@code expectedHits} hits and that every "id" value
 * in [0, expectedHits) occurs exactly once.
 */
private void verify(Directory dir, int expectedHits) throws IOException {
    IndexSearcher searcher = new IndexSearcher(dir);
    Hits hits = searcher.search(new TermQuery(new Term("content", "apache")));
    int total = hits.length();
    assertEquals(expectedHits, total);
    // tally how often each stored "id" appears among the hits
    int[] perId = new int[total];
    for (int rank = 0; rank < total; rank++) {
        perId[Integer.parseInt(hits.doc(rank).get("id"))]++;
    }
    for (int id = 0; id < total; id++) {
        assertEquals(1, perId[id]);
    }
    searcher.close();
}
示例5: debugExplainResults
import org.apache.lucene.search.Hits; //导入方法依赖的package包/类
/**
 * Logs score explanations and a best-guess matching field for the top
 * (at most 10) hits of a query against the named index.
 */
private void debugExplainResults(String indexName, Hits hits, IndexSearcher searcher,
                                 Query q, Set<Term> queryTerms)
    throws IOException {
    log.debug("Parsed Query is " + q.toString());
    log.debug("Looking at index: " + indexName);
    // only the first 10 hits are explained
    int limit = Math.min(hits.length(), 10);
    for (int rank = 0; rank < limit; rank++) {
        Document doc = hits.doc(rank);
        float score = hits.score(rank);
        Explanation ex = searcher.explain(q, hits.id(rank));
        log.debug("Looking at hit<" + rank + ", " + hits.id(rank) + ", " + score +
            ">: " + doc);
        log.debug("Explanation: " + ex);
        // heuristically determine which field the query matched on
        MatchingField match = new MatchingField(q.toString(), doc, queryTerms);
        log.debug("Guessing that matched fieldName is " + match.getFieldName() + " = " +
            match.getFieldValue());
    }
}
示例6: updateDocument
import org.apache.lucene.search.Hits; //导入方法依赖的package包/类
/**
 * Replaces the value of one field of an indexed document by copying every
 * configured field (substituting the new value for {@code fieldName}),
 * deleting the old index entry and re-creating it.
 *
 * @param documentId id used to locate the document (field "DocumentId")
 * @param fieldName  name of the field whose value is replaced (case-insensitive)
 * @param fieldValue new value for that field
 */
public void updateDocument(String documentId, String fieldName,
                           String fieldValue) {
    try {
        SearchBean sbean = new SearchBean();
        Hits hresult = sbean.skynetsearch(documentId, "DocumentId");
        // BUG FIX: guard against a missing document instead of letting
        // hresult.doc(0) throw and be swallowed by the broad catch below.
        if (hresult == null || hresult.length() == 0) {
            return;
        }
        Document doc = hresult.doc(0);
        ArrayList<DocumentFields> docfields = new ArrayList<DocumentFields>();
        for (int i = 0; i < fields.length; i++) {
            DocumentFields docFields = new DocumentFields();
            docFields.SetFieldName(fields[i]);
            if (fields[i].equalsIgnoreCase(fieldName)) {
                // substitute the new value for the targeted field
                docFields.SetFieldValue(fieldValue);
            } else {
                // keep the stored value for all other fields
                docFields.SetFieldValue(doc.get(fields[i]));
            }
            docfields.add(docFields);
        }
        // re-create the index entry with the updated field set
        DeleteIndex(documentId);
        CreateIndex(docfields);
    } catch (Exception ex) {
        System.out.print(ex.toString());
    }
}
示例7: verify
import org.apache.lucene.search.Hits; //导入方法依赖的package包/类
/**
 * Opens one reader per shard, searches them together through a MultiReader
 * and asserts: no content:"apache" hits, numDocsPerRun/2 content:"hadoop"
 * hits, and that exactly the odd ids occur (once each).
 */
private void verify(Shard[] shards) throws IOException {
    // open one reader per shard directory
    IndexReader[] shardReaders = new IndexReader[shards.length];
    for (int s = 0; s < shards.length; s++) {
        Directory dir =
            new FileSystemDirectory(fs, new Path(shards[s].getDirectory()),
                false, conf);
        shardReaders[s] = IndexReader.open(dir);
    }
    IndexReader reader = new MultiReader(shardReaders);
    IndexSearcher searcher = new IndexSearcher(reader);
    // no document should match "apache"; half of them should match "hadoop"
    Hits hits = searcher.search(new TermQuery(new Term("content", "apache")));
    assertEquals(0, hits.length());
    hits = searcher.search(new TermQuery(new Term("content", "hadoop")));
    assertEquals(numDocsPerRun / 2, hits.length());
    // tally stored "id" values of the hadoop hits
    int[] counts = new int[numDocsPerRun];
    for (int rank = 0; rank < hits.length(); rank++) {
        counts[Integer.parseInt(hits.doc(rank).get("id"))]++;
    }
    // even ids absent, odd ids present exactly once
    for (int id = 0; id < numDocsPerRun; id++) {
        assertEquals(id % 2 == 0 ? 0 : 1, counts[id]);
    }
    searcher.close();
    reader.close();
}
示例8: searchCorpus
import org.apache.lucene.search.Hits; //导入方法依赖的package包/类
/**
 * Searches the current corpus using the search terms in the search field,
 * highlights the best matching fragment of each hit and publishes the
 * results to the result list model.
 */
private void searchCorpus() {
    if (search.getText().trim().equals("")) return;
    try {
        // search the diff corpus when a guess corpus is selected, otherwise the gold corpus
        indexSearcher = guess.getSelected() == null
            ? getIndex(gold.getSelected())
            : getIndex(getDiffCorpus(gold.getSelected(), guess.getSelected()));
        QueryParser parser = new QueryParser("Word", analyzer);
        Query query = parser.parse(search.getText());
        Hits hits = indexSearcher.search(query);
        Highlighter highlighter = new Highlighter(new QueryScorer(query));
        DefaultListModel model = new DefaultListModel();
        int hitCount = hits.length();
        for (int rank = 0; rank < hitCount; rank++) {
            Document hitDoc = hits.doc(rank);
            int nr = Integer.parseInt(hitDoc.get("<nr>"));
            // take the first field that yields a highlighted fragment
            String best = null;
            for (Object fieldObj : hitDoc.getFields()) {
                Field f = (Field) fieldObj;
                best = highlighter.getBestFragment(analyzer, f.name(), hitDoc.get(f.name()));
                if (best != null) break;
            }
            if (best != null) {
                model.addElement(new Result(nr, "<html>" + nr + ":" + best + "</html>"));
            }
        }
        results.setModel(model);
        repaint();
    } catch (Exception ex) {
        ex.printStackTrace();
    }
}
示例9: iterate
import org.apache.lucene.search.Hits; //导入方法依赖的package包/类
/**
 * Walks every ranked hit and hands it to the registered query callback.
 *
 * @param hits a ranked list of found documents
 * @throws LuceneProviderException when a hit document cannot be retrieved
 */
void iterate(Hits hits) {
    int total = hits.length();
    for (int rank = 0; rank < total; rank++) {
        try {
            result = hits.doc(rank);
            if (LOG.isLoggable(Level.FINE)) {
                LOG.fine("Processing search result: " + result);
            }
            // notify the callback for each retrieved document
            queryCallback.processRow(this);
        } catch (IOException e) {
            throw new LuceneProviderException("Failed to get query result.",e);
        }
    }
}
示例10: displayHits
import org.apache.lucene.search.Hits; //导入方法依赖的package包/类
/**
 * Logs rank, score and the stored "name"/"description" fields of every hit.
 */
protected void displayHits(Hits hits) throws IOException {
    for (int rank = 0; rank < hits.length(); rank++) {
        Document doc = hits.doc(rank);
        String name = doc.get("name");
        String description = doc.get("description");
        log.info("Hit<" + rank + "> Score< " + hits.score(rank) + "> name = <" +
            name + "> description = <" + description + ">");
    }
}
示例11: getPaths
import org.apache.lucene.search.Hits; //导入方法依赖的package包/类
/**
 * Deserializes concept paths out of the given hits: each hit stores a start
 * concept, an end concept and a separator-joined list of intermediate paths.
 * Paths whose length is not of the form 3k+1 are logged and skipped.
 */
private Set<List<String>> getPaths(Hits hits) throws IOException
{
    Set<List<String>> paths = new HashSet<List<String>>();
    int hitCount = hits.length();
    for (int rank = 0; rank < hitCount; rank++)
    {
        Document doc = hits.doc(rank);
        String startConcept =
            doc.get(KnowledgeBasePathIndexField.START.toString());
        String endConcept =
            doc.get(KnowledgeBasePathIndexField.END.toString());
        String serialized =
            doc.get(KnowledgeBasePathIndexField.PATHS.toString());
        if (serialized.isEmpty()) continue;
        for (String pathString : serialized.split(QUOTED_PATH_SEPARATOR))
        {
            // rebuild the full path: start, space-separated elements, end
            List<String> path = new ArrayList<String>();
            path.add(startConcept);
            for (String pathElement : pathString.split(" "))
                path.add(pathElement);
            path.add(endConcept);
            // sanity check: a well-formed path has length 3k+1
            if ((path.size() % 3) != 1) log.warn("INVALID PATH: " + path);
            else paths.add(path);
        }
    }
    return paths;
}
示例12: main
import org.apache.lucene.search.Hits; //导入方法依赖的package包/类
/**
 * Interactive console search loop: reads queries from stdin, searches the
 * "index" directory and prints hits ("path" or "url"/"title") ten per page.
 */
public static void main(String[] args) {
    try {
        Searcher searcher = new IndexSearcher("index");
        Analyzer analyzer = new StandardAnalyzer();
        BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
        while (true) {
            System.out.print("Query: ");
            String line = in.readLine();
            // BUG FIX: readLine() returns null at end of stream; the original
            // test `line.length() == -1` could never be true and NPE'd at EOF.
            if (line == null)
                break;
            Query query = QueryParser.parse(line, "contents", analyzer);
            System.out.println("Searching for: " + query.toString("contents"));
            Hits hits = searcher.search(query);
            System.out.println(hits.length() + " total matching documents");
            final int HITS_PER_PAGE = 10;
            for (int start = 0; start < hits.length(); start += HITS_PER_PAGE) {
                int end = Math.min(hits.length(), start + HITS_PER_PAGE);
                for (int i = start; i < end; i++) {
                    Document doc = hits.doc(i);
                    String path = doc.get("path");
                    if (path != null) {
                        System.out.println(i + ". " + path);
                    } else {
                        String url = doc.get("url");
                        if (url != null) {
                            System.out.println(i + ". " + url);
                            System.out.println(" - " + doc.get("title"));
                        } else {
                            System.out.println(i + ". " + "No path nor URL for this document");
                        }
                    }
                }
                if (hits.length() > end) {
                    System.out.print("more (y/n) ? ");
                    line = in.readLine();
                    // BUG FIX: also guard against EOF (null) at the paging prompt
                    if (line == null || line.length() == 0 || line.charAt(0) == 'n')
                        break;
                }
            }
        }
        searcher.close();
    } catch (Exception e) {
        System.out.println(" caught a " + e.getClass() +
            "\n with message: " + e.getMessage());
    }
}
示例13: test_process
import org.apache.lucene.search.Hits; //导入方法依赖的package包/类
/**
 * Verifies that processing step logs stores exactly one new log, makes it
 * accessible from the archive, is idempotent on a second run, and (when the
 * implementor supplies a search string) that the log is indexed with all
 * required Lucene document fields.
 */
public final void test_process() throws Exception {
// init current number of log files
final StepRun stepRun = cm.getStepRun(stepResultID());
final List logsBefore = cm.getAllStepLogs(stepRun.getID());
final int logCountBefore = logsBefore.size();
final int logCountExistsBefore = getExistsCount(logsBefore);
// call implementor's process logs method
if (log.isDebugEnabled()) log.debug("first run");
processLogs();
// get counters after processing
final List logsAfter = cm.getAllStepLogs(stepRun.getID());
final int logCountAfter = logsAfter.size();
final int logCountExistsAfter = getExistsCount(logsAfter);
// check if logs in the db
if (log.isDebugEnabled()) log.debug("logCountBefore = " + logCountBefore);
if (log.isDebugEnabled()) log.debug("logCountAfter = " + logCountAfter);
assertTrue("Number of logs after should be bigger then before", logCountAfter > logCountBefore);
// exactly one new log record is expected per processing run
assertEquals(1, logCountAfter - logCountBefore);
// check if logs are accesible from archive
assertEquals(logCountExistsAfter - logCountExistsBefore, logCountAfter - logCountBefore);
// run second time to make sure same results are not picked twice.
if (log.isDebugEnabled()) log.debug("second run");
processLogs();
assertEquals("Number of logs should not change after second run", logCountAfter, cm.getAllStepLogs(stepRun.getID()).size());
assertEquals("Number of archived logs should not change after second run", logCountExistsAfter, getExistsCount(cm.getAllStepLogs(stepRun.getID())));
// check if logs got indexed/searchable where applicable
if (!StringUtils.isBlank(stringToBeFoundBySearch())) {
if (log.isDebugEnabled()) log.debug("Check if can find in log: " + stringToBeFoundBySearch());
Hits results = searchManager.search(new SearchRequest(stringToBeFoundBySearch()));
if (results.length() == 0) {
// let indexer queue to process logs
// NOTE(review): assumes the async indexer drains within 500ms — may be flaky on slow machines
Thread.sleep(500);
// retry search
results = searchManager.search(new SearchRequest(stringToBeFoundBySearch()));
}
assertTrue(results.length() > 0);
// check if required fields are there
for (int i = 0; i < results.length(); i++) {
final Document result = results.doc(i);
assertFieldPresent(result, LuceneDocumentFactory.FIELD_BUILD_ID);
assertFieldPresent(result, LuceneDocumentFactory.FIELD_BUILD_NAME);
assertFieldPresent(result, LuceneDocumentFactory.FIELD_BUILD_RUN_NUMBER);
assertFieldPresent(result, LuceneDocumentFactory.FIELD_BUILD_RUN_ID);
assertFieldPresent(result, LuceneDocumentFactory.FIELD_BUILD_STARTED);
assertFieldPresent(result, LuceneDocumentFactory.FIELD_DOCUMENT_TYPE);
assertFieldPresent(result, LuceneDocumentFactory.FIELD_SEQUENCE_LOG_ID);
assertFieldPresent(result, LuceneDocumentFactory.FIELD_SEQUENCE_LOG_DESCR);
assertFieldPresent(result, LuceneDocumentFactory.FIELD_SEQUENCE_LOG_CONFIG_PATH);
assertFieldPresent(result, LuceneDocumentFactory.FIELD_SEQUENCE_LOG_PATH_TYPE);
assertFieldPresent(result, LuceneDocumentFactory.FIELD_SEQUENCE_LOG_TYPE);
assertFieldPresent(result, LuceneDocumentFactory.FIELD_SEQUENCE_NAME);
}
}
}
示例14: testProcess
import org.apache.lucene.search.Hits; //导入方法依赖的package包/类
/**
 * Verifies that processing step results stores at least one new result,
 * makes each accessible from the archive, is idempotent on a second run,
 * and (when the implementor supplies a search string) that the result is
 * indexed with all required Lucene document fields.
 */
public final void testProcess() throws Exception {
// init current number of result files
final StepRun stepRun = cm.getStepRun(stepRunID());
final List resultsBefore = cm.getAllStepResults(stepRun);
final int resultCountBefore = resultsBefore.size();
final int resultCountExistsBefore = getExistsCount(resultsBefore);
// call implementor's process results method
if (LOG.isDebugEnabled()) {
LOG.debug("first run");
}
processResults();
// get counters after processing
final List resultsAfter = cm.getAllStepResults(stepRun);
final int resultCountAfter = resultsAfter.size();
final int resultCountExistsAfter = getExistsCount(resultsAfter);
// check if results in the db
if (LOG.isDebugEnabled()) {
LOG.debug("resultCountBefore = " + resultCountBefore);
}
if (LOG.isDebugEnabled()) {
LOG.debug("resultCountAfter = " + resultCountAfter);
}
assertTrue("Number of results after should be bigger then before", resultCountAfter > resultCountBefore);
assertTrue("Number of results should increase", resultCountAfter - resultCountBefore >= 1);
assertEquals("Number of results accessible from archive should be the same as in the database", resultCountExistsAfter - resultCountExistsBefore, resultCountAfter - resultCountBefore);
// run second time to make sure same results are not picked twice.
if (LOG.isDebugEnabled()) {
LOG.debug("second run");
}
processResults();
assertEquals("Number of results should not change after second run", resultCountAfter, cm.getAllStepResults(stepRun).size());
assertEquals("Number of archived results should not change after second run", resultCountExistsAfter, getExistsCount(resultsAfter));
// check if results got indexed/searchable where applicable
if (!StringUtils.isBlank(stringToBeFoundBySearch())) {
if (LOG.isDebugEnabled()) {
LOG.debug("Check if can find in result: " + stringToBeFoundBySearch());
}
Hits results = searchManager.search(new SearchRequest(stringToBeFoundBySearch()));
if (results.length() == 0) {
// let indexer queue to process results
// NOTE(review): assumes the async indexer drains within 500ms — may be flaky on slow machines
Thread.sleep(500);
// retry search
results = searchManager.search(new SearchRequest(stringToBeFoundBySearch()));
}
assertTrue(results.length() > 0);
// check if required fields are there
for (int i = 0; i < results.length(); i++) {
final Document result = results.doc(i);
// header
assertFieldPresent(result, LuceneDocumentFactory.FIELD_BUILD_ID);
assertFieldPresent(result, LuceneDocumentFactory.FIELD_BUILD_NAME);
assertFieldPresent(result, LuceneDocumentFactory.FIELD_BUILD_RUN_NUMBER);
assertFieldPresent(result, LuceneDocumentFactory.FIELD_BUILD_RUN_ID);
assertFieldPresent(result, LuceneDocumentFactory.FIELD_BUILD_STARTED);
assertFieldPresent(result, LuceneDocumentFactory.FIELD_DOCUMENT_TYPE);
// result specific
assertFieldPresent(result, LuceneDocumentFactory.FIELD_RESULT_FILE_NAME);
assertFieldPresent(result, LuceneDocumentFactory.FIELD_RESULT_STEP_RESULT_ID);
}
}
}
示例15: search
import org.apache.lucene.search.Hits; //导入方法依赖的package包/类
/**
 * Execute search using the index at {@code indexPath} (instance field).
 *
 * @param queryString the query string
 * @return an array of <code>SearchResultEntry</code> for matching concepts,
 *         otherwise null when the search fails
 */
public SearchResultEntry[] search(String queryString) {
    // BUG FIX (docs): the javadoc documented a nonexistent indexPath parameter.
    ArrayList<SearchResultEntry> result = new ArrayList<SearchResultEntry>();
    Searcher searcher = null;
    try {
        searcher = new IndexSearcher(indexPath);
        Analyzer analyzer = new StandardAnalyzer();
        String line = queryString;
        logger.debug("Query: " + line);
        QueryParser queryParser = new QueryParser("contents", analyzer);
        Query query = queryParser.parse(line);
        logger.debug("\nSearching for: '" + query.toString("contents")
            + "'");
        Hits hits = searcher.search(query);
        logger.debug("Search result: " + hits.length()
            + " total matching documents");
        for (int i = 0; i < hits.length(); i++) {
            Document doc = hits.doc(i);
            String path = doc.get("path");
            if (path != null) {
                logger.debug(i + ". " + path);
                // i'm interested only in concepts so filter out
                // non-concepts (HACK)
                // TODO this filter is here because of obsolete indexes -
                // there should be deleted
                if (path.indexOf(File.separator + "concepts"
                    + File.separator) >= 0) {
                    result.add(new SearchResultEntry(doc
                        .get("outlineLabel"), doc.get("conceptLabel"),
                        doc.get("path")));
                }
            } else {
                String url = doc.get("url");
                if (url != null) {
                    logger.debug(i + ". " + url);
                    logger.debug("   - " + doc.get("title"));
                } else {
                    logger.debug(i + ". "
                        + "No path nor URL for this document");
                }
            }
        }
        return (SearchResultEntry[]) result
            .toArray(new SearchResultEntry[result.size()]);
    } catch (Exception e) {
        logger.error("Caught a " + e.getClass() + "\n with message: "
            + e.getMessage(), e);
    } finally {
        // BUG FIX: the searcher leaked whenever an exception was thrown
        // before the success-path close; always close it here.
        if (searcher != null) {
            try {
                searcher.close();
            } catch (IOException ioe) {
                logger.error("Failed to close searcher", ioe);
            }
        }
    }
    return null;
}