This article collects typical usage examples of the Java method org.apache.lucene.search.Searcher.search. If you are asking yourself what Searcher.search does, how it is used, or where to find real-world examples of it, the curated code samples below may help. You can also explore further usage examples of the containing class, org.apache.lucene.search.Searcher.
The following presents 15 code examples of Searcher.search, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
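Before the collected examples, here is a minimal, self-contained sketch of the call itself in the Lucene 2.9/3.x API that most of the examples below target. This sketch is illustrative only: the index directory "index" and the field names "contents" and "path" are placeholder assumptions, not values taken from any of the examples.

import java.io.File;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.FSDirectory;

public class SearcherSearchSketch {
    public static void main(String[] args) throws Exception {
        // Open a read-only searcher over an existing index directory.
        Searcher searcher = new IndexSearcher(FSDirectory.open(new File("index")), true);
        try {
            Query query = new TermQuery(new Term("contents", "lucene"));
            // Searcher.search(Query, int) returns the top N hits as a TopDocs.
            TopDocs docs = searcher.search(query, 10);
            for (ScoreDoc sd : docs.scoreDocs) {
                Document doc = searcher.doc(sd.doc);
                System.out.println(sd.score + "\t" + doc.get("path"));
            }
        } finally {
            searcher.close();
        }
    }
}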
Example 1: addField
import org.apache.lucene.search.Searcher; // import the package/class the method depends on
public boolean addField(String recID, String prefix, String value) {
    try {
        Searcher searcher = new IndexSearcher(indexPath);
        Query q = new TermQuery(new Term("ID", recID));
        Hits hits = searcher.search(q);
        if ((hits == null) || (hits.length() != 1)) {
            return false;
        }
        Document doc = hits.doc(0);
        IndexWriter iw = getIndexWriter();
        Field f = new Field(prefix, value, Field.Store.YES, Field.Index.UN_TOKENIZED, Field.TermVector.NO);
        doc.add(f);
        iw.updateDocument(new Term("ID", recID), doc);
    } catch (IOException ex) {
        log.fatal(ex);
        return false;
    }
    return true;
}
Example 2: addField
import org.apache.lucene.search.Searcher; // import the package/class the method depends on
public boolean addField(String recID, String prefix, String value) {
    try {
        Searcher searcher = new IndexSearcher(indexPath);
        Query q = new TermQuery(new Term("ID", recID));
        Hits hits = searcher.search(q);
        if ((hits == null) || (hits.length() != 1)) {
            return false;
        }
        Document doc = hits.doc(0);
        IndexWriter iw = getIndexWriter();
        Field f = new Field(prefix, value, Field.Store.YES, Field.Index.UN_TOKENIZED, Field.TermVector.NO);
        doc.add(f);
        iw.updateDocument(new Term("ID", recID), doc);
        iw.close();
    } catch (IOException ex) {
        log.fatal(ex);
        return false;
    }
    return true;
}
Example 3: deleteField
import org.apache.lucene.search.Searcher; // import the package/class the method depends on
public void deleteField(String recID, String prefix, String value) {
    try {
        Searcher searcher = new IndexSearcher(indexPath);
        Query q = new TermQuery(new Term("ID", recID));
        Hits hits = searcher.search(q);
        if ((hits == null) || (hits.length() != 1)) {
            log.fatal("greska pri brisanju polja. Zapis: " + recID);
            return;
        }
        Document doc = hits.doc(0);
        Field[] fields = doc.getFields(prefix);
        IndexWriter iw = getIndexWriter();
        doc.removeFields(prefix);
        for (int i = 0; i < fields.length; i++) {
            if (!fields[i].stringValue().equals(value)) {
                doc.add(fields[i]);
            }
        }
        iw.updateDocument(new Term("ID", recID), doc);
        iw.close();
    } catch (IOException ex) {
        log.fatal(ex);
    }
}
Example 4: searchPage
import org.apache.lucene.search.Searcher; // import the package/class the method depends on
@Transactional(readOnly = true)
public Pagination searchPage(Directory dir, String queryString, String category, String workplace,
        Integer siteId, Integer channelId, Date startDate, Date endDate,
        int pageNo, int pageSize) throws CorruptIndexException,
        IOException, ParseException {
    Searcher searcher = new IndexSearcher(dir);
    try {
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_30);
        Query query = LuceneContent.createQuery(queryString, category, workplace, siteId,
                channelId, startDate, endDate, analyzer);
        TopDocs docs = searcher.search(query, pageNo * pageSize);
        Pagination p = LuceneContent.getResultPage(searcher, docs, pageNo,
                pageSize);
        List<?> ids = p.getList();
        List<Content> contents = new ArrayList<Content>(ids.size());
        for (Object id : ids) {
            contents.add(contentMng.findById((Integer) id));
        }
        p.setList(contents);
        return p;
    } finally {
        searcher.close();
    }
}
Example 5: selectAll
import org.apache.lucene.search.Searcher; // import the package/class the method depends on
public Result selectAll(Query query, String sortPrefix) {
    try {
        BooleanQuery.setMaxClauseCount(20000); // because of heap size
        Searcher searcher = new IndexSearcher(indexPath);
        Hits hits;
        if (sortPrefix == null || "".equals(sortPrefix))
            hits = searcher.search(query);
        else {
            int sortType = SortField.STRING;
            if ("RN_sort".equals(sortPrefix))
                sortType = SortField.INT;
            hits = searcher.search(query, new Sort(
                    new SortField(sortPrefix, sortType)));
        }
        int n = hits.length();
        int[] retVal = new int[n];
        List<String> invs = new ArrayList<String>();
        Field[] tmp = null;
        for (int i = 0; i < n; i++) {
            String recordID = hits.doc(i).get("ID");
            retVal[i] = Integer.parseInt(recordID);
            tmp = hits.doc(i).getFields("IN");
            if (tmp != null) {
                for (int j = 0; j < tmp.length; j++) {
                    invs.add(tmp[j].stringValue());
                }
            }
        }
        searcher.close();
        Result result = new Result();
        result.setRecords(retVal);
        result.setInvs(invs);
        return result;
    } catch (Exception ex) {
        log.fatal(ex);
        return null;
    }
}
Example 6: select
import org.apache.lucene.search.Searcher; // import the package/class the method depends on
public int[] select(Query query, Filter filter, String sortPrefix) {
    try {
        BooleanQuery.setMaxClauseCount(Integer.MAX_VALUE);
        Searcher searcher = new IndexSearcher(indexPath);
        Hits hits;
        if (sortPrefix == null || "".equals(sortPrefix)) {
            hits = searcher.search(query, filter);
        } else {
            int sortType = SortField.STRING;
            if ("RN_sort".equals(sortPrefix))
                sortType = SortField.INT;
            hits = searcher.search(query, filter, new Sort(
                    new SortField(sortPrefix, sortType)));
        }
        int n = hits.length();
        int[] retVal = new int[n];
        for (int i = 0; i < n; i++) {
            String recordID = hits.doc(i).get("ID");
            retVal[i] = Integer.parseInt(recordID);
        }
        searcher.close();
        return retVal;
    } catch (Exception ex) {
        log.fatal(ex);
        return null;
    }
}
Example 7: selectExpand
import org.apache.lucene.search.Searcher; // import the package/class the method depends on
public List<String> selectExpand(String query, String prefix, String text) {
    try {
        WhitespaceAnalyzer sa = new WhitespaceAnalyzer();
        BooleanQuery.setMaxClauseCount(Integer.MAX_VALUE);
        QueryParser p = new QueryParser("contents", sa);
        Query q = p.parse(query);
        Searcher searcher = new IndexSearcher(indexPath);
        StopWatch clock = new StopWatch();
        clock.start();
        Hits hits = searcher.search(q);
        int n = hits.length();
        List<String> expandList = new ArrayList<String>();
        Field[] tmp = null;
        String pom = "";
        for (int i = 0; i < n; i++) {
            tmp = hits.doc(i).getFields(prefix);
            if (tmp != null) {
                for (int j = 0; j < tmp.length; j++) {
                    pom = tmp[j].stringValue().replace("0start0 ", "");
                    pom = pom.replace(" 0end0", "");
                    if (pom.startsWith(text) && (!expandList.contains(pom))) {
                        expandList.add(pom);
                    }
                }
            }
        }
        clock.stop();
        searcher.close();
        return expandList;
    } catch (Exception ex) {
        log.fatal(ex);
        return null;
    }
}
Example 8: doPagingSearch
import org.apache.lucene.search.Searcher; // import the package/class the method depends on
/**
 * This demonstrates a typical paging search scenario, where the search engine presents pages of size n to the user.
 * The user can then go to the next page if interested in the next hits.
 *
 * When the query is executed for the first time, only enough results are collected to fill 5 result pages. If the
 * user wants to page beyond this limit, the query is executed another time and all hits are collected.
 */
public static ScoreDoc[] doPagingSearch(Searcher searcher, Query query, int noOfPages) throws IOException {
    // Collect enough docs to show 5 pages
    TopScoreDocCollector collector = TopScoreDocCollector.create(noOfPages, true);
    searcher.search(query, collector);
    ScoreDoc[] hits = collector.topDocs().scoreDocs;
    int numTotalHits = collector.getTotalHits();
    // System.out.println("Confidence Score : : " + hits.length);
    System.out.println(numTotalHits + " total matching documents");
    return hits;
}
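One possible way to drive the doPagingSearch method above is sketched below. This is a hedged usage sketch only: it assumes the same Lucene imports as the opening sketch, and the index location "index", the field "contents", and a page size of 10 are illustrative assumptions rather than values from the original code.

Searcher searcher = new IndexSearcher(FSDirectory.open(new File("index")), true);
try {
    Query query = new TermQuery(new Term("contents", "lucene"));
    // Collect enough hits to fill 5 pages of 10 results, as described in the Javadoc above.
    ScoreDoc[] hits = doPagingSearch(searcher, query, 5 * 10);
    for (ScoreDoc hit : hits) {
        System.out.println(searcher.doc(hit.doc).get("path"));
    }
} finally {
    searcher.close();
}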
Example 9: searchList
import org.apache.lucene.search.Searcher; // import the package/class the method depends on
@Transactional(readOnly = true)
public List<Content> searchList(Directory dir, String queryString, String category, String workplace,
        Integer siteId, Integer channelId, Date startDate, Date endDate,
        int first, int max) throws CorruptIndexException, IOException,
        ParseException {
    Searcher searcher = new IndexSearcher(dir);
    try {
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_30);
        Query query = LuceneContent.createQuery(queryString, category, workplace, siteId,
                channelId, startDate, endDate, analyzer);
        if (first < 0) {
            first = 0;
        }
        if (max < 0) {
            max = 0;
        }
        TopDocs docs = searcher.search(query, first + max);
        List<Integer> ids = LuceneContent.getResultList(searcher, docs,
                first, max);
        List<Content> contents = new ArrayList<Content>(ids.size());
        for (Object id : ids) {
            contents.add(contentMng.findById((Integer) id));
        }
        return contents;
    } finally {
        searcher.close();
    }
}
Example 10: query
import org.apache.lucene.search.Searcher; // import the package/class the method depends on
@Override
public <T> void query(
        @NonNull Collection<? super T> result,
        @NonNull Convertor<? super Document, T> convertor,
        @NullAllowed FieldSelector selector,
        @NullAllowed AtomicBoolean cancel,
        @NonNull Query... queries) throws IOException, InterruptedException {
    Parameters.notNull("queries", queries);     //NOI18N
    Parameters.notNull("convertor", convertor); //NOI18N
    Parameters.notNull("result", result);       //NOI18N
    if (selector == null) {
        selector = AllFieldsSelector.INSTANCE;
    }
    lock.readLock().lock();
    try {
        final IndexReader in = getReader();
        if (in == null) {
            return;
        }
        final BitSet bs = new BitSet(in.maxDoc());
        final Collector c = new BitSetCollector(bs);
        final Searcher searcher = new IndexSearcher(in);
        try {
            for (Query q : queries) {
                if (cancel != null && cancel.get()) {
                    throw new InterruptedException();
                }
                searcher.search(q, c);
            }
        } finally {
            searcher.close();
        }
        for (int docNum = bs.nextSetBit(0); docNum >= 0; docNum = bs.nextSetBit(docNum + 1)) {
            if (cancel != null && cancel.get()) {
                throw new InterruptedException();
            }
            final Document doc = in.document(docNum, selector);
            final T value = convertor.convert(doc);
            if (value != null) {
                result.add(value);
            }
        }
    } finally {
        lock.readLock().unlock();
    }
}
Example 11: queryDocTerms
import org.apache.lucene.search.Searcher; // import the package/class the method depends on
@Override
public <S, T> void queryDocTerms(
        @NonNull Map<? super T, Set<S>> result,
        @NonNull Convertor<? super Document, T> convertor,
        @NonNull Convertor<? super Term, S> termConvertor,
        @NullAllowed FieldSelector selector,
        @NullAllowed AtomicBoolean cancel,
        @NonNull Query... queries) throws IOException, InterruptedException {
    Parameters.notNull("result", result);                 //NOI18N
    Parameters.notNull("convertor", convertor);           //NOI18N
    Parameters.notNull("termConvertor", termConvertor);   //NOI18N
    Parameters.notNull("queries", queries);               //NOI18N
    if (selector == null) {
        selector = AllFieldsSelector.INSTANCE;
    }
    lock.readLock().lock();
    try {
        final IndexReader in = getReader();
        if (in == null) {
            return;
        }
        final BitSet bs = new BitSet(in.maxDoc());
        final Collector c = new BitSetCollector(bs);
        final Searcher searcher = new IndexSearcher(in);
        final TermCollector termCollector = new TermCollector(c);
        try {
            for (Query q : queries) {
                if (cancel != null && cancel.get()) {
                    throw new InterruptedException();
                }
                if (q instanceof TermCollector.TermCollecting) {
                    ((TermCollector.TermCollecting) q).attach(termCollector);
                } else {
                    throw new IllegalArgumentException(
                            String.format("Query: %s does not implement TermCollecting", //NOI18N
                                    q.getClass().getName()));
                }
                searcher.search(q, termCollector);
            }
        } finally {
            searcher.close();
        }
        for (int docNum = bs.nextSetBit(0); docNum >= 0; docNum = bs.nextSetBit(docNum + 1)) {
            if (cancel != null && cancel.get()) {
                throw new InterruptedException();
            }
            final Document doc = in.document(docNum, selector);
            final T value = convertor.convert(doc);
            if (value != null) {
                final Set<Term> terms = termCollector.get(docNum);
                if (terms != null) {
                    result.put(value, convertTerms(termConvertor, terms));
                }
            }
        }
    } finally {
        lock.readLock().unlock();
    }
}
Example 12: main
import org.apache.lucene.search.Searcher; // import the package/class the method depends on
public static void main(String[] args) {
    try {
        Searcher searcher = new IndexSearcher("index");
        Analyzer analyzer = new StandardAnalyzer();
        BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
        while (true) {
            System.out.print("Query: ");
            String line = in.readLine();
            if (line == null || line.length() == 0)
                break;
            Query query = QueryParser.parse(line, "contents", analyzer);
            System.out.println("Searching for: " + query.toString("contents"));
            Hits hits = searcher.search(query);
            System.out.println(hits.length() + " total matching documents");
            final int HITS_PER_PAGE = 10;
            for (int start = 0; start < hits.length(); start += HITS_PER_PAGE) {
                int end = Math.min(hits.length(), start + HITS_PER_PAGE);
                for (int i = start; i < end; i++) {
                    Document doc = hits.doc(i);
                    String path = doc.get("path");
                    if (path != null) {
                        System.out.println(i + ". " + path);
                    } else {
                        String url = doc.get("url");
                        if (url != null) {
                            System.out.println(i + ". " + url);
                            System.out.println(" - " + doc.get("title"));
                        } else {
                            System.out.println(i + ". " + "No path nor URL for this document");
                        }
                    }
                }
                if (hits.length() > end) {
                    System.out.print("more (y/n) ? ");
                    line = in.readLine();
                    if (line == null || line.length() == 0 || line.charAt(0) == 'n')
                        break;
                }
            }
        }
        searcher.close();
    } catch (Exception e) {
        System.out.println(" caught a " + e.getClass() +
                "\n with message: " + e.getMessage());
    }
}
Example 13: search
import org.apache.lucene.search.Searcher; // import the package/class the method depends on
/**
 * Do the search.
 *
 * @param conn the database connection
 * @param text the query
 * @param limit the limit
 * @param offset the offset
 * @param data whether the raw data should be returned
 * @return the result set
 */
protected static ResultSet search(Connection conn, String text,
        int limit, int offset, boolean data) throws SQLException {
    SimpleResultSet result = createResultSet(data);
    if (conn.getMetaData().getURL().startsWith("jdbc:columnlist:")) {
        // this is just to query the result set columns
        return result;
    }
    if (text == null || text.trim().length() == 0) {
        return result;
    }
    try {
        IndexAccess access = getIndexAccess(conn);
        // take a reference as the searcher may change
        Searcher searcher = access.searcher;
        // reuse the same analyzer; it's thread-safe;
        // also allows subclasses to control the analyzer used.
        Analyzer analyzer = access.writer.getAnalyzer();
        QueryParser parser = new QueryParser(Version.LUCENE_30,
                LUCENE_FIELD_DATA, analyzer);
        Query query = parser.parse(text);
        // Lucene 3 insists on a hard limit and will not provide
        // a total hits value. Take at least 100 which is
        // an optimal limit for Lucene as any more
        // will trigger writing results to disk.
        int maxResults = (limit == 0 ? 100 : limit) + offset;
        TopDocs docs = searcher.search(query, maxResults);
        if (limit == 0) {
            limit = docs.totalHits;
        }
        for (int i = 0, len = docs.scoreDocs.length;
                i < limit && i + offset < docs.totalHits
                && i + offset < len; i++) {
            ScoreDoc sd = docs.scoreDocs[i + offset];
            Document doc = searcher.doc(sd.doc);
            float score = sd.score;
            String q = doc.get(LUCENE_FIELD_QUERY);
            if (data) {
                int idx = q.indexOf(" WHERE ");
                JdbcConnection c = (JdbcConnection) conn;
                Session session = (Session) c.getSession();
                Parser p = new Parser(session);
                String tab = q.substring(0, idx);
                ExpressionColumn expr = (ExpressionColumn) p.parseExpression(tab);
                String schemaName = expr.getOriginalTableAliasName();
                String tableName = expr.getColumnName();
                q = q.substring(idx + " WHERE ".length());
                Object[][] columnData = parseKey(conn, q);
                result.addRow(
                        schemaName,
                        tableName,
                        columnData[0],
                        columnData[1],
                        score);
            } else {
                result.addRow(q, score);
            }
        }
    } catch (Exception e) {
        throw convertException(e);
    }
    return result;
}
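For context, the protected search(...) method above (from H2's Lucene full-text support) is normally reached through the FullTextLucene facade rather than called directly. The following is a hedged usage sketch: the table, data, and query text are illustrative, and it assumes the standard org.h2.fulltext.FullTextLucene API (init, createIndex) together with the FTL_SEARCH SQL alias that init registers.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import org.h2.fulltext.FullTextLucene;

public class FullTextLuceneSketch {
    public static void main(String[] args) throws Exception {
        Connection conn = DriverManager.getConnection("jdbc:h2:mem:ftl");
        Statement stat = conn.createStatement();
        stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR)");
        stat.execute("INSERT INTO TEST VALUES(1, 'Hello Lucene World')");
        FullTextLucene.init(conn);                                // registers the FTL_* functions
        FullTextLucene.createIndex(conn, "PUBLIC", "TEST", null); // index all columns of TEST
        ResultSet rs = stat.executeQuery("SELECT * FROM FTL_SEARCH('Hello', 0, 0)");
        while (rs.next()) {
            // QUERY identifies the matching row; SCORE is the Lucene relevance score
            System.out.println(rs.getString("QUERY") + " score=" + rs.getFloat("SCORE"));
        }
        conn.close();
    }
}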
Example 14: selectAll1
import org.apache.lucene.search.Searcher; // import the package/class the method depends on
public Result selectAll1(String query, String sortPrefix) throws ParseException {
    try {
        WhitespaceAnalyzer sa = new WhitespaceAnalyzer();
        Searcher searcher = new IndexSearcher(indexPath);
        BooleanQuery.setMaxClauseCount(20000); // because of heap size
        QueryParser p = new QueryParser("KW", sa);
        p.setDefaultOperator(QueryParser.Operator.AND); // default operator is AND instead of the usual OR
        Query q = p.parse(query);
        Hits hits;
        if (sortPrefix == null || "".equals(sortPrefix))
            hits = searcher.search(q);
        else {
            int sortType = SortField.STRING;
            if ("RN_sort".equals(sortPrefix))
                sortType = SortField.INT;
            hits = searcher.search(q, new Sort(
                    new SortField(sortPrefix, sortType)));
        }
        int n = hits.length();
        int[] retVal = new int[n];
        List<String> invs = new ArrayList<String>();
        Field[] tmp = null;
        for (int i = 0; i < n; i++) {
            String recordID = hits.doc(i).get("ID");
            retVal[i] = Integer.parseInt(recordID);
            tmp = hits.doc(i).getFields("IN");
            if (tmp != null) {
                for (int j = 0; j < tmp.length; j++) {
                    invs.add(tmp[j].stringValue());
                }
            }
        }
        searcher.close();
        Result result = new Result();
        result.setRecords(retVal);
        result.setInvs(invs);
        return result;
    } catch (Exception ex) {
        if (ex instanceof ParseException)
            throw (ParseException) ex;
        log.fatal(ex);
        return null;
    }
}
Example 15: search
import org.apache.lucene.search.Searcher; // import the package/class the method depends on
public SearchResult search(SearchCriteria criteria, List<MusicFolder> musicFolders, IndexType indexType) {
    SearchResult result = new SearchResult();
    int offset = criteria.getOffset();
    int count = criteria.getCount();
    result.setOffset(offset);
    IndexReader reader = null;
    try {
        reader = createIndexReader(indexType);
        Searcher searcher = new IndexSearcher(reader);
        Analyzer analyzer = new SubsonicAnalyzer();
        MultiFieldQueryParser queryParser = new MultiFieldQueryParser(LUCENE_VERSION, indexType.getFields(), analyzer, indexType.getBoosts());
        BooleanQuery query = new BooleanQuery();
        query.add(queryParser.parse(analyzeQuery(criteria.getQuery())), BooleanClause.Occur.MUST);
        List<SpanTermQuery> musicFolderQueries = new ArrayList<SpanTermQuery>();
        for (MusicFolder musicFolder : musicFolders) {
            if (indexType == ALBUM_ID3 || indexType == ARTIST_ID3) {
                musicFolderQueries.add(new SpanTermQuery(new Term(FIELD_FOLDER_ID, NumericUtils.intToPrefixCoded(musicFolder.getId()))));
            } else {
                musicFolderQueries.add(new SpanTermQuery(new Term(FIELD_FOLDER, musicFolder.getPath().getPath())));
            }
        }
        query.add(new SpanOrQuery(musicFolderQueries.toArray(new SpanQuery[musicFolderQueries.size()])), BooleanClause.Occur.MUST);
        TopDocs topDocs = searcher.search(query, null, offset + count);
        result.setTotalHits(topDocs.totalHits);
        int start = Math.min(offset, topDocs.totalHits);
        int end = Math.min(start + count, topDocs.totalHits);
        for (int i = start; i < end; i++) {
            Document doc = searcher.doc(topDocs.scoreDocs[i].doc);
            switch (indexType) {
                case SONG:
                case ARTIST:
                case ALBUM:
                    MediaFile mediaFile = mediaFileService.getMediaFile(Integer.valueOf(doc.get(FIELD_ID)));
                    addIfNotNull(mediaFile, result.getMediaFiles());
                    break;
                case ARTIST_ID3:
                    Artist artist = artistDao.getArtist(Integer.valueOf(doc.get(FIELD_ID)));
                    addIfNotNull(artist, result.getArtists());
                    break;
                case ALBUM_ID3:
                    Album album = albumDao.getAlbum(Integer.valueOf(doc.get(FIELD_ID)));
                    addIfNotNull(album, result.getAlbums());
                    break;
                default:
                    break;
            }
        }
    } catch (Throwable x) {
        LOG.error("Failed to execute Lucene search.", x);
    } finally {
        FileUtil.closeQuietly(reader);
    }
    return result;
}