This page collects typical usage examples of the Java class org.apache.lucene.search.highlight.QueryScorer. If you are wondering what the QueryScorer class is for, how to use it, or are looking for working examples, the curated samples below should help.
The QueryScorer class belongs to the org.apache.lucene.search.highlight package. Fifteen code examples of the class are shown below, ordered by popularity by default.
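Before diving into the examples, note the pattern they all share: build a Query, wrap it in a QueryScorer, pass the scorer to a Highlighter together with a Formatter, choose a Fragmenter, then hand the highlighter a TokenStream of the field text. The following is a minimal sketch of that flow, assuming a recent Lucene where StandardAnalyzer takes no constructor arguments; the field name, tags, and sample text are all illustrative, not taken from any example below.
import java.io.StringReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.highlight.*;

public class QueryScorerSketch {
    public static void main(String[] args) throws Exception {
        Query query = new TermQuery(new Term("body", "lucene"));      // illustrative field and term
        QueryScorer scorer = new QueryScorer(query, "body");
        Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter("<b>", "</b>"), scorer);
        highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer, 100));
        Analyzer analyzer = new StandardAnalyzer();
        String text = "Apache Lucene is a full-text search library.";
        TokenStream ts = analyzer.tokenStream("body", new StringReader(text));
        // prints: Apache <b>Lucene</b> is a full-text search library.
        System.out.println(highlighter.getBestFragment(ts, text));
    }
}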
Example 1: getHighlightString
import org.apache.lucene.search.highlight.QueryScorer; // import the required package/class
public static String getHighlightString(String text, String keyword) throws IOException, InvalidTokenOffsetsException {
    TermQuery query = new TermQuery(new Term("f", keyword));
    QueryScorer scorer = new QueryScorer(query);
    SimpleHTMLFormatter formatter = new SimpleHTMLFormatter("<span class=\"highlight\">", "</span>");
    Highlighter highlighter = new Highlighter(formatter, scorer);
    Fragmenter fragmenter = new SimpleFragmenter(50);
    highlighter.setTextFragmenter(fragmenter);
    TokenStream tokenStream = new StandardAnalyzer(Version.LUCENE_20).tokenStream("f", new StringReader(text));
    // collect the best fragments and embed them in a small HTML page
    String result = highlighter.getBestFragments(tokenStream, text, 30, "...");
    StringBuilder writer = new StringBuilder("");
    writer.append("<html>");
    writer.append("<style>\n" +
        ".highlight {\n" +
        " background: yellow;\n" +
        "}\n" +
        "</style>");
    writer.append("<body>");
    writer.append(result);
    writer.append("</body></html>");
    return writer.toString();
}
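For context, a hypothetical caller of this helper might look like the following. The text and keyword are made up; the keyword should already be lower-cased, because the TermQuery is not run through the analyzer.
String html = getHighlightString("Lucene highlighting keeps snippets readable", "lucene");
// html now contains the fragments wrapped in <span class="highlight">...</span> inside a small HTML page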
Example 2: searToHighlighterCss
import org.apache.lucene.search.highlight.QueryScorer; // import the required package/class
/**
 * Search and highlight the matches with a CSS class.
 * @param analyzer
 * @param searcher
 * @throws IOException
 * @throws InvalidTokenOffsetsException
 */
public void searToHighlighterCss(Analyzer analyzer, IndexSearcher searcher) throws IOException, InvalidTokenOffsetsException {
    Term term = new Term("Content", new String("免费".getBytes(), "GBK")); // query term "免费" ("free"), re-decoded as GBK to match the encoding used at index time
    TermQuery query = new TermQuery(term);
    TopDocs docs = searcher.search(query, 10); // run the search
    /** custom tags that wrap the highlighted text */
    SimpleHTMLFormatter formatter = new SimpleHTMLFormatter("<span class=\"hightlighterCss\">", "</span>");
    /** create the QueryScorer */
    QueryScorer scorer = new QueryScorer(query);
    /** create the Fragmenter */
    Fragmenter fragmenter = new SimpleSpanFragmenter(scorer);
    Highlighter highlight = new Highlighter(formatter, scorer);
    highlight.setTextFragmenter(fragmenter);
    for (ScoreDoc doc : docs.scoreDocs) { // fetch each matching document and highlight its Content field
        Document document = searcher.doc(doc.doc);
        String value = document.get("Content"); // stored Content value
        TokenStream tokenStream = analyzer.tokenStream("Content", new StringReader(value));
        String str1 = highlight.getBestFragment(tokenStream, value);
        System.out.println(str1);
    }
}
Example 3: getHighlighterList
import org.apache.lucene.search.highlight.QueryScorer; // import the required package/class
private List<LumongoHighlighter> getHighlighterList(List<HighlightRequest> highlightRequests, Query q) {
    if (highlightRequests.isEmpty()) {
        return Collections.emptyList();
    }
    List<LumongoHighlighter> highlighterList = new ArrayList<>();
    for (HighlightRequest highlight : highlightRequests) {
        QueryScorer queryScorer = new QueryScorer(q, highlight.getField());
        queryScorer.setExpandMultiTermQuery(true);
        Fragmenter fragmenter = new SimpleSpanFragmenter(queryScorer, highlight.getFragmentLength());
        SimpleHTMLFormatter simpleHTMLFormatter = new SimpleHTMLFormatter(highlight.getPreTag(), highlight.getPostTag());
        LumongoHighlighter highlighter = new LumongoHighlighter(simpleHTMLFormatter, queryScorer, highlight);
        highlighter.setTextFragmenter(fragmenter);
        highlighterList.add(highlighter);
    }
    return highlighterList;
}
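LumongoHighlighter and HighlightRequest are LuMongo-specific types, but the setExpandMultiTermQuery(true) call is stock Lucene: it tells the scorer to rewrite multi-term queries (wildcard, prefix, fuzzy) so their matching terms can be highlighted. A minimal sketch of the same idea with the standard Highlighter; the field name and tags are illustrative.
Query wildcard = new WildcardQuery(new Term("title", "luc*"));          // illustrative query
QueryScorer scorer = new QueryScorer(wildcard, "title");
scorer.setExpandMultiTermQuery(true);                                   // expand luc* into concrete terms for scoring
Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter("<em>", "</em>"), scorer);
highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer, 100));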
Example 4: testHits
import org.apache.lucene.search.highlight.QueryScorer; // import the required package/class
public void testHits() throws Exception {
    IndexSearcher searcher = new IndexSearcher(TestUtil.getBookIndexDirectory());
    TermQuery query = new TermQuery(new Term("title", "action"));
    TopDocs hits = searcher.search(query, 10);
    QueryScorer scorer = new QueryScorer(query, "title");
    Highlighter highlighter = new Highlighter(scorer);
    highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer));
    Analyzer analyzer = new SimpleAnalyzer();
    for (ScoreDoc sd : hits.scoreDocs) {
        StoredDocument doc = searcher.doc(sd.doc);
        String title = doc.get("title");
        TokenStream stream = TokenSources.getAnyTokenStream(searcher.getIndexReader(), sd.doc, "title", doc, analyzer);
        String fragment = highlighter.getBestFragment(stream, title);
        LOGGER.info(fragment);
    }
}
Example 5: testHighlightPhrase
import org.apache.lucene.search.highlight.QueryScorer; // import the required package/class
public void testHighlightPhrase() throws Exception {
    Query query = new PhraseQuery.Builder()
            .add(new Term("field", "foo"))
            .add(new Term("field", "bar"))
            .build();
    QueryScorer queryScorer = new CustomQueryScorer(query);
    org.apache.lucene.search.highlight.Highlighter highlighter = new org.apache.lucene.search.highlight.Highlighter(queryScorer);
    String[] frags = highlighter.getBestFragments(new MockAnalyzer(random()), "field", "bar foo bar foo", 10);
    assertArrayEquals(new String[] {"bar <B>foo</B> <B>bar</B> foo"}, frags);
}
Example 6: displayHtmlHighlight
import org.apache.lucene.search.highlight.QueryScorer; // import the required package/class
static String displayHtmlHighlight(Query query, Analyzer analyzer, String fieldName, String fieldContent,
        int fragmentSize) throws IOException, InvalidTokenOffsetsException {
    Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter("<font color='red'>", "</font>"),
            new QueryScorer(query));
    Fragmenter fragmenter = new SimpleFragmenter(fragmentSize);
    highlighter.setTextFragmenter(fragmenter);
    return highlighter.getBestFragment(analyzer, fieldName, fieldContent);
}
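A hypothetical call, assuming a field named "content", a StandardAnalyzer, and made-up content (none of these come from the example itself):
Analyzer analyzer = new StandardAnalyzer();
Query query = new TermQuery(new Term("content", "lucene"));
String html = displayHtmlHighlight(query, analyzer, "content", "Apache Lucene powers this search box", 100);
// -> "Apache <font color='red'>Lucene</font> powers this search box"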
Example 7: search
import org.apache.lucene.search.highlight.QueryScorer; // import the required package/class
public static void search(String indexDir, String q) throws Exception {
    Directory dir = FSDirectory.open(Paths.get(indexDir));
    IndexReader reader = DirectoryReader.open(dir);
    IndexSearcher is = new IndexSearcher(reader);
    // Analyzer analyzer = new StandardAnalyzer(); // standard analyzer
    SmartChineseAnalyzer analyzer = new SmartChineseAnalyzer();
    QueryParser parser = new QueryParser("desc", analyzer);
    Query query = parser.parse(q);
    long start = System.currentTimeMillis();
    TopDocs hits = is.search(query, 10);
    long end = System.currentTimeMillis();
    System.out.println("Matched " + q + ", took " + (end - start) + " ms and found " + hits.totalHits + " record(s)");
    QueryScorer scorer = new QueryScorer(query);
    Fragmenter fragmenter = new SimpleSpanFragmenter(scorer);
    SimpleHTMLFormatter simpleHTMLFormatter = new SimpleHTMLFormatter("<b><font color='red'>", "</font></b>");
    Highlighter highlighter = new Highlighter(simpleHTMLFormatter, scorer);
    highlighter.setTextFragmenter(fragmenter);
    for (ScoreDoc scoreDoc : hits.scoreDocs) {
        Document doc = is.doc(scoreDoc.doc);
        System.out.println(doc.get("city"));
        System.out.println(doc.get("desc"));
        String desc = doc.get("desc");
        if (desc != null) {
            TokenStream tokenStream = analyzer.tokenStream("desc", new StringReader(desc));
            System.out.println(highlighter.getBestFragment(tokenStream, desc));
        }
    }
    reader.close();
}
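A hypothetical invocation; both the index path and the query string are made up:
search("/tmp/city-index", "南京");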
Example 8: search
import org.apache.lucene.search.highlight.QueryScorer; // import the required package/class
@Override
@SuppressWarnings("unchecked")
public List<Post> search(Paging paging, String q) throws Exception {
    FullTextSession fullTextSession = Search.getFullTextSession(super.session());
    SearchFactory sf = fullTextSession.getSearchFactory();
    QueryBuilder qb = sf.buildQueryBuilder().forEntity(PostPO.class).get();
    org.apache.lucene.search.Query luceneQuery = qb.keyword().onFields("title", "summary", "tags").matching(q).createQuery();
    FullTextQuery query = fullTextSession.createFullTextQuery(luceneQuery);
    query.setFirstResult(paging.getFirstResult());
    query.setMaxResults(paging.getMaxResults());
    StandardAnalyzer standardAnalyzer = new StandardAnalyzer();
    SimpleHTMLFormatter formatter = new SimpleHTMLFormatter("<span style='color:red;'>", "</span>");
    QueryScorer queryScorer = new QueryScorer(luceneQuery);
    Highlighter highlighter = new Highlighter(formatter, queryScorer);
    List<PostPO> list = query.list();
    List<Post> rets = new ArrayList<>(list.size());
    for (PostPO po : list) {
        Post m = BeanMapUtils.copy(po, 0);
        // apply highlighting
        String title = highlighter.getBestFragment(standardAnalyzer, "title", m.getTitle());
        String summary = highlighter.getBestFragment(standardAnalyzer, "summary", m.getSummary());
        if (StringUtils.isNotEmpty(title)) {
            m.setTitle(title);
        }
        if (StringUtils.isNotEmpty(summary)) {
            m.setSummary(summary);
        }
        rets.add(m);
    }
    paging.setTotalCount(query.getResultSize());
    return rets;
}
Example 9: HighlightingHelper
import org.apache.lucene.search.highlight.QueryScorer; // import the required package/class
HighlightingHelper(Query query, Analyzer analyzer) {
    this.analyzer = analyzer;
    Formatter formatter = new SimpleHTMLFormatter();
    Encoder encoder = new MinimalHTMLEncoder();
    scorer = new QueryScorer(query);
    highlighter = new Highlighter(formatter, encoder, scorer);
    fragmentLength = DEFAULT_FRAGMENT_LENGTH;
    Fragmenter fragmenter = new SimpleSpanFragmenter(scorer, fragmentLength);
    highlighter.setTextFragmenter(fragmenter);
}
Example 10: getBenchmarkHighlighter
import org.apache.lucene.search.highlight.QueryScorer; // import the required package/class
@Override
protected BenchmarkHighlighter getBenchmarkHighlighter(Query q) {
    highlighter = new Highlighter(new SimpleHTMLFormatter(), new QueryScorer(q));
    highlighter.setMaxDocCharsToAnalyze(maxDocCharsToAnalyze);
    return new BenchmarkHighlighter() {
        @Override
        public int doHighlight(IndexReader reader, int doc, String field,
                Document document, Analyzer analyzer, String text) throws Exception {
            TokenStream ts = TokenSources.getAnyTokenStream(reader, doc, field, document, analyzer);
            TextFragment[] frag = highlighter.getBestTextFragments(ts, text, mergeContiguous, maxFrags);
            return frag != null ? frag.length : 0;
        }
    };
}
Example 11: getBenchmarkHighlighter
import org.apache.lucene.search.highlight.QueryScorer; // import the required package/class
@Override
public BenchmarkHighlighter getBenchmarkHighlighter(Query q) {
    highlighter = new Highlighter(new SimpleHTMLFormatter(), new QueryScorer(q));
    return new BenchmarkHighlighter() {
        @Override
        public int doHighlight(IndexReader reader, int doc, String field, Document document, Analyzer analyzer, String text) throws Exception {
            TokenStream ts = TokenSources.getAnyTokenStream(reader, doc, field, document, analyzer);
            TextFragment[] frag = highlighter.getBestTextFragments(ts, text, mergeContiguous, maxFrags);
            numHighlightedResults += frag != null ? frag.length : 0;
            return frag != null ? frag.length : 0;
        }
    };
}
Example 12: createHighlighter
import org.apache.lucene.search.highlight.QueryScorer; // import the required package/class
public static Object createHighlighter(Query query, String highlightBegin, String highlightEnd) {
    return new Highlighter(
            // new SimpleHTMLFormatter("<span class=\"matching-term\">", "</span>"),
            new SimpleHTMLFormatter(highlightBegin, highlightEnd),
            new QueryScorer(query));
}
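Because the factory above returns Object, callers have to cast back to Highlighter. A hypothetical use, assuming a query, an analyzer, and the stored text are already in scope (none of them come from the example; the tags and field name are illustrative):
Highlighter hl = (Highlighter) createHighlighter(query, "<mark>", "</mark>");
hl.setTextFragmenter(new SimpleFragmenter(150));                        // optional: limit fragment size
String fragment = hl.getBestFragment(analyzer, "content", storedText);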
Example 13: searchCorpus
import org.apache.lucene.search.highlight.QueryScorer; // import the required package/class
/**
 * Searches the current corpus using the search terms in the search field.
 */
private void searchCorpus() {
    if (search.getText().trim().equals("")) return;
    try {
        indexSearcher = guess.getSelected() != null ?
                getIndex(getDiffCorpus(gold.getSelected(), guess.getSelected())) :
                getIndex(gold.getSelected());
        //System.out.println("Searching...");
        QueryParser parser = new QueryParser("Word", analyzer);
        Query query = parser.parse(search.getText());
        Hits hits = indexSearcher.search(query);
        Highlighter highlighter = new Highlighter(new QueryScorer(query));
        DefaultListModel model = new DefaultListModel();
        for (int i = 0; i < hits.length(); i++) {
            Document hitDoc = hits.doc(i);
            int nr = Integer.parseInt(hitDoc.get("<nr>"));
            //System.out.println(hitDoc.get("<nr>"));
            String best = null;
            for (Object field : hitDoc.getFields()) {
                Field f = (Field) field;
                best = highlighter.getBestFragment(analyzer, f.name(), hitDoc.get(f.name()));
                if (best != null) break;
            }
            if (best != null)
                model.addElement(new Result(nr, "<html>" + nr + ":" + best + "</html>"));
            //System.out.println(highlighter.getBestFragment(analyzer, "Word", hitDoc.get("Word")));
            //assertEquals("This is the text to be indexed.", hitDoc.get("fieldname"));
        }
        results.setModel(model);
        repaint();
    } catch (Exception ex) {
        ex.printStackTrace();
    }
}
Example 14: createHighlighter
import org.apache.lucene.search.highlight.QueryScorer; // import the required package/class
protected Highlighter createHighlighter(org.apache.lucene.search.Query luceneQuery) {
    SimpleHTMLFormatter format = new SimpleHTMLFormatter("<b><font color='red'>", "</font></b>");
    Highlighter highlighter = new Highlighter(format, new QueryScorer(luceneQuery)); // highlighter
    // highlighter.setTextFragmenter(new SimpleFragmenter(Integer.MAX_VALUE));
    highlighter.setTextFragmenter(new SimpleFragmenter(200));
    return highlighter;
}
Example 15: doHighlight
import org.apache.lucene.search.highlight.QueryScorer; // import the required package/class
/**
 * Highlight (bold, color) query words in the result document. Sets the HighlightResult for content or description.
 *
 * @param query
 * @param analyzer
 * @param doc
 * @param resultDocument
 * @throws IOException
 */
private void doHighlight(final Query query, final Analyzer analyzer, final Document doc, final ResultDocument resultDocument) throws IOException {
    final Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter(HIGHLIGHT_PRE_TAG, HIGHLIGHT_POST_TAG), new QueryScorer(query));
    // Get the 3 best fragments of the content, separated by "..."
    try {
        // highlight content
        final String content = doc.get(AbstractOlatDocument.CONTENT_FIELD_NAME);
        TokenStream tokenStream = analyzer.tokenStream(AbstractOlatDocument.CONTENT_FIELD_NAME, new StringReader(content));
        String highlightResult = highlighter.getBestFragments(tokenStream, content, 3, HIGHLIGHT_SEPARATOR);
        // if there is no highlight result in the content => look in the description
        if (highlightResult.length() == 0) {
            final String description = doc.get(AbstractOlatDocument.DESCRIPTION_FIELD_NAME);
            tokenStream = analyzer.tokenStream(AbstractOlatDocument.DESCRIPTION_FIELD_NAME, new StringReader(description));
            highlightResult = highlighter.getBestFragments(tokenStream, description, 3, HIGHLIGHT_SEPARATOR);
            resultDocument.setHighlightingDescription(true);
        }
        resultDocument.setHighlightResult(highlightResult);
        // highlight title
        final String title = doc.get(AbstractOlatDocument.TITLE_FIELD_NAME);
        tokenStream = analyzer.tokenStream(AbstractOlatDocument.TITLE_FIELD_NAME, new StringReader(title));
        final String highlightTitle = highlighter.getBestFragments(tokenStream, title, 3, " ");
        resultDocument.setHighlightTitle(highlightTitle);
    } catch (final InvalidTokenOffsetsException e) {
        log.warn("", e);
    }
}