This article summarizes typical usage of the Java method org.apache.lucene.search.highlight.Highlighter.getBestFragment. If you are wondering what Highlighter.getBestFragment does, how to use it, or where to find examples of it, the curated code samples below should help. You can also read further about its enclosing class, org.apache.lucene.search.highlight.Highlighter.
The following lists 10 code examples of Highlighter.getBestFragment, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Java code samples.
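Before the individual examples, here is a minimal, self-contained sketch of the pattern they all share: build a QueryScorer from the query, pair it with a Formatter inside a Highlighter, then pass an Analyzer, the field name, and the stored field text to getBestFragment. The class name, field name, and sample text are illustrative assumptions rather than taken from any single example, and a recent Lucene version with a no-argument StandardAnalyzer constructor is assumed.

import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.search.highlight.SimpleSpanFragmenter;

public class HighlightSketch {
    // Returns the best-scoring fragment of text with matching terms wrapped in <em> tags,
    // or null when the query does not match the text at all.
    static String highlight(Query query, Analyzer analyzer, String fieldName, String text)
            throws IOException, InvalidTokenOffsetsException {
        QueryScorer scorer = new QueryScorer(query, fieldName);
        Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter("<em>", "</em>"), scorer);
        highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer, 100)); // fragments of roughly 100 characters
        return highlighter.getBestFragment(analyzer, fieldName, text);
    }

    public static void main(String[] args) throws Exception {
        Query query = new TermQuery(new Term("content", "lucene"));
        String text = "Apache Lucene is a full-text search library written in Java.";
        System.out.println(highlight(query, new StandardAnalyzer(), "content", text));
    }
}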
Example 1: searToHighlighterCss
import org.apache.lucene.search.highlight.Highlighter; // import the package/class the method depends on
/**
* Searches the index and highlights hits with a custom CSS class.
* @param analyzer
* @param searcher
* @throws IOException
* @throws InvalidTokenOffsetsException
*/
public void searToHighlighterCss(Analyzer analyzer, IndexSearcher searcher) throws IOException, InvalidTokenOffsetsException{
Term term = new Term("Content", new String("免费".getBytes(), "GBK")); // query term "免费"; the GBK byte round-trip presumably compensates for a source-file encoding mismatch
TermQuery query = new TermQuery(term);
TopDocs docs = searcher.search(query, 10); // run the search, keeping the top 10 hits
/** Custom HTML tags used to mark up the highlighted text */
SimpleHTMLFormatter formatter = new SimpleHTMLFormatter("<span class=\"hightlighterCss\">","</span>");
/** Create the QueryScorer */
QueryScorer scorer=new QueryScorer(query);
/** Create the Fragmenter */
Fragmenter fragmenter = new SimpleSpanFragmenter(scorer);
Highlighter highlight=new Highlighter(formatter,scorer);
highlight.setTextFragmenter(fragmenter);
for (ScoreDoc doc : docs.scoreDocs) { // walk the matching documents and print the highlighted content
Document document = searcher.doc(doc.doc);
String value = document.get("Content"); // get() returns the stored value; getField().toString() would include the field name
TokenStream tokenStream = analyzer.tokenStream("Content", new StringReader(value));
String str1 = highlight.getBestFragment(tokenStream, value);
System.out.println(str1);
}
}
Example 2: testHits
import org.apache.lucene.search.highlight.Highlighter; // import the package/class the method depends on
public void testHits() throws Exception {
IndexSearcher searcher = new IndexSearcher(TestUtil.getBookIndexDirectory());
TermQuery query = new TermQuery(new Term("title", "action"));
TopDocs hits = searcher.search(query, 10);
QueryScorer scorer = new QueryScorer(query, "title");
Highlighter highlighter = new Highlighter(scorer);
highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer));
Analyzer analyzer = new SimpleAnalyzer();
for (ScoreDoc sd : hits.scoreDocs) {
StoredDocument doc = searcher.doc(sd.doc);
String title = doc.get("title");
TokenStream stream = TokenSources.getAnyTokenStream(searcher.getIndexReader(), sd.doc, "title", doc,
analyzer);
String fragment = highlighter.getBestFragment(stream, title);
LOGGER.info(fragment);
}
}
Example 3: displayHtmlHighlight
import org.apache.lucene.search.highlight.Highlighter; // import the package/class the method depends on
static String displayHtmlHighlight(Query query, Analyzer analyzer, String fieldName, String fieldContent,
int fragmentSize) throws IOException, InvalidTokenOffsetsException {
Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter("<font color='red'>", "</font>"),
new QueryScorer(query));
Fragmenter fragmenter = new SimpleFragmenter(fragmentSize);
highlighter.setTextFragmenter(fragmenter);
return highlighter.getBestFragment(analyzer, fieldName, fieldContent);
}
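A hypothetical call to this helper might look like the following; the query, field name, sample content, and fragment size are assumptions chosen purely for illustration.

Query query = new TermQuery(new Term("content", "search"));
String snippet = displayHtmlHighlight(query, new StandardAnalyzer(), "content",
        "Full-text search with Lucene also supports highlighting of the search terms.", 60);
// snippet contains the matching terms wrapped in <font color='red'>...</font>,
// split into fragments of roughly 60 characters by SimpleFragmenter;
// it is null when the query does not match the content at all.
System.out.println(snippet);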
Example 4: search
import org.apache.lucene.search.highlight.Highlighter; // import the package/class the method depends on
@Override
@SuppressWarnings("unchecked")
public List<Post> search(Paging paging, String q) throws Exception {
FullTextSession fullTextSession = Search.getFullTextSession(super.session());
SearchFactory sf = fullTextSession.getSearchFactory();
QueryBuilder qb = sf.buildQueryBuilder().forEntity(PostPO.class).get();
org.apache.lucene.search.Query luceneQuery = qb.keyword().onFields("title","summary","tags").matching(q).createQuery();
FullTextQuery query = fullTextSession.createFullTextQuery(luceneQuery);
query.setFirstResult(paging.getFirstResult());
query.setMaxResults(paging.getMaxResults());
StandardAnalyzer standardAnalyzer = new StandardAnalyzer();
SimpleHTMLFormatter formatter = new SimpleHTMLFormatter("<span style='color:red;'>", "</span>");
QueryScorer queryScorer = new QueryScorer(luceneQuery);
Highlighter highlighter = new Highlighter(formatter, queryScorer);
List<PostPO> list = query.list();
List<Post> rets = new ArrayList<>(list.size());
for (PostPO po : list) {
Post m = BeanMapUtils.copy(po, 0);
// apply highlighting
String title = highlighter.getBestFragment(standardAnalyzer, "title", m.getTitle());
String summary = highlighter.getBestFragment(standardAnalyzer, "summary", m.getSummary());
if (StringUtils.isNotEmpty(title)) {
m.setTitle(title);
}
if (StringUtils.isNotEmpty(summary)) {
m.setSummary(summary);
}
rets.add(m);
}
paging.setTotalCount(query.getResultSize());
return rets;
}
Example 5: searchCorpus
import org.apache.lucene.search.highlight.Highlighter; // import the package/class the method depends on
/**
* Searches the current corpus using the search terms in the search field.
*/
private void searchCorpus() {
if (search.getText().trim().equals("")) return;
try {
indexSearcher = guess.getSelected() != null ?
getIndex(getDiffCorpus(gold.getSelected(), guess.getSelected())) :
getIndex(gold.getSelected());
//System.out.println("Searching...");
QueryParser parser = new QueryParser("Word", analyzer);
Query query = parser.parse(search.getText());
Hits hits = indexSearcher.search(query);
Highlighter highlighter = new Highlighter(new QueryScorer(query));
DefaultListModel model = new DefaultListModel();
for (int i = 0; i < hits.length(); i++) {
Document hitDoc = hits.doc(i);
int nr = Integer.parseInt(hitDoc.get("<nr>"));
//System.out.println(hitDoc.get("<nr>"));
String best = null;
for (Object field : hitDoc.getFields()) {
Field f = (Field) field;
best = highlighter.getBestFragment(analyzer, f.name(), hitDoc.get(f.name()));
if (best != null) break;
}
if (best != null)
model.addElement(new Result(nr, "<html>" + nr + ":" + best + "</html>"));
//System.out.println(highlighter.getBestFragment(analyzer, "Word", hitDoc.get("Word")));
//assertEquals("This is the text to be indexed.", hitDoc.get("fieldname"));
}
results.setModel(model);
repaint();
} catch (Exception ex) {
ex.printStackTrace();
}
}
Example 6: main
import org.apache.lucene.search.highlight.Highlighter; // import the package/class the method depends on
public static void main(String[] args) throws Exception{
ApplicationContext applicationContext=new ClassPathXmlApplicationContext("applicationContext.xml");
SessionFactory sessionFactory = applicationContext.getBean("hibernate4sessionFactory",SessionFactory.class);
FullTextSession fullTextSession = Search.getFullTextSession(sessionFactory.openSession());
//query via the Hibernate Search API, matching across the fields name, description and authors.name
// QueryBuilder qb = fullTextEntityManager.getSearchFactory().buildQueryBuilder().forEntity(Book.class ).get();
// Query luceneQuery = qb.keyword().onFields("name","description","authors.name").matching("移动互联网").createQuery();
//query via the Lucene API, matching across the fields name, description and authors.name
//use the Paoding Chinese analyzer (PaodingAnalyzer)
MultiFieldQueryParser queryParser=new MultiFieldQueryParser(Version.LUCENE_36, new String[]{"name","description","authors.name"}, new PaodingAnalyzer());
Query luceneQuery=queryParser.parse("实战");
FullTextQuery fullTextQuery =fullTextSession.createFullTextQuery(luceneQuery, Book.class);
//set how many results to show per page
fullTextQuery.setMaxResults(5);
//set the current page (offset of the first result)
fullTextQuery.setFirstResult(0);
//highlighting setup
SimpleHTMLFormatter formatter=new SimpleHTMLFormatter("<b><font color='red'>", "</font></b>");
QueryScorer queryScorer=new QueryScorer(luceneQuery);
Highlighter highlighter=new Highlighter(formatter, queryScorer);
@SuppressWarnings("unchecked")
List<Book> resultList = fullTextQuery.list();
System.out.println("共查找到["+resultList.size()+"]条记录");
for (Book book : resultList) {
String highlighterString=null;
Analyzer analyzer=new PaodingAnalyzer();
try {
//highlight name
highlighterString=highlighter.getBestFragment(analyzer, "name", book.getName());
if(highlighterString!=null){
book.setName(highlighterString);
}
//highlight authors.name
Set<Author> authors = book.getAuthors();
for (Author author : authors) {
highlighterString=highlighter.getBestFragment(analyzer, "authors.name", author.getName());
if(highlighterString!=null){
author.setName(highlighterString);
}
}
//highlight description
highlighterString=highlighter.getBestFragment(analyzer, "description", book.getDescription());
if(highlighterString!=null){
book.setDescription(highlighterString);
}
} catch (Exception e) {
// ignore highlighting failures and keep the original, un-highlighted text
}
System.out.println("书名:"+book.getName()+"\n描述:"+book.getDescription()+"\n出版日期:"+book.getPublicationDate());
System.out.println("----------------------------------------------------------");
}
fullTextSession.close();
sessionFactory.close();
}
Example 7: query
import org.apache.lucene.search.highlight.Highlighter; // import the package/class the method depends on
@Override
public QueryResult<Book> query(String keyword, int start, int pagesize,Analyzer analyzer,String...field) throws Exception{
QueryResult<Book> queryResult=new QueryResult<Book>();
List<Book> books=new ArrayList<Book>();
FullTextSession fullTextSession = Search.getFullTextSession(getSession());
//query via the Hibernate Search API, matching across the fields name, description and authors.name
//QueryBuilder qb = fullTextSession.getSearchFactory().buildQueryBuilder().forEntity(Book.class ).get();
//Query luceneQuery = qb.keyword().onFields(field).matching(keyword).createQuery();
//query via the Lucene API, matching across the fields name, description and authors.name
MultiFieldQueryParser queryParser=new MultiFieldQueryParser(Version.LUCENE_36,new String[]{"name","description","authors.name"}, analyzer);
Query luceneQuery=queryParser.parse(keyword);
FullTextQuery fullTextQuery = fullTextSession.createFullTextQuery(luceneQuery);
int searchresultsize = fullTextQuery.getResultSize();
queryResult.setSearchresultsize(searchresultsize);
System.out.println("共查找到["+searchresultsize+"]条记录");
fullTextQuery.setFirstResult(start);
fullTextQuery.setMaxResults(pagesize);
//sort by id, descending
fullTextQuery.setSort(new Sort(new SortField("id", SortField.INT ,true)));
//highlighting setup
SimpleHTMLFormatter formatter=new SimpleHTMLFormatter("<b><font color='red'>", "</font></b>");
QueryScorer queryScorer=new QueryScorer(luceneQuery);
Highlighter highlighter=new Highlighter(formatter, queryScorer);
@SuppressWarnings("unchecked")
List<Book> tempresult = fullTextQuery.list();
for (Book book : tempresult) {
String highlighterString=null;
try {
//highlight name
highlighterString=highlighter.getBestFragment(analyzer, "name", book.getName());
if(highlighterString!=null){
book.setName(highlighterString);
}
//highlight authors.name
Set<Author> authors = book.getAuthors();
for (Author author : authors) {
highlighterString=highlighter.getBestFragment(analyzer, "authors.name", author.getName());
if(highlighterString!=null){
author.setName(highlighterString);
}
}
//highlight description
highlighterString=highlighter.getBestFragment(analyzer, "description", book.getDescription());
if(highlighterString!=null){
book.setDescription(highlighterString);
}
} catch (Exception e) {
// ignore highlighting failures and keep the original, un-highlighted text
}
books.add(book);
System.out.println("书名:"+book.getName()+"\n描述:"+book.getDescription()+"\n出版日期:"+book.getPublicationDate());
System.out.println("----------------------------------------------------------");
}
queryResult.setSearchresult(books);
return queryResult;
}
Example 8: main
import org.apache.lucene.search.highlight.Highlighter; // import the package/class the method depends on
public static void main(String[] args) throws Exception{
ApplicationContext applicationContext=new ClassPathXmlApplicationContext("applicationContext.xml");
EntityManagerFactory entityManagerFactory = applicationContext.getBean("entityManagerFactory",EntityManagerFactory.class);
FullTextEntityManager fullTextEntityManager = Search.getFullTextEntityManager(entityManagerFactory.createEntityManager());
//query via the Hibernate Search API, matching across the fields name, description and authors.name
// QueryBuilder qb = fullTextEntityManager.getSearchFactory().buildQueryBuilder().forEntity(Book.class ).get();
// Query luceneQuery = qb.keyword().onFields("name","description","authors.name").matching("移动互联网").createQuery();
//query via the Lucene API, matching across the fields name, description and authors.name
//use the Paoding Chinese analyzer (PaodingAnalyzer)
MultiFieldQueryParser queryParser=new MultiFieldQueryParser(Version.LUCENE_36, new String[]{"name","description","authors.name"}, new PaodingAnalyzer());
Query luceneQuery=queryParser.parse("实战");
FullTextQuery fullTextQuery =fullTextEntityManager.createFullTextQuery(luceneQuery, Book.class);
//set how many results to show per page
fullTextQuery.setMaxResults(5);
//set the current page (offset of the first result)
fullTextQuery.setFirstResult(0);
//highlighting setup
SimpleHTMLFormatter formatter=new SimpleHTMLFormatter("<b><font color='red'>", "</font></b>");
QueryScorer queryScorer=new QueryScorer(luceneQuery);
Highlighter highlighter=new Highlighter(formatter, queryScorer);
@SuppressWarnings("unchecked")
List<Book> resultList = fullTextQuery.getResultList();
for (Book book : resultList) {
String highlighterString=null;
Analyzer analyzer=new PaodingAnalyzer();
try {
//highlight name
highlighterString=highlighter.getBestFragment(analyzer, "name", book.getName());
if(highlighterString!=null){
book.setName(highlighterString);
}
//highlight authors.name
Set<Author> authors = book.getAuthors();
for (Author author : authors) {
highlighterString=highlighter.getBestFragment(analyzer, "authors.name", author.getName());
if(highlighterString!=null){
author.setName(highlighterString);
}
}
//highlight description
highlighterString=highlighter.getBestFragment(analyzer, "description", book.getDescription());
if(highlighterString!=null){
book.setDescription(highlighterString);
}
} catch (Exception e) {
// ignore highlighting failures and keep the original, un-highlighted text
}
}
fullTextEntityManager.close();
entityManagerFactory.close();
}
Example 9: query
import org.apache.lucene.search.highlight.Highlighter; // import the package/class the method depends on
@Override
public QueryResult<Book> query(String keyword, int start, int pagesize,Analyzer analyzer,String...field) throws Exception{
QueryResult<Book> queryResult=new QueryResult<Book>();
List<Book> books=new ArrayList<Book>();
FullTextEntityManager fullTextEntityManager = Search.getFullTextEntityManager(em);
//query via the Hibernate Search API, matching across the fields name, description and authors.name
//QueryBuilder qb = fullTextSession.getSearchFactory().buildQueryBuilder().forEntity(Book.class ).get();
//Query luceneQuery = qb.keyword().onFields(field).matching(keyword).createQuery();
//query via the Lucene API, matching across the fields name, description and authors.name
MultiFieldQueryParser queryParser=new MultiFieldQueryParser(Version.LUCENE_36,new String[]{"name","description","authors.name"}, analyzer);
Query luceneQuery=queryParser.parse(keyword);
FullTextQuery fullTextQuery = fullTextEntityManager.createFullTextQuery(luceneQuery);
int searchresultsize = fullTextQuery.getResultSize();
queryResult.setSearchresultsize(searchresultsize);
fullTextQuery.setFirstResult(start);
fullTextQuery.setMaxResults(pagesize);
//sort by id, descending
fullTextQuery.setSort(new Sort(new SortField("id", SortField.INT ,true)));
//highlighting setup
SimpleHTMLFormatter formatter=new SimpleHTMLFormatter("<b><font color='red'>", "</font></b>");
QueryScorer queryScorer=new QueryScorer(luceneQuery);
Highlighter highlighter=new Highlighter(formatter, queryScorer);
@SuppressWarnings("unchecked")
List<Book> tempresult = fullTextQuery.getResultList();
for (Book book : tempresult) {
String highlighterString=null;
try {
//highlight name
highlighterString=highlighter.getBestFragment(analyzer, "name", book.getName());
if(highlighterString!=null){
book.setName(highlighterString);
}
//highlight authors.name
Set<Author> authors = book.getAuthors();
for (Author author : authors) {
highlighterString=highlighter.getBestFragment(analyzer, "authors.name", author.getName());
if(highlighterString!=null){
author.setName(highlighterString);
}
}
//highlight description
highlighterString=highlighter.getBestFragment(analyzer, "description", book.getDescription());
if(highlighterString!=null){
book.setDescription(highlighterString);
}
} catch (Exception e) {
// ignore highlighting failures and keep the original, un-highlighted text
}
books.add(book);
}
queryResult.setSearchresult(books);
return queryResult;
}
Example 10: getHighlightedField
import org.apache.lucene.search.highlight.Highlighter; // import the package/class the method depends on
/**
* Returns the best fragment of the given full text for the query, with matching
* terms wrapped between the start and stop delimiters.
*
* @param query the query whose terms should be highlighted
* @param analyzer the analyzer used to tokenize the field content
* @param fieldName the name of the field being highlighted
* @param fulltext the stored text of the field
* @param startDelimiter markup inserted before each match
* @param stopDelimiter markup inserted after each match
* @return the highlighted fragment, or null if the query does not match the text
*
* @throws IOException
* @throws InvalidTokenOffsetsException
*/
public static String getHighlightedField(Query query, Analyzer analyzer, String fieldName,
String fulltext, final String startDelimiter, final String stopDelimiter) throws IOException,
InvalidTokenOffsetsException {
Formatter formatter = new SimpleHTMLFormatter(startDelimiter, stopDelimiter);
QueryScorer queryScorer = new QueryScorer(query);
Highlighter highlighter = new Highlighter(formatter, queryScorer);
highlighter.setTextFragmenter(new SimpleSpanFragmenter(queryScorer, Integer.MAX_VALUE));
highlighter.setMaxDocCharsToAnalyze(Integer.MAX_VALUE);
return highlighter.getBestFragment(analyzer, fieldName, fulltext);
}
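A hypothetical call site for this helper might look like the following; the query, analyzer, field name, and sample text are assumptions for illustration rather than part of the original example.

Query query = new TermQuery(new Term("body", "highlight"));
String html = getHighlightedField(query, new StandardAnalyzer(), "body",
        "Lucene can highlight matching terms inside stored text.",
        "<strong>", "</strong>");
// Because the fragmenter size is Integer.MAX_VALUE, the expected result is the full
// text with "highlight" wrapped in <strong> tags; getBestFragment returns null when
// nothing matches, so callers should fall back to the raw field text in that case.
System.out.println(html != null ? html : "no match");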