本文整理汇总了Java中org.apache.lucene.search.highlight.Fragmenter类的典型用法代码示例。如果您正苦于以下问题:Java Fragmenter类的具体用法?Java Fragmenter怎么用?Java Fragmenter使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
Fragmenter类属于org.apache.lucene.search.highlight包,在下文中一共展示了Fragmenter类的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getFragmenter
import org.apache.lucene.search.highlight.Fragmenter; //导入依赖的package包/类
@Override
public Fragmenter getFragmenter(String fieldName, SolrParams params )
{
numRequests++;
params = SolrParams.wrapDefaults(params, defaults);
int fragsize = params.getFieldInt( fieldName, HighlightParams.FRAGSIZE, LuceneRegexFragmenter.DEFAULT_FRAGMENT_SIZE );
// A non-positive fragment size disables fragmenting entirely; bail out
// before doing any pattern work.
if( fragsize <= 0 ) {
return new NullFragmenter();
}
int increment = params.getFieldInt( fieldName, HighlightParams.INCREMENT, LuceneRegexFragmenter.DEFAULT_INCREMENT_GAP );
float slop = params.getFieldFloat( fieldName, HighlightParams.SLOP, LuceneRegexFragmenter.DEFAULT_SLOP );
int maxchars = params.getFieldInt( fieldName, HighlightParams.MAX_RE_CHARS, LuceneRegexFragmenter.DEFAULT_MAX_ANALYZED_CHARS );
String rawpat = params.getFieldParam( fieldName, HighlightParams.PATTERN, LuceneRegexFragmenter.DEFAULT_PATTERN_RAW );
// Use equals(), not ==: reference identity only holds when getFieldParam
// returned the exact default object; an equal pattern supplied by the
// request should also reuse the pre-compiled default Pattern.
Pattern p = defaultPatternRaw.equals(rawpat) ? defaultPattern : Pattern.compile(rawpat);
return new LuceneRegexFragmenter( fragsize, increment, slop, maxchars, p );
}
示例2: getHighlightString
import org.apache.lucene.search.highlight.Fragmenter; //导入依赖的package包/类
/**
 * Highlights every occurrence of {@code keyword} in {@code text} and wraps the
 * best fragments in a minimal HTML page with a yellow-background style.
 *
 * @param text    the raw text to highlight
 * @param keyword the term to highlight (matched against pseudo-field "f")
 * @return a complete HTML document containing the highlighted fragments
 * @throws IOException on analysis failure or invalid token offsets
 */
public static String getHighlightString (String text, String keyword) throws IOException {
TermQuery query = new TermQuery(new Term("f", keyword));
QueryScorer scorer = new QueryScorer(query);
SimpleHTMLFormatter formatter = new SimpleHTMLFormatter("<span class=\"highlight\">","</span>");
Highlighter highlighter = new Highlighter(formatter, scorer);
Fragmenter fragmenter = new SimpleFragmenter(50);
highlighter.setTextFragmenter(fragmenter);
TokenStream tokenStream = new StandardAnalyzer(Version.LUCENE_20).tokenStream("f", new StringReader(text));
// BUG FIX: the original appended an empty string instead of the highlighted
// fragments, so the returned page was always empty. Compute the fragments and
// wrap the checked highlighter exception so the method signature is unchanged.
String result;
try {
result = highlighter.getBestFragments(tokenStream, text, 30, "...");
} catch (org.apache.lucene.search.highlight.InvalidTokenOffsetsException e) {
throw new IOException("highlighting failed for keyword: " + keyword, e);
}
StringBuilder writer = new StringBuilder("");
writer.append("<html>");
writer.append("<style>\n" +
".highlight {\n" +
" background: yellow;\n" +
"}\n" +
"</style>");
writer.append("<body>");
writer.append(result);
writer.append("</body></html>");
return ( writer.toString() );
}
示例3: searToHighlighterCss
import org.apache.lucene.search.highlight.Fragmenter; //导入依赖的package包/类
/**
 * Searches the "Content" field for the term "免费" ("free") and prints each
 * matching document's best fragment with a CSS-class highlight wrapper.
 *
 * @param analyzer the analyzer used to re-tokenize the stored content
 * @param searcher the searcher over the target index
 * @throws IOException on index access failure
 * @throws InvalidTokenOffsetsException if token offsets are inconsistent
 */
public void searToHighlighterCss(Analyzer analyzer,IndexSearcher searcher) throws IOException, InvalidTokenOffsetsException{
// BUG FIX: the original built the term via new String("免费".getBytes(),"GBK"),
// a platform-charset round-trip that corrupts the query text unless the JVM
// default charset happens to be GBK. Java String literals are already Unicode.
Term term =new Term("Content", "免费");
TermQuery query =new TermQuery(term);
TopDocs docs =searcher.search(query, 10);// run the search, top 10 hits
/* Custom HTML tags wrapped around each highlighted hit. */
SimpleHTMLFormatter formatter = new SimpleHTMLFormatter("<span class=\"hightlighterCss\">","</span>");
/* Score fragments against the query. */
QueryScorer scorer=new QueryScorer(query);
/* Fragmenter that sizes fragments around query spans. */
Fragmenter fragmenter = new SimpleSpanFragmenter(scorer);
Highlighter highlight=new Highlighter(formatter,scorer);
highlight.setTextFragmenter(fragmenter);
for(ScoreDoc doc:docs.scoreDocs){// iterate over the matched documents
Document document =searcher.doc(doc.doc);
// BUG FIX: getField(...).toString() yields the IndexableField's debug
// representation (e.g. "stored,indexed<Content:...>"), not the stored text.
String value = document.get("Content");
TokenStream tokenStream = analyzer.tokenStream("Content", new StringReader(value));
String str1 = highlight.getBestFragment(tokenStream, value);
System.out.println(str1);
}
}
示例4: getHighlighterList
import org.apache.lucene.search.highlight.Fragmenter; //导入依赖的package包/类
/**
 * Builds one configured {@link LumongoHighlighter} per highlight request.
 *
 * @param highlightRequests the requested highlight configurations; may be empty
 * @param q                 the query whose terms should be highlighted
 * @return one highlighter per request, or an empty list when none were requested
 */
private List<LumongoHighlighter> getHighlighterList(List<HighlightRequest> highlightRequests, Query q) {
if (highlightRequests.isEmpty()) {
return Collections.emptyList();
}
List<LumongoHighlighter> highlighters = new ArrayList<>();
for (HighlightRequest request : highlightRequests) {
// Score against the request's target field; expand wildcard/prefix queries
// so their matches are highlighted too.
QueryScorer scorer = new QueryScorer(q, request.getField());
scorer.setExpandMultiTermQuery(true);
SimpleHTMLFormatter formatter = new SimpleHTMLFormatter(request.getPreTag(), request.getPostTag());
LumongoHighlighter highlighter = new LumongoHighlighter(formatter, scorer, request);
highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer, request.getFragmentLength()));
highlighters.add(highlighter);
}
return highlighters;
}
示例5: displayHtmlHighlight
import org.apache.lucene.search.highlight.Fragmenter; //导入依赖的package包/类
/**
 * Returns the best fragment of {@code fieldContent} with query matches wrapped
 * in red {@code <font>} tags, or {@code null} when nothing matched.
 *
 * @param query        the query whose terms are highlighted
 * @param analyzer     analyzer used to tokenize the content
 * @param fieldName    the field the content came from
 * @param fieldContent the raw field text
 * @param fragmentSize maximum fragment length in characters
 */
static String displayHtmlHighlight(Query query, Analyzer analyzer, String fieldName, String fieldContent,
int fragmentSize) throws IOException, InvalidTokenOffsetsException {
SimpleHTMLFormatter redFont = new SimpleHTMLFormatter("<font color='red'>", "</font>");
QueryScorer scorer = new QueryScorer(query);
Highlighter highlighter = new Highlighter(redFont, scorer);
highlighter.setTextFragmenter(new SimpleFragmenter(fragmentSize));
return highlighter.getBestFragment(analyzer, fieldName, fieldContent);
}
示例6: search
import org.apache.lucene.search.highlight.Fragmenter; //导入依赖的package包/类
/**
 * Searches the "desc" field of the index at {@code indexDir} for {@code q},
 * printing each hit's city and description plus a highlighted best fragment.
 *
 * @param indexDir path to the Lucene index directory
 * @param q        the user query string (parsed against field "desc")
 * @throws Exception on parse or I/O failure
 */
public static void search(String indexDir, String q) throws Exception {
// BUG FIX: the original closed only the reader, and not on the exception
// path; try-with-resources releases both Directory and IndexReader reliably.
try (Directory dir = FSDirectory.open(Paths.get(indexDir));
IndexReader reader = DirectoryReader.open(dir)) {
IndexSearcher is = new IndexSearcher(reader);
// Analyzer analyzer=new StandardAnalyzer(); // 标准分词器
SmartChineseAnalyzer analyzer = new SmartChineseAnalyzer();
QueryParser parser = new QueryParser("desc", analyzer);
Query query = parser.parse(q);
long start = System.currentTimeMillis();
TopDocs hits = is.search(query, 10);
long end = System.currentTimeMillis();
System.out.println("匹配 " + q + " ,总共花费" + (end - start) + "毫秒" + "查询到" + hits.totalHits + "个记录");
QueryScorer scorer = new QueryScorer(query);
Fragmenter fragmenter = new SimpleSpanFragmenter(scorer);
SimpleHTMLFormatter simpleHTMLFormatter = new SimpleHTMLFormatter("<b><font color='red'>", "</font></b>");
Highlighter highlighter = new Highlighter(simpleHTMLFormatter, scorer);
highlighter.setTextFragmenter(fragmenter);
for (ScoreDoc scoreDoc : hits.scoreDocs) {
Document doc = is.doc(scoreDoc.doc);
System.out.println(doc.get("city"));
// Fetch "desc" once instead of twice (print + highlight).
String desc = doc.get("desc");
System.out.println(desc);
if (desc != null) {
TokenStream tokenStream = analyzer.tokenStream("desc", new StringReader(desc));
System.out.println(highlighter.getBestFragment(tokenStream, desc));
}
}
}
}
示例7: HighlightingHelper
import org.apache.lucene.search.highlight.Fragmenter; //导入依赖的package包/类
/**
 * Creates a helper that highlights {@code query} matches, HTML-encoding the
 * surrounding text and fragmenting at the default length.
 *
 * @param query    the query whose matches are highlighted
 * @param analyzer analyzer used later to tokenize field content
 */
HighlightingHelper(Query query, Analyzer analyzer) {
this.analyzer = analyzer;
scorer = new QueryScorer(query);
Formatter formatter = new SimpleHTMLFormatter();
Encoder encoder = new MinimalHTMLEncoder();
highlighter = new Highlighter(formatter, encoder, scorer);
fragmentLength = DEFAULT_FRAGMENT_LENGTH;
highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer, fragmentLength));
}
示例8: setFragmentLength
import org.apache.lucene.search.highlight.Fragmenter; //导入依赖的package包/类
/**
 * Sets the highlight fragment length, rebuilding the fragmenter only when the
 * length actually changes.
 *
 * @param length the new fragment length; must be at least 1
 * @throws IllegalArgumentException if {@code length < 1}
 */
void setFragmentLength(int length) {
if (length < 1) {
// BUG FIX: AssertionError is for internal invariants and is disabled
// semantics-wise (asserts) in callers' minds; invalid caller input is an
// IllegalArgumentException.
throw new IllegalArgumentException("length must be at least 1, got " + length);
}
// Create a new fragmenter only if the length is different.
if (fragmentLength != length) {
Fragmenter fragmenter = new SimpleSpanFragmenter(scorer, length);
highlighter.setTextFragmenter(fragmenter);
fragmentLength = length;
}
}
示例9: getFragmenter
import org.apache.lucene.search.highlight.Fragmenter; //导入依赖的package包/类
/**
 * Returns a gap fragmenter sized from the request parameters, or a
 * {@link NullFragmenter} when fragmenting is disabled (fragsize &lt;= 0).
 */
@Override
public Fragmenter getFragmenter(String fieldName, SolrParams params )
{
numRequests++;
params = SolrParams.wrapDefaults(params, defaults);
int fragsize = params.getFieldInt( fieldName, HighlightParams.FRAGSIZE, 100 );
if (fragsize <= 0) {
return new NullFragmenter();
}
return new LuceneGapFragmenter(fragsize);
}
示例10: testHighlighting
import org.apache.lucene.search.highlight.Fragmenter; //导入依赖的package包/类
/** Verifies that a single-term match is wrapped in the default &lt;B&gt; tags. */
public void testHighlighting() throws Exception {
String text = "The quick brown fox jumps over the lazy dog";
TermQuery foxQuery = new TermQuery(new Term("field", "fox"));
QueryScorer scorer = new QueryScorer(foxQuery, "field");
Highlighter highlighter = new Highlighter(scorer);
highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer));
TokenStream tokenStream = new SimpleAnalyzer().tokenStream("field", new StringReader(text));
assertEquals("The quick brown <B>fox</B> jumps over the lazy dog",
highlighter.getBestFragment(tokenStream, text));
}
示例11: getFragmenter
import org.apache.lucene.search.highlight.Fragmenter; //导入依赖的package包/类
/**
 * Looks up the {@link org.apache.lucene.search.highlight.Fragmenter} configured
 * for the given field via the {@code hl.fragmenter} parameter, falling back to
 * the registered default ({@link GapFragmenter}) when none is named.
 *
 * @param fieldName The name of the field
 * @param params    The params controlling Highlighting
 * @return An appropriate {@link org.apache.lucene.search.highlight.Fragmenter}
 * @throws SolrException (BAD_REQUEST) when an unknown fragmenter is requested
 */
protected Fragmenter getFragmenter(String fieldName, SolrParams params)
{
String fragmenterName = params.getFieldParam(fieldName, HighlightParams.FRAGMENTER);
SolrFragmenter solrFragmenter = fragmenters.get(fragmenterName);
if (solrFragmenter != null)
{
return solrFragmenter.getFragmenter(fieldName, params);
}
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown fragmenter: " + fragmenterName);
}
示例12: getFragmenter
import org.apache.lucene.search.highlight.Fragmenter; //导入依赖的package包/类
/**
 * Return a {@link org.apache.lucene.search.highlight.Fragmenter} appropriate for this field.
 * Implementations typically read per-field highlighting parameters (e.g. fragment size)
 * from {@code params} — NOTE(review): exact parameters consumed depend on the implementation.
 *
 * @param fieldName The name of the field
 * @param params The params controlling Highlighting
 * @return An appropriate {@link org.apache.lucene.search.highlight.Fragmenter}.
 */
public Fragmenter getFragmenter(String fieldName, SolrParams params);