This article collects typical usage examples of the Java class org.apache.lucene.search.highlight.InvalidTokenOffsetsException. If you have been wondering what exactly InvalidTokenOffsetsException is for and how to use it, the selected class code examples below may help.
The InvalidTokenOffsetsException class belongs to the org.apache.lucene.search.highlight package. Below are 15 code examples of the class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
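Before the examples, here is a minimal, self-contained sketch of where the exception typically surfaces: Highlighter.getBestFragment declares the checked InvalidTokenOffsetsException, so callers must either catch it or declare it. The field name and sample text below are illustrative and not taken from the examples that follow.
import java.io.IOException;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;

public class HighlightSketch {
    public static void main(String[] args) throws IOException {
        Query query = new TermQuery(new Term("text", "lucene"));
        // the default SimpleHTMLFormatter wraps matches in <B>...</B>
        Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter(), new QueryScorer(query));
        try (StandardAnalyzer analyzer = new StandardAnalyzer()) {
            String fragment = highlighter.getBestFragment(analyzer, "text", "Apache Lucene highlighting example");
            System.out.println(fragment); // e.g. Apache <B>Lucene</B> highlighting example
        } catch (InvalidTokenOffsetsException e) {
            // thrown when a token's offsets exceed the length of the provided text
            e.printStackTrace();
        }
    }
}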
Example 1: checkGeoQueryHighlighting
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException; // import the required package/class
public void checkGeoQueryHighlighting(Query geoQuery) throws IOException, InvalidTokenOffsetsException {
    Map<String, Analyzer> analysers = new HashMap<>();
    analysers.put("text", new StandardAnalyzer());
    FieldNameAnalyzer fieldNameAnalyzer = new FieldNameAnalyzer(analysers);
    Query termQuery = new TermQuery(new Term("text", "failure"));
    Query boolQuery = new BooleanQuery.Builder().add(new BooleanClause(geoQuery, BooleanClause.Occur.SHOULD))
        .add(new BooleanClause(termQuery, BooleanClause.Occur.SHOULD)).build();
    org.apache.lucene.search.highlight.Highlighter highlighter =
        new org.apache.lucene.search.highlight.Highlighter(new CustomQueryScorer(boolQuery));
    String fragment = highlighter.getBestFragment(fieldNameAnalyzer.tokenStream("text", "Arbitrary text field which should not cause " +
        "a failure"), "Arbitrary text field which should not cause a failure");
    assertThat(fragment, equalTo("Arbitrary text field which should not cause a <B>failure</B>"));
    Query rewritten = boolQuery.rewrite(null);
    highlighter = new org.apache.lucene.search.highlight.Highlighter(new CustomQueryScorer(rewritten));
    fragment = highlighter.getBestFragment(fieldNameAnalyzer.tokenStream("text", "Arbitrary text field which should not cause " +
        "a failure"), "Arbitrary text field which should not cause a failure");
    assertThat(fragment, equalTo("Arbitrary text field which should not cause a <B>failure</B>"));
}
Example 2: selectOnlyNotAlreadyIndexedFilesFromACollection
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException; // import the required package/class
/**
 * Selects from a collection only the files that are not already indexed.
 *
 * @param fileCollection the file collection
 * @param location the location of the index
 * @return the linked list of files that still need to be indexed
 * @throws IOException Signals that an I/O exception has occurred.
 * @throws ParseException the parse exception
 * @throws InvalidTokenOffsetsException the invalid token offsets exception
 */
// Returns the collection with only the extracted (not yet indexed) files
public static LinkedList<File> selectOnlyNotAlreadyIndexedFilesFromACollection(List<File> fileCollection, String location) throws IOException, ParseException, InvalidTokenOffsetsException {
    // Get the path of our index and open a reader on the index in that folder
    Path dirPath = Paths.get(location);
    FSDirectory dir = FSDirectory.open(dirPath);
    DirectoryReader indexReader = DirectoryReader.open(dir);
    LinkedList<File> onlyNotIndexedFiles = new LinkedList<>();
    for (File file : fileCollection) {
        // Create a new Term using the path of the HTML file
        Term indexTerm = new Term("path", file.getPath());
        // Check whether a document in the index already has the same path
        if (indexReader.docFreq(indexTerm) > 0) {
            // If yes, do not add the document
            continue;
        } else {
            onlyNotIndexedFiles.add(file);
        }
    }
    indexReader.close();
    return onlyNotIndexedFiles;
}
Example 3: highlight
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException; // import the required package/class
String highlight(String fieldName, String text) {
    if (text == null) {
        return null;
    }
    try {
        String highlighted = highlighter.getBestFragment(analyzer, fieldName, text);
        if (highlighted == null) {
            return null;
        }
        // strip unwanted characters and collapse line feeds into spaces
        highlighted = cleanUpPattern.matcher(highlighted).replaceAll("");
        highlighted = replaceLFPattern.matcher(highlighted).replaceAll(" ");
        if (highlighted.isEmpty()) {
            highlighted = null;
        }
        return highlighted;
    } catch (InvalidTokenOffsetsException | IOException e) {
        // fall back to no highlight on analysis or offset problems
        return null;
    }
}
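Example 3 refers to fields declared elsewhere in its class (highlighter, analyzer, cleanUpPattern, replaceLFPattern). The sketch below shows one way such fields might be declared; the analyzer choice and the regular expressions are assumptions for illustration, not the original project's values.
import java.util.regex.Pattern;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.QueryScorer;

class SnippetHighlighter {
    // hypothetical declarations; the real class may use different analyzers and patterns
    private final Analyzer analyzer = new StandardAnalyzer();
    private final Highlighter highlighter;
    private final Pattern cleanUpPattern = Pattern.compile("\\p{Cntrl}"); // e.g. strip control characters
    private final Pattern replaceLFPattern = Pattern.compile("\\r?\\n"); // line breaks, replaced with spaces

    SnippetHighlighter(Query query) {
        // the single-argument constructor uses the default <B>...</B> SimpleHTMLFormatter
        this.highlighter = new Highlighter(new QueryScorer(query));
    }

    // the highlight(String fieldName, String text) method from Example 3 would live here
}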
Example 4: searToHighlighterCss
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException; // import the required package/class
/**
 * Searches and highlights the matching content with a custom CSS class.
 * @param analyzer
 * @param searcher
 * @throws IOException
 * @throws InvalidTokenOffsetsException
 */
public void searToHighlighterCss(Analyzer analyzer, IndexSearcher searcher) throws IOException, InvalidTokenOffsetsException {
    Term term = new Term("Content", "免费"); // the query keyword; it must match a token produced by the analyzer at index time
    TermQuery query = new TermQuery(term);
    TopDocs docs = searcher.search(query, 10); // run the search, keep the top 10 hits
    // custom tags that wrap the highlighted text
    SimpleHTMLFormatter formatter = new SimpleHTMLFormatter("<span class=\"hightlighterCss\">", "</span>");
    // create the QueryScorer
    QueryScorer scorer = new QueryScorer(query);
    // create the Fragmenter
    Fragmenter fragmenter = new SimpleSpanFragmenter(scorer);
    Highlighter highlight = new Highlighter(formatter, scorer);
    highlight.setTextFragmenter(fragmenter);
    for (ScoreDoc doc : docs.scoreDocs) { // fetch each matching document and highlight its content
        Document document = searcher.doc(doc.doc);
        String value = document.get("Content"); // read the stored field value
        TokenStream tokenStream = analyzer.tokenStream("Content", new StringReader(value));
        String str1 = highlight.getBestFragment(tokenStream, value);
        System.out.println(str1);
    }
}
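A possible call site for searToHighlighterCss, assumed to live in the same class: it opens the index, builds the searcher, and picks an analyzer. The "indexDir" path and the SmartChineseAnalyzer are placeholders; the analyzer has to match whatever was used at indexing time.
import java.nio.file.Paths;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.FSDirectory;

public void runHighlightDemo() throws Exception {
    // "indexDir" is a placeholder path to an existing Lucene index
    try (DirectoryReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("indexDir")));
         Analyzer analyzer = new SmartChineseAnalyzer()) {
        IndexSearcher searcher = new IndexSearcher(reader);
        searToHighlighterCss(analyzer, searcher);
    }
}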
Example 5: translateDocs
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException; // import the required package/class
/**
 * Translates the Lucene TopDocs into a list of Person DTOs, setting a normalized score on each entry.
 *
 * @throws IOException
 * @throws InvalidTokenOffsetsException
 */
private List<Person> translateDocs(final LuceneIndex<Person> luceneDb, final IndexSearcher searcher, final TopDocs topDocs, final Query query) throws IOException, InvalidTokenOffsetsException {
    final List<Person> dtcResult = new ArrayList<>();
    for (final ScoreDoc scoreDoc : topDocs.scoreDocs) {
        final Document document = searcher.doc(scoreDoc.doc);
        final Person dto = luceneDb.getObjectIndexed(document.get(UUID_KEY));
        dto.setScore(Math.round(scoreDoc.score / Math.max(topDocs.getMaxScore(), 2.5f) * 100));
        dtcResult.add(dto);
        //final QueryScorer queryScorer = new QueryScorer(query);
        //final Highlighter highlighter = new Highlighter(queryScorer);
        //highlighter.setTextFragmenter(new SimpleSpanFragmenter(queryScorer, Integer.MAX_VALUE));
        //highlighter.setMaxDocCharsToAnalyze(Integer.MAX_VALUE);
        //final String[] strings = highlighter.getBestFragments(indexAnalyser, "fullname", dto.getName() + " " + dto.getFirstname(), 5);
        //System.out.println("found: " + Arrays.toString(strings));
    }
    return dtcResult;
}
Example 6: highlight
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException; // import the required package/class
@Override
public String highlight(String locale, boolean useStopWords, String query, String content, String pre, String post, int preview) {
    Analyzer analyzer = termAnalyzers.findAnalyzer(locale, useStopWords);
    QueryParser parser = new QueryParser(defaultField, analyzer);
    String summary = null;
    try {
        SimpleHTMLFormatter formatter = new SimpleHTMLFormatter(pre, post);
        Highlighter hg = new Highlighter(formatter, new QueryTermScorer(parser.parse(query)));
        hg.setMaxDocCharsToAnalyze(preview);
        hg.setTextFragmenter(new SimpleFragmenter(100));
        TokenStream tokens = TokenSources.getTokenStream(defaultField, content, analyzer);
        summary = hg.getBestFragments(tokens, content, 4, " ... ");
    } catch (InvalidTokenOffsetsException | IOException | ParseException ex) {
        LOG.error("Failed to highlight", ex);
    }
    return StringUtils.isBlank(summary) ? null : summary;
}
Example 7: highlightField
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException; // import the required package/class
/**
 * This method is intended for use with
 * <tt>testHighlightingWithDefaultField()</tt>
 */
private String highlightField(Query query, String fieldName,
        String text) throws IOException, InvalidTokenOffsetsException {
    TokenStream tokenStream = new MockAnalyzer(random(), MockTokenizer.SIMPLE,
            true, MockTokenFilter.ENGLISH_STOPSET, true).tokenStream(fieldName,
            new StringReader(text));
    // Assuming "<B>", "</B>" used to highlight
    SimpleHTMLFormatter formatter = new SimpleHTMLFormatter();
    MyQueryScorer scorer = new MyQueryScorer(query, fieldName, FIELD_NAME);
    Highlighter highlighter = new Highlighter(formatter, scorer);
    highlighter.setTextFragmenter(new SimpleFragmenter(Integer.MAX_VALUE));
    String rv = highlighter.getBestFragments(tokenStream, text, 1,
            "(FIELD TEXT TRUNCATED)");
    return rv.length() == 0 ? text : rv;
}
Example 8: displayHtmlHighlight
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException; // import the required package/class
static String displayHtmlHighlight(Query query, Analyzer analyzer, String fieldName, String fieldContent,
        int fragmentSize) throws IOException, InvalidTokenOffsetsException {
    Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter("<font color='red'>", "</font>"),
            new QueryScorer(query));
    Fragmenter fragmenter = new SimpleFragmenter(fragmentSize);
    highlighter.setTextFragmenter(fragmenter);
    return highlighter.getBestFragment(analyzer, fieldName, fieldContent);
}
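A possible call site for displayHtmlHighlight, assumed to sit in the same class as the static helper above; the field name, query string, and content are placeholders.
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.Query;

public static void main(String[] args) throws Exception {
    Analyzer analyzer = new StandardAnalyzer();
    Query query = new QueryParser("content", analyzer).parse("lucene");
    String html = displayHtmlHighlight(query, analyzer, "content",
            "Apache Lucene is a full-text search library.", 100);
    System.out.println(html); // e.g. Apache <font color='red'>Lucene</font> is a full-text search library.
}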
Example 9: performHighlighting
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException; // import the required package/class
/**
 * Performs highlighting for a given query and a given document.
 *
 * @param indexSearcher the IndexSearcher performing the query
 * @param query the Tripod LuceneQuery
 * @param scoreDoc the Lucene ScoreDoc
 * @param doc the Lucene Document
 * @param highlighter the Highlighter to use
 * @param result the QueryResult to add the highlights to
 * @throws IOException if an error occurs performing the highlighting
 * @throws InvalidTokenOffsetsException if an error occurs performing the highlighting
 */
protected void performHighlighting(final IndexSearcher indexSearcher, final Query query, final ScoreDoc scoreDoc,
        final Document doc, final Highlighter highlighter, final QR result)
        throws IOException, InvalidTokenOffsetsException {
    if (query.getHighlightFields() == null || query.getHighlightFields().isEmpty()) {
        return;
    }
    final List<Highlight> highlights = new ArrayList<>();
    final List<String> hlFieldNames = getHighlightFieldNames(query, doc);
    // process each field to highlight on
    for (String hlField : hlFieldNames) {
        final String text = doc.get(hlField);
        if (StringUtils.isEmpty(text)) {
            continue;
        }
        final List<String> snippets = new ArrayList<>();
        final Fields tvFields = indexSearcher.getIndexReader().getTermVectors(scoreDoc.doc);
        final int maxStartOffset = highlighter.getMaxDocCharsToAnalyze() - 1;
        // get the snippets for the given field
        final TokenStream tokenStream = TokenSources.getTokenStream(hlField, tvFields, text, analyzer, maxStartOffset);
        final TextFragment[] textFragments = highlighter.getBestTextFragments(tokenStream, text, false, 10);
        for (TextFragment textFragment : textFragments) {
            if (textFragment != null && textFragment.getScore() > 0) {
                snippets.add(textFragment.toString());
            }
        }
        // if we have snippets then add a highlight result to the QueryResult
        if (snippets.size() > 0) {
            highlights.add(new Highlight(hlField, snippets));
        }
    }
    result.setHighlights(highlights);
}
Example 10: search
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException; // import the required package/class
@ResponseBody
@RequestMapping(value = "/search/{key}", method = RequestMethod.GET,
        produces = "application/json;charset=UTF-8")
public String search(@PathVariable String key) throws IOException,
        ParseException, InvalidTokenOffsetsException {
    return searchData(key);
}
Example 11: testHighlightCustomQuery
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException; // import the required package/class
public void testHighlightCustomQuery() throws IOException,
        InvalidTokenOffsetsException {
    String s1 = "I call our world Flatland, not because we call it so,";

    // Verify that a query against the default field results in text being
    // highlighted regardless of the field name.
    CustomQuery q = new CustomQuery(new Term(FIELD_NAME, "world"));
    String expected = "I call our <B>world</B> Flatland, not because we call it so,";
    String observed = highlightField(q, "SOME_FIELD_NAME", s1);
    if (VERBOSE)
        System.out.println("Expected: \"" + expected + "\n" + "Observed: \""
                + observed);
    assertEquals(
            "Query in the default field results in text for *ANY* field being highlighted",
            expected, observed);

    // Verify that a query against a named field does not result in any
    // highlighting when the query field name differs from the name of the
    // field being highlighted, which in this example happens to be the
    // default field name.
    q = new CustomQuery(new Term("text", "world"));
    expected = s1;
    observed = highlightField(q, FIELD_NAME, s1);
    if (VERBOSE)
        System.out.println("Expected: \"" + expected + "\n" + "Observed: \""
                + observed);
    assertEquals(
            "Query in a named field does not result in highlighting when that field isn't in the query",
            s1, highlightField(q, FIELD_NAME, s1));
}
Example 12: highlightField
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException; // import the required package/class
/**
 * This method is intended for use with
 * <tt>testHighlightingWithDefaultField()</tt>
 */
private String highlightField(Query query, String fieldName,
        String text) throws IOException, InvalidTokenOffsetsException {
    TokenStream tokenStream = new MockAnalyzer(random(), MockTokenizer.SIMPLE,
            true, MockTokenFilter.ENGLISH_STOPSET).tokenStream(fieldName, text);
    // Assuming "<B>", "</B>" used to highlight
    SimpleHTMLFormatter formatter = new SimpleHTMLFormatter();
    MyQueryScorer scorer = new MyQueryScorer(query, fieldName, FIELD_NAME);
    Highlighter highlighter = new Highlighter(formatter, scorer);
    highlighter.setTextFragmenter(new SimpleFragmenter(Integer.MAX_VALUE));
    String rv = highlighter.getBestFragments(tokenStream, text, 1,
            "(FIELD TEXT TRUNCATED)");
    return rv.length() == 0 ? text : rv;
}
Example 13: doHighlight
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException; // import the required package/class
/**
 * Highlights (bold, color) the query words in the result document. Sets the HighlightResult for the content or the description.
 *
 * @param query
 * @param analyzer
 * @param doc
 * @param resultDocument
 * @throws IOException
 */
private void doHighlight(final Query query, final Analyzer analyzer, final Document doc, final ResultDocument resultDocument) throws IOException {
    final Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter(HIGHLIGHT_PRE_TAG, HIGHLIGHT_POST_TAG), new QueryScorer(query));
    // Get the 3 best fragments of the content and separate them with "..."
    try {
        // highlight content
        final String content = doc.get(AbstractOlatDocument.CONTENT_FIELD_NAME);
        TokenStream tokenStream = analyzer.tokenStream(AbstractOlatDocument.CONTENT_FIELD_NAME, new StringReader(content));
        String highlightResult = highlighter.getBestFragments(tokenStream, content, 3, HIGHLIGHT_SEPARATOR);
        // if there is no highlight result in the content => look in the description
        if (highlightResult.length() == 0) {
            final String description = doc.get(AbstractOlatDocument.DESCRIPTION_FIELD_NAME);
            tokenStream = analyzer.tokenStream(AbstractOlatDocument.DESCRIPTION_FIELD_NAME, new StringReader(description));
            highlightResult = highlighter.getBestFragments(tokenStream, description, 3, HIGHLIGHT_SEPARATOR);
            resultDocument.setHighlightingDescription(true);
        }
        resultDocument.setHighlightResult(highlightResult);
        // highlight title
        final String title = doc.get(AbstractOlatDocument.TITLE_FIELD_NAME);
        tokenStream = analyzer.tokenStream(AbstractOlatDocument.TITLE_FIELD_NAME, new StringReader(title));
        final String highlightTitle = highlighter.getBestFragments(tokenStream, title, 3, " ");
        resultDocument.setHighlightTitle(highlightTitle);
    } catch (final InvalidTokenOffsetsException e) {
        log.warn("", e);
    }
}
Example 14: executeQuery
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException; // import the required package/class
private List<Person> executeQuery(final LuceneIndex<Person> luceneDb, final Query query, final int maxRow) throws IOException, InvalidTokenOffsetsException {
    try (final IndexReader indexReader = luceneDb.createIndexReader()) {
        final IndexSearcher searcher = new IndexSearcher(indexReader);
        // 1. Execute the query
        final TopDocs topDocs = searcher.search(query, null, maxRow);
        // 2. Translate the Lucene result into a collection
        return translateDocs(luceneDb, searcher, topDocs, query);
    } catch (final TooManyClauses e) {
        throw new RuntimeException("Too many clauses", e);
    }
}
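Note that searcher.search(query, null, maxRow) uses the older three-argument overload whose middle parameter is a Filter; that overload was removed in Lucene 6. On newer versions the equivalent call, assuming no filter is actually wanted, simply drops it:
// equivalent on newer Lucene versions, where the Filter parameter no longer exists
final TopDocs topDocs = searcher.search(query, maxRow);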
Example 15: testGeoPointInBBoxQueryHighlighting
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException; // import the required package/class
public void testGeoPointInBBoxQueryHighlighting() throws IOException, InvalidTokenOffsetsException {
    Query geoQuery = new GeoPointDistanceQuery("geo_point", -64.92354174306496, -170.15625, 5576757);
    checkGeoQueryHighlighting(geoQuery);
}