This article compiles typical usage examples of the Java method org.apache.solr.core.SolrCore.getSearcher. If you are wondering what SolrCore.getSearcher does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.solr.core.SolrCore.
Below are 11 code examples of SolrCore.getSearcher, ordered roughly by popularity.
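Before the individual examples, here is a minimal sketch (not taken from any example below) of the basic contract behind SolrCore.getSearcher(): the method returns a RefCounted&lt;SolrIndexSearcher&gt; that must be released with decref() when you are done, otherwise the searcher and the index files it holds open are leaked. Method and variable names in this sketch are placeholders.

import org.apache.solr.core.SolrCore;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.util.RefCounted;

// Minimal usage sketch: acquire the searcher, use it, always release it.
void useSearcher(SolrCore core) {
  RefCounted<SolrIndexSearcher> holder = core.getSearcher();
  try {
    SolrIndexSearcher searcher = holder.get();
    // read-only work with the searcher goes here, e.g. searcher.getIndexReader()
  } finally {
    holder.decref(); // release the reference so the core can close old searchers
  }
}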
Example 1: getSimilarity
import org.apache.solr.core.SolrCore; // import the package/class that this method depends on
/** returns the similarity in use for the field */
protected Similarity getSimilarity(String field) {
  SolrCore core = h.getCore();
  RefCounted<SolrIndexSearcher> searcher = core.getSearcher();
  Similarity sim = searcher.get().getSimilarity();
  searcher.decref();
  while (sim instanceof PerFieldSimilarityWrapper) {
    sim = ((PerFieldSimilarityWrapper)sim).get(field);
  }
  return sim;
}
Example 2: test
import org.apache.solr.core.SolrCore; // import the package/class that this method depends on
@Test
public void test() throws Exception {
  FileBasedSpellChecker checker = new FileBasedSpellChecker();
  NamedList spellchecker = new NamedList();
  spellchecker.add("classname", FileBasedSpellChecker.class.getName());
  spellchecker.add(SolrSpellChecker.DICTIONARY_NAME, "external");
  spellchecker.add(AbstractLuceneSpellChecker.LOCATION, "spellings.txt");
  spellchecker.add(AbstractLuceneSpellChecker.FIELD, "teststop");
  spellchecker.add(FileBasedSpellChecker.SOURCE_FILE_CHAR_ENCODING, "UTF-8");
  File indexDir = createTempDir(LuceneTestCase.getTestClass().getSimpleName());
  spellchecker.add(AbstractLuceneSpellChecker.INDEX_DIR, indexDir.getAbsolutePath());
  SolrCore core = h.getCore();
  String dictName = checker.init(spellchecker, core);
  assertTrue(dictName + " is not equal to " + "external", dictName.equals("external") == true);
  checker.build(core, null);
  RefCounted<SolrIndexSearcher> searcher = core.getSearcher();
  Collection<Token> tokens = queryConverter.convert("fob");
  SpellingOptions spellOpts = new SpellingOptions(tokens, searcher.get().getIndexReader());
  SpellingResult result = checker.getSuggestions(spellOpts);
  assertTrue("result is null and it shouldn't be", result != null);
  Map<String, Integer> suggestions = result.get(tokens.iterator().next());
  Map.Entry<String, Integer> entry = suggestions.entrySet().iterator().next();
  assertTrue(entry.getKey() + " is not equal to " + "foo", entry.getKey().equals("foo") == true);
  assertTrue(entry.getValue() + " does not equal: " + SpellingResult.NO_FREQUENCY_INFO, entry.getValue() == SpellingResult.NO_FREQUENCY_INFO);
  spellOpts.tokens = queryConverter.convert("super");
  result = checker.getSuggestions(spellOpts);
  assertTrue("result is null and it shouldn't be", result != null);
  suggestions = result.get(tokens.iterator().next());
  assertTrue("suggestions is not null and it should be", suggestions == null);
  searcher.decref();
}
Example 3: testFieldType
import org.apache.solr.core.SolrCore; // import the package/class that this method depends on
@Test
public void testFieldType() throws Exception {
  FileBasedSpellChecker checker = new FileBasedSpellChecker();
  NamedList spellchecker = new NamedList();
  spellchecker.add("classname", FileBasedSpellChecker.class.getName());
  spellchecker.add(SolrSpellChecker.DICTIONARY_NAME, "external");
  spellchecker.add(AbstractLuceneSpellChecker.LOCATION, "spellings.txt");
  spellchecker.add(AbstractLuceneSpellChecker.FIELD, "teststop");
  spellchecker.add(FileBasedSpellChecker.SOURCE_FILE_CHAR_ENCODING, "UTF-8");
  File indexDir = createTempDir();
  indexDir.mkdirs();
  spellchecker.add(AbstractLuceneSpellChecker.INDEX_DIR, indexDir.getAbsolutePath());
  spellchecker.add(SolrSpellChecker.FIELD_TYPE, "teststop");
  spellchecker.add(AbstractLuceneSpellChecker.SPELLCHECKER_ARG_NAME, spellchecker);
  SolrCore core = h.getCore();
  String dictName = checker.init(spellchecker, core);
  assertTrue(dictName + " is not equal to " + "external", dictName.equals("external") == true);
  checker.build(core, null);
  RefCounted<SolrIndexSearcher> searcher = core.getSearcher();
  Collection<Token> tokens = queryConverter.convert("Solar");
  SpellingOptions spellOpts = new SpellingOptions(tokens, searcher.get().getIndexReader());
  SpellingResult result = checker.getSuggestions(spellOpts);
  assertTrue("result is null and it shouldn't be", result != null);
  //should be lowercased, b/c we are using a lowercasing analyzer
  Map<String, Integer> suggestions = result.get(tokens.iterator().next());
  assertTrue("suggestions Size: " + suggestions.size() + " is not: " + 1, suggestions.size() == 1);
  Map.Entry<String, Integer> entry = suggestions.entrySet().iterator().next();
  assertTrue(entry.getKey() + " is not equal to " + "solr", entry.getKey().equals("solr") == true);
  assertTrue(entry.getValue() + " does not equal: " + SpellingResult.NO_FREQUENCY_INFO, entry.getValue() == SpellingResult.NO_FREQUENCY_INFO);
  //test something not in the spell checker
  spellOpts.tokens = queryConverter.convert("super");
  result = checker.getSuggestions(spellOpts);
  assertTrue("result is null and it shouldn't be", result != null);
  suggestions = result.get(tokens.iterator().next());
  assertTrue("suggestions is not null and it should be", suggestions == null);
  searcher.decref();
}
Example 4: testRAMDirectory
import org.apache.solr.core.SolrCore; // import the package/class that this method depends on
/**
 * No indexDir location set
 */
@Test
public void testRAMDirectory() throws Exception {
  FileBasedSpellChecker checker = new FileBasedSpellChecker();
  NamedList spellchecker = new NamedList();
  spellchecker.add("classname", FileBasedSpellChecker.class.getName());
  spellchecker.add(SolrSpellChecker.DICTIONARY_NAME, "external");
  spellchecker.add(AbstractLuceneSpellChecker.LOCATION, "spellings.txt");
  spellchecker.add(FileBasedSpellChecker.SOURCE_FILE_CHAR_ENCODING, "UTF-8");
  spellchecker.add(AbstractLuceneSpellChecker.FIELD, "teststop");
  spellchecker.add(SolrSpellChecker.FIELD_TYPE, "teststop");
  spellchecker.add(AbstractLuceneSpellChecker.SPELLCHECKER_ARG_NAME, spellchecker);
  SolrCore core = h.getCore();
  String dictName = checker.init(spellchecker, core);
  assertTrue(dictName + " is not equal to " + "external", dictName.equals("external") == true);
  checker.build(core, null);
  RefCounted<SolrIndexSearcher> searcher = core.getSearcher();
  Collection<Token> tokens = queryConverter.convert("solar");
  SpellingOptions spellOpts = new SpellingOptions(tokens, searcher.get().getIndexReader());
  SpellingResult result = checker.getSuggestions(spellOpts);
  assertTrue("result is null and it shouldn't be", result != null);
  //should be lowercased, b/c we are using a lowercasing analyzer
  Map<String, Integer> suggestions = result.get(tokens.iterator().next());
  assertTrue("suggestions Size: " + suggestions.size() + " is not: " + 1, suggestions.size() == 1);
  Map.Entry<String, Integer> entry = suggestions.entrySet().iterator().next();
  assertTrue(entry.getKey() + " is not equal to " + "solr", entry.getKey().equals("solr") == true);
  assertTrue(entry.getValue() + " does not equal: " + SpellingResult.NO_FREQUENCY_INFO, entry.getValue() == SpellingResult.NO_FREQUENCY_INFO);
  spellOpts.tokens = queryConverter.convert("super");
  result = checker.getSuggestions(spellOpts);
  assertTrue("result is null and it shouldn't be", result != null);
  suggestions = result.get(spellOpts.tokens.iterator().next());
  assertTrue("suggestions size should be 0", suggestions.size()==0);
  searcher.decref();
}
Example 5: testAlternateDistance
import org.apache.solr.core.SolrCore; // import the package/class that this method depends on
@Test
public void testAlternateDistance() throws Exception {
  TestSpellChecker checker = new TestSpellChecker();
  NamedList spellchecker = new NamedList();
  spellchecker.add("classname", IndexBasedSpellChecker.class.getName());
  File indexDir = createTempDir();
  spellchecker.add(AbstractLuceneSpellChecker.INDEX_DIR, indexDir.getAbsolutePath());
  spellchecker.add(AbstractLuceneSpellChecker.FIELD, "title");
  spellchecker.add(AbstractLuceneSpellChecker.SPELLCHECKER_ARG_NAME, spellchecker);
  spellchecker.add(AbstractLuceneSpellChecker.STRING_DISTANCE, JaroWinklerDistance.class.getName());
  SolrCore core = h.getCore();
  String dictName = checker.init(spellchecker, core);
  assertTrue(dictName + " is not equal to " + SolrSpellChecker.DEFAULT_DICTIONARY_NAME,
      dictName.equals(SolrSpellChecker.DEFAULT_DICTIONARY_NAME) == true);
  RefCounted<SolrIndexSearcher> holder = core.getSearcher();
  SolrIndexSearcher searcher = holder.get();
  try {
    checker.build(core, searcher);
    SpellChecker sc = checker.getSpellChecker();
    assertTrue("sc is null and it shouldn't be", sc != null);
    StringDistance sd = sc.getStringDistance();
    assertTrue("sd is null and it shouldn't be", sd != null);
    assertTrue("sd is not an instance of " + JaroWinklerDistance.class.getName(), sd instanceof JaroWinklerDistance);
  } finally {
    holder.decref();
  }
}
Example 6: test
import org.apache.solr.core.SolrCore; // import the package/class that this method depends on
@Test
public void test() throws Exception {
  DirectSolrSpellChecker checker = new DirectSolrSpellChecker();
  NamedList spellchecker = new NamedList();
  spellchecker.add("classname", DirectSolrSpellChecker.class.getName());
  spellchecker.add(SolrSpellChecker.FIELD, "teststop");
  spellchecker.add(DirectSolrSpellChecker.MINQUERYLENGTH, 2); // we will try "fob"
  SolrCore core = h.getCore();
  checker.init(spellchecker, core);
  RefCounted<SolrIndexSearcher> searcher = core.getSearcher();
  Collection<Token> tokens = queryConverter.convert("fob");
  SpellingOptions spellOpts = new SpellingOptions(tokens, searcher.get().getIndexReader());
  SpellingResult result = checker.getSuggestions(spellOpts);
  assertTrue("result is null and it shouldn't be", result != null);
  Map<String, Integer> suggestions = result.get(tokens.iterator().next());
  Map.Entry<String, Integer> entry = suggestions.entrySet().iterator().next();
  assertTrue(entry.getKey() + " is not equal to " + "foo", entry.getKey().equals("foo") == true);
  assertFalse(entry.getValue() + " equals: " + SpellingResult.NO_FREQUENCY_INFO, entry.getValue() == SpellingResult.NO_FREQUENCY_INFO);
  spellOpts.tokens = queryConverter.convert("super");
  result = checker.getSuggestions(spellOpts);
  assertTrue("result is null and it shouldn't be", result != null);
  suggestions = result.get(tokens.iterator().next());
  assertTrue("suggestions is not null and it should be", suggestions == null);
  searcher.decref();
}
Example 7: JoinQueryWeight
import org.apache.solr.core.SolrCore; // import the package/class that this method depends on
public JoinQueryWeight(SolrIndexSearcher searcher) {
  this.fromSearcher = searcher;
  SolrRequestInfo info = SolrRequestInfo.getRequestInfo();
  if (info != null) {
    rb = info.getResponseBuilder();
  }
  if (fromIndex == null) {
    this.fromSearcher = searcher;
  } else {
    if (info == null) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Cross-core join must have SolrRequestInfo");
    }
    CoreContainer container = searcher.getCore().getCoreDescriptor().getCoreContainer();
    final SolrCore fromCore = container.getCore(fromIndex);
    if (fromCore == null) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Cross-core join: no such core " + fromIndex);
    }
    if (info.getReq().getCore() == fromCore) {
      // if this is the same core, use the searcher passed in... otherwise we could be warming and
      // get an older searcher from the core.
      fromSearcher = searcher;
    } else {
      // This could block if there is a static warming query with a join in it, and if useColdSearcher is true.
      // Deadlock could result if two cores both had useColdSearcher and had joins that used each other.
      // This would be very predictable though (should happen every time if misconfigured)
      fromRef = fromCore.getSearcher(false, true, null);
      // be careful not to do anything with this searcher that requires the thread local
      // SolrRequestInfo in a manner that requires the core in the request to match
      fromSearcher = fromRef.get();
    }
    if (fromRef != null) {
      final RefCounted<SolrIndexSearcher> ref = fromRef;
      info.addCloseHook(new Closeable() {
        @Override
        public void close() {
          ref.decref();
        }
      });
    }
    info.addCloseHook(new Closeable() {
      @Override
      public void close() {
        fromCore.close();
      }
    });
  }
  this.toSearcher = searcher;
}
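The cross-core branch in Example 7 is easy to get wrong, so the borrow-and-release pattern it uses is restated below in condensed form: a core obtained from the CoreContainer must be closed, and a searcher obtained from that core must be released with decref(), both of which are deferred to request close hooks. This is a sketch only; otherCoreName is a placeholder, and the two releases are combined into a single hook for brevity, whereas Example 7 registers them separately.

// Condensed sketch of the cross-core acquire/release pattern (placeholder names).
CoreContainer container = searcher.getCore().getCoreDescriptor().getCoreContainer();
final SolrCore otherCore = container.getCore(otherCoreName); // increments the core's reference count
final RefCounted<SolrIndexSearcher> otherRef = otherCore.getSearcher(false, true, null);
SolrIndexSearcher otherSearcher = otherRef.get();
SolrRequestInfo.getRequestInfo().addCloseHook(new Closeable() {
  @Override
  public void close() {
    otherRef.decref();  // release the borrowed searcher
    otherCore.close();  // release the core obtained from the container
  }
});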
Example 8: testStandAlone
import org.apache.solr.core.SolrCore; // import the package/class that this method depends on
@Test
public void testStandAlone() throws Exception {
  SolrCore core = h.getCore();
  WordBreakSolrSpellChecker checker = new WordBreakSolrSpellChecker();
  NamedList<String> params = new NamedList<>();
  params.add("field", "lowerfilt");
  params.add(WordBreakSolrSpellChecker.PARAM_BREAK_WORDS, "true");
  params.add(WordBreakSolrSpellChecker.PARAM_COMBINE_WORDS, "true");
  params.add(WordBreakSolrSpellChecker.PARAM_MAX_CHANGES, "10");
  checker.init(params, core);
  RefCounted<SolrIndexSearcher> searcher = core.getSearcher();
  QueryConverter qc = new SpellingQueryConverter();
  qc.setAnalyzer(new MockAnalyzer(random()));
  Collection<Token> tokens = qc.convert("paintable pine apple good ness");
  SpellingOptions spellOpts = new SpellingOptions(tokens, searcher.get().getIndexReader(), 10);
  SpellingResult result = checker.getSuggestions(spellOpts);
  searcher.decref();
  assertTrue(result != null && result.getSuggestions() != null);
  assertTrue(result.getSuggestions().size()==6);
  for(Map.Entry<Token, LinkedHashMap<String, Integer>> s : result.getSuggestions().entrySet()) {
    Token orig = s.getKey();
    String[] corr = s.getValue().keySet().toArray(new String[0]);
    if(orig.toString().equals("paintable")) {
      assertTrue(orig.startOffset()==0);
      assertTrue(orig.endOffset()==9);
      assertTrue(orig.length()==9);
      assertTrue(corr.length==3);
      assertTrue(corr[0].equals("paint able")); //1 op ; max doc freq=5
      assertTrue(corr[1].equals("pain table")); //1 op ; max doc freq=2
      assertTrue(corr[2].equals("pa in table")); //2 ops
    } else if(orig.toString().equals("pine apple")) {
      assertTrue(orig.startOffset()==10);
      assertTrue(orig.endOffset()==20);
      assertTrue(orig.length()==10);
      assertTrue(corr.length==1);
      assertTrue(corr[0].equals("pineapple"));
    } else if(orig.toString().equals("paintable pine")) {
      assertTrue(orig.startOffset()==0);
      assertTrue(orig.endOffset()==14);
      assertTrue(orig.length()==14);
      assertTrue(corr.length==1);
      assertTrue(corr[0].equals("paintablepine"));
    } else if(orig.toString().equals("good ness")) {
      assertTrue(orig.startOffset()==21);
      assertTrue(orig.endOffset()==30);
      assertTrue(orig.length()==9);
      assertTrue(corr.length==1);
      assertTrue(corr[0].equals("goodness"));
    } else if(orig.toString().equals("pine apple good ness")) {
      assertTrue(orig.startOffset()==10);
      assertTrue(orig.endOffset()==30);
      assertTrue(orig.length()==20);
      assertTrue(corr.length==1);
      assertTrue(corr[0].equals("pineapplegoodness"));
    } else if(orig.toString().equals("pine")) {
      assertTrue(orig.startOffset()==10);
      assertTrue(orig.endOffset()==14);
      assertTrue(orig.length()==4);
      assertTrue(corr.length==1);
      assertTrue(corr[0].equals("pi ne"));
    } else {
      fail("Unexpected original result: " + orig);
    }
  }
}
Example 9: testSpelling
import org.apache.solr.core.SolrCore; // import the package/class that this method depends on
@Test
public void testSpelling() throws Exception {
  IndexBasedSpellChecker checker = new IndexBasedSpellChecker();
  NamedList spellchecker = new NamedList();
  spellchecker.add("classname", IndexBasedSpellChecker.class.getName());
  File indexDir = createTempDir();
  spellchecker.add(AbstractLuceneSpellChecker.INDEX_DIR, indexDir.getAbsolutePath());
  spellchecker.add(AbstractLuceneSpellChecker.FIELD, "title");
  spellchecker.add(AbstractLuceneSpellChecker.SPELLCHECKER_ARG_NAME, spellchecker);
  SolrCore core = h.getCore();
  String dictName = checker.init(spellchecker, core);
  assertTrue(dictName + " is not equal to " + SolrSpellChecker.DEFAULT_DICTIONARY_NAME,
      dictName.equals(SolrSpellChecker.DEFAULT_DICTIONARY_NAME) == true);
  RefCounted<SolrIndexSearcher> holder = core.getSearcher();
  SolrIndexSearcher searcher = holder.get();
  try {
    checker.build(core, searcher);
    IndexReader reader = searcher.getIndexReader();
    Collection<Token> tokens = queryConverter.convert("documemt");
    SpellingOptions spellOpts = new SpellingOptions(tokens, reader);
    SpellingResult result = checker.getSuggestions(spellOpts);
    assertTrue("result is null and it shouldn't be", result != null);
    //should be lowercased, b/c we are using a lowercasing analyzer
    Map<String, Integer> suggestions = result.get(spellOpts.tokens.iterator().next());
    assertTrue("documemt is null and it shouldn't be", suggestions != null);
    assertTrue("documemt Size: " + suggestions.size() + " is not: " + 1, suggestions.size() == 1);
    Map.Entry<String, Integer> entry = suggestions.entrySet().iterator().next();
    assertTrue(entry.getKey() + " is not equal to " + "document", entry.getKey().equals("document") == true);
    assertTrue(entry.getValue() + " does not equal: " + SpellingResult.NO_FREQUENCY_INFO, entry.getValue() == SpellingResult.NO_FREQUENCY_INFO);
    //test something not in the spell checker
    spellOpts.tokens = queryConverter.convert("super");
    result = checker.getSuggestions(spellOpts);
    assertTrue("result is null and it shouldn't be", result != null);
    suggestions = result.get(spellOpts.tokens.iterator().next());
    assertTrue("suggestions size should be 0", suggestions.size()==0);
    //test something that is spelled correctly
    spellOpts.tokens = queryConverter.convert("document");
    result = checker.getSuggestions(spellOpts);
    assertTrue("result is null and it shouldn't be", result != null);
    suggestions = result.get(spellOpts.tokens.iterator().next());
    assertTrue("suggestions is not null and it should be", suggestions == null);
    //Has multiple possibilities, but the exact exists, so that should be returned
    spellOpts.tokens = queryConverter.convert("red");
    spellOpts.count = 2;
    result = checker.getSuggestions(spellOpts);
    assertNotNull(result);
    suggestions = result.get(spellOpts.tokens.iterator().next());
    assertTrue("suggestions is not null and it should be", suggestions == null);
    //Try out something which should have multiple suggestions
    spellOpts.tokens = queryConverter.convert("bug");
    result = checker.getSuggestions(spellOpts);
    assertNotNull(result);
    suggestions = result.get(spellOpts.tokens.iterator().next());
    assertNotNull(suggestions);
    assertTrue("suggestions Size: " + suggestions.size() + " is not: " + 2, suggestions.size() == 2);
    entry = suggestions.entrySet().iterator().next();
    assertTrue(entry.getKey() + " is equal to " + "bug and it shouldn't be", entry.getKey().equals("bug") == false);
    assertTrue(entry.getValue() + " does not equal: " + SpellingResult.NO_FREQUENCY_INFO, entry.getValue() == SpellingResult.NO_FREQUENCY_INFO);
    entry = suggestions.entrySet().iterator().next();
    assertTrue(entry.getKey() + " is equal to " + "bug and it shouldn't be", entry.getKey().equals("bug") == false);
    assertTrue(entry.getValue() + " does not equal: " + SpellingResult.NO_FREQUENCY_INFO, entry.getValue() == SpellingResult.NO_FREQUENCY_INFO);
  } finally {
    holder.decref();
  }
}
Example 10: testExtendedResults
import org.apache.solr.core.SolrCore; // import the package/class that this method depends on
@Test
public void testExtendedResults() throws Exception {
  IndexBasedSpellChecker checker = new IndexBasedSpellChecker();
  NamedList spellchecker = new NamedList();
  spellchecker.add("classname", IndexBasedSpellChecker.class.getName());
  File indexDir = createTempDir();
  indexDir.mkdirs();
  spellchecker.add(AbstractLuceneSpellChecker.INDEX_DIR, indexDir.getAbsolutePath());
  spellchecker.add(AbstractLuceneSpellChecker.FIELD, "title");
  spellchecker.add(AbstractLuceneSpellChecker.SPELLCHECKER_ARG_NAME, spellchecker);
  SolrCore core = h.getCore();
  String dictName = checker.init(spellchecker, core);
  assertTrue(dictName + " is not equal to " + SolrSpellChecker.DEFAULT_DICTIONARY_NAME,
      dictName.equals(SolrSpellChecker.DEFAULT_DICTIONARY_NAME) == true);
  RefCounted<SolrIndexSearcher> holder = core.getSearcher();
  SolrIndexSearcher searcher = holder.get();
  try {
    checker.build(core, searcher);
    IndexReader reader = searcher.getIndexReader();
    Collection<Token> tokens = queryConverter.convert("documemt");
    SpellingOptions spellOpts = new SpellingOptions(tokens, reader, 1, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX, true, 0.5f, null);
    SpellingResult result = checker.getSuggestions(spellOpts);
    assertTrue("result is null and it shouldn't be", result != null);
    //should be lowercased, b/c we are using a lowercasing analyzer
    Map<String, Integer> suggestions = result.get(spellOpts.tokens.iterator().next());
    assertTrue("documemt is null and it shouldn't be", suggestions != null);
    assertTrue("documemt Size: " + suggestions.size() + " is not: " + 1, suggestions.size() == 1);
    Map.Entry<String, Integer> entry = suggestions.entrySet().iterator().next();
    assertTrue(entry.getKey() + " is not equal to " + "document", entry.getKey().equals("document") == true);
    assertTrue(entry.getValue() + " does not equal: " + 2, entry.getValue() == 2);
    //test something not in the spell checker
    spellOpts.tokens = queryConverter.convert("super");
    result = checker.getSuggestions(spellOpts);
    assertTrue("result is null and it shouldn't be", result != null);
    suggestions = result.get(spellOpts.tokens.iterator().next());
    assertTrue("suggestions size should be 0", suggestions.size()==0);
    spellOpts.tokens = queryConverter.convert("document");
    result = checker.getSuggestions(spellOpts);
    assertTrue("result is null and it shouldn't be", result != null);
    suggestions = result.get(spellOpts.tokens.iterator().next());
    assertTrue("suggestions is not null and it should be", suggestions == null);
  } finally {
    holder.decref();
  }
}
Example 11: testAlternateLocation
import org.apache.solr.core.SolrCore; // import the package/class that this method depends on
@Test
public void testAlternateLocation() throws Exception {
  String[] ALT_DOCS = new String[]{
      "jumpin jack flash",
      "Sargent Peppers Lonely Hearts Club Band",
      "Born to Run",
      "Thunder Road",
      "Londons Burning",
      "A Horse with No Name",
      "Sweet Caroline"
  };
  IndexBasedSpellChecker checker = new IndexBasedSpellChecker();
  NamedList spellchecker = new NamedList();
  spellchecker.add("classname", IndexBasedSpellChecker.class.getName());
  File tmpDir = createTempDir();
  File indexDir = new File(tmpDir, "spellingIdx");
  //create a standalone index
  File altIndexDir = new File(tmpDir, "alternateIdx" + new Date().getTime());
  Directory dir = newFSDirectory(altIndexDir);
  IndexWriter iw = new IndexWriter(
      dir,
      new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer())
  );
  for (int i = 0; i < ALT_DOCS.length; i++) {
    Document doc = new Document();
    doc.add(new TextField("title", ALT_DOCS[i], Field.Store.YES));
    iw.addDocument(doc);
  }
  iw.forceMerge(1);
  iw.close();
  dir.close();
  indexDir.mkdirs();
  spellchecker.add(AbstractLuceneSpellChecker.INDEX_DIR, indexDir.getAbsolutePath());
  spellchecker.add(AbstractLuceneSpellChecker.LOCATION, altIndexDir.getAbsolutePath());
  spellchecker.add(AbstractLuceneSpellChecker.FIELD, "title");
  spellchecker.add(AbstractLuceneSpellChecker.SPELLCHECKER_ARG_NAME, spellchecker);
  SolrCore core = h.getCore();
  String dictName = checker.init(spellchecker, core);
  assertTrue(dictName + " is not equal to " + SolrSpellChecker.DEFAULT_DICTIONARY_NAME,
      dictName.equals(SolrSpellChecker.DEFAULT_DICTIONARY_NAME) == true);
  RefCounted<SolrIndexSearcher> holder = core.getSearcher();
  SolrIndexSearcher searcher = holder.get();
  try {
    checker.build(core, searcher);
    IndexReader reader = searcher.getIndexReader();
    Collection<Token> tokens = queryConverter.convert("flesh");
    SpellingOptions spellOpts = new SpellingOptions(tokens, reader, 1, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX, true, 0.5f, null);
    SpellingResult result = checker.getSuggestions(spellOpts);
    assertTrue("result is null and it shouldn't be", result != null);
    //should be lowercased, b/c we are using a lowercasing analyzer
    Map<String, Integer> suggestions = result.get(spellOpts.tokens.iterator().next());
    assertTrue("flesh is null and it shouldn't be", suggestions != null);
    assertTrue("flesh Size: " + suggestions.size() + " is not: " + 1, suggestions.size() == 1);
    Map.Entry<String, Integer> entry = suggestions.entrySet().iterator().next();
    assertTrue(entry.getKey() + " is not equal to " + "flash", entry.getKey().equals("flash") == true);
    assertTrue(entry.getValue() + " does not equal: " + 1, entry.getValue() == 1);
    //test something not in the spell checker
    spellOpts.tokens = queryConverter.convert("super");
    result = checker.getSuggestions(spellOpts);
    assertTrue("result is null and it shouldn't be", result != null);
    suggestions = result.get(spellOpts.tokens.iterator().next());
    assertTrue("suggestions size should be 0", suggestions.size()==0);
    spellOpts.tokens = queryConverter.convert("Caroline");
    result = checker.getSuggestions(spellOpts);
    assertTrue("result is null and it shouldn't be", result != null);
    suggestions = result.get(spellOpts.tokens.iterator().next());
    assertTrue("suggestions is not null and it should be", suggestions == null);
  } finally {
    holder.decref();
  }
}