This article collects typical usage examples of the Java class org.apache.lucene.analysis.core.KeywordAnalyzer. If you have been asking yourself what KeywordAnalyzer is for, how to use it, or what real code that uses it looks like, the curated examples below should help.
KeywordAnalyzer lives in the org.apache.lucene.analysis.core package. It "analyzes" a field by emitting the entire input as a single token, which makes it the usual choice for exact-match fields such as identifiers, codes, and paths. Fifteen code examples drawn from open-source projects are shown below, ordered by popularity.
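Before the examples, a quick illustration of that behavior: run a multi-word string through KeywordAnalyzer and exactly one token comes out, the unmodified input. This is a minimal sketch of our own (not taken from the projects below), assuming a recent Lucene on the classpath:

import java.io.IOException;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class KeywordAnalyzerDemo {
    public static void main(String[] args) throws IOException {
        try (KeywordAnalyzer analyzer = new KeywordAnalyzer();
             TokenStream stream = analyzer.tokenStream("id", "Foo Bar-Baz 42")) {
            CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
            stream.reset();
            while (stream.incrementToken()) {
                // Prints exactly once: [Foo Bar-Baz 42] -- no splitting, no lowercasing
                System.out.println("[" + term + "]");
            }
            stream.end();
        }
    }
}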
Example 1: testTopLevel
import org.apache.lucene.analysis.core.KeywordAnalyzer; // import the required package/class
public void testTopLevel() throws Exception {
    Aggregation result;
    if (randomBoolean()) {
        result = testCase(new MatchAllDocsQuery(), topHits("_name").sort("string", SortOrder.DESC));
    } else {
        Query query = new QueryParser("string", new KeywordAnalyzer()).parse("d^1000 c^100 b^10 a^1");
        result = testCase(query, topHits("_name"));
    }
    SearchHits searchHits = ((TopHits) result).getHits();
    assertEquals(3L, searchHits.getTotalHits());
    assertEquals("3", searchHits.getAt(0).getId());
    assertEquals("type", searchHits.getAt(0).getType());
    assertEquals("2", searchHits.getAt(1).getId());
    assertEquals("type", searchHits.getAt(1).getType());
    assertEquals("1", searchHits.getAt(2).getId());
    assertEquals("type", searchHits.getAt(2).getType());
}
Example 2: parseTokens
import org.apache.lucene.analysis.core.KeywordAnalyzer; // import the required package/class
/**
 * Parses the query. Using this instead of a QueryParser in order
 * to avoid thread-safety issues with Lucene's query parser.
 *
 * @param fieldName the name of the field
 * @param value the value of the field
 * @return the parsed query
 */
private Query parseTokens(String fieldName, String value) {
    BooleanQuery searchQuery = new BooleanQuery();
    if (value != null) {
        Analyzer analyzer = new KeywordAnalyzer();
        try {
            TokenStream tokenStream =
                analyzer.tokenStream(fieldName, new StringReader(value));
            tokenStream.reset();
            CharTermAttribute attr =
                tokenStream.getAttribute(CharTermAttribute.class);
            while (tokenStream.incrementToken()) {
                String term = attr.toString();
                Query termQuery = new TermQuery(new Term(fieldName, term));
                searchQuery.add(termQuery, Occur.SHOULD);
            }
        } catch (IOException e) {
            throw new DukeException("Error parsing input string '" + value + "' " +
                "in field " + fieldName);
        }
    }
    return searchQuery;
}
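Two details worth noting here: since KeywordAnalyzer emits the whole value as one token, the loop adds at most a single SHOULD clause per value, and the token stream is never closed (harmless only because a fresh analyzer is created per call; the TokenStream contract normally requires end() and close() after consumption). The more common answer to QueryParser's lack of thread safety is simply to build a fresh parser per call. A minimal sketch of that alternative, assuming a recent Lucene where the classic QueryParser takes (field, analyzer); the class and method names are ours:

import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.Query;

public class PerCallParsing {
    // QueryParser is cheap to construct, so a fresh instance per call
    // sidesteps the thread-safety problem without hand-building queries.
    static Query parseExact(String fieldName, String value) throws ParseException {
        QueryParser parser = new QueryParser(fieldName, new KeywordAnalyzer());
        // Escape reserved syntax characters, then quote the value so the parser
        // treats it as one unit; KeywordAnalyzer then yields a single term,
        // mirroring the hand-built query above.
        return parser.parse("\"" + QueryParser.escape(value) + "\"");
    }
}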
Example 3: testAnalyzerAlias
import org.apache.lucene.analysis.core.KeywordAnalyzer; // import the required package/class
public void testAnalyzerAlias() throws IOException {
    Settings settings = Settings.builder()
        .put("index.analysis.analyzer.foobar.alias", "default")
        .put("index.analysis.analyzer.foobar.type", "keyword")
        .put("index.analysis.analyzer.foobar_search.alias", "default_search")
        .put("index.analysis.analyzer.foobar_search.type", "english")
        .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
        // analyzer aliases are only allowed in 2.x indices
        .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_5))
        .build();
    AnalysisRegistry newRegistry = getNewRegistry(settings);
    IndexAnalyzers indexAnalyzers = getIndexAnalyzers(newRegistry, settings);
    assertThat(indexAnalyzers.get("default").analyzer(), is(instanceOf(KeywordAnalyzer.class)));
    assertThat(indexAnalyzers.get("default_search").analyzer(), is(instanceOf(EnglishAnalyzer.class)));
    assertWarnings("setting [index.analysis.analyzer.foobar.alias] is only allowed on index [test] because it was created before " +
        "5.x; analyzer aliases can no longer be created on new indices.",
        "setting [index.analysis.analyzer.foobar_search.alias] is only allowed on index [test] because it was created before " +
        "5.x; analyzer aliases can no longer be created on new indices.");
}
Example 4: FbEntitySearcher
import org.apache.lucene.analysis.core.KeywordAnalyzer; // import the required package/class
public FbEntitySearcher(String indexDir, int numOfDocs, String searchingStrategy) throws IOException {
    LogInfo.begin_track("Constructing Searcher");
    if (!searchingStrategy.equals("exact") && !searchingStrategy.equals("inexact"))
        throw new RuntimeException("Bad searching strategy: " + searchingStrategy);
    this.searchStrategy = searchingStrategy;
    queryParser = new QueryParser(
        Version.LUCENE_44,
        FbIndexField.TEXT.fieldName(),
        searchingStrategy.equals("exact") ? new KeywordAnalyzer() : new StandardAnalyzer(Version.LUCENE_44));
    LogInfo.log("Opening index dir: " + indexDir);
    IndexReader indexReader = DirectoryReader.open(SimpleFSDirectory.open(new File(indexDir)));
    indexSearcher = new IndexSearcher(indexReader);
    LogInfo.log("Opened index with " + indexReader.numDocs() + " documents.");
    this.numOfDocs = numOfDocs;
    LogInfo.end_track();
}
Example 5: loadAnalyzerFactory
import org.apache.lucene.analysis.core.KeywordAnalyzer; // import the required package/class
@Override
protected void loadAnalyzerFactory(Map<String, AnalyzerInfo> analyzerFactoryMap) {
    // extracts the entire input as a single word
    registerAnalyzer(analyzerFactoryMap, "keyword", "Keyword Analyzer", new DefaultAnalyzerFactory(KeywordAnalyzer.class));
    // Lucene StandardAnalyzer
    registerAnalyzer(analyzerFactoryMap, "standard", "Standard Analyzer", new DefaultAnalyzerFactory(StandardAnalyzer.class));
    registerAnalyzer(analyzerFactoryMap, "ngram", "NGram Analyzer", new DefaultAnalyzerFactory(NGramWordAnalyzer.class));
    registerAnalyzer(analyzerFactoryMap, "primary", "Primary Word Analyzer", new DefaultAnalyzerFactory(PrimaryWordAnalyzer.class));
    registerAnalyzer(analyzerFactoryMap, "whitespace", "Whitespace Analyzer", new DefaultAnalyzerFactory(WhitespaceAnalyzer.class));
    registerAnalyzer(analyzerFactoryMap, "csv", "Comma separated value Analyzer", new DefaultAnalyzerFactory(CSVAnalyzer.class));
    registerAnalyzer(analyzerFactoryMap, "autocomplete", "Autocomplete Analyzer", new DefaultAnalyzerFactory(AutocompleteAnalyzer.class));
}
Example 6: suggestEndpointOptions
import org.apache.lucene.analysis.core.KeywordAnalyzer; // import the required package/class
@Override
public String[] suggestEndpointOptions(Set<String> names, String unknownOption) {
    // each option must be on a separate line in a String
    StringBuilder sb = new StringBuilder();
    for (String name : names) {
        sb.append(name);
        sb.append("\n");
    }
    StringReader reader = new StringReader(sb.toString());
    try {
        PlainTextDictionary words = new PlainTextDictionary(reader);
        // use the in-memory Lucene spell checker to make the suggestions
        RAMDirectory dir = new RAMDirectory();
        SpellChecker checker = new SpellChecker(dir);
        checker.indexDictionary(words, new IndexWriterConfig(new KeywordAnalyzer()), false);
        return checker.suggestSimilar(unknownOption, maxSuggestions);
    } catch (Exception e) {
        // ignore
    }
    return null;
}
Example 7: CodeSearcher
import org.apache.lucene.analysis.core.KeywordAnalyzer; // import the required package/class
public CodeSearcher(String indexDir, String field) {
    logger.info("index directory: " + indexDir);
    this.field = field;
    this.indexDir = indexDir;
    try {
        this.reader = DirectoryReader.open(FSDirectory.open(new File(this.indexDir)));
    } catch (IOException e) {
        logger.error("can't get the reader to index dir, exiting, " + indexDir);
        e.printStackTrace();
        System.exit(1);
    }
    this.searcher = new IndexSearcher(this.reader);
    // TODO: pass the analyzer as an argument to the constructor
    // (previously: new WhitespaceAnalyzer(Version.LUCENE_46))
    this.analyzer = new KeywordAnalyzer();
    new CloneHelper(); // I don't remember why we are making this object
    this.queryParser = new QueryParser(Version.LUCENE_46, this.field, analyzer);
}
Example 8: prepareIndex
import org.apache.lucene.analysis.core.KeywordAnalyzer; // import the required package/class
public void prepareIndex() throws IOException {
    File globalWFMDIr = new File(Util.GTPM_INDEX_DIR);
    if (!globalWFMDIr.exists()) {
        Util.createDirs(Util.GTPM_INDEX_DIR);
    }
    KeywordAnalyzer keywordAnalyzer = new KeywordAnalyzer();
    IndexWriterConfig wfmIndexWriterConfig = new IndexWriterConfig(Version.LUCENE_46, keywordAnalyzer);
    wfmIndexWriterConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
    wfmIndexWriterConfig.setRAMBufferSizeMB(1024);
    logger.info("PREPARE INDEX");
    try {
        wfmIndexWriter = new IndexWriter(FSDirectory.open(new File(Util.GTPM_INDEX_DIR)), wfmIndexWriterConfig);
        wfmIndexWriter.commit();
        wfmIndexer = new DocumentMaker(wfmIndexWriter);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
Example 9: VectorCache
import org.apache.lucene.analysis.core.KeywordAnalyzer; // import the required package/class
public VectorCache(int dimension, int cacheSize) throws IOException {
    this.dimension = dimension;
    this.cacheSize = cacheSize;
    this.vectorCache = CacheBuilder.newBuilder()
        .maximumSize(this.cacheSize)
        .build(new CacheLoader<String, Vector>() {
            @Override
            public Vector load(String key) throws IOException {
                return getVectorFromIndex(key);
            }
        });
    IndexWriterConfig iwc = new IndexWriterConfig(Version.LATEST, new KeywordAnalyzer());
    writer = new IndexWriter(FSDirectory.open(new File("./VC_" + ID)), iwc);
    dirReader = DirectoryReader.open(writer, true);
    searcher = new IndexSearcher(dirReader);
}
Example 10: openIndexForSearching
import org.apache.lucene.analysis.core.KeywordAnalyzer; // import the required package/class
public void openIndexForSearching(boolean useDerivedIndex) {
    try {
        if (useDerivedIndex)
            reader = DirectoryReader.open(FSDirectory.open(new File(folder + "/" + DERIVED_INDEX_FOLDER)));
        else
            reader = DirectoryReader.open(FSDirectory.open(new File(folder + "/" + MAIN_INDEX_FOLDER)));
        searcher = new IndexSearcher(reader);
        searcher.setSimilarity(new DefaultSimilarity());
        BooleanQuery.setMaxClauseCount(Integer.MAX_VALUE);
        QueryParser typeQueryParser = new QueryParser(Version.LUCENE_4_9, "TYPE", new KeywordAnalyzer());
        conceptQuery = typeQueryParser.parse(CONCEPT_TYPE_STRING);
        conceptIdQueryParser = new QueryParser(Version.LUCENE_4_9, "CONCEPT_ID", new KeywordAnalyzer());
        conceptClassQueryParser = new QueryParser(Version.LUCENE_4_9, "CONCEPT_CLASS_ID", new KeywordAnalyzer());
        vocabularyQueryParser = new QueryParser(Version.LUCENE_4_9, "VOCABULARY_ID", new KeywordAnalyzer());
        keywordsQueryParser = new QueryParser(Version.LUCENE_4_9, "TERM", analyzer);
        domainQueryParser = new QueryParser(Version.LUCENE_4_9, "DOMAIN_ID", new KeywordAnalyzer());
        standardConceptQueryParser = new QueryParser(Version.LUCENE_4_9, "STANDARD_CONCEPT", new KeywordAnalyzer());
        termTypeQueryParser = new QueryParser(Version.LUCENE_4_9, "TERM_TYPE", new KeywordAnalyzer());
        numDocs = reader.numDocs();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
Example 11: VocabularyNeo4jImpl
import org.apache.lucene.analysis.core.KeywordAnalyzer; // import the required package/class
@Inject
public VocabularyNeo4jImpl(GraphDatabaseService graph,
        @Nullable @IndicatesNeo4jGraphLocation String neo4jLocation, CurieUtil curieUtil,
        NodeTransformer transformer) throws IOException {
    this.graph = graph;
    this.curieUtil = curieUtil;
    this.transformer = transformer;
    if (null != neo4jLocation) {
        Directory indexDirectory =
            FSDirectory.open(new File(new File(neo4jLocation), "index/lucene/node/node_auto_index").toPath());
        Directory spellDirectory =
            FSDirectory.open(new File(new File(neo4jLocation), "index/lucene/spellchecker").toPath());
        spellChecker = new SpellChecker(spellDirectory);
        try (IndexReader reader = DirectoryReader.open(indexDirectory)) {
            IndexWriterConfig config = new IndexWriterConfig(new KeywordAnalyzer());
            spellChecker.indexDictionary(new LuceneDictionary(reader, NodeProperties.LABEL
                + LuceneUtils.EXACT_SUFFIX), config, true);
        }
    } else {
        spellChecker = null;
    }
}
Example 12: setup
import org.apache.lucene.analysis.core.KeywordAnalyzer; // import the required package/class
@Before
public void setup() throws IOException {
    TableContext.clear();
    _base = new File(TMPDIR, "MutatableActionTest");
    rmr(_base);
    File file = new File(_base, TABLE);
    file.mkdirs();
    TableContext.clear();
    TableDescriptor tableDescriptor = new TableDescriptor();
    tableDescriptor.setName("test");
    tableDescriptor.setTableUri(file.toURI().toString());
    TableContext tableContext = TableContext.create(tableDescriptor);
    ShardContext shardContext = ShardContext.create(tableContext, "test");
    _action = new MutatableAction(shardContext);
    _conf = new IndexWriterConfig(Version.LUCENE_43, new KeywordAnalyzer());
}
Example 13: createShard
import org.apache.lucene.analysis.core.KeywordAnalyzer; // import the required package/class
private static void createShard(Configuration configuration, int i, Path path, int totalShardCount)
        throws IOException {
    HdfsDirectory hdfsDirectory = new HdfsDirectory(configuration, path);
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43, new KeywordAnalyzer());
    TieredMergePolicy mergePolicy = (TieredMergePolicy) conf.getMergePolicy();
    mergePolicy.setUseCompoundFile(false);
    IndexWriter indexWriter = new IndexWriter(hdfsDirectory, conf);
    Partitioner<IntWritable, IntWritable> partitioner = new HashPartitioner<IntWritable, IntWritable>();
    int partition = partitioner.getPartition(new IntWritable(i), null, totalShardCount);
    assertEquals(i, partition);
    Document doc = getDoc(i);
    indexWriter.addDocument(doc);
    indexWriter.close();
}
Example 14: getReader
import org.apache.lucene.analysis.core.KeywordAnalyzer; // import the required package/class
private IndexReader getReader() throws CorruptIndexException, LockObtainFailedException, IOException {
    RAMDirectory directory = new RAMDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(LUCENE_VERSION, new KeywordAnalyzer());
    IndexWriter writer = new IndexWriter(directory, conf);
    Document doc = new Document();
    doc.add(new StringField(BlurConstants.PRIME_DOC, BlurConstants.PRIME_DOC_VALUE, Store.NO));
    doc.add(new StringField("a", "b", Store.YES));
    doc.add(new StringField("family", "f1", Store.YES));
    Document doc1 = new Document();
    doc1.add(new StringField("a", "b", Store.YES));
    writer.addDocument(doc);
    writer.addDocument(doc1);
    writer.close();
    return DirectoryReader.open(directory);
}
Example 15: getReaderWithDocsHavingFamily
import org.apache.lucene.analysis.core.KeywordAnalyzer; // import the required package/class
private IndexReader getReaderWithDocsHavingFamily() throws CorruptIndexException, LockObtainFailedException,
        IOException {
    RAMDirectory directory = new RAMDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(LUCENE_VERSION, new KeywordAnalyzer());
    IndexWriter writer = new IndexWriter(directory, conf);
    Document doc = new Document();
    doc.add(new StringField(BlurConstants.PRIME_DOC, BlurConstants.PRIME_DOC_VALUE, Store.NO));
    doc.add(new StringField("a", "b", Store.YES));
    doc.add(new StringField("family", "f2", Store.YES));
    Document doc1 = new Document();
    doc1.add(new StringField("a", "b", Store.YES));
    doc1.add(new StringField("family", "f1", Store.YES));
    writer.addDocument(doc);
    writer.addDocument(doc1);
    writer.close();
    return DirectoryReader.open(directory);
}