This article collects typical usage examples of the Java method org.apache.lucene.index.memory.MemoryIndex.addField. If you are unsure what MemoryIndex.addField does, how to call it, or where it fits, the curated samples below should help; see the enclosing class org.apache.lucene.index.memory.MemoryIndex for further context.
The following shows 9 code examples of MemoryIndex.addField, ordered by popularity.
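Before the examples, here is a minimal, self-contained sketch of the most common overload, addField(String fieldName, String text, Analyzer analyzer), which analyzes a string value and indexes it under the given field name. The class name, field name, sample text, and query string below are illustrative assumptions, not taken from the examples that follow.

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.queryparser.classic.QueryParser;

public class MemoryIndexAddFieldSketch {
    public static void main(String[] args) throws Exception {
        Analyzer analyzer = new StandardAnalyzer();
        MemoryIndex index = new MemoryIndex();
        // addField analyzes the text with the given analyzer and indexes the
        // resulting tokens under the field name, entirely in memory.
        index.addField("title", "quick brown fox", analyzer);
        // search scores the query against the single in-memory document;
        // it returns 0.0f when nothing matches.
        float score = index.search(new QueryParser("title", analyzer).parse("fox"));
        System.out.println(score > 0.0f ? "match, score=" + score : "no match");
    }
}

A MemoryIndex holds exactly one document, which is what makes it useful for one-off checks such as the term-vector, percolator, and classification examples below.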
Example 1: generateTermVectors
import org.apache.lucene.index.memory.MemoryIndex; // import the package/class this method depends on
private Fields generateTermVectors(Collection<GetField> getFields, boolean withOffsets, @Nullable Map<String, String> perFieldAnalyzer, Set<String> fields)
        throws IOException {
    /* store document in memory index */
    MemoryIndex index = new MemoryIndex(withOffsets);
    for (GetField getField : getFields) {
        String field = getField.getName();
        if (fields.contains(field) == false) {
            // some fields are returned even when not asked for, e.g. _timestamp
            continue;
        }
        Analyzer analyzer = getAnalyzerAtField(field, perFieldAnalyzer);
        for (Object text : getField.getValues()) {
            index.addField(field, text.toString(), analyzer);
        }
    }
    /* and read vectors from it */
    return MultiFields.getFields(index.createSearcher().getIndexReader());
}
Example 2: indexDoc
import org.apache.lucene.index.memory.MemoryIndex; // import the package/class this method depends on
MemoryIndex indexDoc(ParseContext.Document d, Analyzer analyzer, MemoryIndex memoryIndex) {
    for (IndexableField field : d.getFields()) {
        if (field.fieldType().indexOptions() == IndexOptions.NONE && field.name().equals(UidFieldMapper.NAME)) {
            continue;
        }
        try {
            // TODO: instead of passing null here, we can have a CTL<Map<String,TokenStream>> and pass previous,
            // like the indexer does
            try (TokenStream tokenStream = field.tokenStream(analyzer, null)) {
                if (tokenStream != null) {
                    memoryIndex.addField(field.name(), tokenStream, field.boost());
                }
            }
        } catch (IOException e) {
            throw new ElasticsearchException("Failed to create token stream", e);
        }
    }
    return memoryIndex;
}
Example 3: prepare
import org.apache.lucene.index.memory.MemoryIndex; // import the package/class this method depends on
@Override
public void prepare(PercolateContext context, ParsedDocument parsedDocument) {
    MemoryIndex memoryIndex = cache.get();
    for (IndexableField field : parsedDocument.rootDoc().getFields()) {
        if (field.fieldType().indexOptions() == IndexOptions.NONE && field.name().equals(UidFieldMapper.NAME)) {
            continue;
        }
        try {
            Analyzer analyzer = context.mapperService().documentMapper(parsedDocument.type()).mappers().indexAnalyzer();
            // TODO: instead of passing null here, we can have a CTL<Map<String,TokenStream>> and pass previous,
            // like the indexer does
            try (TokenStream tokenStream = field.tokenStream(analyzer, null)) {
                if (tokenStream != null) {
                    memoryIndex.addField(field.name(), tokenStream, field.boost());
                }
            }
        } catch (Exception e) {
            throw new ElasticsearchException("Failed to create token stream for [" + field.name() + "]", e);
        }
    }
    context.initialize(new DocEngineSearcher(memoryIndex), parsedDocument);
}
Example 4: main
import org.apache.lucene.index.memory.MemoryIndex; // import the package/class this method depends on
public static void main(String[] args) throws ParseException {
    Analyzer analyzer = new StandardAnalyzer();
    MemoryIndex index = new MemoryIndex();
    Map<String, String> event = new HashMap<>();
    event.put("content", "Readings about Salmons and other select Alaska fishing Manuals");
    event.put("author", "Tales of James");
    for (Entry<String, String> entry : event.entrySet()) {
        index.addField(entry.getKey(), entry.getValue(), analyzer);
    }
    QueryParser parser = new QueryParser("content", analyzer);
    Query query = parser.parse("+author:james +salmon~ +fish* manual~");
    float score = index.search(query);
    if (score > 0.0f) {
        System.out.println("it's a match");
    } else {
        System.out.println("no match found");
    }
    System.out.println("indexData=" + index.toString());
}
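In this example the parser's default field is "content", so +salmon~ and +fish* must match the content value (they match the analyzed tokens "salmons" and "fishing"), +author:james must match the author value, and manual~ is an optional clause. Since every required clause matches, the score is positive and "it's a match" is printed.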
Example 5: classification
import org.apache.lucene.index.memory.MemoryIndex; // import the package/class this method depends on
public void classification(Client client, IndexDocument document)
        throws SearchLibException, ParseException, SyntaxError, IOException {
    rwl.r.lock();
    try {
        MemoryIndex index = new MemoryIndex();
        LanguageEnum lang = document.getLang();
        Analyzer analyzer = client.getSchema().getIndexPerFieldAnalyzer(lang);
        for (FieldContent fieldContent : document) {
            String fieldName = fieldContent.getField();
            String concatValues = fieldContent.getMergedValues(" ");
            index.addField(fieldName, concatValues, analyzer);
        }
        if (method == ClassificationMethodEnum.MULTIVALUED)
            multivaluedClassification(client, document, lang, index);
        else if (method == ClassificationMethodEnum.BESTSCORE)
            bestScoreClassification(client, document, lang, index);
    } finally {
        rwl.r.unlock();
    }
}
Example 6: testCreateCandidateQuery
import org.apache.lucene.index.memory.MemoryIndex; // import the package/class this method depends on
public void testCreateCandidateQuery() throws Exception {
    addQueryMapping();
    MemoryIndex memoryIndex = new MemoryIndex(false);
    memoryIndex.addField("field1", "the quick brown fox jumps over the lazy dog", new WhitespaceAnalyzer());
    memoryIndex.addField("field2", "some more text", new WhitespaceAnalyzer());
    memoryIndex.addField("_field3", "unhide me", new WhitespaceAnalyzer());
    memoryIndex.addField("field4", "123", new WhitespaceAnalyzer());
    memoryIndex.addField(new LongPoint("number_field", 10L), new WhitespaceAnalyzer());
    IndexReader indexReader = memoryIndex.createSearcher().getIndexReader();
    BooleanQuery candidateQuery = (BooleanQuery) fieldType.createCandidateQuery(indexReader);
    assertEquals(2, candidateQuery.clauses().size());
    assertEquals(Occur.SHOULD, candidateQuery.clauses().get(0).getOccur());
    TermInSetQuery termsQuery = (TermInSetQuery) candidateQuery.clauses().get(0).getQuery();
    PrefixCodedTerms terms = termsQuery.getTermData();
    assertThat(terms.size(), equalTo(14L));
    PrefixCodedTerms.TermIterator termIterator = terms.iterator();
    assertTermIterator(termIterator, "_field3\u0000me", fieldType.queryTermsField.name());
    assertTermIterator(termIterator, "_field3\u0000unhide", fieldType.queryTermsField.name());
    assertTermIterator(termIterator, "field1\u0000brown", fieldType.queryTermsField.name());
    assertTermIterator(termIterator, "field1\u0000dog", fieldType.queryTermsField.name());
    assertTermIterator(termIterator, "field1\u0000fox", fieldType.queryTermsField.name());
    assertTermIterator(termIterator, "field1\u0000jumps", fieldType.queryTermsField.name());
    assertTermIterator(termIterator, "field1\u0000lazy", fieldType.queryTermsField.name());
    assertTermIterator(termIterator, "field1\u0000over", fieldType.queryTermsField.name());
    assertTermIterator(termIterator, "field1\u0000quick", fieldType.queryTermsField.name());
    assertTermIterator(termIterator, "field1\u0000the", fieldType.queryTermsField.name());
    assertTermIterator(termIterator, "field2\u0000more", fieldType.queryTermsField.name());
    assertTermIterator(termIterator, "field2\u0000some", fieldType.queryTermsField.name());
    assertTermIterator(termIterator, "field2\u0000text", fieldType.queryTermsField.name());
    assertTermIterator(termIterator, "field4\u0000123", fieldType.queryTermsField.name());
    assertEquals(Occur.SHOULD, candidateQuery.clauses().get(1).getOccur());
    assertEquals(new TermQuery(new Term(fieldType.extractionResultField.name(), EXTRACTION_FAILED)),
            candidateQuery.clauses().get(1).getQuery());
}
Example 7: generateFields
import org.apache.lucene.index.memory.MemoryIndex; // import the package/class this method depends on
/**
 * Here we could go overboard and use a pre-generated indexed random document for a given Item,
 * but for now we'd prefer to simply return the id as the content of the document and that for
 * every field.
 */
private static Fields generateFields(String[] fieldNames, String text) throws IOException {
    MemoryIndex index = new MemoryIndex();
    for (String fieldName : fieldNames) {
        index.addField(fieldName, text, new WhitespaceAnalyzer());
    }
    return MultiFields.getFields(index.createSearcher().getIndexReader());
}
Example 8: main
import org.apache.lucene.index.memory.MemoryIndex; // import the package/class this method depends on
public static void main(String[] args) throws ParseException, IOException {
    MemoryIndex index = new MemoryIndex();
    Analyzer analyzer = new StandardAnalyzer();
    StringField field3 = new StringField(AUTHOR, FULL_NAME, Store.YES);
    index.addField(field3, analyzer);
    Query query = new TermQuery(new Term(AUTHOR, FULL_NAME));
    search(index, query);
    query = new TermQuery(new Term(AUTHOR, FIRST_NAME));
    search(index, query);
    query = new TermQuery(new Term(AUTHOR, LAST_NAME));
    search(index, query);
}
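This example exercises the addField(IndexableField, Analyzer) overload. Assuming FULL_NAME is the concatenation of FIRST_NAME and LAST_NAME (the constants are not shown here), only the first TermQuery should match: StringField is indexed as a single untokenized term, so the full-name string is one term in the index and the individual name parts are not.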
Example 9: searchScore
import org.apache.lucene.index.memory.MemoryIndex; // import the package/class this method depends on
public final double searchScore(final String fieldName,
        final CompiledAnalyzer analyzer, final Query query) {
    searchScore = 0;
    if (query == null || analyzer == null)
        return 0;
    MemoryIndex index = new MemoryIndex();
    index.addField(fieldName, originalText, analyzer);
    searchScore = index.search(query);
    return searchScore;
}