本文整理汇总了Java中org.apache.lucene.analysis.PerFieldAnalyzerWrapper类的典型用法代码示例。如果您正苦于以下问题:Java PerFieldAnalyzerWrapper类的具体用法?Java PerFieldAnalyzerWrapper怎么用?Java PerFieldAnalyzerWrapper使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
PerFieldAnalyzerWrapper类属于org.apache.lucene.analysis包,在下文中一共展示了PerFieldAnalyzerWrapper类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testAnalyzer
import org.apache.lucene.analysis.PerFieldAnalyzerWrapper; //导入依赖的package包/类
// Demonstrates why a per-field analyzer matters: StandardAnalyzer splits a
// path-like value ("/philosophy/eastern") into separate terms, while routing
// the "category" field to WhitespaceAnalyzer keeps the value intact.
public void testAnalyzer() throws Exception {
Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_41);
String queryString = "category:/philosophy/eastern";
// First parse with StandardAnalyzer only: the slash-delimited path is
// tokenized into a phrase query — not what we want for exact categories.
Query query = new QueryParser(Version.LUCENE_41,
"contents",
analyzer).parse(queryString);
assertEquals("path got split, yikes!",
"category:\"philosophy eastern\"",
query.toString("contents"));
// Now wrap the default analyzer and override only the "category" field so
// its value is treated as whitespace-delimited (the path stays one token).
PerFieldAnalyzerWrapper perFieldAnalyzer =
new PerFieldAnalyzerWrapper(analyzer);
perFieldAnalyzer.addAnalyzer("category",
new WhitespaceAnalyzer(Version.LUCENE_41));
query = new QueryParser(Version.LUCENE_41,
"contents",
perFieldAnalyzer).parse(queryString);
assertEquals("leave category field alone",
"category:/philosophy/eastern",
query.toString("contents"));
}
示例2: createAnalyzer
import org.apache.lucene.analysis.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Builds the index analyzer: keyword (verbatim) analysis by default, with
 * whitespace tokenization for the identifier fields and a lower-casing
 * whitespace analyzer for the case-insensitive identifier field.
 *
 * @return the configured per-field analyzer
 */
public static Analyzer createAnalyzer() {
    final PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new KeywordAnalyzer());
    wrapper.addAnalyzer(DocumentUtil.FIELD_CASE_INSENSITIVE_FEATURE_IDENTS, new DocumentUtil.LCWhitespaceAnalyzer());
    wrapper.addAnalyzer(DocumentUtil.FIELD_FEATURE_IDENTS, new WhitespaceAnalyzer());
    wrapper.addAnalyzer(DocumentUtil.FIELD_IDENTS, new WhitespaceAnalyzer());
    return wrapper;
}
示例3: getServerAnalyzer
import org.apache.lucene.analysis.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Analyzer for server documents: ngram tokenization by default, with
 * exact-match (keyword) handling for atomic fields such as ids and sizes.
 *
 * @return the configured per-field analyzer
 */
private Analyzer getServerAnalyzer() {
    PerFieldAnalyzerWrapper wrapper =
            new PerFieldAnalyzerWrapper(new NGramAnalyzer(min_ngram, max_ngram));
    // These fields hold atomic values and must not be split into ngrams.
    for (String field : new String[] {
            "checkin", "registered", "ram", "swap", "cpuMHz", "cpuNumberOfCpus"}) {
        wrapper.addAnalyzer(field, new KeywordAnalyzer());
    }
    return wrapper;
}
示例4: getErrataAnalyzer
import org.apache.lucene.analysis.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Analyzer for errata documents: ngram tokenization by default, exact match
 * for the advisory name, and standard full-text analysis for prose fields.
 *
 * @return the configured per-field analyzer
 */
private Analyzer getErrataAnalyzer() {
    PerFieldAnalyzerWrapper wrapper =
            new PerFieldAnalyzerWrapper(new NGramAnalyzer(min_ngram, max_ngram));
    // Advisory names (e.g. "RHSA-2012:0001") must match verbatim.
    wrapper.addAnalyzer("advisoryName", new KeywordAnalyzer());
    // Free-text fields get normal word-level tokenization.
    for (String field : new String[] {"synopsis", "description", "topic", "solution"}) {
        wrapper.addAnalyzer(field, new StandardAnalyzer());
    }
    return wrapper;
}
示例5: getSnapshotTagAnalyzer
import org.apache.lucene.analysis.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Analyzer for snapshot-tag documents: ngram tokenization by default, with
 * exact-match (keyword) handling for identifier and timestamp fields.
 *
 * @return the configured per-field analyzer
 */
private Analyzer getSnapshotTagAnalyzer() {
    PerFieldAnalyzerWrapper wrapper =
            new PerFieldAnalyzerWrapper(new NGramAnalyzer(min_ngram, max_ngram));
    // Ids and dates are atomic values; never split them into ngrams.
    for (String field : new String[] {
            "id", "snapshotId", "orgId", "serverId",
            "tagNameId", "created", "modified"}) {
        wrapper.addAnalyzer(field, new KeywordAnalyzer());
    }
    return wrapper;
}
示例6: getHardwareDeviceAnalyzer
import org.apache.lucene.analysis.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Analyzer for hardware-device documents: ngram tokenization by default,
 * with exact-match (keyword) handling for identifier fields.
 *
 * @return the configured per-field analyzer
 */
private Analyzer getHardwareDeviceAnalyzer() {
    PerFieldAnalyzerWrapper wrapper =
            new PerFieldAnalyzerWrapper(new NGramAnalyzer(min_ngram, max_ngram));
    // Identifier fields are atomic; keep them as single tokens.
    for (String field : new String[] {"id", "serverId", "pciType"}) {
        wrapper.addAnalyzer(field, new KeywordAnalyzer());
    }
    return wrapper;
}
示例7: getServerCustomInfoAnalyzer
import org.apache.lucene.analysis.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Analyzer for server custom-info documents: ngram tokenization by default,
 * with exact-match (keyword) handling for id, timestamp and author fields.
 *
 * @return the configured per-field analyzer
 */
private Analyzer getServerCustomInfoAnalyzer() {
    PerFieldAnalyzerWrapper wrapper =
            new PerFieldAnalyzerWrapper(new NGramAnalyzer(min_ngram, max_ngram));
    // These fields hold atomic values; never split them into ngrams.
    for (String field : new String[] {
            "id", "serverId", "created", "modified",
            "createdBy", "lastModifiedBy"}) {
        wrapper.addAnalyzer(field, new KeywordAnalyzer());
    }
    return wrapper;
}
示例8: getDefaultAnalyzer
import org.apache.lucene.analysis.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Fallback analyzer (used e.g. for package documents): ngram tokenization
 * by default, with exact-match (keyword) handling for version-like fields.
 *
 * @return the configured per-field analyzer
 */
private Analyzer getDefaultAnalyzer() {
    PerFieldAnalyzerWrapper wrapper =
            new PerFieldAnalyzerWrapper(new NGramAnalyzer(min_ngram, max_ngram));
    // Versions, arches and filenames must match verbatim.
    for (String field : new String[] {
            "id", "arch", "epoch", "version", "release", "filename"}) {
        wrapper.addAnalyzer(field, new KeywordAnalyzer());
    }
    return wrapper;
}
示例9: createPerFieldAnalyzer
import org.apache.lucene.analysis.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Builds the {@code analyzers} wrapper: one language-specific analyzer per
 * topic-content field, falling back to StandardAnalyzer for all other fields.
 * Languages for which no analyzer is available are simply skipped.
 */
public void createPerFieldAnalyzer(){
    Map<String, Analyzer> perFieldMap = new HashMap<String, Analyzer>();
    for (Language language : languages) {
        String fieldName = MultiLingualAbstractOTDFLucDocCreator.Fields.getLanguageTopicContentField(language);
        Analyzer languageAnalyzer = getAnalyzer(language);
        if (languageAnalyzer != null) {
            perFieldMap.put(fieldName, languageAnalyzer);
        }
    }
    analyzers = new PerFieldAnalyzerWrapper(new StandardAnalyzer(Version.LUCENE_36), perFieldMap);
}
示例10: createPerFieldAnalyzer
import org.apache.lucene.analysis.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Builds the {@code analyzers} wrapper for the given set of languages: one
 * language-specific analyzer per article topic-content field, falling back
 * to StandardAnalyzer elsewhere. Languages without an analyzer are skipped.
 *
 * @param languagesDone languages whose content fields need analyzers
 */
public void createPerFieldAnalyzer(Set<Language> languagesDone){
    Map<String, Analyzer> perFieldMap = new HashMap<String, Analyzer>();
    for (Language language : languagesDone) {
        String fieldName = MultiLingualArticleOTDFLucDocCreator.Fields.getLanguageTopicContentField(language);
        Analyzer languageAnalyzer = getAnalyzer(language);
        if (languageAnalyzer != null) {
            perFieldMap.put(fieldName, languageAnalyzer);
        }
    }
    analyzers = new PerFieldAnalyzerWrapper(new StandardAnalyzer(Version.LUCENE_36), perFieldMap);
}
示例11: testPerFieldAnalyzer
import org.apache.lucene.analysis.PerFieldAnalyzerWrapper; //导入依赖的package包/类
// Verifies that wrapping SimpleAnalyzer and overriding the "partnum" field
// with KeywordAnalyzer keeps part numbers (e.g. "Q36") as single, unmodified
// terms while other fields are still lower-cased and tokenized.
public void testPerFieldAnalyzer() throws Exception {
PerFieldAnalyzerWrapper analyzer = new PerFieldAnalyzerWrapper(
new SimpleAnalyzer());
analyzer.addAnalyzer("partnum", new KeywordAnalyzer());
Query query = new QueryParser(Version.LUCENE_41,
"description", analyzer).parse(
"partnum:Q36 AND SPACE");
// "Q36" survives verbatim; "SPACE" (default field) is lower-cased.
assertEquals("Q36 kept as-is",
"+partnum:Q36 +space", query.toString("description"));
// NOTE(review): relies on the test fixture's `searcher` having indexed
// exactly one matching document.
assertEquals("doc found!", 1, TestUtil.hitCount(searcher, query));
}
示例12: convertCanonicOutput
import org.apache.lucene.analysis.PerFieldAnalyzerWrapper; //导入依赖的package包/类
// Flattens one CanonicOutput into Lucene fields on the given document:
// configuration/revision/run ids, any annotations, the distance form of the
// formula, and one "co.element" field per element-count pair.
private void convertCanonicOutput(CanonicOutput canonicOutput,Document document,LuceneOptions luceneOptions)
{
// Similarity forms (distance form + per-element counts) derived from the
// canonic output by the shared converter.
SimilarityForms sf = SimilarityFormConverterWrapper.getConverter().process(canonicOutput);
document.add(newField("co.configuration.id",
canonicOutput.getApplicationRun().getConfiguration().getId().toString(),
luceneOptions,
new StandardAnalyzer(Version.LUCENE_36)
)
);
document.add(newField("co.revision.id",
canonicOutput.getApplicationRun().getRevision().getId().toString(),
luceneOptions,
new StandardAnalyzer(Version.LUCENE_36)
)
);
document.add(newField("co.applicationrun.id",
canonicOutput.getApplicationRun().getId().toString(),
luceneOptions,
new StandardAnalyzer(Version.LUCENE_36)
)
);
// Annotations are optional; index each one as its own field instance.
if(canonicOutput.getAnnotations() != null && !canonicOutput.getAnnotations().isEmpty())
{
for(Annotation a : canonicOutput.getAnnotations())
{
document.add(newField("co.annotation", a.getAnnotationContent(), luceneOptions, new StandardAnalyzer(Version.LUCENE_36)));
}
}
// mathml is converted into Single String representation
// which is stored in co.distanceForm
document.add(newField("co.distanceForm",sf.getDistanceForm(),luceneOptions,null));
// Element counts are indexed as exact "name=count" tokens, so use a
// keyword-based wrapper rather than a tokenizing analyzer.
PerFieldAnalyzerWrapper keywordAnalyzer = new PerFieldAnalyzerWrapper(new KeywordAnalyzer());
for(String s : sf.getCountForm().keySet())
{
document.add(newField("co.element", s+"="+sf.getCountForm().get(s), luceneOptions, keywordAnalyzer));
}
logger.info("Canonic output ["+canonicOutput.getId()+"] indexed.");
}
示例13: build
import org.apache.lucene.analysis.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Builds the Lucene index from the default collection, either creating it
 * from scratch or appending to an existing one. Optionally maintains a
 * second, LSI-specific index in parallel.
 *
 * @param operation the operation to perform: MAKE or ADD
 * @return the number of files indexed
 * @throws IndexException if an I/O error occurs while building the index
 */
private long build(int operation) throws IndexException {
long indexedFiles = 0;
String message = "Lucene index will be created at [" + this.indexPath + "]";
OutputMonitor.printLine(message, OutputMonitor.INFORMATION_MESSAGE);
this.notifyTaskProgress(INFORMATION_MESSAGE, message);
// Start indexing.
try {
if (safeToBuildIndex(this.indexPath, operation)) {
setStartTimeOfIndexation(new Date());
// this.analyzer = new NGramAnalyzer();
//TODO I changed theses lines 2012-11-12
// this.setFieldAnalyzer(new PerFieldAnalyzerWrapper(new NGramAnalyzer()));
// this.getFieldAnalyzer().addAnalyzer(getDocumentField(FIELD_CODE_ALL_COMMENTS), new StopStemAnalyzer());
if (this.appendIndex) {
// Append mode: open the existing index (create == false) and add new docs.
this.writer = new IndexWriter(FSDirectory.open(this.indexPath), this.getFieldAnalyzer(), false, IndexWriter.MaxFieldLength.UNLIMITED);
if (applyLSI) {
this.writerLSI = new IndexWriter(FSDirectory.open(this.indexLSIPath), new PerFieldAnalyzerWrapper(new StopStemAnalyzer()), false, IndexWriter.MaxFieldLength.UNLIMITED);
}
// ("number "+writer.getReader().maxDoc());
} else {
// Create mode: overwrite any existing index (create == true).
this.writer = new IndexWriter(FSDirectory.open(this.indexPath), this.getFieldAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
if (applyLSI) {
this.writerLSI = new IndexWriter(FSDirectory.open(this.indexLSIPath), new PerFieldAnalyzerWrapper(new StopStemAnalyzer()), true, IndexWriter.MaxFieldLength.UNLIMITED);
}
}
indexedFiles = indexDocs(this.writer, this.writerLSI, this.collectionPath, operation);
message = "Optimizing...";
OutputMonitor.printLine(message, OutputMonitor.INFORMATION_MESSAGE);
this.notifyTaskProgress(INFORMATION_MESSAGE, message);
// Merge segments and release the writers (legacy Lucene optimize/close cycle).
this.writer.optimize();
this.writer.close();
if (applyLSI) {
this.writerLSI.optimize();
this.writerLSI.close();
}
setEndTimeOfIndexation(new Date());
message = "Indexation Time " + this.getIndexationTime() + " milliseconds.";
OutputMonitor.printLine(message, OutputMonitor.INFORMATION_MESSAGE);
this.notifyTaskProgress(INFORMATION_MESSAGE, message);
}
} catch (IOException e) {
message = " caught a " + e.getClass() + "\n with message: " + e.getMessage() + ".";
this.notifyTaskProgress(ERROR_MESSAGE, message);
throw new IndexException(message);
}
initLSIManager(); // Initialize the LSI matrix.
return indexedFiles;
}
示例14: getFieldAnalyzer
import org.apache.lucene.analysis.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Returns the per-field analyzer wrapper used when writing the index.
 *
 * @return the configured field analyzer
 */
public PerFieldAnalyzerWrapper getFieldAnalyzer() {
    return this.fieldAnalyzer;
}
示例15: setFieldAnalyzer
import org.apache.lucene.analysis.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
* @param fieldAnalyzer the fieldAnalyzer to set
*/
public void setFieldAnalyzer(PerFieldAnalyzerWrapper fieldAnalyzer) {
this.fieldAnalyzer = fieldAnalyzer;
}