本文整理汇总了Java中org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper类的典型用法代码示例。如果您正苦于以下问题:Java PerFieldAnalyzerWrapper类的具体用法?Java PerFieldAnalyzerWrapper怎么用?Java PerFieldAnalyzerWrapper使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
PerFieldAnalyzerWrapper类属于org.apache.lucene.analysis.miscellaneous包,在下文中一共展示了PerFieldAnalyzerWrapper类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testBuildWordScorer
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Test the WordScorer emitted by the smoothing model.
 *
 * Builds a tiny in-memory index containing a single document with a "field"
 * field, then asks the model under test for a WordScorer over that field and
 * checks it with assertWordScorer.
 */
public void testBuildWordScorer() throws IOException {
    SmoothingModel testModel = createTestModel();

    // Whitespace tokenization for "field" (and as the fallback analyzer).
    Map<String, Analyzer> mapping = new HashMap<>();
    mapping.put("field", new WhitespaceAnalyzer());
    PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(), mapping);

    IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(wrapper));
    try {
        Document doc = new Document();
        doc.add(new Field("field", "someText", TextField.TYPE_NOT_STORED));
        writer.addDocument(doc);

        // Near-real-time reader: sees the document without an explicit commit.
        DirectoryReader ir = DirectoryReader.open(writer);
        try {
            WordScorer wordScorer = testModel.buildWordScorerFactory().newScorer(ir, MultiFields.getTerms(ir, "field"), "field", 0.9d,
                    BytesRefs.toBytesRef(" "));
            assertWordScorer(wordScorer, testModel);
        } finally {
            // Release index resources even when the assertion fails;
            // the original leaked both reader and writer.
            ir.close();
        }
    } finally {
        writer.close();
    }
}
示例2: KrillIndex
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Constructs a new KrillIndex bound to a persistant index.
 *
 * @param directory
 *            A {@link Directory} pointing to an index
 * @throws IOException
 */
public KrillIndex (Directory directory) throws IOException {
    this.directory = directory;

    // Fields holding verbatim identifiers are analyzed as single keywords;
    // everything else falls back to the TextAnalyzer.
    // TODO: Should probably not be here - make configurable
    Map<String, Analyzer> analyzerPerField = new HashMap<String, Analyzer>();
    for (String keywordField : new String[] { "textClass", "keywords", "foundries" }) {
        analyzerPerField.put(keywordField, new KeywordAnalyzer());
    }
    PerFieldAnalyzerWrapper analyzer =
            new PerFieldAnalyzerWrapper(new TextAnalyzer(), analyzerPerField);

    // Create configuration with base analyzer
    this.config = new IndexWriterConfig(analyzer);
}
示例3: failureToCreateAnIndexShouldNotLeaveConfigurationBehind
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
// Verifies that a failed index creation does not persist any index
// configuration: creating a node index with an analyzer class that cannot be
// instantiated must throw, and afterwards the IndexConfigStore must have no
// entry for that index name.
@Test
public void failureToCreateAnIndexShouldNotLeaveConfigurationBehind() throws Exception
{
    // WHEN
    try
    {
        // PerFieldAnalyzerWrapper is invalid since it has no public no-arg constructor
        nodeIndex( stringMap( "analyzer", PerFieldAnalyzerWrapper.class.getName() ) );
        fail( "Should have failed" );
    }
    catch ( RuntimeException e )
    {
        // The failure message is expected to name the offending analyzer class.
        assertThat( e.getMessage(), CoreMatchers.containsString( PerFieldAnalyzerWrapper.class.getName() ) );
    }
    // THEN - assert that there's no index config about this index left behind
    assertFalse( "There should be no index config for index '" + currentIndexName() + "' left behind",
            ((GraphDatabaseAPI)graphDb).getDependencyResolver().resolveDependency( IndexConfigStore.class ).has(
                    Node.class, currentIndexName() ) );
}
示例4: doAddOrUpdateDocument
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Adds a new document to the knowledge base, or updates an existing one.
 *
 * Only the FIRST document of the request's document list is processed here
 * — NOTE(review): confirm callers never send more than one document through
 * this path; additional entries are silently ignored.
 *
 * @param request enrichment request carrying the document and target KB URI
 * @param mod     the kind of knowledge-base modification to perform
 */
private void doAddOrUpdateDocument(final KBEnrichmentRequest request,
        final KBModifications mod) {
    final HashMap<String, String> hash = new HashMap<String, String>();
    final List<DocumentToProcess> docsToProcess = request.getDocList();
    final DocumentToProcess doc = docsToProcess.get(0);
    final List<EntryToProcess> list = doc.getEntryList();
    for (final EntryToProcess pro : list) {
        hash.put(pro.getFieldName(), pro.getValue());
    }

    // "Mainlink" holds an identifier and must not be tokenized like text.
    Map<String, Analyzer> analyzerPerField = new HashMap<String, Analyzer>();
    analyzerPerField.put("Mainlink", new DoserIDAnalyzer());
    PerFieldAnalyzerWrapper aWrapper = new PerFieldAnalyzerWrapper(
            new DoserStandardAnalyzer(), analyzerPerField);

    final NewDocumentOrUpdateOperator operator = new NewDocumentOrUpdateOperator(
            request.getKburi(), aWrapper, doc.getKey(), hash,
            request.getPrimaryKeyField(), mod);
    try {
        KnowledgebaseModification.getInstance()
                .processNewKnowledgeOperation(operator);
    } catch (final ModifyKnowledgeBaseException e) {
        // Pass the throwable itself so the full stack trace is logged;
        // error(e.getStackTrace()) only printed the array's toString().
        Logger.getRootLogger().error("ModifyKnowledgeBaseException", e);
    }
}
示例5: getKEAAnalyzer
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Builds an analyzer that applies an n-gram-specific KEAAnalyzer to the
 * 1-, 2- and 3-gram variants of the given field, falling back to a
 * StandardAnalyzer for all other fields.
 *
 * @param fieldName base name of the field to analyze
 * @return a per-field analyzer wrapper configured for KEA indexing
 */
public static Analyzer getKEAAnalyzer(String fieldName){
    Map<String, Analyzer> amap = new HashMap<>();
    for (int n = 1; n <= 3; n++) {
        amap.put(Commons.getFieldName(fieldName, n), new KEAAnalyzer(n));
    }
    return new PerFieldAnalyzerWrapper(new StandardAnalyzer(), amap);
}
示例6: doAddDocument
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Adds all documents of the request to the knowledge base in one batch.
 *
 * @param request enrichment request carrying the documents and target KB URI
 */
private void doAddDocument(final KBEnrichmentRequest request) {
    // Flatten each document into a field-name -> value map.
    final List<HashMap<String, String>> list = new LinkedList<HashMap<String, String>>();
    for (final DocumentToProcess doc : request.getDocList()) {
        final HashMap<String, String> fields = new HashMap<String, String>();
        for (final EntryToProcess entry : doc.getEntryList()) {
            fields.put(entry.getFieldName(), entry.getValue());
        }
        list.add(fields);
    }

    // Identifier fields are analyzed verbatim, everything else as text.
    Map<String, Analyzer> analyzerPerField = new HashMap<String, Analyzer>();
    analyzerPerField.put("Mainlink", new DoserIDAnalyzer());
    analyzerPerField.put("ID", new DoserIDAnalyzer());
    PerFieldAnalyzerWrapper aWrapper = new PerFieldAnalyzerWrapper(
            new DoserStandardAnalyzer(), analyzerPerField);

    final AddNewDocumentsOperator operator = new AddNewDocumentsOperator(
            request.getKburi(), aWrapper, list,
            request.getPrimaryKeyField());
    try {
        KnowledgebaseModification.getInstance()
                .processNewKnowledgeOperation(operator);
    } catch (final ModifyKnowledgeBaseException e) {
        Logger.getRootLogger().error("ModifyKnowledgeBaseException", e);
    }
}
示例7: doUpdateDocument
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Updates existing knowledge-base entries: every document in the request is
 * flattened into a field-name -> value map keyed by the document's key.
 *
 * @param request enrichment request carrying the documents and target KB URI
 * @param mod     the kind of knowledge-base modification to perform
 */
private void doUpdateDocument(final KBEnrichmentRequest request,
        final KBModifications mod) {
    final HashMap<String, HashMap<String, String>> hash = new HashMap<String, HashMap<String, String>>();
    final List<DocumentToProcess> docs = request.getDocList();
    for (final DocumentToProcess doc : docs) {
        final HashMap<String, String> map = new HashMap<String, String>();
        final List<EntryToProcess> entries = doc.getEntryList();
        for (final EntryToProcess entry : entries) {
            map.put(entry.getFieldName(), entry.getValue());
        }
        hash.put(doc.getKey(), map);
    }

    // "Mainlink" holds an identifier and must not be tokenized like text.
    Map<String, Analyzer> analyzerPerField = new HashMap<String, Analyzer>();
    analyzerPerField.put("Mainlink", new DoserIDAnalyzer());
    PerFieldAnalyzerWrapper aWrapper = new PerFieldAnalyzerWrapper(
            new DoserStandardAnalyzer(), analyzerPerField);

    final UpdateKnowledgeBaseEntryOperator operator = new UpdateKnowledgeBaseEntryOperator(
            request.getKburi(), aWrapper, hash,
            request.getPrimaryKeyField(), mod);
    try {
        KnowledgebaseModification.getInstance()
                .processNewKnowledgeOperation(operator);
    } catch (final ModifyKnowledgeBaseException e) {
        // Pass the throwable itself so the full stack trace is logged;
        // error(e.getStackTrace()) only printed the array's toString().
        Logger.getRootLogger().error("ModifyKnowledgeBaseException", e);
    }
}
示例8: CAnalyzer
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Creates the composite analyzer: option names and option descriptions get
 * specialized analyzers, every other field uses a StandardAnalyzer.
 *
 * @param version the Lucene compatibility version
 */
public CAnalyzer(Version version) {
    matchVersion = version;
    Map<String, Analyzer> fieldAnalyzers = new HashMap<String, Analyzer>();
    fieldAnalyzers.put("op_name", new OptionNameAnalyzer(matchVersion)); // option name
    fieldAnalyzers.put("op_desc", new EnglishAnalyzer(matchVersion));    // annotated option description
    analyzer = new PerFieldAnalyzerWrapper(new StandardAnalyzer(matchVersion), fieldAnalyzers);
}
示例9: createAnalyzer
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Assembles the indexing analyzer: annotator-specific analyzers for the
 * color and animal fields, whitespace tokenization for everything else.
 */
@SuppressWarnings("resource")
private static Analyzer createAnalyzer() {
    final Map<String, Analyzer> fieldAnalyzers = ImmutableMap.<String, Analyzer> of(
            COLOR_FIELD, new ColorAnnotatorAnalyzer(),
            ANIMAL_FIELD, new AnimalAnnotatorAnalyzer());
    return new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(), fieldAnalyzers);
}
示例10: VocabularyIndexAnalyzer
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Wires up the per-field analyzers for vocabulary indexing: every searchable
 * property gets a TermAnalyzer plus an "exact" companion field analyzed with
 * an ExactAnalyzer; unmatched fields fall back to a KeywordAnalyzer.
 */
public VocabularyIndexAnalyzer() throws IOException, URISyntaxException {
    super(NO_REUSE_STRATEGY);
    Map<String, Analyzer> fieldAnalyzers = new HashMap<>();
    String[] termFields = { NodeProperties.LABEL, Concept.SYNONYM,
            Concept.ABREVIATION, Concept.ACRONYM };
    for (String field : termFields) {
        fieldAnalyzers.put(field, new TermAnalyzer());
        fieldAnalyzers.put(field + LuceneUtils.EXACT_SUFFIX, new ExactAnalyzer());
    }
    analyzer = new PerFieldAnalyzerWrapper(new KeywordAnalyzer(), fieldAnalyzers);
}
示例11: VocabularyQueryAnalyzer
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
public VocabularyQueryAnalyzer() {
Map<String, Analyzer> fieldAnalyzers = new HashMap<>();
fieldAnalyzers.put(NodeProperties.LABEL, new TermAnalyzer());
fieldAnalyzers.put(NodeProperties.LABEL + LuceneUtils.EXACT_SUFFIX, new ExactAnalyzer());
fieldAnalyzers.put(Concept.SYNONYM, new TermAnalyzer());
fieldAnalyzers.put(Concept.SYNONYM + LuceneUtils.EXACT_SUFFIX, new ExactAnalyzer());
fieldAnalyzers.put(Concept.ABREVIATION, new TermAnalyzer());
fieldAnalyzers.put(Concept.ABREVIATION + LuceneUtils.EXACT_SUFFIX, new ExactAnalyzer());
fieldAnalyzers.put(Concept.ACRONYM, new TermAnalyzer());
fieldAnalyzers.put(Concept.ACRONYM + LuceneUtils.EXACT_SUFFIX, new ExactAnalyzer());
analyzer = new PerFieldAnalyzerWrapper(new KeywordAnalyzer(), fieldAnalyzers);
}
示例12: beforeClass
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Initializes the shared analyzers once for the whole test class: an English
 * and a Spanish analyzer for their respective fields, with Spanish doubling
 * as the fallback for any other field.
 */
@BeforeClass
public static void beforeClass() {
    englishAnalyzer = new EnglishAnalyzer();
    spanishAnalyzer = new SpanishAnalyzer();
    Map<String, Analyzer> fieldAnalyzers = new HashMap<>();
    fieldAnalyzers.put("english", englishAnalyzer);
    fieldAnalyzers.put("spanish", spanishAnalyzer);
    perFieldAnalyzer = new PerFieldAnalyzerWrapper(spanishAnalyzer, fieldAnalyzers);
}
示例13: createAnalyzer
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Builds the indexing analyzer: move text is analyzed with a
 * MoveTextAnalyzer, while any other field is treated as a single keyword.
 */
@SuppressWarnings("resource")
private Analyzer createAnalyzer() {
    Map<String, Analyzer> perField = new HashMap<>();
    perField.put(FIELD_NAME, new MoveTextAnalyzer());
    return new PerFieldAnalyzerWrapper(new KeywordAnalyzer(), perField);
}
示例14: createIndex
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Builds the triple index under idxDirectory from the given TTL/TSV files.
 * URI-valued fields are analyzed with a SimpleAnalyzer, literal values with
 * a LiteralAnalyzer. On success the writer is closed and a reader is opened
 * over the finished index; any failure is logged and swallowed.
 *
 * @param files        input files to index (.ttl and .tsv are recognized)
 * @param idxDirectory path of the index directory (created if missing)
 * @param baseURI      base URI used when parsing TTL files
 */
public void createIndex(List<File> files, String idxDirectory, String baseURI) {
    try {
        urlAnalyzer = new SimpleAnalyzer(LUCENE_VERSION);
        literalAnalyzer = new LiteralAnalyzer(LUCENE_VERSION);

        Map<String, Analyzer> fieldAnalyzers = new HashMap<String, Analyzer>();
        fieldAnalyzers.put(TripleIndex.FIELD_NAME_SUBJECT, urlAnalyzer);
        fieldAnalyzers.put(TripleIndex.FIELD_NAME_PREDICATE, urlAnalyzer);
        fieldAnalyzers.put(TripleIndex.FIELD_NAME_OBJECT_URI, urlAnalyzer);
        fieldAnalyzers.put(TripleIndex.FIELD_NAME_OBJECT_LITERAL, literalAnalyzer);
        PerFieldAnalyzerWrapper perFieldAnalyzer = new PerFieldAnalyzerWrapper(urlAnalyzer, fieldAnalyzers);

        // NOTE(review): mkdir() ignores failure and does not create parents;
        // presumably idxDirectory's parent always exists — confirm.
        File indexDirectory = new File(idxDirectory);
        indexDirectory.mkdir();
        directory = new MMapDirectory(indexDirectory);
        IndexWriterConfig config = new IndexWriterConfig(LUCENE_VERSION, perFieldAnalyzer);
        iwriter = new IndexWriter(directory, config);
        iwriter.commit();

        // Commit after every file so partial progress survives a later failure.
        for (File file : files) {
            String extension = FileUtil.getFileExtension(file.getName());
            if (extension.equals(TTL)) {
                indexTTLFile(file, baseURI);
            }
            if (extension.equals(TSV)) {
                indexTSVFile(file);
            }
            iwriter.commit();
        }
        iwriter.close();
        ireader = DirectoryReader.open(directory);
    } catch (Exception e) {
        log.error("Error while creating TripleIndex.", e);
    }
}
示例15: createIndex
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Builds the surface-form index under idxDirectory from the given TTL files.
 * The URI field is analyzed with a SimpleAnalyzer; surface forms, counts and
 * context are analyzed with a LiteralAnalyzer. Failures are logged and
 * swallowed.
 *
 * NOTE(review): unlike the sibling createIndex, the writer is left open and
 * no reader is opened here — presumably the caller keeps indexing; confirm.
 *
 * @param files        input files to index (.ttl is recognized)
 * @param idxDirectory path of the index directory (created if missing)
 * @param baseURI      base URI used when parsing TTL files
 */
public void createIndex(List<File> files, String idxDirectory, String baseURI) {
    try {
        urlAnalyzer = new SimpleAnalyzer(LUCENE_VERSION);
        literalAnalyzer = new LiteralAnalyzer(LUCENE_VERSION);

        Map<String, Analyzer> fieldAnalyzers = new HashMap<String, Analyzer>();
        fieldAnalyzers.put(FIELD_NAME_URI, urlAnalyzer);
        fieldAnalyzers.put(FIELD_NAME_SURFACE_FORM, literalAnalyzer);
        fieldAnalyzers.put(FIELD_NAME_URI_COUNT, literalAnalyzer);
        fieldAnalyzers.put(FIELD_NAME_CONTEXT, literalAnalyzer);
        PerFieldAnalyzerWrapper perFieldAnalyzer = new PerFieldAnalyzerWrapper(urlAnalyzer, fieldAnalyzers);

        File indexDirectory = new File(idxDirectory);
        indexDirectory.mkdir();
        directory = new MMapDirectory(indexDirectory);
        IndexWriterConfig config = new IndexWriterConfig(LUCENE_VERSION, perFieldAnalyzer);
        iwriter = new IndexWriter(directory, config);
        iwriter.commit();

        // Commit after every file so partial progress survives a later failure.
        for (File file : files) {
            String extension = FileUtil.getFileExtension(file.getName());
            if (extension.equals(TTL)) {
                indexTTLFile(file, baseURI);
            }
            iwriter.commit();
        }
    } catch (Exception e) {
        log.error("Error while creating TripleIndex.", e);
    }
}