当前位置: 首页>>代码示例>>Java>>正文


Java PerFieldAnalyzerWrapper类代码示例

本文整理汇总了Java中org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper的典型用法代码示例。如果您正苦于以下问题:Java PerFieldAnalyzerWrapper类的具体用法?Java PerFieldAnalyzerWrapper怎么用?Java PerFieldAnalyzerWrapper使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。


PerFieldAnalyzerWrapper类属于org.apache.lucene.analysis.miscellaneous包,在下文中一共展示了PerFieldAnalyzerWrapper类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: testBuildWordScorer

import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Test the WordScorer emitted by the smoothing model.
 *
 * Indexes a single whitespace-analyzed document, then checks that the
 * scorer produced by the model's factory matches the model.
 */
public void testBuildWordScorer() throws IOException {
    SmoothingModel testModel = createTestModel();
    Map<String, Analyzer> mapping = new HashMap<>();
    mapping.put("field", new WhitespaceAnalyzer());
    PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(), mapping);
    // try-with-resources: the original leaked both the writer and the reader
    try (IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(wrapper))) {
        Document doc = new Document();
        doc.add(new Field("field", "someText", TextField.TYPE_NOT_STORED));
        writer.addDocument(doc);
        // open an NRT reader on the writer so the uncommitted doc is visible
        try (DirectoryReader ir = DirectoryReader.open(writer)) {
            WordScorer wordScorer = testModel.buildWordScorerFactory().newScorer(ir, MultiFields.getTerms(ir, "field"), "field", 0.9d,
                    BytesRefs.toBytesRef(" "));
            assertWordScorer(wordScorer, testModel);
        }
    }
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:19,代码来源:SmoothingModelTestCase.java

示例2: KrillIndex

import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Constructs a new KrillIndex bound to a persistent index.
 *
 * @param directory
 *            A {@link Directory} pointing to an index
 * @throws IOException
 */
public KrillIndex (Directory directory) throws IOException {
    this.directory = directory;

    // Keyword-style fields must not be tokenized; everything else
    // falls back to the TextAnalyzer.
    // TODO: Should probably not be here - make configurable
    Map<String, Analyzer> fieldAnalyzers = new HashMap<String, Analyzer>();
    for (String field : new String[] { "textClass", "keywords", "foundries" }) {
        fieldAnalyzers.put(field, new KeywordAnalyzer());
    }

    // Create configuration with the composite analyzer
    this.config = new IndexWriterConfig(
            new PerFieldAnalyzerWrapper(new TextAnalyzer(), fieldAnalyzers));
}
 
开发者ID:KorAP,项目名称:Krill,代码行数:23,代码来源:KrillIndex.java

示例3: failureToCreateAnIndexShouldNotLeaveConfigurationBehind

import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
@Test
public void failureToCreateAnIndexShouldNotLeaveConfigurationBehind() throws Exception
{
    // WHEN creating an index with an analyzer class that cannot be
    // instantiated (PerFieldAnalyzerWrapper has no public no-arg constructor)
    final String invalidAnalyzer = PerFieldAnalyzerWrapper.class.getName();
    try
    {
        nodeIndex( stringMap( "analyzer", invalidAnalyzer ) );
        fail( "Should have failed" );
    }
    catch ( RuntimeException expected )
    {
        // the error message should name the offending analyzer class
        assertThat( expected.getMessage(), CoreMatchers.containsString( invalidAnalyzer ) );
    }

    // THEN - assert that there's no index config about this index left behind
    assertFalse( "There should be no index config for index '" + currentIndexName() + "' left behind",
            ((GraphDatabaseAPI)graphDb).getDependencyResolver().resolveDependency( IndexConfigStore.class ).has(
                    Node.class, currentIndexName() ) );
}
 
开发者ID:neo4j-contrib,项目名称:neo4j-lucene5-index,代码行数:21,代码来源:TestLuceneIndex.java

示例4: doAddOrUpdateDocument

import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Adds a new document to the knowledge base or updates an existing one.
 * Only the first entry of the request's document list is processed.
 *
 * @param request carries the target KB URI, the primary key field and the
 *                field/value pairs of the document
 * @param mod the kind of modification to perform
 */
private void doAddOrUpdateDocument(final KBEnrichmentRequest request,
		final KBModifications mod) {
	final HashMap<String, String> hash = new HashMap<String, String>();
	final List<DocumentToProcess> docsToProcess = request.getDocList();
	final DocumentToProcess doc = docsToProcess.get(0);
	final List<EntryToProcess> list = doc.getEntryList();
	for (final EntryToProcess pro : list) {
		hash.put(pro.getFieldName(), pro.getValue());
	}

	// "Mainlink" holds identifiers and must keep its exact form
	Map<String, Analyzer> analyzerPerField = new HashMap<String, Analyzer>();
	analyzerPerField.put("Mainlink", new DoserIDAnalyzer());
	PerFieldAnalyzerWrapper aWrapper = new PerFieldAnalyzerWrapper(
			new DoserStandardAnalyzer(), analyzerPerField);

	final NewDocumentOrUpdateOperator operator = new NewDocumentOrUpdateOperator(
			request.getKburi(), aWrapper, doc.getKey(), hash,
			request.getPrimaryKeyField(), mod);

	try {
		KnowledgebaseModification.getInstance()
				.processNewKnowledgeOperation(operator);
	} catch (final ModifyKnowledgeBaseException e) {
		// Fix: log the exception itself (message + stack trace). The original
		// error(e.getStackTrace()) only logged the toString() of a
		// StackTraceElement[] array; matches sibling doAddDocument's style.
		Logger.getRootLogger().error("ModifyKnowledgeBaseException", e);
	}
}
 
开发者ID:quhfus,项目名称:DoSeR,代码行数:27,代码来源:KBEnrichmentService.java

示例5: getKEAAnalyzer

import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Builds the analyzer for KEA keyphrase fields: the n-gram sub-fields
 * (n = 1..3) each get a KEAAnalyzer of matching size; all other fields
 * fall back to the StandardAnalyzer.
 */
public static Analyzer getKEAAnalyzer(String fieldName){
  Map<String, Analyzer> fieldAnalyzers = new HashMap<>();
  for (int n = 1; n <= 3; n++) {
    fieldAnalyzers.put(Commons.getFieldName(fieldName, n), new KEAAnalyzer(n));
  }
  return new PerFieldAnalyzerWrapper(new StandardAnalyzer(), fieldAnalyzers);
}
 
开发者ID:kojisekig,项目名称:KEA-lucene,代码行数:8,代码来源:Commons.java

示例6: doAddDocument

import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Adds a batch of new documents to the knowledge base.
 *
 * @param request carries the target KB URI, the primary key field and the
 *                documents to add
 */
private void doAddDocument(final KBEnrichmentRequest request) {
	// Flatten every document into a field-name -> value map
	final List<HashMap<String, String>> documents = new LinkedList<HashMap<String, String>>();
	for (final DocumentToProcess doc : request.getDocList()) {
		final HashMap<String, String> fieldValues = new HashMap<String, String>();
		for (final EntryToProcess entry : doc.getEntryList()) {
			fieldValues.put(entry.getFieldName(), entry.getValue());
		}
		documents.add(fieldValues);
	}

	// Identifier fields keep their exact form; everything else is tokenized
	final Map<String, Analyzer> idFieldAnalyzers = new HashMap<String, Analyzer>();
	idFieldAnalyzers.put("Mainlink", new DoserIDAnalyzer());
	idFieldAnalyzers.put("ID", new DoserIDAnalyzer());
	final PerFieldAnalyzerWrapper wrappedAnalyzer = new PerFieldAnalyzerWrapper(
			new DoserStandardAnalyzer(), idFieldAnalyzers);

	final AddNewDocumentsOperator operator = new AddNewDocumentsOperator(
			request.getKburi(), wrappedAnalyzer, documents,
			request.getPrimaryKeyField());

	try {
		KnowledgebaseModification.getInstance()
				.processNewKnowledgeOperation(operator);
	} catch (final ModifyKnowledgeBaseException e) {
		Logger.getRootLogger().error("ModifyKnowledgeBaseException", e);
	}
}
 
开发者ID:quhfus,项目名称:DoSeR,代码行数:30,代码来源:KBEnrichmentService.java

示例7: doUpdateDocument

import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Updates a set of existing knowledge base entries.
 *
 * @param request carries the target KB URI, the primary key field and, per
 *                document key, the field/value pairs to update
 * @param mod the kind of modification to perform
 */
private void doUpdateDocument(final KBEnrichmentRequest request,
		final KBModifications mod) {
	// document key -> (field name -> new value)
	final HashMap<String, HashMap<String, String>> hash = new HashMap<String, HashMap<String, String>>();
	final List<DocumentToProcess> docs = request.getDocList();
	for (final DocumentToProcess doc : docs) {
		final HashMap<String, String> map = new HashMap<String, String>();
		final List<EntryToProcess> entries = doc.getEntryList();
		for (final EntryToProcess entry : entries) {
			map.put(entry.getFieldName(), entry.getValue());
		}
		hash.put(doc.getKey(), map);
	}

	// "Mainlink" holds identifiers and must keep its exact form
	Map<String, Analyzer> analyzerPerField = new HashMap<String, Analyzer>();
	analyzerPerField.put("Mainlink", new DoserIDAnalyzer());
	PerFieldAnalyzerWrapper aWrapper = new PerFieldAnalyzerWrapper(
			new DoserStandardAnalyzer(), analyzerPerField);

	final UpdateKnowledgeBaseEntryOperator operator = new UpdateKnowledgeBaseEntryOperator(
			request.getKburi(), aWrapper, hash,
			request.getPrimaryKeyField(), mod);

	try {
		KnowledgebaseModification.getInstance()
				.processNewKnowledgeOperation(operator);
	} catch (final ModifyKnowledgeBaseException e) {
		// Fix: log the exception itself (message + stack trace). The original
		// error(e.getStackTrace()) only logged the toString() of a
		// StackTraceElement[] array; matches sibling doAddDocument's style.
		Logger.getRootLogger().error("ModifyKnowledgeBaseException", e);
	}
}
 
开发者ID:quhfus,项目名称:DoSeR,代码行数:30,代码来源:KBEnrichmentService.java

示例8: CAnalyzer

import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Builds the composite analyzer: option names and option descriptions each
 * get a dedicated analyzer; all remaining fields use the StandardAnalyzer.
 *
 * @param version the Lucene compatibility version to analyze against
 */
public CAnalyzer(Version version) {
	matchVersion = version;

	Map<String, Analyzer> fieldAnalyzers = new HashMap<String, Analyzer>();
	// option names have their own tokenization rules
	fieldAnalyzers.put("op_name", new OptionNameAnalyzer(matchVersion));
	// annotated option descriptions are English prose
	fieldAnalyzers.put("op_desc", new EnglishAnalyzer(matchVersion));

	analyzer = new PerFieldAnalyzerWrapper(
			new StandardAnalyzer(matchVersion), fieldAnalyzers);
}
 
开发者ID:tianyin,项目名称:cox,代码行数:12,代码来源:CAnalyzer.java

示例9: createAnalyzer

import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Builds the per-field analyzer: the color and animal fields get their
 * annotator analyzers; every other field is whitespace-tokenized.
 */
@SuppressWarnings("resource")
private static Analyzer createAnalyzer() {
    final Map<String, Analyzer> annotatorAnalyzers = ImmutableMap.<String, Analyzer> of(
            COLOR_FIELD, new ColorAnnotatorAnalyzer(),
            ANIMAL_FIELD, new AnimalAnnotatorAnalyzer());
    return new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(), annotatorAnalyzers);
}
 
开发者ID:shaie,项目名称:lucenelab,代码行数:11,代码来源:AnnotatorTokenFilterExample.java

示例10: VocabularyIndexAnalyzer

import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Index-time analyzer: each searchable concept property gets a
 * token-based TermAnalyzer plus an untokenized "exact" variant keyed by
 * the EXACT_SUFFIX field name; everything else is treated as a keyword.
 */
public VocabularyIndexAnalyzer() throws IOException, URISyntaxException {
  super(NO_REUSE_STRATEGY);
  Map<String, Analyzer> fieldAnalyzers = new HashMap<>();
  for (String field : new String[] {NodeProperties.LABEL, Concept.SYNONYM,
      Concept.ABREVIATION, Concept.ACRONYM}) {
    fieldAnalyzers.put(field, new TermAnalyzer());
    fieldAnalyzers.put(field + LuceneUtils.EXACT_SUFFIX, new ExactAnalyzer());
  }
  analyzer = new PerFieldAnalyzerWrapper(new KeywordAnalyzer(), fieldAnalyzers);
}
 
开发者ID:SciGraph,项目名称:SciGraph,代码行数:14,代码来源:VocabularyIndexAnalyzer.java

示例11: VocabularyQueryAnalyzer

import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Query-time analyzer: mirrors the index-time per-field setup so query
 * terms are analyzed the same way the indexed values were — a
 * TermAnalyzer per concept property plus an ExactAnalyzer for the
 * EXACT_SUFFIX variants, with keywords as the fallback.
 */
public VocabularyQueryAnalyzer() {
  Map<String, Analyzer> fieldAnalyzers = new HashMap<>();
  for (String field : new String[] {NodeProperties.LABEL, Concept.SYNONYM,
      Concept.ABREVIATION, Concept.ACRONYM}) {
    fieldAnalyzers.put(field, new TermAnalyzer());
    fieldAnalyzers.put(field + LuceneUtils.EXACT_SUFFIX, new ExactAnalyzer());
  }
  analyzer = new PerFieldAnalyzerWrapper(new KeywordAnalyzer(), fieldAnalyzers);
}
 
开发者ID:SciGraph,项目名称:SciGraph,代码行数:13,代码来源:VocabularyQueryAnalyzer.java

示例12: beforeClass

import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/** Sets up the shared analyzers: English and Spanish per field, Spanish as the fallback. */
@BeforeClass
public static void beforeClass() {
    englishAnalyzer = new EnglishAnalyzer();
    spanishAnalyzer = new SpanishAnalyzer();
    Map<String, Analyzer> byField = new HashMap<>();
    byField.put("english", englishAnalyzer);
    byField.put("spanish", spanishAnalyzer);
    perFieldAnalyzer = new PerFieldAnalyzerWrapper(spanishAnalyzer, byField);
}
 
开发者ID:Stratio,项目名称:stratio-cassandra,代码行数:10,代码来源:AnalysisUtilsTest.java

示例13: createAnalyzer

import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Builds the analyzer: move text gets the MoveTextAnalyzer; all other
 * fields are indexed verbatim as keywords.
 */
@SuppressWarnings("resource")
private Analyzer createAnalyzer() {
    Map<String, Analyzer> perField = new HashMap<>();
    perField.put(FIELD_NAME, new MoveTextAnalyzer());
    return new PerFieldAnalyzerWrapper(new KeywordAnalyzer(), perField);
}
 
开发者ID:Elegie,项目名称:luchess,代码行数:8,代码来源:ProofOfConcept.java

示例14: createIndex

import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Builds the triple index from the given dump files and opens a reader on it.
 * URI-valued fields are analyzed with SimpleAnalyzer, literal objects with
 * the custom LiteralAnalyzer.
 *
 * @param files TTL and/or TSV dump files to index
 * @param idxDirectory path of the directory the index is written to
 * @param baseURI base URI used when parsing TTL files
 */
public void createIndex(List<File> files, String idxDirectory, String baseURI) {
	try {
		urlAnalyzer = new SimpleAnalyzer(LUCENE_VERSION);
		literalAnalyzer = new LiteralAnalyzer(LUCENE_VERSION);
		// URIs (subject/predicate/object-URI) keep URI tokenization;
		// literal objects get the literal-specific analysis
		Map<String, Analyzer> mapping = new HashMap<String, Analyzer>();
		mapping.put(TripleIndex.FIELD_NAME_SUBJECT, urlAnalyzer);
		mapping.put(TripleIndex.FIELD_NAME_PREDICATE, urlAnalyzer);
		mapping.put(TripleIndex.FIELD_NAME_OBJECT_URI, urlAnalyzer);
		mapping.put(TripleIndex.FIELD_NAME_OBJECT_LITERAL, literalAnalyzer);
		PerFieldAnalyzerWrapper perFieldAnalyzer = new PerFieldAnalyzerWrapper(urlAnalyzer, mapping);

		File indexDirectory = new File(idxDirectory);
		indexDirectory.mkdir();
		directory = new MMapDirectory(indexDirectory);
		IndexWriterConfig config = new IndexWriterConfig(LUCENE_VERSION, perFieldAnalyzer);
		iwriter = new IndexWriter(directory, config);
		iwriter.commit();
		// dispatch on file extension; commit after each file so partial
		// progress survives a failure in a later file
		for (File file : files) {
			String type = FileUtil.getFileExtension(file.getName());
			if (type.equals(TTL))
				indexTTLFile(file, baseURI);
			if (type.equals(TSV))
				indexTSVFile(file);
			iwriter.commit();
		}
		iwriter.close();
		ireader = DirectoryReader.open(directory);
	} catch (Exception e) {
		// NOTE(review): broad catch swallows the failure — callers cannot tell
		// indexing failed, and iwriter is left unclosed on the error path; confirm intended.
		log.error("Error while creating TripleIndex.", e);
	}
}
 
开发者ID:dice-group,项目名称:AGDISTIS,代码行数:32,代码来源:TripleIndexCreator.java

示例15: createIndex

import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; //导入依赖的package包/类
/**
 * Builds the context triple index from the given TTL dump files.
 * URI fields keep URI tokenization; surface forms, counts and context
 * text use the custom LiteralAnalyzer.
 *
 * @param files TTL dump files to index (other extensions are skipped)
 * @param idxDirectory path of the directory the index is written to
 * @param baseURI base URI used when parsing TTL files
 */
public void createIndex(List<File> files, String idxDirectory, String baseURI) {
	try {
		urlAnalyzer = new SimpleAnalyzer(LUCENE_VERSION);
		literalAnalyzer = new LiteralAnalyzer(LUCENE_VERSION);
		Map<String, Analyzer> mapping = new HashMap<String, Analyzer>();
		mapping.put(FIELD_NAME_URI, urlAnalyzer);
		mapping.put(FIELD_NAME_SURFACE_FORM, literalAnalyzer);
		mapping.put(FIELD_NAME_URI_COUNT, literalAnalyzer);
		mapping.put(FIELD_NAME_CONTEXT, literalAnalyzer);
		PerFieldAnalyzerWrapper perFieldAnalyzer = new PerFieldAnalyzerWrapper(urlAnalyzer, mapping);

		File indexDirectory = new File(idxDirectory);
		indexDirectory.mkdir();
		directory = new MMapDirectory(indexDirectory);
		IndexWriterConfig config = new IndexWriterConfig(LUCENE_VERSION, perFieldAnalyzer);
		iwriter = new IndexWriter(directory, config);
		iwriter.commit();
		// only TTL files are indexed here; commit after each file so partial
		// progress survives a failure in a later file
		for (File file : files) {
			String type = FileUtil.getFileExtension(file.getName());
			if (type.equals(TTL))
				indexTTLFile(file, baseURI);
			iwriter.commit();
		}
		// NOTE(review): unlike TripleIndexCreator, the writer is never closed and
		// no reader is opened here — presumably this context keeps iwriter open
		// for later incremental updates; confirm against the enclosing class.
	} catch (Exception e) {
		// NOTE(review): broad catch swallows the failure; confirm intended.
		log.error("Error while creating TripleIndex.", e);
	}
}
 
开发者ID:dice-group,项目名称:AGDISTIS,代码行数:28,代码来源:TripleIndexCreatorContext.java


注:本文中的org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。