This article collects typical usage examples of the Java class org.apache.lucene.analysis.KeywordAnalyzer. If you are wondering what KeywordAnalyzer is for, how to use it, or where to find examples of it, the curated code samples below should help.
The KeywordAnalyzer class belongs to the org.apache.lucene.analysis package. Fifteen code examples of the class are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
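Before diving into the examples: KeywordAnalyzer treats the entire input as a single token, with no tokenizing and no lowercasing, which is why it shows up around exact-match fields such as identifiers, paths, and part numbers. Here is a minimal sketch of that behavior against the Lucene 3.x API most of the examples below use (the field name and sample text are made up for illustration):

import java.io.StringReader;
import org.apache.lucene.analysis.KeywordAnalyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class KeywordAnalyzerDemo {
    public static void main(String[] args) throws Exception {
        KeywordAnalyzer analyzer = new KeywordAnalyzer();
        // KeywordAnalyzer emits the whole input as one token, untouched.
        TokenStream ts = analyzer.tokenStream("partnum", new StringReader("Q36-XL 2007"));
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
            System.out.println("token: [" + term.toString() + "]"); // prints: token: [Q36-XL 2007]
        }
        ts.close();
    }
}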
Example 1: process
import org.apache.lucene.analysis.KeywordAnalyzer; // import the required package/class
@Override
public void process(ProcessingContext<Corpus> ctx, Corpus corpus) throws ModuleException {
    try (KeywordAnalyzer kwa = new KeywordAnalyzer()) {
        IndexWriterConfig writerConfig = new IndexWriterConfig(Version.LUCENE_36, kwa);
        writerConfig.setOpenMode(append ? OpenMode.CREATE_OR_APPEND : OpenMode.CREATE);
        try (Directory dir = FSDirectory.open(indexDir)) {
            try (IndexWriter writer = new IndexWriter(dir, writerConfig)) {
                AlvisDBIndexerResolvedObjects resObj = getResolvedObjects();
                Logger logger = getLogger(ctx);
                EvaluationContext evalCtx = new EvaluationContext(logger);
                for (ADBElements.Resolved ent : resObj.elements) {
                    ent.indexElements(logger, writer, evalCtx, corpus);
                }
            }
        } catch (IOException e) {
            rethrow(e);
        }
    }
}
Example 2: skynetsearch
import org.apache.lucene.analysis.KeywordAnalyzer; // import the required package/class
public Hits skynetsearch(String query, String Field, String indexPath) {
    String indexfield = Field + ":";
    String querytext = indexfield + query.trim();
    Hits result = null;
    try {
        String[] search_fields = {Field};
        //String indexPath = StorageHandler.GetDocIndexPath();
        IndexSearcher searcher = new IndexSearcher(indexPath);
        KeywordAnalyzer analyzer = new KeywordAnalyzer();
        Query lucenequery = MultiFieldQueryParser.parse(query,
                search_fields, analyzer);
        // QueryParser queryparse = new QueryParser(query, analyzer);
        // Query lucenequery = queryparse.parse(querytext);
        result = searcher.search(lucenequery);
    } catch (IOException e) {
        e.printStackTrace();
    } catch (Exception ex) {
        System.out.println(ex + "");
    }
    return result;
}
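Note that Examples 2 and 3 target the pre-3.0 Lucene API: the Hits class was removed in Lucene 3.0, and IndexSearcher no longer accepts a plain path string. Below is a rough sketch of the same single-field exact search against the 3.6-era API, assuming an existing index at indexPath (the method name is illustrative):

import java.io.File;
import org.apache.lucene.analysis.KeywordAnalyzer;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

public void skynetsearch36(String query, String field, String indexPath) throws Exception {
    IndexReader reader = IndexReader.open(FSDirectory.open(new File(indexPath)));
    try {
        IndexSearcher searcher = new IndexSearcher(reader);
        // KeywordAnalyzer keeps the query term verbatim: no tokenizing, no lowercasing.
        QueryParser parser = new QueryParser(Version.LUCENE_36, field, new KeywordAnalyzer());
        Query lucenequery = parser.parse(query);
        TopDocs top = searcher.search(lucenequery, 10); // Hits is gone; ask for the top 10
        for (ScoreDoc sd : top.scoreDocs) {
            System.out.println(searcher.doc(sd.doc).get(field));
        }
    } finally {
        reader.close();
    }
}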
Example 3: skynetsearchMulti
import org.apache.lucene.analysis.KeywordAnalyzer; // import the required package/class
public Hits skynetsearchMulti(String query, String[] Field, String indexPath) {
    Hits result = null;
    try {
        IndexSearcher searcher = new IndexSearcher(indexPath);
        KeywordAnalyzer analyzer = new KeywordAnalyzer();
        MultiFieldQueryParser multiparser = new MultiFieldQueryParser(Field, analyzer);
        multiparser.setDefaultOperator(QueryParser.Operator.OR);
        Query lucenequery = multiparser.parse(query);
        result = searcher.search(lucenequery);
    } catch (IOException e) {
        e.printStackTrace();
    } catch (Exception ex) {
        System.out.println(ex + "");
    }
    return result;
}
Example 4: createAnalyzer
import org.apache.lucene.analysis.KeywordAnalyzer; // import the required package/class
public static Analyzer createAnalyzer() {
    final PerFieldAnalyzerWrapper analyzer = new PerFieldAnalyzerWrapper(new KeywordAnalyzer());
    analyzer.addAnalyzer(DocumentUtil.FIELD_IDENTS, new WhitespaceAnalyzer());
    analyzer.addAnalyzer(DocumentUtil.FIELD_FEATURE_IDENTS, new WhitespaceAnalyzer());
    analyzer.addAnalyzer(DocumentUtil.FIELD_CASE_INSENSITIVE_FEATURE_IDENTS, new DocumentUtil.LCWhitespaceAnalyzer());
    return analyzer;
}
Example 5: createTransactionalDocumentIndex
import org.apache.lucene.analysis.KeywordAnalyzer; // import the required package/class
/**
 * Creates a transactional document-based index.
 * The returned {@link DocumentIndex} is not cached; the next call with the same arguments returns a different instance
 * of {@link DocumentIndex}. The caller is responsible for caching the returned {@link DocumentIndex}.
 * @param cacheFolder the folder in which the index should be stored
 * @param cache the document caching provider
 * @return the document based index
 * @since 2.19
 */
@NonNull
public static DocumentIndex.Transactional createTransactionalDocumentIndex (
        final @NonNull File cacheFolder,
        final @NonNull DocumentIndexCache cache) throws IOException {
    Parameters.notNull("cacheFolder", cacheFolder); //NOI18N
    Parameters.notNull("cache", cache); //NOI18N
    return createTransactionalDocumentIndex(
            createTransactionalIndex(cacheFolder, new KeywordAnalyzer()),
            cache);
}
Example 6: setupLuceneIndex
import org.apache.lucene.analysis.KeywordAnalyzer; // import the required package/class
private void setupLuceneIndex() throws Exception {
    clearWorkDir();
    final File wd = getWorkDir();
    cache = new File(wd, "cache");
    cache.mkdirs();
    index = LuceneIndex.create(cache, new KeywordAnalyzer());
}
Example 7: testAsyncClose
import org.apache.lucene.analysis.KeywordAnalyzer; // import the required package/class
public void testAsyncClose() throws Exception {
    final CountDownLatch slot = new CountDownLatch(1);
    final CountDownLatch signal = new CountDownLatch(1);
    final CountDownLatch done = new CountDownLatch(1);
    final AtomicReference<Exception> exception = new AtomicReference<Exception>();
    final Index index = IndexManager.createTransactionalIndex(indexFolder, new KeywordAnalyzer());
    final Thread worker = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                index.store(
                    new ArrayList<String>(Arrays.asList("foo")), //NOI18N
                    Collections.<String>emptySet(),
                    new TestInsertConvertor(slot, signal),
                    new TestDeleteConvertor(),
                    true);
            } catch (Exception ex) {
                exception.set(ex);
            } finally {
                done.countDown();
            }
        }
    });
    worker.start();
    signal.await();
    slot.countDown();
    index.close();
    done.await();
    assertNull(exception.get());
}
Example 8: getOverlay
import org.apache.lucene.analysis.KeywordAnalyzer; // import the required package/class
@NonNull
private synchronized DocumentIndex2 getOverlay() throws IOException {
    if (overlay == null) {
        overlay = (DocumentIndex2) IndexManager.createDocumentIndex(IndexManager.createMemoryIndex(new KeywordAnalyzer()));
    }
    return overlay;
}
Example 9: open
import org.apache.lucene.analysis.KeywordAnalyzer; // import the required package/class
public void open() throws Exception
{
    if ( !directory.exists() && !directory.mkdirs() )
    {
        throw new IOException("Could not make: " + directory);
    }
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_35, new KeywordAnalyzer()).setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    niofsDirectory = new NIOFSDirectory(directory, new SingleInstanceLockFactory());
    writer = new IndexWriter(niofsDirectory, conf);
}
Example 10: openForWriting
import org.apache.lucene.analysis.KeywordAnalyzer; // import the required package/class
/**
 * Opens the collection for writing.
 * @throws IOException if opening the collection fails
 */
private void openForWriting() throws IOException {
    if (writer == null) {
        if (reader != null) {
            reader.close();
            reader = null;
        }
        // create the writer used to store data
        Analyzer analyzer = new KeywordAnalyzer();
        writer = new IndexWriter(directory, analyzer, IndexWriter.MaxFieldLength.UNLIMITED);
    }
}
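The IndexWriter(Directory, Analyzer, MaxFieldLength) constructor used above was deprecated in Lucene 3.1 in favor of IndexWriterConfig, the style Example 9 already uses. A sketch of the equivalent setup, reusing the writer and directory fields from the snippet above and assuming Version.LUCENE_36:

// Equivalent writer setup with the IndexWriterConfig API. The config-based
// constructor applies no field-length truncation by default, which matches
// MaxFieldLength.UNLIMITED; CREATE_OR_APPEND mirrors the old constructor's behavior.
IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_36, new KeywordAnalyzer());
conf.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
writer = new IndexWriter(directory, conf);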
Example 11: testPerFieldAnalyzer
import org.apache.lucene.analysis.KeywordAnalyzer; // import the required package/class
public void testPerFieldAnalyzer() throws Exception {
    PerFieldAnalyzerWrapper analyzer = new PerFieldAnalyzerWrapper(
            new SimpleAnalyzer());
    analyzer.addAnalyzer("partnum", new KeywordAnalyzer());
    Query query = new QueryParser(Version.LUCENE_41,
            "description", analyzer).parse(
            "partnum:Q36 AND SPACE");
    assertEquals("Q36 kept as-is",
            "+partnum:Q36 +space", query.toString("description"));
    assertEquals("doc found!", 1, TestUtil.hitCount(searcher, query));
}
Example 12: testIsValid
import org.apache.lucene.analysis.KeywordAnalyzer; // import the required package/class
public void testIsValid() throws Exception {
    final File wd = getWorkDir();
    final File cache = new File(wd, "cache");
    cache.mkdirs();
    final LuceneIndex index = LuceneIndex.create(cache, new KeywordAnalyzer());
    //Empty index => invalid
    assertEquals(Index.Status.EMPTY, index.getStatus(true));
    clearValidityCache(index);
    List<String> refs = new ArrayList<String>();
    refs.add("A");
    Set<String> toDel = new HashSet<String>();
    index.store(
        refs,
        toDel,
        new StrToDocConvertor("resources"),
        new StrToQueryCovertor("resource"),
        true);
    //Existing index => valid
    assertEquals(Index.Status.VALID, index.getStatus(true));
    assertTrue(cache.listFiles().length > 0);
    clearValidityCache(index);
    createLock(index);
    //Index with orphan lock => invalid
    assertEquals(Index.Status.INVALID, index.getStatus(true));
    assertTrue(cache.listFiles().length == 0);
    refs.add("B");
    clearValidityCache(index);
    index.store(
        refs,
        toDel,
        new StrToDocConvertor("resources"),
        new StrToQueryCovertor("resource"),
        true);
    assertEquals(Index.Status.VALID, index.getStatus(true));
    assertTrue(cache.listFiles().length > 0);
    //Broken index => invalid
    clearValidityCache(index);
    File bt = null;
    for (File file : cache.listFiles()) {
        // either the compound file or the fields information must be present
        if (file.getName().endsWith(".cfs") || file.getName().endsWith(".fnm")) {
            bt = file;
            break;
        }
    }
    assertNotNull(bt);
    FileOutputStream out = new FileOutputStream(bt);
    try {
        out.write(new byte[] {0,0,0,0,0,0,0,0,0,0}, 0, 10);
    } finally {
        out.close();
    }
    assertEquals(Index.Status.INVALID, index.getStatus(true));
    assertTrue(cache.listFiles().length == 0);
}
Example 13: testConcurrentReadWrite
import org.apache.lucene.analysis.KeywordAnalyzer; // import the required package/class
public void testConcurrentReadWrite() throws Exception {
    final Index index = IndexManager.createTransactionalIndex(indexFolder, new KeywordAnalyzer());
    index.store(
        new ArrayList<String>(Arrays.asList("a")), //NOI18N
        Collections.<String>emptySet(),
        new TestInsertConvertor(),
        new TestDeleteConvertor(),
        true);
    final CountDownLatch slot = new CountDownLatch(1);
    final CountDownLatch signal = new CountDownLatch(1);
    final CountDownLatch done = new CountDownLatch(1);
    final AtomicReference<Exception> result = new AtomicReference<Exception>();
    final Thread worker = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                index.store(
                    new ArrayList<String>(Arrays.asList("b")), //NOI18N
                    Collections.<String>emptySet(),
                    new TestInsertConvertor(slot, signal),
                    new TestDeleteConvertor(),
                    true);
            } catch (Exception e) {
                result.set(e);
            } finally {
                done.countDown();
            }
        }
    });
    worker.start();
    signal.await();
    final Collection<String> data = new ArrayList<String>();
    index.query(
        data,
        new Convertor<Document, String>() {
            @Override
            public String convert(Document p) {
                return p.get(FLD_KEY);
            }
        },
        null,
        new AtomicBoolean(),
        new PrefixQuery(new Term(FLD_KEY, ""))); //NOI18N
    assertEquals(1, data.size());
    assertEquals("a", data.iterator().next()); //NOI18N
    slot.countDown();
    done.await();
    assertNull(result.get());
}
Example 14: search
import org.apache.lucene.analysis.KeywordAnalyzer; // import the required package/class
@Test
// 1000 times: 2973 mills.
// 1000 times: 2927 mills.
// 1000 times: 2967 mills.
//
// 10000 times: 21268 mills.
// verified: ok
public void search() throws Exception {
    createCommonDaoImpl();
    //
    FullTextSession fullTextSession = Search.getFullTextSession(sessionFactory.openSession());
    // StopAnalyzer: finds data only on an exact match (works like =); cannot search Chinese text
    // StandardAnalyzer: finds data on partial matches (works like LIKE)
    Analyzer analyzer = new KeywordAnalyzer();
    // Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_34);
    QueryParser parser = new QueryParser(Version.LUCENE_31, "id", analyzer);
    parser.setAllowLeadingWildcard(true);
    parser.setLowercaseExpandedTerms(true);
    //
    // name:Marry
    // name:瑪莉
    // String search = String.Format("name:{0} AND title:{1}", "中国建设银行", "doc1");
    StringBuilder lql = new StringBuilder();
    // #issue: uppercase terms are not found???
    // lql.append("id:*a*");
    // lql.append("audit:*sys*");
    lql.append("names:*a*");
    org.apache.lucene.search.Query luceneQuery = parser.parse(lql.toString());
    FullTextQuery fullTextQuery = fullTextSession.createFullTextQuery(luceneQuery, DogPoImpl.class);
    //
    List result = null;
    int count = 1;
    long beg = System.currentTimeMillis();
    for (int i = 0; i < count; i++) {
        result = fullTextQuery.list();
    }
    long end = System.currentTimeMillis();
    System.out.println(count + " times: " + (end - beg) + " mills. ");
    System.out.println(result.size() + ", " + result);
    assertNotNull(result);
}
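A plausible explanation for the "#issue: uppercase terms are not found" comment in Example 14: KeywordAnalyzer indexes field values verbatim, but setLowercaseExpandedTerms(true) makes the parser lowercase wildcard and prefix terms before matching, so a query such as id:*A* is rewritten to id:*a* and can never match uppercase content. If that diagnosis fits, the usual fix is on the query side:

// Keep wildcard/prefix terms case-sensitive so they can match the verbatim
// tokens produced by KeywordAnalyzer (alternatively, lowercase values at index time).
parser.setLowercaseExpandedTerms(false);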
Example 15: convertCanonicOutput
import org.apache.lucene.analysis.KeywordAnalyzer; // import the required package/class
private void convertCanonicOutput(CanonicOutput canonicOutput, Document document, LuceneOptions luceneOptions)
{
    SimilarityForms sf = SimilarityFormConverterWrapper.getConverter().process(canonicOutput);
    document.add(newField("co.configuration.id",
            canonicOutput.getApplicationRun().getConfiguration().getId().toString(),
            luceneOptions,
            new StandardAnalyzer(Version.LUCENE_36)));
    document.add(newField("co.revision.id",
            canonicOutput.getApplicationRun().getRevision().getId().toString(),
            luceneOptions,
            new StandardAnalyzer(Version.LUCENE_36)));
    document.add(newField("co.applicationrun.id",
            canonicOutput.getApplicationRun().getId().toString(),
            luceneOptions,
            new StandardAnalyzer(Version.LUCENE_36)));
    if (canonicOutput.getAnnotations() != null && !canonicOutput.getAnnotations().isEmpty())
    {
        for (Annotation a : canonicOutput.getAnnotations())
        {
            document.add(newField("co.annotation", a.getAnnotationContent(), luceneOptions, new StandardAnalyzer(Version.LUCENE_36)));
        }
    }
    // MathML is converted into a single-string representation,
    // which is stored in co.distanceForm
    document.add(newField("co.distanceForm", sf.getDistanceForm(), luceneOptions, null));
    PerFieldAnalyzerWrapper keywordAnalyzer = new PerFieldAnalyzerWrapper(new KeywordAnalyzer());
    for (String s : sf.getCountForm().keySet())
    {
        document.add(newField("co.element", s + "=" + sf.getCountForm().get(s), luceneOptions, keywordAnalyzer));
    }
    logger.info("Canonic output [" + canonicOutput.getId() + "] indexed.");
}