This article collects typical usage examples of the Java method org.lemurproject.galago.core.tokenize.Tokenizer.create. If you are unsure what Tokenizer.create does, how to call it, or want to see it used in real code, the curated examples below may help. You can also read more about the containing class, org.lemurproject.galago.core.tokenize.Tokenizer.
Five code examples of Tokenizer.create are shown below, sorted by popularity by default.
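Before the project examples, here is a minimal, self-contained sketch of the basic call pattern (the class name and sample text are illustrative only, and the import paths assume the Galago 3.x package layout):

import org.lemurproject.galago.core.parse.Document;
import org.lemurproject.galago.core.tokenize.Tokenizer;
import org.lemurproject.galago.utility.Parameters;

public class TokenizerCreateDemo {
  public static void main(String[] args) {
    // Build a Tokenizer from an (empty) Parameters object, as the examples below do.
    Tokenizer tok = Tokenizer.create(Parameters.create());
    // tokenize() works in place: it fills the document's terms list.
    Document d = new Document("doc-1", "this is a short sample document");
    tok.tokenize(d);
    System.out.println(d.terms); // expected: [this, is, a, short, sample, document]
  }
}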
Example 1: Indexer
import org.lemurproject.galago.core.tokenize.Tokenizer; // import the package/class this method depends on
public Indexer(/* Parameters p ? */) throws Exception {
  // Basic plan:
  //  - pass the universal parser a file path
  //  - the universal parser extracts a stream of documents (or just one)
  //    and passes them to the tokenizer
  //  - the tokenizer extracts word tokens
  //    and passes the documents to the numberer
  //  - a document number is assigned
  //  - the fully formed document is then given to the index
  indexer = new UniversalParser(new FakeParameters(Parameters.create()));
  Tokenizer p2 = Tokenizer.create(Parameters.create());
  SequentialDocumentNumberer p3 = new SequentialDocumentNumberer();

  Parameters indexParams = Parameters.create();
  indexParams.set("shardDirectory", "/path/to/store/output/");
  indexParams.set("indexBlockSize", 100);
  indexParams.set("radix", 2);
  indexParams.set("mergeMode", "local");
  indexParams.set("stemming", true);
  indexParams.set("makecorpus", false);
  index = new GeometricIndex(new FakeParameters(indexParams));
  retrieval = new LocalRetrieval(index);

  // Now link these steps together into a processing chain.
  indexer.setProcessor(p2);
  p2.setProcessor(p3);
  p3.setProcessor(index);
}
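Once the chain is wired up, documents flow whenever the UniversalParser is fed a DocumentSplit. A hypothetical driver might look like the sketch below; the DocumentSplit construction is an assumption (its fields and constructors vary across Galago versions), and the input path is a placeholder:

// Hypothetical usage of the Indexer above (not from the original project).
// Feeding a DocumentSplit into the parser pushes documents through
// tokenizer -> numberer -> GeometricIndex; close() flushes the chain.
DocumentSplit split = new DocumentSplit();   // assumed no-arg TupleFlow constructor
split.fileName = "/path/to/input.trectext";  // placeholder path
split.fileType = "trectext";
indexer.process(split);
indexer.close();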
Example 2: getTokenizer
import org.lemurproject.galago.core.tokenize.Tokenizer; // import the package/class this method depends on
@Override
public Tokenizer getTokenizer() {
  // Build a tokenizer whose settings match the index's own manifest parameters.
  return Tokenizer.create(this.index.getManifest());
}
Example 3: AnchorTextDocumentCreator
import org.lemurproject.galago.core.tokenize.Tokenizer; // import the package/class this method depends on
public AnchorTextDocumentCreator(TupleFlowParameters tp) {
  // Build the tokenizer from the TupleFlow step's JSON parameters.
  tokenizer = Tokenizer.create(tp.getJSON());
}
Example 4: testMergeFlushedSequentialIndexes
import org.lemurproject.galago.core.tokenize.Tokenizer; // import the package/class this method depends on
@Test
public void testMergeFlushedSequentialIndexes() throws Exception {
  File index1 = null;
  File index2 = null;
  File indexmerged = null;
  try {
    // Two in-memory indexes with disjoint document-number ranges.
    Parameters p1 = Parameters.parseString("{\"documentNumberOffset\":0}");
    Parameters p2 = Parameters.parseString("{\"documentNumberOffset\":1000}");
    MemoryIndex mi1 = new MemoryIndex(new FakeParameters(p1));
    MemoryIndex mi2 = new MemoryIndex(new FakeParameters(p2));

    Tokenizer tok = Tokenizer.create(p1);

    // 100 five-term documents into mi1, 100 six-term documents into mi2.
    for (int i = 0; i < 100; i++) {
      Document d1 = new Document("DOCS1-" + i, "this is sample document " + i);
      Document d2 = new Document("DOCS2-" + i, "this is a different document " + i);
      tok.tokenize(d1);
      tok.tokenize(d2);
      mi1.process(d1);
      mi2.process(d2);
    }

    // Flush both memory indexes to disk.
    index1 = FileUtility.createTemporaryDirectory();
    FlushToDisk.flushMemoryIndex(mi1, index1.getAbsolutePath());
    index2 = FileUtility.createTemporaryDirectory();
    FlushToDisk.flushMemoryIndex(mi2, index2.getAbsolutePath());
    AppTest.verifyIndexStructures(index1);
    AppTest.verifyIndexStructures(index2);

    // Merge the two flushed indexes without renumbering documents.
    indexmerged = FileUtility.createTemporaryDirectory();
    Parameters mergeParams = Parameters.create();
    mergeParams.set("indexPath", indexmerged.getAbsolutePath());
    ArrayList<String> inputs = new ArrayList<String>();
    inputs.add(index1.getAbsolutePath());
    inputs.add(index2.getAbsolutePath());
    mergeParams.set("inputPath", inputs);
    mergeParams.set("renumberDocuments", false);
    App.run("merge-index", mergeParams, System.out);
    AppTest.verifyIndexStructures(indexmerged);

    DiskIndex di_index1 = new DiskIndex(index1.getAbsolutePath());
    DiskIndex di_index2 = new DiskIndex(index2.getAbsolutePath());
    DiskIndex di_merged = new DiskIndex(indexmerged.getAbsolutePath());

    // Collection lengths: 100 docs x 5 terms, 100 docs x 6 terms, and their sum.
    assertEquals(di_index1.getIndexPartStatistics("postings").collectionLength, 500);
    assertEquals(di_index2.getIndexPartStatistics("postings").collectionLength, 600);
    assertEquals(di_merged.getIndexPartStatistics("postings").collectionLength, 1100);

    // Vocabularies: 4 words + 100 numerals, 5 words + 100 numerals, and their union.
    assertEquals(di_index1.getIndexPartStatistics("postings").vocabCount, 104);
    assertEquals(di_index2.getIndexPartStatistics("postings").vocabCount, 105);
    assertEquals(di_merged.getIndexPartStatistics("postings").vocabCount, 106);

    assertEquals(di_index1.getIndexPartStatistics("postings").highestDocumentCount, 100);
    assertEquals(di_index2.getIndexPartStatistics("postings").highestDocumentCount, 100);
    assertEquals(di_merged.getIndexPartStatistics("postings").highestDocumentCount, 200);

    assertEquals(di_index1.getIndexPartStatistics("postings").highestFrequency, 100);
    assertEquals(di_index2.getIndexPartStatistics("postings").highestFrequency, 100);
    assertEquals(di_merged.getIndexPartStatistics("postings").highestFrequency, 200);

    // Document names survive the merge at their original (offset) numbers.
    assertEquals(di_merged.getName(50), mi1.getName(50));
    assertEquals(di_merged.getName(1050), mi2.getName(1050));
  } finally {
    if (index1 != null) {
      FSUtil.deleteDirectory(index1);
    }
    if (index2 != null) {
      FSUtil.deleteDirectory(index2);
    }
    if (indexmerged != null) {
      FSUtil.deleteDirectory(indexmerged);
    }
  }
}
Example 5: getTokenizer
import org.lemurproject.galago.core.tokenize.Tokenizer; // import the package/class this method depends on
/**
 * Get a new Tokenizer based on the parameter settings. Since Tokenizer is not assumed
 * to be thread-safe, we expect to allocate one for each request.
 * @return Tokenizer instance.
 */
public Tokenizer getTokenizer() {
  return Tokenizer.create(opts);
}
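Because the Javadoc above rules out sharing one Tokenizer across threads, callers are expected to grab a fresh instance per request. A hypothetical caller might look like the sketch below; the host class and its opts field are stand-ins for whatever class actually defines getTokenizer():

import org.lemurproject.galago.core.parse.Document;
import org.lemurproject.galago.core.tokenize.Tokenizer;
import org.lemurproject.galago.utility.Parameters;

// Hypothetical host class illustrating the per-request allocation pattern.
public class PerRequestTokenizerDemo {
  private final Parameters opts = Parameters.create(); // stand-in for the real settings

  public Tokenizer getTokenizer() {
    return Tokenizer.create(opts);
  }

  public void handleRequest(String docId, String text) {
    Tokenizer tok = getTokenizer(); // fresh, request-local instance: no shared state
    Document d = new Document(docId, text);
    tok.tokenize(d);
    System.out.println(d.terms);
  }
}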