This article collects typical usage examples of the Java class org.apache.lucene.document.Document. If you are unsure what the Document class does or how to use it, the curated code examples below should help.
The Document class belongs to the org.apache.lucene.document package. A total of 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
Example 1: indexDoc
import org.apache.lucene.document.Document; // import the required package/class
static void indexDoc(IndexWriter writer, Path file, long lastModified) throws IOException {
    try (InputStream stream = Files.newInputStream(file)) {
        Document doc = new Document();
        Field pathField = new StringField("path", file.toString(), Field.Store.YES);
        doc.add(pathField);
        doc.add(new LongPoint("modified", lastModified));
        doc.add(new TextField("contents", new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8))));
        if (writer.getConfig().getOpenMode() == OpenMode.CREATE) {
            System.out.println("adding " + file);
            writer.addDocument(doc);
        } else {
            System.out.println("updating " + file);
            writer.updateDocument(new Term("path", file.toString()), doc);
        }
    }
}
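For context, here is a minimal sketch of how indexDoc above might be driven. The index directory, analyzer, and input file path are illustrative assumptions and are not part of the original example; only the standard Lucene setup APIs are used.
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.store.FSDirectory;

// Sketch: open an IndexWriter and index a single file with indexDoc() above.
static void indexOneFile() throws IOException {
    IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer());
    iwc.setOpenMode(OpenMode.CREATE_OR_APPEND); // CREATE would take the addDocument branch above
    try (IndexWriter writer = new IndexWriter(FSDirectory.open(Paths.get("index")), iwc)) {
        Path file = Paths.get("docs/example.txt"); // assumed input file
        indexDoc(writer, file, Files.getLastModifiedTime(file).toMillis());
    }
}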
Example 2: copyFieldsNoDeletions
import org.apache.lucene.document.Document; // import the required package/class
private int copyFieldsNoDeletions(MergeState mergeState, final AtomicReader reader,
                                  final Lucene40StoredFieldsReader matchingFieldsReader, int rawDocLengths[])
        throws IOException {
    final int maxDoc = reader.maxDoc();
    int docCount = 0;
    if (matchingFieldsReader != null) {
        // We can bulk-copy because the fieldInfos are "congruent"
        while (docCount < maxDoc) {
            int len = Math.min(MAX_RAW_MERGE_DOCS, maxDoc - docCount);
            IndexInput stream = matchingFieldsReader.rawDocs(rawDocLengths, docCount, len);
            addRawDocuments(stream, rawDocLengths, len);
            docCount += len;
            mergeState.checkAbort.work(300 * len);
        }
    } else {
        for (; docCount < maxDoc; docCount++) {
            // NOTE: it's very important to first assign to doc then pass it to
            // fieldsWriter.addDocument; see LUCENE-1282
            Document doc = reader.document(docCount);
            addDocument(doc, mergeState.fieldInfos);
            mergeState.checkAbort.work(300);
        }
    }
    return docCount;
}
Example 3: locateContainer
import org.apache.lucene.document.Document; // import the required package/class
protected boolean locateContainer(String nodeRef, IndexReader reader)
{
    boolean found = false;
    try
    {
        TermDocs td = reader.termDocs(new Term("ID", nodeRef));
        while (td.next())
        {
            int doc = td.doc();
            Document document = reader.document(doc);
            if (document.getField("ISCONTAINER") != null)
            {
                found = true;
                break;
            }
        }
        td.close();
    }
    catch (IOException e)
    {
        throw new LuceneIndexException("Failed to delete container and below for " + nodeRef, e);
    }
    return found;
}
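Note that this example targets the pre-4.0 Lucene API (IndexReader.termDocs and TermDocs no longer exist in later versions). On Lucene 5+ the same ISCONTAINER check could be sketched roughly as below; this is an assumption-based adaptation using the standard postings API, not code from the original project. The nodeRef and reader variables are the same as in the method above.
boolean found = false;
for (LeafReaderContext ctx : reader.leaves()) {
    // postings() returns null when the term does not occur in this segment
    PostingsEnum postings = ctx.reader().postings(new Term("ID", nodeRef));
    if (postings == null) {
        continue;
    }
    int docId;
    while ((docId = postings.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
        Document document = ctx.reader().document(docId);
        if (document.getField("ISCONTAINER") != null) {
            found = true;
            break;
        }
    }
    if (found) {
        break;
    }
}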
Example 4: testGetParentIdNoParentField
import org.apache.lucene.document.Document; // import the required package/class
public void testGetParentIdNoParentField() throws Exception {
    ParentFieldMapper fieldMapper = createParentFieldMapper();
    Directory directory = newDirectory();
    IndexWriter indexWriter = new IndexWriter(directory, newIndexWriterConfig());
    Document document = new Document();
    document.add(new SortedDocValuesField("different_field", new BytesRef("1")));
    indexWriter.addDocument(document);
    indexWriter.close();
    IndexReader indexReader = DirectoryReader.open(directory);
    String id = ParentFieldSubFetchPhase.getParentId(fieldMapper, indexReader.leaves().get(0).reader(), 0);
    assertNull(id);
    indexReader.close();
    directory.close();
}
Example 5: testKeywords
import org.apache.lucene.document.Document; // import the required package/class
@Test
public void testKeywords() throws IOException {
    LuceneIndex index = new LuceneIndex();
    try (Reference<IndexWriter> writer = index.provideWriter()) {
        Document doc1 = new Document();
        LuceneFields.Keyword.add(doc1, "name", "John", LuceneFields.FieldOptions.STORE_INDEX);
        writer.use().addDocument(doc1);
        Document doc2 = new Document();
        LuceneFields.Keyword.add(doc2, "name", "James", LuceneFields.FieldOptions.STORE_INDEX);
        writer.use().addDocument(doc2);
    }
    LuceneSearchResults results = index.search(LuceneSearch.builder().query(new TermQuery(new Term("name", "James"))).build());
    assertTrue(results.hasCount());
    assertEquals(1, (int) results.count());
    assertEquals("James", results.toList().get(0).getField("name").stringValue());
}
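The LuceneIndex and LuceneFields helpers above are project-specific wrappers. As a point of reference, a rough plain-Lucene sketch of the same keyword-field test might look as follows; the directory and analyzer choices are assumptions, and only stock Lucene classes are used.
public void testKeywordsPlainLucene() throws IOException {
    Directory dir = new RAMDirectory(); // ByteBuffersDirectory on Lucene 8+
    try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new KeywordAnalyzer()))) {
        Document doc1 = new Document();
        doc1.add(new StringField("name", "John", Field.Store.YES)); // stored, indexed, not tokenized
        writer.addDocument(doc1);
        Document doc2 = new Document();
        doc2.add(new StringField("name", "James", Field.Store.YES));
        writer.addDocument(doc2);
    }
    try (IndexReader reader = DirectoryReader.open(dir)) {
        IndexSearcher searcher = new IndexSearcher(reader);
        TopDocs hits = searcher.search(new TermQuery(new Term("name", "James")), 10);
        // expect a single hit whose stored "name" value is "James"
        assertEquals("James", searcher.doc(hits.scoreDocs[0].doc).get("name"));
    }
}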
Example 6: addDocucmentToIndexer
import org.apache.lucene.document.Document; // import the required package/class
@Override
public void addDocucmentToIndexer(List<App> apps) {
    Document doc = null;
    synchronized (lock) {
        if (CollectionUtils.isEmpty(apps)) {
            return;
        }
        for (App app : apps) {
            try {
                doc = newDocument(app, allTags4AppHashMap);
                indexWriter.addDocument(doc);
            } catch (Exception e) {
                logger.error("Exception", e);
            }
        }
    }
}
Example 7: getResources
import org.apache.lucene.document.Document; // import the required package/class
/**
 * Returns a set of source files containing reference(s) to given type element.
 * @param element the {@link ElementHandle} of a {@link TypeElement} for which usages should be found
 * @param searchKind type of reference, {@link SearchKind}
 * @param scope to search in {@link SearchScope}
 * @return set of {@link FileObject}s containing the reference(s)
 * It may return null when the caller is a CancellableTask<CompilationInfo> and is cancelled
 * inside call of this method.
 */
public @NullUnknown Set<FileObject> getResources (
        final @NonNull ElementHandle<TypeElement> element,
        final @NonNull Set<SearchKind> searchKind,
        final @NonNull Set<? extends SearchScopeType> scope) {
    return searchImpl(
        element,
        searchKind,
        scope,
        new Convertor<ClassIndexImpl, Convertor<Document,FileObject>>() {
            @NonNull
            @Override
            public Convertor<Document, FileObject> convert(@NonNull final ClassIndexImpl p) {
                return DocumentUtil.fileObjectConvertor(ClassIndex.ResourceType.SOURCE, p.getSourceRoots());
            }
        });
}
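A hypothetical call site for getResources, assuming a ClassIndex obtained from the NetBeans Java Source API; the classIndex and typeElement names and the chosen search kind and scope are illustrative assumptions, not taken from the example above.
ElementHandle<TypeElement> handle = ElementHandle.create(typeElement); // typeElement resolved elsewhere
Set<FileObject> users = classIndex.getResources(
        handle,
        EnumSet.of(ClassIndex.SearchKind.TYPE_REFERENCES),
        EnumSet.of(ClassIndex.SearchScope.SOURCE));
if (users == null) {
    // the calling CancellableTask was cancelled while the query ran
} else {
    for (FileObject fo : users) {
        // each FileObject is a source file referencing the type
    }
}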
Example 8: getResourcesForPackage
import org.apache.lucene.document.Document; // import the required package/class
/**
 * Returns a set of source files containing reference(s) to given package element.
 * @param element the {@link ElementHandle} of a {@link PackageElement} for which usages should be found
 * @param searchKind type of reference, {@link SearchKind}
 * @param scope to search in {@link SearchScope}
 * @return set of {@link FileObject}s containing the reference(s)
 * It may return null when the caller is a CancellableTask<CompilationInfo> and is cancelled
 * inside call of this method.
 * @since 0.89
 */
public @NullUnknown Set<FileObject> getResourcesForPackage (
        final @NonNull ElementHandle<PackageElement> element,
        final @NonNull Set<SearchKind> searchKind,
        final @NonNull Set<? extends SearchScopeType> scope) {
    return searchImpl(
        element,
        searchKind,
        scope,
        new Convertor<ClassIndexImpl, Convertor<Document,FileObject>>() {
            @NonNull
            @Override
            public Convertor<Document, FileObject> convert(@NonNull final ClassIndexImpl p) {
                return DocumentUtil.fileObjectConvertor(ClassIndex.ResourceType.SOURCE, p.getSourceRoots());
            }
        });
}
Example 9: addAppsToIndexerWriter
import org.apache.lucene.document.Document; // import the required package/class
private void addAppsToIndexerWriter(List<App> list, IndexWriter indexerWriter) {
    Field name = new Field(fieldName, "", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
    NumericField catalog = new NumericField("catalog", Field.Store.NO, true);
    NumericField downloadRank = new NumericField("downloadRank", Field.Store.NO, true);
    for (App a : list) {
        try {
            Document doc = new Document();
            name.setValue(a.getName().toLowerCase());
            doc.add(name);
            downloadRank.setIntValue(a.getDownloadRank());
            doc.add(downloadRank);
            catalog.setIntValue(a.getCatalog());
            doc.add(catalog);
            indexerWriter.addDocument(doc);
        } catch (Exception e) {
            logger.error("Exception", e);
        }
    }
}
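This example relies on the pre-4.0 field API (Field.Index, NumericField, and field-instance reuse across iterations). On Lucene 6+ the same document could be built roughly as follows; this is a sketch based only on the field names above, not the original project code, and it assumes the same App accessors.
for (App a : list) {
    Document doc = new Document();
    doc.add(new StringField(fieldName, a.getName().toLowerCase(), Field.Store.YES)); // not analyzed, norms omitted
    doc.add(new IntPoint("downloadRank", a.getDownloadRank())); // indexed for exact/range queries, not stored
    doc.add(new IntPoint("catalog", a.getCatalog()));
    indexerWriter.addDocument(doc);
}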
Example 10: testSingleValued
import org.apache.lucene.document.Document; // import the required package/class
public void testSingleValued() throws IOException {
    Directory dir = newDirectory();
    // we need the default codec to check for singletons
    IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null).setCodec(TestUtil.getDefaultCodec()));
    Document doc = new Document();
    for (IndexableField f : NumberFieldMapper.NumberType.HALF_FLOAT.createFields("half_float", 3f, false, true, false)) {
        doc.add(f);
    }
    w.addDocument(doc);
    final DirectoryReader dirReader = DirectoryReader.open(w);
    LeafReader reader = getOnlyLeafReader(dirReader);
    SortedNumericDoubleValues values = new SortedNumericDVIndexFieldData.SortedNumericHalfFloatFieldData(
            reader, "half_float").getDoubleValues();
    assertNotNull(FieldData.unwrapSingleton(values));
    values.setDocument(0);
    assertEquals(1, values.count());
    assertEquals(3f, values.valueAt(0), 0f);
    IOUtils.close(dirReader, w, dir);
}
Example 11: run
import org.apache.lucene.document.Document; // import the required package/class
@Override
public void run() {
    int i = 0;
    while (i < 10000) {
        try {
            if (data.size() <= i) {
                sleep(1);
                continue;
            }
            final String key = "key" + i;
            final String val = "value" + i;
            final List<Document> documents = index.searchForDocuments(new TermQuery(new Term(key, val)), 10, new Sort(new SortField(key, SortField.Type.STRING)));
            if (documents.size() != 1) {
                throw new RuntimeException("Invalid number of matching documents for " + key + ", found " + documents);
            }
            ++i;
        } catch (IOException ioe) {
            error = ioe;
            break;
        } catch (InterruptedException e) {
        } catch (AlreadyClosedException ace) {
            error = ace;
            break;
        }
    }
}
Example 12: getToken
import org.apache.lucene.document.Document; // import the required package/class
@Override
public Token getToken(int index) {
    Token ret = cachedTokens.get(index);
    if (ret == null) {
        ret = new Token();
        try {
            Document doc = tokenSearcher.doc(index);
            for (IndexableField f : doc.getFields())
                if (!f.name().startsWith("GGS:"))
                    ret.getFeatures().put(f.name(), f.stringValue());
                else if (f.name().equals("GGS:SpanAnnotation"))
                    ret.parentAnnotations.add(getAnnotation(f.numericValue().intValue()));
                else if (f.name().equals("GGS:Sentence"))
                    ret.parentSentence = getSentence(f.numericValue().intValue());
            ret.indexInSentence = index - ret.parentSentence.getFirstTokenIndexInCorpus();
        } catch (IOException e) {
            e.printStackTrace();
        }
        cachedTokens.put(index, ret);
    }
    return ret;
}
Example 13: convert
import org.apache.lucene.document.Document; // import the required package/class
@Override
public Document convert(String p) {
    if (signal != null) {
        signal.countDown();
    }
    if (slot != null) {
        try {
            this.slot.await();
        } catch (InterruptedException ex) {
            throw new RuntimeException(ex);
        }
    }
    final Document doc = new Document();
    doc.add(new Field(FLD_KEY, p, Field.Store.YES, Field.Index.ANALYZED_NO_NORMS)); //NOI18N
    return doc;
}
Example 14: testNoTokens
import org.apache.lucene.document.Document; // import the required package/class
public void testNoTokens() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.KEYWORD_ANALYZER));
    FieldType allFt = getAllFieldType();
    Document doc = new Document();
    doc.add(new Field("_id", "1", StoredField.TYPE));
    doc.add(new AllField("_all", "", 2.0f, allFt));
    indexWriter.addDocument(doc);
    IndexReader reader = DirectoryReader.open(indexWriter);
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs docs = searcher.search(new MatchAllDocsQuery(), 10);
    assertThat(docs.totalHits, equalTo(1));
    assertThat(docs.scoreDocs[0].doc, equalTo(0));
}
Example 15: testMinDocCount
import org.apache.lucene.document.Document; // import the required package/class
public void testMinDocCount() throws Exception {
    try (Directory dir = newDirectory();
            RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
        for (long value : new long[] {7, 3, -10, -6, 5, 50}) {
            Document doc = new Document();
            doc.add(new SortedNumericDocValuesField("field", value));
            w.addDocument(doc);
        }
        HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
                .field("field")
                .interval(10)
                .minDocCount(2);
        MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
        fieldType.setName("field");
        try (IndexReader reader = w.getReader()) {
            IndexSearcher searcher = new IndexSearcher(reader);
            Histogram histogram = searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
            assertEquals(2, histogram.getBuckets().size());
            assertEquals(-10d, histogram.getBuckets().get(0).getKey());
            assertEquals(2, histogram.getBuckets().get(0).getDocCount());
            assertEquals(0d, histogram.getBuckets().get(1).getKey());
            assertEquals(3, histogram.getBuckets().get(1).getDocCount());
        }
    }
}