This article collects typical usage examples of the Java class org.apache.lucene.document.Field. If you are wondering what the Field class does, how to use it, or where to find usage examples, the hand-picked code examples below may help.
The Field class belongs to the org.apache.lucene.document package. Fifteen code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
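Before the examples, here is a minimal, self-contained sketch of the most common Field usage pattern (creating fields, adding them to a Document, and indexing the document). It assumes a Lucene 6.x-era classpath (RAMDirectory, as used in the examples below); the field names and values are purely illustrative.
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class FieldUsageSketch {
    public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));
        Document doc = new Document();
        // StringField: indexed as a single, unanalyzed token; stored so it can be retrieved
        doc.add(new StringField("id", "doc-1", Field.Store.YES));
        // TextField: analyzed full-text content; not stored in this sketch
        doc.add(new TextField("body", "hello lucene field example", Field.Store.NO));
        writer.addDocument(doc);
        writer.close();
        dir.close();
    }
}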
Example 1: testSimpleNumericOps
import org.apache.lucene.document.Field; // import the package/class this example depends on
public void testSimpleNumericOps() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
    Document document = new Document();
    document.add(new TextField("_id", "1", Field.Store.YES));
    document.add(new LegacyIntField("test", 2, LegacyIntField.TYPE_STORED));
    indexWriter.addDocument(document);
    IndexReader reader = DirectoryReader.open(indexWriter);
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
    Document doc = searcher.doc(topDocs.scoreDocs[0].doc);
    IndexableField f = doc.getField("test");
    assertThat(f.stringValue(), equalTo("2"));
    BytesRefBuilder bytes = new BytesRefBuilder();
    LegacyNumericUtils.intToPrefixCoded(2, 0, bytes);
    topDocs = searcher.search(new TermQuery(new Term("test", bytes.get())), 1);
    doc = searcher.doc(topDocs.scoreDocs[0].doc);
    f = doc.getField("test");
    assertThat(f.stringValue(), equalTo("2"));
    indexWriter.close();
}
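LegacyIntField and LegacyNumericUtils in this test belong to the pre-points numeric encoding. On current Lucene versions the same round trip is usually written with IntPoint plus a StoredField; below is a hedged sketch of that equivalent, reusing the test's field name (the PointsEquivalent class name is just a placeholder).
import org.apache.lucene.document.Document;
import org.apache.lucene.document.IntPoint;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.search.Query;

final class PointsEquivalent {
    static void addTestField(Document document, int value) {
        document.add(new IntPoint("test", value));    // indexed for exact/range queries
        document.add(new StoredField("test", value)); // stored so the value can be retrieved
    }

    static Query exactTest(int value) {
        return IntPoint.newExactQuery("test", value);
    }
}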
Example 2: testSortValues
import org.apache.lucene.document.Field; // import the package/class this example depends on
public void testSortValues() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
    for (int i = 0; i < 10; i++) {
        Document document = new Document();
        String text = new String(new char[]{(char) (97 + i), (char) (97 + i)});
        document.add(new TextField("str", text, Field.Store.YES));
        document.add(new SortedDocValuesField("str", new BytesRef(text)));
        indexWriter.addDocument(document);
    }
    IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(indexWriter));
    IndexSearcher searcher = new IndexSearcher(reader);
    TopFieldDocs docs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField("str", SortField.Type.STRING)));
    for (int i = 0; i < 10; i++) {
        FieldDoc fieldDoc = (FieldDoc) docs.scoreDocs[i];
        assertThat((BytesRef) fieldDoc.fields[0], equalTo(new BytesRef(new String(new char[]{(char) (97 + i), (char) (97 + i)}))));
    }
}
Example 3: processQuery
import org.apache.lucene.document.Field; // import the package/class this example depends on
void processQuery(Query query, ParseContext context) {
    ParseContext.Document doc = context.doc();
    FieldType pft = (FieldType) this.fieldType();
    QueryAnalyzer.Result result;
    try {
        result = QueryAnalyzer.analyze(query);
    } catch (QueryAnalyzer.UnsupportedQueryException e) {
        doc.add(new Field(pft.extractionResultField.name(), EXTRACTION_FAILED, extractionResultField.fieldType()));
        return;
    }
    for (Term term : result.terms) {
        BytesRefBuilder builder = new BytesRefBuilder();
        builder.append(new BytesRef(term.field()));
        builder.append(FIELD_VALUE_SEPARATOR);
        builder.append(term.bytes());
        doc.add(new Field(queryTermsField.name(), builder.toBytesRef(), queryTermsField.fieldType()));
    }
    if (result.verified) {
        doc.add(new Field(extractionResultField.name(), EXTRACTION_COMPLETE, extractionResultField.fieldType()));
    } else {
        doc.add(new Field(extractionResultField.name(), EXTRACTION_PARTIAL, extractionResultField.fieldType()));
    }
}
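The per-term key written to queryTermsField above is the field name, a single separator byte, and the raw term bytes concatenated into one BytesRef. A standalone sketch of that encoding follows; the class name and the separator constant here are hypothetical stand-ins for the mapper's own definitions.
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;

final class TermKeyEncoder {
    // Hypothetical separator; the real mapper defines its own constant.
    private static final byte FIELD_VALUE_SEPARATOR = 0;

    static BytesRef encode(String field, BytesRef termBytes) {
        BytesRefBuilder builder = new BytesRefBuilder();
        builder.append(new BytesRef(field));   // field name bytes
        builder.append(FIELD_VALUE_SEPARATOR); // one separator byte
        builder.append(termBytes);             // the term's raw bytes
        return builder.toBytesRef();
    }
}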
Example 4: fixWeightedFragInfo
import org.apache.lucene.document.Field; // import the package/class this example depends on
/**
 * Fixes problems with broken analysis chains (messed-up positions and offsets) that can lead to a
 * {@link StringIndexOutOfBoundsException} in the {@link FastVectorHighlighter}.
 */
public static WeightedFragInfo fixWeightedFragInfo(FieldMapper mapper, Field[] values, WeightedFragInfo fragInfo) {
    assert fragInfo != null : "FragInfo must not be null";
    assert mapper.fieldType().name().equals(values[0].name()) : "Expected FieldMapper for field " + values[0].name();
    if (!fragInfo.getSubInfos().isEmpty() && (containsBrokenAnalysis(mapper.fieldType().indexAnalyzer()))) {
        /* This is a special case where broken analysis such as WDF was used for term-vector creation at index time,
         * which can mess up the offsets. To prevent a StringIndexOutOfBoundsException we need to re-sort
         * the fragments based on their offsets rather than solely on their positions, as the FastVectorHighlighter
         * does. This is really a Lucene problem and should be fixed in Lucene rather than in this workaround. */
        final List<SubInfo> subInfos = fragInfo.getSubInfos();
        CollectionUtil.introSort(subInfos, new Comparator<SubInfo>() {
            @Override
            public int compare(SubInfo o1, SubInfo o2) {
                int startOffset = o1.getTermsOffsets().get(0).getStartOffset();
                int startOffset2 = o2.getTermsOffsets().get(0).getStartOffset();
                return FragmentBuilderHelper.compare(startOffset, startOffset2);
            }
        });
        return new WeightedFragInfo(Math.min(fragInfo.getSubInfos().get(0).getTermsOffsets().get(0).getStartOffset(),
                fragInfo.getStartOffset()), fragInfo.getEndOffset(), subInfos, fragInfo.getTotalBoost());
    } else {
        return fragInfo;
    }
}
Example 5: getDocument
import org.apache.lucene.document.Field; // import the package/class this example depends on
private Document getDocument(File file) throws IOException {
    Document document = new Document();
    // index the file contents
    Field contentField = new Field(LuceneConstants.CONTENTS, new FileReader(file), TextField.TYPE_NOT_STORED);
    // index the file name
    Field fileNameField = new Field(LuceneConstants.FILE_NAME, file.getName(), TextField.TYPE_STORED);
    // index the file path
    Field filePathField = new Field(LuceneConstants.FILE_PATH, file.getCanonicalPath(), TextField.TYPE_STORED);
    document.add(contentField);
    document.add(fileNameField);
    document.add(filePathField);
    return document;
}
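A possible way to drive the getDocument helper above is a simple indexing loop over a data directory. This sketch assumes it sits in the same indexer class as getDocument and that the caller supplies an open IndexWriter; the method name is illustrative.
// Hypothetical indexing loop around getDocument(File).
private int indexDirectory(IndexWriter writer, File dataDir) throws IOException {
    int count = 0;
    File[] files = dataDir.listFiles();
    if (files != null) {
        for (File file : files) {
            if (file.isFile() && file.canRead()) {
                writer.addDocument(getDocument(file)); // reuse the helper shown above
                count++;
            }
        }
    }
    return count;
}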
Example 6: testCache
import org.apache.lucene.document.Field; // import the package/class this example depends on
/** Test that the version map cache works and is evicted on close. */
public void testCache() throws Exception {
    int size = Versions.lookupStates.size();
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
    Document doc = new Document();
    doc.add(new Field(UidFieldMapper.NAME, "6", UidFieldMapper.Defaults.FIELD_TYPE));
    doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 87));
    writer.addDocument(doc);
    DirectoryReader reader = DirectoryReader.open(writer);
    // should increase the cache size by 1
    assertEquals(87, Versions.loadVersion(reader, new Term(UidFieldMapper.NAME, "6")));
    assertEquals(size + 1, Versions.lookupStates.size());
    // should be a cache hit
    assertEquals(87, Versions.loadVersion(reader, new Term(UidFieldMapper.NAME, "6")));
    assertEquals(size + 1, Versions.lookupStates.size());
    reader.close();
    writer.close();
    // the core should be evicted from the map
    assertEquals(size, Versions.lookupStates.size());
    dir.close();
}
Example 7: createFields
import org.apache.lucene.document.Field; // import the package/class this example depends on
@Override
public List<Field> createFields(String name, Number value,
                                boolean indexed, boolean docValued, boolean stored) {
    List<Field> fields = new ArrayList<>();
    if (indexed) {
        fields.add(new HalfFloatPoint(name, value.floatValue()));
    }
    if (docValued) {
        fields.add(new SortedNumericDocValuesField(name,
            HalfFloatPoint.halfFloatToSortableShort(value.floatValue())));
    }
    if (stored) {
        fields.add(new StoredField(name, value.floatValue()));
    }
    return fields;
}
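The indexed half_float values produced above can later be searched with the point queries exposed by HalfFloatPoint. Note that HalfFloatPoint ships in Lucene's sandbox module, so its package location (and availability) depends on the Lucene version; the field name in this hedged sketch is illustrative.
import org.apache.lucene.document.HalfFloatPoint; // lives in the sandbox module; package may differ by Lucene version
import org.apache.lucene.search.Query;

final class HalfFloatQueries {
    // Exact and range queries over the point field written by createFields(); "price" is illustrative.
    static Query exactPrice(float value) {
        return HalfFloatPoint.newExactQuery("price", value);
    }

    static Query priceBetween(float low, float high) {
        return HalfFloatPoint.newRangeQuery("price", low, high);
    }
}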
Example 8: parseCreateField
import org.apache.lucene.document.Field; // import the package/class this example depends on
@Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
    ValueAndBoost valueAndBoost = StringFieldMapper.parseCreateFieldForString(context, null /* Our null value is an int, so we convert */, fieldType().boost());
    if (valueAndBoost.value() == null && fieldType().nullValue() == null) {
        return;
    }
    if (fieldType().indexOptions() != NONE || fieldType().stored() || fieldType().hasDocValues()) {
        int count;
        if (valueAndBoost.value() == null) {
            count = fieldType().nullValue();
        } else {
            count = countPositions(analyzer, simpleName(), valueAndBoost.value());
        }
        addIntegerFields(context, fields, count, valueAndBoost.boost());
    }
}
Example 9: parse
import org.apache.lucene.document.Field; // import the package/class this example depends on
/**
 * Parse using the provided {@link ParseContext} and return a mapping
 * update if dynamic mappings modified the mappings, or {@code null} if
 * the mappings were not modified.
 */
public Mapper parse(ParseContext context) throws IOException {
    final List<IndexableField> fields = new ArrayList<>(2);
    try {
        parseCreateField(context, fields);
        for (IndexableField field : fields) {
            if (!customBoost()
                    // don't set boosts, e.g. on doc-values fields
                    && field.fieldType().indexOptions() != IndexOptions.NONE
                    && indexCreatedVersion.before(Version.V_5_0_0_alpha1)) {
                ((Field) field).setBoost(fieldType().boost());
            }
            context.doc().add(field);
        }
    } catch (Exception e) {
        throw new MapperParsingException("failed to parse [" + fieldType().name() + "]", e);
    }
    multiFields.parse(this, context);
    return null;
}
Example 10: parseCreateField
import org.apache.lucene.document.Field; // import the package/class this example depends on
@Override
protected void parseCreateField(ParseContext context, List<IndexableField> fields) throws IOException {
    if (fieldType().indexOptions() == IndexOptions.NONE && !fieldType().stored() && !fieldType().hasDocValues()) {
        return;
    }
    Boolean value = context.parseExternalValue(Boolean.class);
    if (value == null) {
        XContentParser.Token token = context.parser().currentToken();
        if (token == XContentParser.Token.VALUE_NULL) {
            if (fieldType().nullValue() != null) {
                value = fieldType().nullValue();
            }
        } else {
            if (indexCreatedVersion.onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
                value = context.parser().booleanValue();
            } else {
                value = context.parser().booleanValueLenient();
                if (context.parser().isBooleanValueLenient() != context.parser().isBooleanValue()) {
                    String rawValue = context.parser().text();
                    deprecationLogger.deprecated("Expected a boolean for property [{}] but got [{}]", fieldType().name(), rawValue);
                }
            }
        }
    }
    if (value == null) {
        return;
    }
    if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
        fields.add(new Field(fieldType().name(), value ? "T" : "F", fieldType()));
    }
    if (fieldType().hasDocValues()) {
        fields.add(new SortedNumericDocValuesField(fieldType().name(), value ? 1 : 0));
    }
}
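Because this mapper indexes booleans as the single tokens "T" and "F", a match on the indexed value boils down to a plain TermQuery. A minimal sketch:
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

final class BooleanFieldQueries {
    // Matches documents whose boolean field was indexed as true ("T").
    static Query isTrue(String fieldName) {
        return new TermQuery(new Term(fieldName, "T"));
    }
}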
Example 11: testNRTSearchOnClosedWriter
import org.apache.lucene.document.Field; // import the package/class this example depends on
public void testNRTSearchOnClosedWriter() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
    DirectoryReader reader = DirectoryReader.open(indexWriter);
    for (int i = 0; i < 100; i++) {
        Document document = new Document();
        TextField field = new TextField("_id", Integer.toString(i), Field.Store.YES);
        field.setBoost(i);
        document.add(field);
        indexWriter.addDocument(document);
    }
    reader = refreshReader(reader);
    indexWriter.close();
    TermsEnum termDocs = SlowCompositeReaderWrapper.wrap(reader).terms("_id").iterator();
    termDocs.next();
}
Example 12: ParsedDocument
import org.apache.lucene.document.Field; // import the package/class this example depends on
public ParsedDocument(Field version,
                      SeqNoFieldMapper.SequenceID seqID,
                      String id,
                      String type,
                      String routing,
                      List<Document> documents,
                      BytesReference source,
                      XContentType xContentType,
                      Mapping dynamicMappingsUpdate) {
    this.version = version;
    this.seqID = seqID;
    this.id = id;
    this.type = type;
    this.uid = Uid.createUidAsBytes(type, id);
    this.routing = routing;
    this.documents = documents;
    this.source = source;
    this.dynamicMappingsUpdate = dynamicMappingsUpdate;
    this.xContentType = xContentType;
}
Example 13: getIndex
import org.apache.lucene.document.Field; // import the package/class this example depends on
private Engine.Index getIndex(final String id) {
    final String type = "test";
    final ParseContext.Document document = new ParseContext.Document();
    document.add(new TextField("test", "test", Field.Store.YES));
    final Field uidField = new Field("_uid", Uid.createUid(type, id), UidFieldMapper.Defaults.FIELD_TYPE);
    final Field versionField = new NumericDocValuesField("_version", Versions.MATCH_ANY);
    final SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID();
    document.add(uidField);
    document.add(versionField);
    document.add(seqID.seqNo);
    document.add(seqID.seqNoDocValue);
    document.add(seqID.primaryTerm);
    final BytesReference source = new BytesArray(new byte[] { 1 });
    final ParsedDocument doc =
        new ParsedDocument(versionField, seqID, id, type, null, Arrays.asList(document), source, XContentType.JSON, null);
    return new Engine.Index(new Term("_uid", doc.uid()), doc);
}
Example 14: addFtsStatusDoc
import org.apache.lucene.document.Field; // import the package/class this example depends on
private void addFtsStatusDoc(List<Document> docs, FTSStatus ftsStatus, NodeRef nodeRef,
        NodeRef.Status nodeStatus)
{
    // If we are being called during FTS failover, then don't bother generating a new doc
    if (ftsStatus == FTSStatus.Clean)
    {
        return;
    }
    Document doc = new Document();
    doc.add(new Field("ID", GUID.generate(), Field.Store.YES, Field.Index.NO_NORMS, Field.TermVector.NO));
    doc.add(new Field("FTSREF", nodeRef.toString(), Field.Store.YES, Field.Index.NO_NORMS, Field.TermVector.NO));
    doc.add(new Field("TX", nodeStatus == null ? AlfrescoTransactionSupport.getTransactionId() : nodeStatus
            .getChangeTxnId(), Field.Store.YES, Field.Index.NO_NORMS, Field.TermVector.NO));
    doc.add(new Field("FTSSTATUS", ftsStatus.name(), Field.Store.NO, Field.Index.NO_NORMS, Field.TermVector.NO));
    docs.add(doc);
}
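The Field.Store/Field.Index/Field.TermVector constructor used here belongs to the old Lucene 2.x/3.x API. In Lucene 4 and later, the same intent (stored, indexed as a single untokenized token, no norms, no term vectors) is usually expressed with StringField or a custom FieldType. A hedged sketch of such an equivalent; the class and method names are placeholders, not part of the original Alfresco code.
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.IndexOptions;

final class FtsStatusFields {
    // Stored, indexed as one untokenized token, norms and term vectors disabled
    // (roughly Field.Store.YES + Field.Index.NO_NORMS + Field.TermVector.NO).
    private static final FieldType KEYWORD_STORED = new FieldType();
    static {
        KEYWORD_STORED.setStored(true);
        KEYWORD_STORED.setTokenized(false);
        KEYWORD_STORED.setOmitNorms(true);
        KEYWORD_STORED.setIndexOptions(IndexOptions.DOCS);
        KEYWORD_STORED.setStoreTermVectors(false);
        KEYWORD_STORED.freeze();
    }

    static void addIdField(Document doc, String id) {
        doc.add(new Field("ID", id, KEYWORD_STORED));
    }
}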
Example 15: parse
import org.apache.lucene.document.Field; // import the package/class this example depends on
/**
 * Parse using the provided {@link ParseContext} and return a mapping
 * update if dynamic mappings modified the mappings, or {@code null} if
 * the mappings were not modified.
 */
public Mapper parse(ParseContext context) throws IOException {
    final List<Field> fields = new ArrayList<>(2);
    try {
        parseCreateField(context, fields);
        for (Field field : fields) {
            if (!customBoost()) {
                field.setBoost(fieldType().boost());
            }
            context.doc().add(field);
        }
    } catch (Exception e) {
        throw new MapperParsingException("failed to parse [" + fieldType().names().fullName() + "]", e);
    }
    multiFields.parse(this, context);
    return null;
}