本文整理汇总了Java中org.apache.lucene.index.Term类的典型用法代码示例。如果您正苦于以下问题:Java Term类的具体用法?Java Term怎么用?Java Term使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
Term类属于org.apache.lucene.index包,在下文中一共展示了Term类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: indexDoc
import org.apache.lucene.index.Term; //导入依赖的package包/类
/**
 * Indexes a single file: stores its path, indexes its last-modified time as a
 * point value, and tokenizes its contents as UTF-8 text. In CREATE mode the
 * document is simply added; otherwise any existing document with the same
 * "path" term is replaced.
 */
static void indexDoc(IndexWriter writer, Path file, long lastModified) throws IOException {
    try (InputStream stream = Files.newInputStream(file)) {
        Document doc = new Document();
        // "path" is stored verbatim (StringField: not tokenized) so it can be
        // used both for display and as the update key below.
        doc.add(new StringField("path", file.toString(), Field.Store.YES));
        doc.add(new LongPoint("modified", lastModified));
        BufferedReader contents =
                new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8));
        doc.add(new TextField("contents", contents));
        boolean creatingFreshIndex = writer.getConfig().getOpenMode() == OpenMode.CREATE;
        if (creatingFreshIndex) {
            System.out.println("adding " + file);
            writer.addDocument(doc);
        } else {
            // updateDocument = atomic delete-by-term + add.
            System.out.println("updating " + file);
            writer.updateDocument(new Term("path", file.toString()), doc);
        }
    }
}
示例2: testSimpleNumericOps
import org.apache.lucene.index.Term; //导入依赖的package包/类
/**
 * Indexes a document with a stored legacy int field and verifies it can be
 * found both by its "_id" term and by the prefix-coded numeric term.
 *
 * Fix: the original closed only the IndexWriter and leaked the IndexReader
 * and the Directory; all three are now managed with try-with-resources
 * (closed in reverse order: reader, then writer, then directory).
 */
public void testSimpleNumericOps() throws Exception {
    try (Directory dir = new RAMDirectory();
         IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER))) {
        Document document = new Document();
        document.add(new TextField("_id", "1", Field.Store.YES));
        document.add(new LegacyIntField("test", 2, LegacyIntField.TYPE_STORED));
        indexWriter.addDocument(document);
        try (IndexReader reader = DirectoryReader.open(indexWriter)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            // Lookup by id term.
            TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
            Document doc = searcher.doc(topDocs.scoreDocs[0].doc);
            IndexableField f = doc.getField("test");
            assertThat(f.stringValue(), equalTo("2"));
            // Lookup by the prefix-coded numeric term for value 2 at shift 0.
            BytesRefBuilder bytes = new BytesRefBuilder();
            LegacyNumericUtils.intToPrefixCoded(2, 0, bytes);
            topDocs = searcher.search(new TermQuery(new Term("test", bytes.get())), 1);
            doc = searcher.doc(topDocs.scoreDocs[0].doc);
            f = doc.getField("test");
            assertThat(f.stringValue(), equalTo("2"));
        }
    }
}
示例3: testVectorHighlighter
import org.apache.lucene.index.Term; //导入依赖的package包/类
/**
 * Indexes one document with term vectors (positions + offsets) enabled and
 * verifies FastVectorHighlighter produces the expected highlighted fragment.
 *
 * Fix: the original never closed the IndexWriter, IndexReader, or Directory;
 * all are now managed with try-with-resources.
 */
public void testVectorHighlighter() throws Exception {
    try (Directory dir = new RAMDirectory();
         IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER))) {
        Document document = new Document();
        document.add(new TextField("_id", "1", Field.Store.YES));
        // FastVectorHighlighter requires vectors with positions and offsets.
        FieldType vectorsType = new FieldType(TextField.TYPE_STORED);
        vectorsType.setStoreTermVectors(true);
        vectorsType.setStoreTermVectorPositions(true);
        vectorsType.setStoreTermVectorOffsets(true);
        document.add(new Field("content", "the big bad dog", vectorsType));
        indexWriter.addDocument(document);
        try (IndexReader reader = DirectoryReader.open(indexWriter)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
            assertThat(topDocs.totalHits, equalTo(1));
            FastVectorHighlighter highlighter = new FastVectorHighlighter();
            String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))),
                reader, topDocs.scoreDocs[0].doc, "content", 30);
            assertThat(fragment, notNullValue());
            assertThat(fragment, equalTo("the big <b>bad</b> dog"));
        }
    }
}
示例4: convert
import org.apache.lucene.index.Term; //导入依赖的package包/类
/**
 * Converts a term to its text if it belongs to the scanned field and matches
 * the configured prefix; throws {@code STOP} once iteration leaves the field.
 *
 * Fix: the field check used reference inequality ({@code !=}) on Strings,
 * which is only correct while both sides are interned — a property newer
 * Lucene versions do not guarantee for {@code Term.field()}. Replaced with
 * {@code equals}, which is correct in both cases.
 *
 * @param currentTerm the term currently visited by the enumeration
 * @return the (possibly truncated) term text, or {@code null} if it does not
 *         match the prefix filter
 * @throws Stop once a term from a different field is reached
 */
@Override
public String convert(Term currentTerm) throws Stop {
    if (!fieldName.equals(currentTerm.field())) {
        // Terms are enumerated per field; a different field means we are done.
        throw STOP;
    }
    String currentText = currentTerm.text();
    if (all || currentText.startsWith(value)) {
        if (directOnly) {
            // Keep only the direct child segment: cut at the first '.'
            // occurring after the matched prefix.
            int index = currentText.indexOf('.', value.length()); //NOI18N
            if (index > 0) {
                currentText = currentText.substring(0, index);
            }
        }
        return currentText;
    }
    return null;
}
示例5: apply
import org.apache.lucene.index.Term; //导入依赖的package包/类
/**
 * Builds a case-insensitive regex query for the ~* operator. Returns
 * {@code null} when the input cannot be normalized into a (reference,
 * literal) pair; rejects non-string patterns.
 */
@Override
public Query apply(Function input, Context context) throws IOException {
    Tuple<Reference, Literal> parts = prepare(input);
    if (parts == null) {
        return null;
    }
    String columnName = parts.v1().info().ident().columnIdent().fqn();
    Object pattern = parts.v2().value();
    // Only string (BytesRef) patterns are supported for ~*.
    if (!(pattern instanceof BytesRef)) {
        throw new IllegalArgumentException("Can only use ~* with patterns of type string");
    }
    RegexQuery regexQuery = new RegexQuery(new Term(columnName, BytesRefs.toBytesRef(pattern)));
    // java.util.regex semantics, case-insensitive including Unicode case folding.
    regexQuery.setRegexImplementation(new JavaUtilRegexCapabilities(
        JavaUtilRegexCapabilities.FLAG_CASE_INSENSITIVE |
        JavaUtilRegexCapabilities.FLAG_UNICODE_CASE));
    return regexQuery;
}
示例6: deleteDocumentByTypeAndId
import org.apache.lucene.index.Term; //导入依赖的package包/类
/**
 * Deletes index documents for one file id, filtered by feature type.
 * For GENE the delete keeps bookmarks and variations (everything else for
 * that file goes); for any other type only documents of exactly that type
 * are removed.
 */
private void deleteDocumentByTypeAndId(FeatureType type, Long id, IndexWriter writer) throws IOException {
    BooleanQuery.Builder deleteQuery = new BooleanQuery.Builder();
    // Always constrain the delete to the given file id.
    deleteQuery.add(
            new TermQuery(new Term(FeatureIndexFields.FILE_ID.getFieldName(), id.toString())),
            BooleanClause.Occur.MUST);
    if (type == FeatureType.GENE) {
        // Delete all of the file's documents except bookmarks and variations.
        deleteQuery.add(new TermQuery(new Term(FeatureIndexFields.FEATURE_TYPE.getFieldName(),
                FeatureType.BOOKMARK.getFileValue())), BooleanClause.Occur.MUST_NOT);
        deleteQuery.add(new TermQuery(new Term(FeatureIndexFields.FEATURE_TYPE.getFieldName(),
                FeatureType.VARIATION.getFileValue())), BooleanClause.Occur.MUST_NOT);
    } else {
        // Delete only documents of exactly this feature type.
        deleteQuery.add(new TermQuery(new Term(FeatureIndexFields.FEATURE_TYPE.getFieldName(),
                type.getFileValue())), BooleanClause.Occur.MUST);
    }
    writer.deleteDocuments(deleteQuery.build());
}
示例7: testToQueryPhraseQuery
import org.apache.lucene.index.Term; //导入依赖的package包/类
/**
 * Verifies that a quoted query string with a phrase slop parses into a
 * DisjunctionMaxQuery wrapping a single two-term PhraseQuery that carries
 * the configured slop.
 */
public void testToQueryPhraseQuery() throws IOException {
    assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0);
    Query parsed = queryStringQuery("\"term1 term2\"")
        .defaultField(STRING_FIELD_NAME)
        .phraseSlop(3)
        .toQuery(createShardContext());
    assertThat(parsed, instanceOf(DisjunctionMaxQuery.class));
    DisjunctionMaxQuery dismax = (DisjunctionMaxQuery) parsed;
    assertThat(dismax.getDisjuncts().size(), equalTo(1));
    Query disjunct = dismax.getDisjuncts().get(0);
    assertThat(disjunct, instanceOf(PhraseQuery.class));
    PhraseQuery phrase = (PhraseQuery) disjunct;
    Term[] phraseTerms = phrase.getTerms();
    assertThat(phraseTerms.length, equalTo(2));
    assertThat(phraseTerms[0], equalTo(new Term(STRING_FIELD_NAME, "term1")));
    assertThat(phraseTerms[1], equalTo(new Term(STRING_FIELD_NAME, "term2")));
    assertThat(phrase.getSlop(), equalTo(3));
}
示例8: readFrom
import org.apache.lucene.index.Term; //导入依赖的package包/类
/**
 * Deserializes this object from the stream. The reads below must mirror the
 * corresponding writeTo exactly — field order is the wire contract, so do
 * not reorder these statements.
 */
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    id = in.readLong();
    // Variable-length int: 0 means "no terms", in which case the shared
    // EMPTY_TERMS constant is reused instead of allocating a new array.
    int termsSize = in.readVInt();
    if (termsSize == 0) {
        terms = EMPTY_TERMS;
    } else {
        terms = new Term[termsSize];
        for (int i = 0; i < terms.length; i++) {
            // Each term is serialized as (field name, term bytes).
            terms[i] = new Term(in.readString(), in.readBytesRef());
        }
    }
    // Term statistics are keyed off the terms array just read.
    this.termStatistics = readTermStats(in, terms);
    readFieldStats(in, fieldStatistics);
    maxDoc = in.readVInt();
}
示例9: getUnderLock
import org.apache.lucene.index.Term; //导入依赖的package包/类
/**
 * Returns the live version (add or delete) for this uid, checking the
 * current map first, then the old map, and finally the tombstones.
 * Returns null when the uid is unknown in all three.
 */
VersionValue getUnderLock(final Term uid) {
    // Snapshot the maps reference once so current/old stay consistent even
    // if maps is swapped concurrently.
    Maps snapshot = maps;
    VersionValue live = snapshot.current.get(uid.bytes());
    if (live == null) {
        live = snapshot.old.get(uid.bytes());
    }
    if (live != null) {
        return live;
    }
    // Not in either live map: fall back to deletes (tombstones).
    return tombstones.get(uid.bytes());
}
示例10: constructDefaultLocaleHandlingQuery
import org.apache.lucene.index.Term; //导入依赖的package包/类
/**
 * Builds the fallback-locale query: matches products that define the default
 * locale but NOT the requested locale (i.e. whose text falls back to the
 * default locale), combined with a wildcard substring match of the search
 * phrase against the default-locale field.
 *
 * Fix: {@code toLowerCase()} used the JVM default locale, so the generated
 * wildcard term depended on the server's locale (e.g. the Turkish dotless-i
 * problem: "I" -> "ı"). Lower-casing is now pinned to {@code Locale.ROOT}
 * for locale-independent, reproducible query terms.
 */
private static BooleanQuery constructDefaultLocaleHandlingQuery(
        String fieldName, String locale, String defaultLocale,
        String searchPhrase) {
    // Clause 1: default locale defined AND requested locale NOT defined.
    BooleanQuery bq1 = new BooleanQuery();
    TermQuery tq1 = new TermQuery(
            new Term(fieldName + ProductClassBridge.DEFINED_LOCALES_SUFFIX,
                    defaultLocale));
    TermQuery tq2 = new TermQuery(new Term(
            fieldName + ProductClassBridge.DEFINED_LOCALES_SUFFIX, locale));
    bq1.add(tq1, Occur.MUST);
    bq1.add(tq2, Occur.MUST_NOT);
    // Clause 2: substring match in the default-locale variant of the field.
    BooleanQuery bq2 = new BooleanQuery();
    WildcardQuery wq1 = new WildcardQuery(
            new Term(fieldName + defaultLocale,
                    "*" + searchPhrase.toLowerCase(java.util.Locale.ROOT) + "*"));
    bq2.add(wq1, Occur.SHOULD);
    // Both clauses must hold.
    BooleanQuery finalQuery = new BooleanQuery();
    finalQuery.add(bq1, Occur.MUST);
    finalQuery.add(bq2, Occur.MUST);
    return finalQuery;
}
示例11: getDeletedDocsNotFromAnyDirectoryQuery
import org.apache.lucene.index.Term; //导入依赖的package包/类
/**
 * Gets a Query that will return all OAI status deleted records that did not come from any of the existing
 * file directories configured in the RepositoryManager.
 *
 * @return A Query for deleted documents not from any directory
 * @see #getDeletedDocsNotFromAnyDirectory
 */
public final Query getDeletedDocsNotFromAnyDirectoryQuery() {
    BooleanQuery query = new BooleanQuery();
    // Require the deleted flag ...
    query.add(new TermQuery(new Term("deleted", "true")), BooleanClause.Occur.MUST);
    // ... and exclude every directory currently configured via set infos.
    List setInfos = getSetInfos();
    if (setInfos != null) {
        for (Object element : setInfos) {
            SetInfo setInfo = (SetInfo) element;
            query.add(new TermQuery(new Term("docdir", setInfo.getDirectory().trim())),
                    BooleanClause.Occur.MUST_NOT);
        }
    }
    return query;
}
示例12: checkIndexContent
import org.apache.lucene.index.Term; //导入依赖的package包/类
/**
 * Asserts that exactly {@code expectedAmount} documents match
 * {@code fieldContent} in FIELDNAME and, when hits are expected, that the
 * top hit carries the expected element id, field content, and index type.
 */
private void checkIndexContent(final String elementId,
        final String fieldContent, final int expectedAmount) throws IOException {
    final IndexReader reader = IndexManager.getInstance().getIndex().getIndexReader();
    final IndexSearcher searcher = new IndexSearcher(reader);
    // Request a few more hits than expected so surplus matches are detected.
    final TopDocs hits =
            searcher.search(new TermQuery(new Term(FIELDNAME, fieldContent)), expectedAmount + 10);
    assertNotNull(hits);
    assertTrue(hits.totalHits == expectedAmount);
    if (expectedAmount <= 0) {
        return;
    }
    // Spot-check the stored fields of the best-scoring hit.
    final ScoreDoc best = hits.scoreDocs[0];
    assertNotNull(best);
    final Document doc = reader.document(best.doc);
    assertNotNull(doc);
    assertEquals(fieldContent, doc.get(FIELDNAME));
    assertEquals(elementId, doc.get(IIndexElement.FIELD_ID));
    assertEquals(INDEX_TYPE, doc.get(IIndexElement.FIELD_INDEX_TYPE));
}
示例13: testDisabledFieldNamesField
import org.apache.lucene.index.Term; //导入依赖的package包/类
/**
 * Verifies that with the _field_names meta field disabled in the mapping,
 * an existence-style query string ("foo:*") is rewritten to a plain
 * wildcard query on the field itself.
 */
public void testDisabledFieldNamesField() throws Exception {
    QueryShardContext context = createShardContext();
    // Map "foo" as text with _field_names disabled, so the builder cannot
    // use the _field_names field to answer "foo:*".
    context.getMapperService().merge("new_type",
        new CompressedXContent(
            PutMappingRequest.buildFromSimplifiedDef("new_type",
                "foo", "type=text",
                "_field_names", "enabled=false").string()),
        MapperService.MergeReason.MAPPING_UPDATE, true);
    QueryStringQueryBuilder queryBuilder = new QueryStringQueryBuilder("foo:*");
    Query query = queryBuilder.toQuery(context);
    // Fallback path: a wildcard query directly on "foo".
    Query expected = new WildcardQuery(new Term("foo", "*"));
    assertThat(query, equalTo(expected));
    // Merge the mapping again with _field_names re-enabled — presumably to
    // restore the default for other tests sharing this mapper service
    // (TODO confirm against the surrounding test class).
    context.getMapperService().merge("new_type",
        new CompressedXContent(
            PutMappingRequest.buildFromSimplifiedDef("new_type",
                "foo", "type=text",
                "_field_names", "enabled=true").string()),
        MapperService.MergeReason.MAPPING_UPDATE, true);
}
示例14: doAssertLuceneQuery
import org.apache.lucene.index.Term; //导入依赖的package包/类
/**
 * Asserts that a TermQueryBuilder produced either a TermQuery (text-like
 * fields) or a PointRangeQuery (point fields), and that the produced term
 * matches what the field's mapper would generate — or the raw bytes of the
 * builder's value when no mapper exists for the field.
 */
@Override
protected void doAssertLuceneQuery(TermQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException {
    assertThat(query, either(instanceOf(TermQuery.class)).or(instanceOf(PointRangeQuery.class)));
    MappedFieldType fieldType = context.getQueryShardContext().fieldMapper(queryBuilder.fieldName());
    if (!(query instanceof TermQuery)) {
        // Point field: the whole query must equal the mapper-generated one.
        assertEquals(query, fieldType.termQuery(queryBuilder.value(), null));
        return;
    }
    Term actual = ((TermQuery) query).getTerm();
    assertThat(actual.field(), equalTo(queryBuilder.fieldName()));
    if (fieldType != null) {
        // Compare against the term the mapper itself would produce.
        Term expected = ((TermQuery) fieldType.termQuery(queryBuilder.value(), null)).getTerm();
        assertThat(actual, equalTo(expected));
    } else {
        // Unmapped field: fall back to comparing the raw value bytes.
        assertThat(actual.bytes(), equalTo(BytesRefs.toBytesRef(queryBuilder.value())));
    }
}
示例15: GetSimilarTerms
import org.apache.lucene.index.Term; //导入依赖的package包/类
/**
 * Mock implementation: for each query term, returns a model pairing the
 * term with a single similar-term tuple obtained from similarTerm(field).
 */
@Override
public SimilarTermModel[] GetSimilarTerms(String field, String[] queryTerms) {
    SimilarTermModel[] models = new SimilarTermModel[queryTerms.length];
    int position = 0;
    for (String queryTerm : queryTerms) {
        // One mock neighbour per term; similarTerm is invoked per iteration
        // exactly as in the original.
        TermWeightTuple[] neighbours = { similarTerm(field) };
        models[position++] = new SimilarTermModel(new Term(field, queryTerm), neighbours);
    }
    return models;
}
开发者ID:sebastian-hofstaetter,项目名称:ir-generalized-translation-models,代码行数:14,代码来源:SimilarityApiMock.java