本文整理汇总了Java中org.apache.lucene.document.Document.getFields方法的典型用法代码示例。如果您正苦于以下问题:Java Document.getFields方法的具体用法?Java Document.getFields怎么用?Java Document.getFields使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.lucene.document.Document
的用法示例。
在下文中一共展示了Document.getFields方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: verifyIndexedFieldsForProduct
import org.apache.lucene.document.Document; //导入方法依赖的package包/类
/**
 * Runs the {@code ProductClassBridge} against the product identified by
 * {@code productKey} and checks that every expected field was indexed with
 * the expected value.
 */
private void verifyIndexedFieldsForProduct(long productKey) {
    ProductClassBridge classBridge = new ProductClassBridge();
    Document luceneDoc = new Document();
    Product persistedProduct = mgr.find(Product.class, productKey);
    classBridge.set("name", persistedProduct, luceneDoc, mockLuceneOptions());
    Assert.assertNotNull("Indexed fields expected", luceneDoc.getFields());

    // Snapshot every indexed field as a name -> value pair.
    Properties indexedValues = new Properties();
    for (Object candidate : luceneDoc.getFields()) {
        Assert.assertTrue("Field is not actually a field object",
                candidate instanceof Field);
        Field indexedField = (Field) candidate;
        Assert.assertNotNull("Field has no name", indexedField.name());
        Assert.assertNotNull("Field " + indexedField.name() + " has no value",
                indexedField.stringValue());
        indexedValues.put(indexedField.name(), indexedField.stringValue());
    }

    // Every expected field must be present with exactly the expected value.
    for (Object key : expectedFields.keySet()) {
        Assert.assertTrue("Field " + key + " expected",
                indexedValues.containsKey(key));
        Assert.assertEquals("Wrong value for field " + key,
                expectedFields.get(key), indexedValues.get(key));
    }
}
示例2: getToken
import org.apache.lucene.document.Document; //导入方法依赖的package包/类
/**
 * Returns the token at the given corpus-wide index, loading it lazily from
 * the token index on first access and caching the result in
 * {@code cachedTokens}.
 */
@Override
public Token getToken(int index) {
    Token ret = cachedTokens.get(index);
    if (ret == null) {
        ret = new Token();
        try {
            // Lucene doc id appears to map one-to-one to the token index here.
            Document doc = tokenSearcher.doc(index);
            for (IndexableField f : doc.getFields())
                // Fields without the reserved "GGS:" prefix are user-level token features.
                if (!f.name().startsWith("GGS:"))
                    ret.getFeatures().put(f.name(), f.stringValue());
                else if (f.name().equals("GGS:SpanAnnotation"))
                    // Numeric field value is the doc id of the owning span annotation.
                    ret.parentAnnotations.add(getAnnotation(f.numericValue().intValue()));
                else if (f.name().equals("GGS:Sentence"))
                    ret.parentSentence = getSentence(f.numericValue().intValue());
            // NOTE(review): assumes every token document carries a GGS:Sentence
            // field — if it is absent, parentSentence stays null and this line
            // throws a NullPointerException. Confirm the indexing invariant.
            ret.indexInSentence = index - ret.parentSentence.getFirstTokenIndexInCorpus();
        } catch (IOException e) {
            // NOTE(review): errors are only printed; a partially-built Token is
            // still cached below. Verify this best-effort behavior is intended.
            e.printStackTrace();
        }
        cachedTokens.put(index, ret);
    }
    return ret;
}
示例3: getAnnotation
import org.apache.lucene.document.Document; //导入方法依赖的package包/类
/**
 * Returns the span annotation stored at index {@code i}, materialising it
 * lazily from the annotation index and caching it in
 * {@code cachedAnnotations}.
 */
SpanAnnotation getAnnotation(int i) {
    SpanAnnotation ret = cachedAnnotations.get(i);
    if (ret == null) {
        ret = new SpanAnnotation();
        try {
            Document doc = annotationSearcher.doc(i);
            // Resolve the owning sentence by following the annotation's start
            // token back into the token index and reading its GGS:Sentence id.
            // NOTE(review): assumes GGS:StartTokenIndex exists on the annotation
            // doc and GGS:Sentence exists on the token doc — a missing field
            // would throw a NullPointerException here. Confirm the invariant.
            ret.setSentence(this.getSentence(tokenSearcher.doc(doc.getField("GGS:StartTokenIndex").numericValue().intValue()).getField("GGS:Sentence").numericValue().intValue()));
            ret.setName(doc.get("GGS:Name"));
            for (IndexableField f : doc.getFields())
                // Fields without the reserved "GGS:" prefix are user-level features.
                if (!f.name().startsWith("GGS:"))
                    ret.getFeatures().put(f.name(), f.stringValue());
                else if (f.name().equals("GGS:StartTokenIndex"))
                    ret.setStartTokenIndex(f.numericValue().intValue());
                else if (f.name().equals("GGS:EndTokenIndex"))
                    ret.setEndTokenIndex(f.numericValue().intValue());
        } catch (IOException e) {
            // NOTE(review): errors are only printed; a partially-built
            // annotation is still cached below.
            e.printStackTrace();
        }
        cachedAnnotations.put(i, ret);
    }
    return ret;
}
示例4: retrieveTerms
import org.apache.lucene.document.Document; //导入方法依赖的package包/类
/**
 * Collects candidate terms for a more-like-this query from a single document.
 * Prefers the stored term vector per field; when none exists, falls back to
 * re-analyzing the stored field values.
 *
 * @param docNum the id of the lucene document from which to find terms
 */
private PriorityQueue<ScoreTerm> retrieveTerms(int docNum) throws IOException {
    Map<String, Int> termFreqMap = new HashMap<>();
    for (String fieldName : fieldNames) {
        final Fields allVectors = ir.getTermVectors(docNum);
        final Terms fieldVector = (allVectors == null) ? null : allVectors.terms(fieldName);
        if (fieldVector != null) {
            // Term vector is present: frequencies can be read directly.
            addTermFrequencies(termFreqMap, fieldVector, fieldName);
        } else {
            // Field does not store term vector info — re-analyze stored values.
            Document stored = ir.document(docNum);
            for (IndexableField storedField : stored.getFields(fieldName)) {
                final String value = storedField.stringValue();
                if (value != null) {
                    addTermFrequencies(new FastStringReader(value), termFreqMap, fieldName);
                }
            }
        }
    }
    return createQueue(termFreqMap);
}
示例5: addFreqs
import org.apache.lucene.document.Document; //导入方法依赖的package包/类
/**
 * Folds the terms of the stored "field" values of {@code doc} into the
 * reference frequency table: docFreq is bumped at most once per document per
 * term, totalTermFreq once per occurrence.
 */
private void addFreqs(Document doc, Map<String, FreqHolder> reference) {
    // Terms whose docFreq has already been incremented for this document.
    Set<String> seenInThisDoc = new HashSet<>();
    for (IndexableField storedField : doc.getFields("field")) {
        String term = storedField.stringValue();
        // NOTE(review): assumes every term already has an entry in 'reference';
        // a missing entry would throw a NullPointerException. Verify callers.
        FreqHolder holder = reference.get(term);
        if (seenInThisDoc.add(term)) {
            holder.docFreq++;
        }
        holder.totalTermFreq++;
    }
}
示例6: get
import org.apache.lucene.document.Document; //导入方法依赖的package包/类
/**
 * Loads document {@code n} through the underlying read-only reader and
 * returns a detached copy of its field list, safe to cache independently of
 * the reader's own document instance.
 */
public List<Field> get(int n, FieldSelector fieldSelector) throws IOException
{
    Document document = ReferenceCountingReadOnlyIndexReader.super.document(n, fieldSelector);
    @SuppressWarnings("unchecked")
    List<Field> fields = (List<Field>) document.getFields();
    // Snapshot into a fresh list so later reader reuse cannot mutate it.
    return new ArrayList<Field>(fields);
}
开发者ID:Alfresco,项目名称:alfresco-repository,代码行数:9,代码来源:ReferenceCountingReadOnlyIndexReaderFactory.java
示例7: getPathLinkId
import org.apache.lucene.document.Document; //导入方法依赖的package包/类
/**
 * Returns the value of the last "ID" field stored on document {@code n}, or
 * {@code null} when the document carries no "ID" field at all.
 *
 * @param n the lucene document number to read
 * @return the last ID field value, or {@code null} if absent
 * @throws IOException if the document cannot be loaded
 */
public String getPathLinkId(int n) throws IOException
{
    Document document = document(n, new SingleFieldSelector("ID", true));
    Field[] fields = document.getFields("ID");
    // Guard against documents with no ID field: the previous code indexed
    // fields[-1] and threw ArrayIndexOutOfBoundsException in that case
    // (Document.getFields returns an empty array, never null entries).
    if (fields.length == 0)
    {
        return null;
    }
    Field field = fields[fields.length - 1];
    return (field == null) ? null : field.stringValue();
}
开发者ID:Alfresco,项目名称:alfresco-repository,代码行数:8,代码来源:ReferenceCountingReadOnlyIndexReaderFactory.java
示例8: detectNodeChanges
import org.apache.lucene.document.Document; //导入方法依赖的package包/类
/**
 * Compares the parent associations recorded in the Lucene index for
 * {@code nodeRef} against the current database state and classifies the node
 * into the supplied output collections: parents added/removed since indexing,
 * nodes newly created, or nodes updated in place.
 */
public void detectNodeChanges(NodeRef nodeRef, SearchService searcher,
        Collection<ChildAssociationRef> addedParents, Collection<ChildAssociationRef> deletedParents,
        Collection<ChildAssociationRef> createdNodes, Collection<NodeRef> updatedNodes) throws LuceneIndexException
{
    boolean nodeExisted = false;
    boolean relationshipsChanged = false;
    ResultSet results = null;
    SearchParameters sp = new SearchParameters();
    sp.setLanguage(SearchService.LANGUAGE_LUCENE);
    sp.addStore(nodeRef.getStoreRef());
    try
    {
        // Look up the node's existing index document by its escaped ID.
        sp.setQuery("ID:" + SearchLanguageConversion.escapeLuceneQuery(nodeRef.toString()));
        results = searcher.query(sp);
        for (ResultSetRow row : results)
        {
            nodeExisted = true;
            Document document = ((LuceneResultSetRow) row).getDocument();
            Field qname = document.getField("QNAME");
            // Documents without a QNAME carry no parent info to compare.
            if (qname == null)
            {
                continue;
            }
            // Build the set of parent associations as the database sees them now.
            Collection<Pair<ChildAssociationRef, QName>> allParents = getAllParents(nodeRef, nodeService.getProperties(nodeRef));
            Set<ChildAssociationRef> dbParents = new HashSet<ChildAssociationRef>(allParents.size() * 2);
            for (Pair<ChildAssociationRef, QName> pair : allParents)
            {
                ChildAssociationRef qNameRef = tenantService.getName(pair.getFirst());
                if ((qNameRef != null) && (qNameRef.getParentRef() != null) && (qNameRef.getQName() != null))
                {
                    // Normalised to ASSOC_CHILDREN so set membership compares
                    // only parent/qname/child, not the original assoc type.
                    dbParents.add(new ChildAssociationRef(ContentModel.ASSOC_CHILDREN, qNameRef.getParentRef(), qNameRef.getQName(), qNameRef.getChildRef()));
                }
            }
            // PARENT fields and the ";/"-separated QNAME values are positionally
            // aligned: parents[i] pairs with qnames[i].
            Field[] parents = document.getFields("PARENT");
            String[] qnames = qname.stringValue().split(";/");
            Set<ChildAssociationRef> addedParentsSet = new HashSet<ChildAssociationRef>(dbParents);
            for (int i=0; i<Math.min(parents.length, qnames.length); i++)
            {
                QName parentQname = QName.createQName(qnames[i]);
                // Decode ISO9075-encoded local names before comparing.
                parentQname = QName.createQName(parentQname.getNamespaceURI(), ISO9075.decode(parentQname.getLocalName()));
                NodeRef parentRef = new NodeRef(parents[i].stringValue());
                ChildAssociationRef indexedParent = new ChildAssociationRef(ContentModel.ASSOC_CHILDREN, parentRef, parentQname, nodeRef);
                // Indexed parent still in the DB set -> unchanged (remove it);
                // otherwise it was deleted since the document was indexed.
                if (!addedParentsSet.remove(indexedParent))
                {
                    deletedParents.add(indexedParent);
                    relationshipsChanged = true;
                }
            }
            // Whatever survives in addedParentsSet exists in the DB but not in
            // the index: these are newly added parents.
            if (addedParents.addAll(addedParentsSet))
            {
                relationshipsChanged = true;
            }
            // Only the first matching result row is examined.
            break;
        }
        if (!nodeExisted)
        {
            // No index document: treat as a freshly created node.
            createdNodes.add(nodeService.getPrimaryParent(nodeRef));
        }
        else if (!relationshipsChanged)
        {
            // Node exists and its parents are unchanged: plain content update.
            updatedNodes.add(nodeRef);
        }
    }
    finally
    {
        if (results != null) { results.close(); }
    }
}
示例9: retrieveTerms
import org.apache.lucene.document.Document; //导入方法依赖的package包/类
/**
 * Find words for a more-like-this query former, grouped per field.
 *
 * @param docNum the id of the lucene document from which to find terms
 * @param fields the list of fields of the lucene document from which to extract terms
 * @param fieldToTermFreqMap data structure to populate with term frequencies; may be null
 * @return the (possibly freshly created) field-to-term-frequency map
 */
public Map<String, Map<String, Flt>> retrieveTerms(int docNum, String[] fields, Map<String, Map<String, Flt>> fieldToTermFreqMap) throws IOException {
    if (fieldToTermFreqMap == null) {
        fieldToTermFreqMap = new HashMap<String, Map<String, Flt>>();
    }
    // Nothing requested: hand back whatever we already have.
    if (fields == null || fields.length == 0) {
        return fieldToTermFreqMap;
    }
    final Fields vectors = ir.getTermVectors(docNum);
    final Document document = ir.document(docNum);
    for (String fieldName : fields) {
        // Reuse the per-field frequency map when present, else register a new one.
        Map<String, Flt> termFreqMap;
        if (fieldToTermFreqMap.containsKey(fieldName)) {
            termFreqMap = fieldToTermFreqMap.get(fieldName);
        } else {
            termFreqMap = new HashMap<String, Flt>();
            fieldToTermFreqMap.put(fieldName, termFreqMap);
        }
        final Terms vector = (vectors == null) ? null : vectors.terms(fieldName);
        // Fall back to the stored field values when no term vector exists, or
        // when the field carries payloads that must be read from the field
        // reader even though a term vector is available.
        if (vector == null || isPayloadField(fieldName)) {
            for (IndexableField storedField : document.getFields(fieldName)) {
                final String stringValue = storedField.stringValue();
                if (stringValue != null) {
                    addTermWeights(new StringReader(stringValue), termFreqMap, fieldName);
                }
            }
        } else {
            addTermWeights(termFreqMap, vector);
        }
    }
    return fieldToTermFreqMap;
}