This article collects typical usage examples of the Java method org.apache.lucene.document.Document.getFields. If you are unsure how Document.getFields is used in practice, how to call it, or what real-world examples look like, the curated code samples below may help. You can also explore further usage examples of the containing class, org.apache.lucene.document.Document.
The following sections show 9 code examples of the Document.getFields method, sorted by popularity by default.
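Before the examples, here is a minimal, self-contained sketch of the typical getFields() pattern. It assumes a reasonably recent Lucene (4.x or later); the field names and values are invented for illustration.
import java.util.List;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexableField;

public class GetFieldsSketch {
    public static void main(String[] args) {
        Document doc = new Document();
        doc.add(new StringField("id", "42", Field.Store.YES));
        doc.add(new TextField("tag", "lucene", Field.Store.YES));
        doc.add(new TextField("tag", "search", Field.Store.YES));

        // getFields() returns every field on the document, in insertion order,
        // including repeated values added under the same name
        List<IndexableField> all = doc.getFields();
        for (IndexableField f : all) {
            System.out.println(f.name() + " = " + f.stringValue());
        }

        // getFields(String) narrows the result to a single field name
        IndexableField[] tags = doc.getFields("tag");
        System.out.println(tags.length + " values for \"tag\"");
    }
}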
Example 1: verifyIndexedFieldsForProduct
import org.apache.lucene.document.Document; // import the package/class the method depends on
private void verifyIndexedFieldsForProduct(long productKey) {
    ProductClassBridge bridge = new ProductClassBridge();
    Document doc = new Document();
    Product product = mgr.find(Product.class, productKey);
    // let the class bridge populate the Lucene document for the product
    bridge.set("name", product, doc, mockLuceneOptions());
    Assert.assertNotNull("Indexed fields expected", doc.getFields());
    // collect every indexed field name/value pair
    Properties fields = new Properties();
    for (Object o : doc.getFields()) {
        Assert.assertTrue("Field is not actually a field object",
                o instanceof Field);
        Field field = (Field) o;
        Assert.assertNotNull("Field has no name", field.name());
        Assert.assertNotNull("Field " + field.name() + " has no value",
                field.stringValue());
        fields.put(field.name(), field.stringValue());
    }
    // compare against the expected field values prepared by the test
    for (Object key : expectedFields.keySet()) {
        Assert.assertTrue("Field " + key + " expected",
                fields.containsKey(key));
        Assert.assertEquals("Wrong value for field " + key,
                expectedFields.get(key), fields.get(key));
    }
}
Example 2: getToken
import org.apache.lucene.document.Document; // import the package/class the method depends on
@Override
public Token getToken(int index) {
    Token ret = cachedTokens.get(index);
    if (ret == null) {
        ret = new Token();
        try {
            Document doc = tokenSearcher.doc(index);
            for (IndexableField f : doc.getFields()) {
                if (!f.name().startsWith("GGS:")) {
                    // plain feature fields carry string values
                    ret.getFeatures().put(f.name(), f.stringValue());
                } else if (f.name().equals("GGS:SpanAnnotation")) {
                    // internal fields store numeric references to other documents
                    ret.parentAnnotations.add(getAnnotation(f.numericValue().intValue()));
                } else if (f.name().equals("GGS:Sentence")) {
                    ret.parentSentence = getSentence(f.numericValue().intValue());
                }
            }
            ret.indexInSentence = index - ret.parentSentence.getFirstTokenIndexInCorpus();
        } catch (IOException e) {
            e.printStackTrace();
        }
        cachedTokens.put(index, ret);
    }
    return ret;
}
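Example 2 distinguishes project-internal "GGS:"-prefixed fields (read back with numericValue()) from plain feature fields (read back with stringValue()). As a hedged sketch, a token document that behaves this way could be written roughly as follows; the writer side is an assumption, not code from the project, and only the "GGS:" field names are taken from the example.
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.document.StringField;

// Hypothetical writer-side helper: builds one token document in the shape
// the reader above expects.
static Document buildTokenDocument(int sentenceIndex, int spanAnnotationIndex) {
    Document tokenDoc = new Document();
    // plain feature fields: retrieved through stringValue()
    tokenDoc.add(new StringField("pos", "NN", Field.Store.YES));
    tokenDoc.add(new StringField("string", "example", Field.Store.YES));
    // internal references stored as ints: retrieved through numericValue().intValue()
    tokenDoc.add(new StoredField("GGS:Sentence", sentenceIndex));
    tokenDoc.add(new StoredField("GGS:SpanAnnotation", spanAnnotationIndex));
    return tokenDoc;
}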
Example 3: getAnnotation
import org.apache.lucene.document.Document; // import the package/class the method depends on
SpanAnnotation getAnnotation(int i) {
    SpanAnnotation ret = cachedAnnotations.get(i);
    if (ret == null) {
        ret = new SpanAnnotation();
        try {
            Document doc = annotationSearcher.doc(i);
            // resolve the sentence via the start token's "GGS:Sentence" reference
            int startToken = doc.getField("GGS:StartTokenIndex").numericValue().intValue();
            int sentenceIndex = tokenSearcher.doc(startToken).getField("GGS:Sentence").numericValue().intValue();
            ret.setSentence(this.getSentence(sentenceIndex));
            ret.setName(doc.get("GGS:Name"));
            for (IndexableField f : doc.getFields()) {
                if (!f.name().startsWith("GGS:")) {
                    ret.getFeatures().put(f.name(), f.stringValue());
                } else if (f.name().equals("GGS:StartTokenIndex")) {
                    ret.setStartTokenIndex(f.numericValue().intValue());
                } else if (f.name().equals("GGS:EndTokenIndex")) {
                    ret.setEndTokenIndex(f.numericValue().intValue());
                }
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
        cachedAnnotations.put(i, ret);
    }
    return ret;
}
Example 4: retrieveTerms
import org.apache.lucene.document.Document; // import the package/class the method depends on
/**
 * Find words for a more-like-this query former.
 *
 * @param docNum the id of the Lucene document from which to find terms
 */
private PriorityQueue<ScoreTerm> retrieveTerms(int docNum) throws IOException {
    Map<String, Int> termFreqMap = new HashMap<>();
    for (String fieldName : fieldNames) {
        final Fields vectors = ir.getTermVectors(docNum);
        final Terms vector;
        if (vectors != null) {
            vector = vectors.terms(fieldName);
        } else {
            vector = null;
        }
        // field does not store term vector info, so fall back to the stored values
        if (vector == null) {
            Document d = ir.document(docNum);
            IndexableField[] fields = d.getFields(fieldName);
            for (IndexableField field : fields) {
                final String stringValue = field.stringValue();
                if (stringValue != null) {
                    addTermFrequencies(new FastStringReader(stringValue), termFreqMap, fieldName);
                }
            }
        } else {
            addTermFrequencies(termFreqMap, vector, fieldName);
        }
    }
    return createQueue(termFreqMap);
}
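Which branch of Example 4 runs depends on how each field was indexed: with term vectors, retrieveTerms() reads them directly; without, it falls back to d.getFields(fieldName) and re-analyzes the stored values. Below is a minimal sketch of the two indexing setups using standard Lucene field types; the field names and content are invented for illustration.
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;

// Hypothetical indexing-side helper showing both setups.
static Document buildDocForRetrieveTerms(String content) {
    Document doc = new Document();

    // variant 1: stored text with term vectors -> the term-vector branch is taken
    FieldType withVectors = new FieldType(TextField.TYPE_STORED);
    withVectors.setStoreTermVectors(true);
    withVectors.freeze();
    doc.add(new Field("body", content, withVectors));

    // variant 2: stored text without term vectors -> retrieveTerms() falls back
    // to d.getFields("title") and re-analyzes the stored string values
    doc.add(new TextField("title", content, Field.Store.YES));

    return doc;
}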
Example 5: addFreqs
import org.apache.lucene.document.Document; // import the package/class the method depends on
private void addFreqs(Document doc, Map<String, FreqHolder> reference) {
    Set<String> addedDocFreq = new HashSet<>();
    for (IndexableField field : doc.getFields("field")) {
        String term = field.stringValue();
        FreqHolder freqHolder = reference.get(term);
        // count each term at most once per document for docFreq
        if (!addedDocFreq.contains(term)) {
            freqHolder.docFreq++;
            addedDocFreq.add(term);
        }
        // but count every occurrence for totalTermFreq
        freqHolder.totalTermFreq++;
    }
}
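A small sketch of the input this helper expects: several values added under the same field name "field", plus a reference map keyed by term. The FreqHolder shape here is assumed from how the counters are used above; the term values are invented.
import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;

// Assumed holder mirroring the two counters incremented above
static class FreqHolder {
    int docFreq;
    int totalTermFreq;
}

static void addFreqsExample() {
    // three values under the same field name; getFields("field") returns all of them
    Document doc = new Document();
    doc.add(new StringField("field", "apple", Field.Store.YES));
    doc.add(new StringField("field", "apple", Field.Store.YES));
    doc.add(new StringField("field", "pear", Field.Store.YES));

    Map<String, FreqHolder> reference = new HashMap<>();
    reference.put("apple", new FreqHolder());
    reference.put("pear", new FreqHolder());

    // addFreqs(doc, reference) would leave:
    //   apple -> docFreq = 1, totalTermFreq = 2
    //   pear  -> docFreq = 1, totalTermFreq = 1
}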
Example 6: get
import org.apache.lucene.document.Document; // import the package/class the method depends on
public List<Field> get(int n, FieldSelector fieldSelector) throws IOException
{
    Document document = ReferenceCountingReadOnlyIndexReader.super.document(n, fieldSelector);
    // FieldSelector-era Lucene API: getFields() returns the raw field list
    List<Field> fields = (List<Field>) document.getFields();
    ArrayList<Field> cacheable = new ArrayList<Field>(fields.size());
    cacheable.addAll(fields);
    return cacheable;
}
Developer: Alfresco, Project: alfresco-repository, Lines: 9, Source: ReferenceCountingReadOnlyIndexReaderFactory.java
Example 7: getPathLinkId
import org.apache.lucene.document.Document; // import the package/class the method depends on
public String getPathLinkId(int n) throws IOException
{
    Document document = document(n, new SingleFieldSelector("ID", true));
    Field[] fields = document.getFields("ID");
    // take the last "ID" value stored on the document
    Field field = fields[fields.length - 1];
    return (field == null) ? null : field.stringValue();
}
Developer: Alfresco, Project: alfresco-repository, Lines: 8, Source: ReferenceCountingReadOnlyIndexReaderFactory.java
Example 8: detectNodeChanges
import org.apache.lucene.document.Document; // import the package/class the method depends on
public void detectNodeChanges(NodeRef nodeRef, SearchService searcher,
        Collection<ChildAssociationRef> addedParents, Collection<ChildAssociationRef> deletedParents,
        Collection<ChildAssociationRef> createdNodes, Collection<NodeRef> updatedNodes) throws LuceneIndexException
{
    boolean nodeExisted = false;
    boolean relationshipsChanged = false;
    ResultSet results = null;
    SearchParameters sp = new SearchParameters();
    sp.setLanguage(SearchService.LANGUAGE_LUCENE);
    sp.addStore(nodeRef.getStoreRef());
    try
    {
        // look up the node's existing index document by ID
        sp.setQuery("ID:" + SearchLanguageConversion.escapeLuceneQuery(nodeRef.toString()));
        results = searcher.query(sp);
        for (ResultSetRow row : results)
        {
            nodeExisted = true;
            Document document = ((LuceneResultSetRow) row).getDocument();
            Field qname = document.getField("QNAME");
            if (qname == null)
            {
                continue;
            }
            // parents currently recorded in the database
            Collection<Pair<ChildAssociationRef, QName>> allParents = getAllParents(nodeRef, nodeService.getProperties(nodeRef));
            Set<ChildAssociationRef> dbParents = new HashSet<ChildAssociationRef>(allParents.size() * 2);
            for (Pair<ChildAssociationRef, QName> pair : allParents)
            {
                ChildAssociationRef qNameRef = tenantService.getName(pair.getFirst());
                if ((qNameRef != null) && (qNameRef.getParentRef() != null) && (qNameRef.getQName() != null))
                {
                    dbParents.add(new ChildAssociationRef(ContentModel.ASSOC_CHILDREN, qNameRef.getParentRef(), qNameRef.getQName(), qNameRef.getChildRef()));
                }
            }
            // parents currently recorded in the index document
            Field[] parents = document.getFields("PARENT");
            String[] qnames = qname.stringValue().split(";/");
            Set<ChildAssociationRef> addedParentsSet = new HashSet<ChildAssociationRef>(dbParents);
            for (int i = 0; i < Math.min(parents.length, qnames.length); i++)
            {
                QName parentQname = QName.createQName(qnames[i]);
                parentQname = QName.createQName(parentQname.getNamespaceURI(), ISO9075.decode(parentQname.getLocalName()));
                NodeRef parentRef = new NodeRef(parents[i].stringValue());
                ChildAssociationRef indexedParent = new ChildAssociationRef(ContentModel.ASSOC_CHILDREN, parentRef, parentQname, nodeRef);
                if (!addedParentsSet.remove(indexedParent))
                {
                    // indexed parent no longer present in the database
                    deletedParents.add(indexedParent);
                    relationshipsChanged = true;
                }
            }
            // whatever remains in the set exists in the database but not in the index
            if (addedParents.addAll(addedParentsSet))
            {
                relationshipsChanged = true;
            }
            break;
        }
        if (!nodeExisted)
        {
            createdNodes.add(nodeService.getPrimaryParent(nodeRef));
        }
        else if (!relationshipsChanged)
        {
            updatedNodes.add(nodeRef);
        }
    }
    finally
    {
        if (results != null) { results.close(); }
    }
}
Example 9: retrieveTerms
import org.apache.lucene.document.Document; // import the package/class the method depends on
/**
 * Find words for a more-like-this query former.
 *
 * @param docNum the id of the Lucene document from which to find terms
 * @param fields the list of fields of the Lucene document from which to extract terms
 * @param fieldToTermFreqMap data structure to populate with term frequencies
 */
public Map<String, Map<String, Flt>> retrieveTerms(int docNum, String[] fields, Map<String, Map<String, Flt>> fieldToTermFreqMap) throws IOException {
    if (fieldToTermFreqMap == null) {
        fieldToTermFreqMap = new HashMap<String, Map<String, Flt>>();
    }
    if (fields == null || fields.length == 0) {
        return fieldToTermFreqMap;
    }
    final Fields vectors = ir.getTermVectors(docNum);
    final Document document = ir.document(docNum);
    for (String fieldName : fields) {
        Map<String, Flt> termFreqMap = null;
        if (fieldToTermFreqMap.containsKey(fieldName)) {
            termFreqMap = fieldToTermFreqMap.get(fieldName);
        } else {
            termFreqMap = new HashMap<String, Flt>();
            fieldToTermFreqMap.put(fieldName, termFreqMap);
        }
        Terms vector = null;
        if (vectors != null) {
            vector = vectors.terms(fieldName);
        }
        // field does not store term vector info;
        // even with term vectors enabled, payloads must be extracted from the regular field reader
        if (vector == null || isPayloadField(fieldName)) {
            IndexableField[] docFields = document.getFields(fieldName);
            for (IndexableField field : docFields) {
                final String stringValue = field.stringValue();
                if (stringValue != null) {
                    addTermWeights(new StringReader(stringValue), termFreqMap, fieldName);
                }
            }
        } else {
            addTermWeights(termFreqMap, vector);
        }
    }
    return fieldToTermFreqMap;
}