

Java DocsEnum.NO_MORE_DOCS Field Code Examples

This article collects typical usage examples of the Java org.apache.lucene.index.DocsEnum.NO_MORE_DOCS field. If you are unsure what DocsEnum.NO_MORE_DOCS is or how to use it, the curated examples below should help; you can also explore the other usage examples for org.apache.lucene.index.DocsEnum.


The following presents 11 code examples of the DocsEnum.NO_MORE_DOCS field, ordered roughly by popularity.
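All eleven examples rely on the same idiom: nextDoc() (and advance()) return segment-local document ids until the enum is exhausted, at which point they return the sentinel constant DocsEnum.NO_MORE_DOCS (inherited from DocIdSetIterator), so postings are consumed with a simple loop against that constant. Here is a minimal sketch of the idiom, assuming the Lucene 4.x API; the class and the countDocsWithTerm helper are hypothetical names introduced for illustration:

import java.io.IOException;

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;

public class NoMoreDocsExample {

    /** Hypothetical helper: counts the documents that contain the given term. */
    public static int countDocsWithTerm(IndexReader reader, Term term) throws IOException {
        int count = 0;
        for (AtomicReaderContext context : reader.leaves()) {
            // termDocsEnum returns null when the term is absent from this segment
            DocsEnum docs = context.reader().termDocsEnum(term);
            if (docs == null) {
                continue;
            }
            // nextDoc() yields doc ids until the NO_MORE_DOCS sentinel
            while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
                count++;
            }
        }
        return count;
    }
}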

Example 1: getPosEnum
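This test helper scans every atomic reader for the positions enum of term t, advances it with nextDoc() until it reaches docid, and fails the test if no segment yields a positions enum for that document.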

protected DocsAndPositionsEnum getPosEnum(IndexReader r, int docid, Term t)
		throws IOException {
	List<AtomicReaderContext> leaves = r.getContext().leaves();
	for (AtomicReaderContext context : leaves) {
		AtomicReader reader = context.reader();
		DocsAndPositionsEnum termPositions = reader.termPositionsEnum(t);
		if (termPositions == null) {
			// the term does not occur in this segment
			continue;
		}
		// advance until we reach docid or exhaust this segment
		int doc;
		while ((doc = termPositions.nextDoc()) != DocsEnum.NO_MORE_DOCS
				&& doc != docid) {
		}
		if (doc != DocsEnum.NO_MORE_DOCS) {
			return termPositions;
		}
	}
	fail("Expected positions enum for doc " + docid);
	return null; // never reached
}
 
Contributor: arne-cl, Project: fangorn, Lines: 17, Source: IndexTestCase.java

Example 2: score
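This out-of-order scoring loop pushes precomputed scores to the collector through a FakeScorer, advancing with nextDocOutOfOrder() until max; the return value reports whether the doc id stream is exhausted (doc != NO_MORE_DOCS).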

@Override
public boolean score(Collector collector, int max) throws IOException {
  FakeScorer fakeScorer = new FakeScorer();
  collector.setScorer(fakeScorer);
  if (doc == -1) {
    // first call: position on the first matching document
    doc = nextDocOutOfOrder();
  }
  while (doc < max) {
    fakeScorer.doc = doc;
    fakeScorer.score = scores[ords[scoreUpto]];
    collector.collect(doc);
    doc = nextDocOutOfOrder();
  }

  // true means there are more documents to score beyond 'max'
  return doc != DocsEnum.NO_MORE_DOCS;
}
 
Contributor: europeana, Project: search, Lines: 16, Source: TermsIncludingScoreQuery.java

Example 3: lookup
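lookup resolves an (id, version) pair to a global doc id: for each segment it seeks the terms enum exactly, reads the first live posting, and treats NO_MORE_DOCS as proof that the only posting was deleted.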

/** Returns docID if found, else -1. */
public int lookup(BytesRef id, long version) throws IOException {
  for(int seg=0;seg<numSegs;seg++) {
    if (((IDVersionSegmentTermsEnum) termsEnums[seg]).seekExact(id, version)) {
      if (VERBOSE) {
        System.out.println("  found in seg=" + termsEnums[seg]);
      }
      docsEnums[seg] = termsEnums[seg].docs(liveDocs[seg], docsEnums[seg], 0);
      int docID = docsEnums[seg].nextDoc();
      if (docID != DocsEnum.NO_MORE_DOCS) {
        lastVersion = ((IDVersionSegmentTermsEnum) termsEnums[seg]).getVersion();
        return docBases[seg] + docID;
      }
      assert hasDeletions;
    }
  }

  return -1;
}
 
Contributor: europeana, Project: search, Lines: 19, Source: TestIDVersionPostingsFormat.java

Example 4: getDocumentsWithWordAsSet
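This adapter method collects into a hash set the global ids of all documents containing the given word, iterating each sub-reader's postings until NO_MORE_DOCS.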

@Override
public void getDocumentsWithWordAsSet(String word, IntOpenHashSet documents) {
    DocsEnum docs = null;
    Term term = new Term(fieldName, word);
    try {
        int baseDocId;
        for (int i = 0; i < reader.length; i++) {
            docs = reader[i].termDocsEnum(term);
            baseDocId = contexts[i].docBase;
            if (docs != null) {
                while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
                    documents.add(baseDocId + docs.docID());
                }
            }
        }
    } catch (IOException e) {
        LOGGER.error("Error while requesting documents for word \"" + word + "\".", e);
    }
}
 
Contributor: dice-group, Project: Palmetto, Lines: 19, Source: LuceneCorpusAdapter.java

Example 5: getDocumentsWithWord
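The list-based variant of Example 4: the same postings traversal, but the global doc ids are appended to an IntArrayList.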

@Override
public void getDocumentsWithWord(String word, IntArrayList documents) {
    DocsEnum docs = null;
    Term term = new Term(fieldName, word);
    try {
        int baseDocId;
        for (int i = 0; i < reader.length; i++) {
            docs = reader[i].termDocsEnum(term);
            baseDocId = contexts[i].docBase;
            if (docs != null) {
                while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
                    documents.add(docs.docID() + baseDocId);
                }
            }
        }
    } catch (IOException e) {
        LOGGER.error("Error while requesting documents for word \"" + word + "\".", e);
    }
}
 
Contributor: dice-group, Project: Palmetto, Lines: 19, Source: LuceneCorpusAdapter.java

Example 6: assertFreqPosAndPayload
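This assertion helper iterates a term's positions enum document by document, checking the frequency, every position, and the four payload bytes (left, right, depth, parent) against the expected values.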

private void assertFreqPosAndPayload(Term t, int[] expFreq, int[][] expPos,
		int[][][] expPay, int numAtomicReaders) throws IOException {
	List<AtomicReaderContext> leaves = r.getContext().leaves();
	for (AtomicReaderContext context : leaves) {
		AtomicReader reader = context.reader();
		DocsAndPositionsEnum termPositions = reader.termPositionsEnum(t);
		int docIndex = 0;
		while (termPositions.nextDoc() != DocsEnum.NO_MORE_DOCS) {
			assertEquals("Incorrect doc " + docIndex + " freq",
					expFreq[docIndex], termPositions.freq());
			assertEquals("Incorrect doc " + docIndex + " pos length",
					expPos[docIndex].length, termPositions.freq());
			int posIndex = 0;
			while (posIndex < termPositions.freq()) {
				int position = termPositions.nextPosition();
				assertEquals("Incorrect pos " + posIndex + " in doc "
						+ docIndex, expPos[docIndex][posIndex], position);
				BytesRef payload = termPositions.getPayload();
				int[] expPayload = expPay[docIndex][posIndex];
				String[] payloadDesc = new String[] { "left", "right",
						"depth", "parent" };
				for (int j = 0; j < 4; j++) {
					assertEquals(
							"Incorrect " + payloadDesc[j] + " payload",
							expPayload[j],
							payload.bytes[payload.offset + j]);
				}
				posIndex++;
			}
			docIndex++;
		}
		numAtomicReaders++;
	}
	assertEquals("Expected one atomic reader", 1, numAtomicReaders);
}
 
Contributor: arne-cl, Project: fangorn, Lines: 35, Source: TreeAnalyzerTest.java

Example 7: requestDocumentsWithWord
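This method gathers, per global document id, the positions of a word (and the document's length) across all sub-readers, with the usual nextDoc()/NO_MORE_DOCS loop as the outer iteration.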

protected void requestDocumentsWithWord(String word, IntObjectOpenHashMap<IntArrayList[]> positionsInDocs,
        IntIntOpenHashMap docLengths, int wordId, int numberOfWords) {
    DocsAndPositionsEnum docPosEnum = null;
    Term term = new Term(fieldName, word);
    int localDocId, globalDocId, baseDocId;
    IntArrayList positions[];
    try {
        for (int i = 0; i < reader.length; i++) {
            docPosEnum = reader[i].termPositionsEnum(term);
            baseDocId = contexts[i].docBase;
            if (docPosEnum != null) {
                while (docPosEnum.nextDoc() != DocsEnum.NO_MORE_DOCS) {
                    localDocId = docPosEnum.docID();
                    globalDocId = localDocId + baseDocId;
                    // if this is the first word and we found a new document
                    if (!positionsInDocs.containsKey(globalDocId)) {
                        positions = new IntArrayList[numberOfWords];
                        positionsInDocs.put(globalDocId, positions);
                    } else {
                        positions = positionsInDocs.get(globalDocId);
                    }
                    if (positions[wordId] == null) {
                        positions[wordId] = new IntArrayList();
                    }
                    // Go through the positions inside this document
                    for (int p = 0; p < docPosEnum.freq(); ++p) {
                        positions[wordId].add(docPosEnum.nextPosition());
                    }
                    if (!docLengths.containsKey(globalDocId)) {
                        // Get the length of the document
                        docLengths.put(globalDocId, reader[i].document(localDocId).getField(docLengthFieldName)
                                .numericValue().intValue());
                    }
                }
            }
        }
    } catch (IOException e) {
        LOGGER.error("Error while requesting documents for word \"" + word + "\".", e);
    }
}
 
Contributor: dice-group, Project: Palmetto, Lines: 40, Source: WindowSupportingLuceneCorpusAdapter.java

Example 8: split
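split partitions a segment's documents into hash ranges for index splitting: every term is hashed with MurmurHash3, and each of its postings sets a bit in the OpenBitSet of every range that includes the hash.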

OpenBitSet[] split(AtomicReaderContext readerContext) throws IOException {
  AtomicReader reader = readerContext.reader();
  OpenBitSet[] docSets = new OpenBitSet[ranges.size()];
  for (int i=0; i<docSets.length; i++) {
    docSets[i] = new OpenBitSet(reader.maxDoc());
  }
  Bits liveDocs = reader.getLiveDocs();

  Fields fields = reader.fields();
  Terms terms = fields==null ? null : fields.terms(field.getName());
  TermsEnum termsEnum = terms==null ? null : terms.iterator(null);
  if (termsEnum == null) return docSets;

  BytesRef term = null;
  DocsEnum docsEnum = null;

  for (;;) {
    term = termsEnum.next();
    if (term == null) break;

    // figure out the hash for the term
    // TODO: hook in custom hashes (or store hashes)
    int hash = Hash.murmurhash3_x86_32(term.bytes, term.offset, term.length, 0);

    docsEnum = termsEnum.docs(liveDocs, docsEnum, DocsEnum.FLAG_NONE);
    for (;;) {
      int doc = docsEnum.nextDoc();
      if (doc == DocsEnum.NO_MORE_DOCS) break;
      for (int i=0; i<rangesArr.length; i++) {      // inner-loop: use array here for extra speed.
        if (rangesArr[i].includes(hash)) {
          docSets[i].fastSet(doc);
        }
      }
    }
  }

  return docSets;
}
 
Contributor: pkarmstr, Project: NYBC, Lines: 38, Source: SolrIndexSplitter.java

Example 9: getDocIdSet
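This custom Filter materializes the postings of a single field/value pair from the parameter map into an OpenBitSet that serves as the filter's DocIdSet.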

@SuppressWarnings("deprecation")
@Override
public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {

    long startTime = System.currentTimeMillis();
    int max = context.reader().maxDoc();

    OpenBitSet bits = new OpenBitSet(max);
    DocsEnum termDocs = null;
    String name = null, val = null;

    if (paramMap.size() == 1) {
        // take the single field/value pair out of the parameter map
        Iterator iter = paramMap.entrySet().iterator();
        if (iter.hasNext()) {
            Map.Entry entry = (Map.Entry) iter.next();
            name = (String) entry.getKey();
            val = (String) entry.getValue();
        }
        termDocs = context.reader().termDocsEnum(new Term(name, val));
    }

    if (termDocs == null) {
        return null;
    }

    // set a bit for every document that contains the term
    while (termDocs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
        bits.set(termDocs.docID());
    }

    long endTime = System.currentTimeMillis();
    dlogger.debug("Filtering offline offers spend time: {}", endTime - startTime);
    return bits;
}
 
Contributor: chensed, Project: my-first-github, Lines: 43, Source: DemoFilter.java

Example 10: getDocAvgTermFreqs
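This utility computes, for every document, the average global frequency of the terms it contains: it walks all terms of the contents field and, for each posting, accumulates the term's collection frequency into that document's running sum.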

public void getDocAvgTermFreqs() throws IOException
{
	BufferedWriter fileWriter = new BufferedWriter(new FileWriter(resultsFile));
	FlagConfig config = FlagConfig.getFlagConfig(new String[] {"-luceneindexpath", luceneIndex});
	LuceneUtils lu = new LuceneUtils(config);
	TermsEnum termEnum = null;
	TermsEnum terms = lu.getTermsForField("contents").iterator(termEnum);
	BytesRef bytes;
	int tc = 0;
	Map<String, TermSum> docStats = new HashMap<String, TermSum>();
	while ((bytes = terms.next()) != null)
	{
		if ((tc % 10000 == 0) || (tc < 10000 && tc % 1000 == 0)) {
			System.out.println("Processed " + tc + " terms ... ");
		}
		tc++;

		Term term = new Term("contents", bytes);
		int termfreq = lu.getGlobalTermFreq(term);
		DocsEnum docsEnum = lu.getDocsForTerm(term);
		while (docsEnum.nextDoc() != DocsEnum.NO_MORE_DOCS)
		{
			String docName = lu.getDoc(docsEnum.docID()).getField(config.docidfield()).stringValue();
			TermSum stats = docStats.get(docName);
			if (stats == null)
				stats = new TermSum();
			stats.sum += termfreq;
			stats.terms += 1;
			docStats.put(docName, stats);
		}
	}

	System.out.println("Output results to file");
	for (Map.Entry<String, TermSum> e : docStats.entrySet())
	{
		float avg = e.getValue().sum / (float) e.getValue().terms;
		fileWriter.write("" + e.getKey() + "," + avg + "\n");
	}
	fileWriter.close();
}
 
Contributor: SeldonIO, Project: semantic-vectors-lucene-tools, Lines: 43, Source: GetAvgTermFreq.java

Example 11: doQueryFirstScoring
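Drill-sideways scoring driven by the base query: each base hit is checked against the dimension iterators and filter bits, passing documents are collected as hits, and a document that fails exactly one dimension is collected as a near miss.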

/** Used when base query is highly constraining vs the
 *  drilldowns, or when the docs must be scored at once
 *  (i.e., like BooleanScorer2, not BooleanScorer).  In
 *  this case we just .next() on base and .advance() on
 *  the dim filters. */ 
private void doQueryFirstScoring(Collector collector, DocIdSetIterator[] disis, Collector[] sidewaysCollectors,
                                 Bits[] bits, Collector[] bitsSidewaysCollectors) throws IOException {
  //if (DEBUG) {
  //  System.out.println("  doQueryFirstScoring");
  //}
  int docID = baseScorer.docID();

  nextDoc: while (docID != DocsEnum.NO_MORE_DOCS) {
    Collector failedCollector = null;
    for (int i=0;i<disis.length;i++) {
      // TODO: should we sort this 2nd dimension of
      // docsEnums from most frequent to least?
      DocIdSetIterator disi = disis[i];
      if (disi != null && disi.docID() < docID) {
        disi.advance(docID);
      }
      if (disi == null || disi.docID() > docID) {
        if (failedCollector != null) {
          // More than one dim fails on this document, so
          // it's neither a hit nor a near-miss; move to
          // next doc:
          docID = baseScorer.nextDoc();
          continue nextDoc;
        } else {
          failedCollector = sidewaysCollectors[i];
        }
      }
    }

    // TODO: for the "non-costly Bits" we really should
    // have passed them down as acceptDocs, but
    // unfortunately we cannot distinguish today between
    // "bits() is so costly that you should apply it last"
    // from "bits() is so cheap that you should apply it
    // everywhere down low"

    // Fold in Filter Bits last, since they may be costly:
    for(int i=0;i<bits.length;i++) {
      if (bits[i].get(docID) == false) {
        if (failedCollector != null) {
          // More than one dim fails on this document, so
          // it's neither a hit nor a near-miss; move to
          // next doc:
          docID = baseScorer.nextDoc();
          continue nextDoc;
        } else {
          failedCollector = bitsSidewaysCollectors[i];
        }
      }
    }

    collectDocID = docID;

    // TODO: we could score on demand instead since we are
    // DAAT (doc-at-a-time) here:
    collectScore = baseScorer.score();

    if (failedCollector == null) {
      // Hit passed all filters, so it's "real":
      collectHit(collector, sidewaysCollectors, bitsSidewaysCollectors);
    } else {
      // Hit missed exactly one filter:
      collectNearMiss(failedCollector);
    }

    docID = baseScorer.nextDoc();
  }
}
 
Contributor: europeana, Project: search, Lines: 73, Source: DrillSidewaysScorer.java


Note: the org.apache.lucene.index.DocsEnum.NO_MORE_DOCS examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors, who retain the copyright; consult each project's License before redistributing or reusing the code. Do not reproduce without permission.