This article collects typical usage examples of the Java method org.apache.lucene.document.Document.get. If you are wondering what Document.get does, how to call it, or where to find concrete examples, the curated code samples below should help. You can also explore further usages of the containing class, org.apache.lucene.document.Document.
The following 15 code examples of Document.get are listed below, ordered by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
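Before the individual examples, here is a minimal, self-contained sketch of the basic pattern: Document.get(String) returns the stored String value of the named field, or null when the field does not exist or was not stored. This sketch assumes a Lucene 5+ style API (FSDirectory.open(Path)); the index path "/tmp/index" and the field name "title" are placeholders for illustration, not part of any example below.
import java.nio.file.Paths;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.FSDirectory;

public class DocumentGetSketch {
    public static void main(String[] args) throws Exception {
        // "/tmp/index" and the field name "title" are placeholders for this sketch.
        try (IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("/tmp/index")))) {
            IndexSearcher searcher = new IndexSearcher(reader);
            TopDocs hits = searcher.search(new MatchAllDocsQuery(), 10);
            for (ScoreDoc hit : hits.scoreDocs) {
                Document doc = searcher.doc(hit.doc);
                // Document.get returns the stored String value, or null if the
                // field is missing or was not stored.
                String title = doc.get("title");
                if (title != null) {
                    System.out.println(title);
                }
            }
        }
    }
}
Only fields that were stored (Field.Store.YES) at indexing time can be retrieved this way; indexed-only fields come back as null, which is why several of the examples below guard against null or non-numeric values before parsing.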
Example 1: docsToCategories
import org.apache.lucene.document.Document; // import the package/class that the method depends on
/**
* Converts the given {@link DocumentsSearchResult} to an {@link ISearchResult} containing
* the found {@link Category}s. It extracts all category ids from the {@link Document}s
* and queries the database for them.
* @param docsResult the {@link DocumentsSearchResult} to convert.
* @return {@link ISearchResult} containing the {@link Category}s.
*/
private ISearchResult<Category> docsToCategories(final DocumentsSearchResult docsResult) {
    final List<Category> categories = new ArrayList<>();
    for (final Document doc : docsResult.getResults()) {
        final String categoryId = doc.get(IIndexElement.FIELD_ID);
        if (NumberUtils.isNumber(categoryId)) {
            categories.add(getCategory(Integer.parseInt(categoryId)));
        } else {
            LOGGER.error("Non-numeric category id from index: {}.", categoryId);
        }
    }
    return new SimpleSearchResult<>(categories, docsResult.getTotalHits());
}
Example 2: docsToUsers
import org.apache.lucene.document.Document; // import the package/class that the method depends on
/**
* Converts the given {@link DocumentsSearchResult} to an {@link ISearchResult} containing
* the found {@link User}s. It extracts all user ids from the {@link Document}s
* and queries the database for them.
* @param docsResult the {@link DocumentsSearchResult} to convert.
* @return {@link ISearchResult} containing the {@link User}s.
*/
private ISearchResult<User> docsToUsers(final DocumentsSearchResult docsResult) {
    final List<Long> userIds = new ArrayList<>();
    for (final Document doc : docsResult.getResults()) {
        final String userId = doc.get(IIndexElement.FIELD_ID);
        if (NumberUtils.isNumber(userId)) {
            userIds.add(Long.valueOf(userId));
        } else {
            LOGGER.error("Non-numeric user id from index: {}.", userId);
        }
    }
    // no results -> return empty ISearchResult
    if (userIds.isEmpty()) {
        return new SimpleSearchResult<User>(new ArrayList<User>(), docsResult.getTotalHits());
    }
    // The FIELD(id, ...) ordering keeps the rows in the same order as the ids
    // returned by the index, i.e. the original relevance order.
    return new SimpleSearchResult<>(DSL.using(jooqConfig)
            .select()
            .from(TABLE)
            .where(TABLE.ID.in(userIds))
            .orderBy(DSL.field("FIELD(id, " + StringUtils.join(userIds, ", ") + ")", Long.class))
            .fetchInto(User.class), docsResult.getTotalHits());
}
Example 3: docsToProviders
import org.apache.lucene.document.Document; // import the package/class that the method depends on
/**
* Builds {@link Provider}s for the given {@link Document}s.
* @param docsResult the {@link Document}s to convert.
* @return a new {@link ISearchResult} containing the {@link Provider}s and the
* totalHits from the given {@link DocumentsSearchResult}.
*/
private ISearchResult<Provider> docsToProviders(final DocumentsSearchResult docsResult) {
    final List<Provider> providers = new ArrayList<>();
    for (final Document doc : docsResult.getResults()) {
        final String providerId = doc.get(IIndexElement.FIELD_ID);
        if (NumberUtils.isNumber(providerId)) {
            providers.add(getProvider(Integer.parseInt(providerId)));
        } else {
            LOGGER.error("Non-numeric provider id from index: {}.", providerId);
        }
    }
    return new SimpleSearchResult<>(providers, docsResult.getTotalHits());
}
Example 4: collect
import org.apache.lucene.document.Document; // import the package/class that the method depends on
@SuppressWarnings("nls")
@Override
public void collect(int docNum) throws IOException
{
    Document doc = reader.document(docNum, new MapFieldSelector(FreeTextQuery.FIELD_UNIQUE, FreeTextQuery.FIELD_ID,
        FreeTextQuery.FIELD_INDEXEDTIME, FreeTextQuery.FIELD_INSTITUTION));
    String unique = doc.get(FreeTextQuery.FIELD_UNIQUE);
    long itemId = Long.parseLong(doc.get(FreeTextQuery.FIELD_ID));
    long instId = Long.parseLong(doc.get(FreeTextQuery.FIELD_INSTITUTION));
    String timeStr = doc.get(FreeTextQuery.FIELD_INDEXEDTIME);
    if( unique == null || timeStr == null )
    {
        LOGGER.warn("Corrupt document '" + docNum + "' in index. {unique:" + unique + ", time:" + timeStr + "}");
    }
    else
    {
        compareDate(itemId, instId, Long.parseLong(timeStr));
    }
}
Example 5: run
import org.apache.lucene.document.Document; // import the package/class that the method depends on
private void run() throws IOException {
    IndexSearcher indexSearcher = new IndexSearcher(IndexReader.open(FSDirectory.open(new File(indexPath))));
    List<FieldList> fields = new ArrayList<>();
    Map<String, FieldList> fieldMap = new HashMap<>();
    for (int i = 0; i < indexSearcher.maxDoc(); i++) {
        Document document = indexSearcher.doc(i);
        String key = document.get(keyField);
        if (key.isEmpty()) {
            continue;
        }
        String value = document.get(valueField);
        int freq = Integer.parseInt(document.get(freqField));
        FieldList list = fieldMap.get(key);
        if (list == null) {
            list = new FieldList(key);
            fieldMap.put(key, list);
            fields.add(list);
        }
        list.add(value, freq);
    }
    try (PrintWriter writer = new PrintWriter(outputPath)) {
        fields.stream().sorted().limit(maxEntries).forEach(list -> writer.println(list.toString()));
    }
}
Example 6: search
import org.apache.lucene.document.Document; // import the package/class that the method depends on
static void search(String className, Indexer indexer, Collection<IndexingContext> contexts, List<? super ClassUsage> results) throws IOException {
    String searchString = crc32base64(className.replace('.', '/'));
    Query refClassQuery = indexer.constructQuery(ClassDependencyIndexCreator.FLD_NB_DEPENDENCY_CLASS.getOntology(), new StringSearchExpression(searchString));
    TopScoreDocCollector collector = TopScoreDocCollector.create(NexusRepositoryIndexerImpl.MAX_RESULT_COUNT, null);
    for (IndexingContext context : contexts) {
        IndexSearcher searcher = context.acquireIndexSearcher();
        try {
            searcher.search(refClassQuery, collector);
            ScoreDoc[] hits = collector.topDocs().scoreDocs;
            LOG.log(Level.FINER, "for {0} ~ {1} found {2} hits", new Object[] {className, searchString, hits.length});
            for (ScoreDoc hit : hits) {
                int docId = hit.doc;
                Document d = searcher.doc(docId);
                String fldValue = d.get(ClassDependencyIndexCreator.NB_DEPENDENCY_CLASSES);
                LOG.log(Level.FINER, "{0} uses: {1}", new Object[] {className, fldValue});
                Set<String> refClasses = parseField(searchString, fldValue, d.get(ArtifactInfo.NAMES));
                if (!refClasses.isEmpty()) {
                    ArtifactInfo ai = IndexUtils.constructArtifactInfo(d, context);
                    if (ai != null) {
                        ai.setRepository(context.getRepositoryId());
                        List<NBVersionInfo> version = NexusRepositoryIndexerImpl.convertToNBVersionInfo(Collections.singleton(ai));
                        if (!version.isEmpty()) {
                            results.add(new ClassUsage(version.get(0), refClasses));
                        }
                    }
                }
            }
        } finally {
            context.releaseIndexSearcher(searcher);
        }
    }
}
Example 7: search
import org.apache.lucene.document.Document; // import the package/class that the method depends on
public Map<DocumentType, List<SearchResult>> search(String searchString) throws ParseException {
    Map<DocumentType, List<SearchResult>> resultMap = new TreeMap<DocumentType, List<SearchResult>>();
    try {
        Query query = parser.parse(searchString);
        final SecondPassGroupingCollector collector = new SecondPassGroupingCollector("documentType", searchGroups,
            Sort.RELEVANCE, null, 5, true, false, true);
        searcher.search(query, collector);
        final TopGroups groups = collector.getTopGroups(0);
        for (GroupDocs groupDocs : groups.groups) {
            DocumentType docType = DocumentType.valueOf(groupDocs.groupValue);
            List<SearchResult> results = new ArrayList<SearchResult>();
            for (ScoreDoc scoreDoc : groupDocs.scoreDocs) {
                Document doc = searcher.doc(scoreDoc.doc);
                SearchResult result = new SearchResult(
                    docType,
                    doc.get("name"),
                    doc.get("url"),
                    doc.get("className"),
                    doc.get("package"),
                    doc.get("ensemblePath"),
                    doc.get("shortDescription")
                );
                results.add(result);
            }
            resultMap.put(docType, results);
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
    return resultMap;
}
Example 8: toBean
import org.apache.lucene.document.Document; // import the package/class that the method depends on
@Override
public List<IfcProductRecordText> toBean(IndexSearcher indexSearcher, Query query, ScoreDoc[] hits) {
    List<IfcProductRecordText> hitRecords = new ArrayList<IfcProductRecordText>();
    try {
        for (int i = 0; i < hits.length; i++) {
            ScoreDoc scoreDoc = hits[i];
            Document hitDoc = indexSearcher.doc(scoreDoc.doc);
            String oid = hitDoc.get(Key_Oid);
            String type = hitDoc.get(Key_Type);
            String name = hitDoc.get(Key_Name);
            String detail = hitDoc.get(Key_Detail);
            IfcProductRecordText record = new IfcProductRecordText();
            record.setOid(oid);
            record.setType(type);
            record.setName(name);
            record.setDetail(detail);
            hitRecords.add(record);
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
    return hitRecords;
}
Example 9: doPagingSearch
import org.apache.lucene.document.Document; // import the package/class that the method depends on
public SearchResult doPagingSearch(IndexReader reader, IndexSearcher searcher, Query query, String queryString, int page) throws IOException {
    TopDocs results = searcher.search(query, 20 * this.PAGELIMIT); // 20 pages worth of documents
    ScoreDoc[] hits = results.scoreDocs;
    int numTotalHits = results.totalHits;
    int start = this.PAGELIMIT * page;
    int end = Math.min(numTotalHits, (this.PAGELIMIT * (page + 1)));
    int noPages = numTotalHits / this.PAGELIMIT;
    if (noPages > 20) {
        noPages = 19;
    }
    List<Integer> pages = this.calculatePages(numTotalHits, noPages);
    List<ContentDTO> contentDTOList = new ArrayList<>();
    for (int i = start; i < end; i++) {
        Document doc = searcher.doc(hits[i].doc);
        String filepath = doc.get(Values.PATH);
        // Get the content out of the database
        ContentDTO byId = this.contentDAO.getById(Helpers.tryParseInt(filepath, -1));
        if (byId != null) {
            contentDTOList.add(byId);
        }
    }
    return new SearchResult(numTotalHits, page, pages, queryString, contentDTOList);
}
Example 10: process
import org.apache.lucene.document.Document; // import the package/class that the method depends on
/**
* Here we define the component's core logic.
* For each document belonging to the search results, we call an external service
* for gathering a corresponding up-to-date price.
*
* @param rb The {@link org.apache.solr.handler.component.ResponseBuilder}
* @throws IOException If there is a low-level I/O error.
*/
@Override
public void process(final ResponseBuilder builder) throws IOException {
    // Sanity check: if the component hasn't been properly initialised
    // then it must immediately return.
    // A more robust approach could retry the initialisation (in the prepare method).
    if (!hasBeenCorrectlyInitialised) {
        return;
    }
    // Get a SolrIndexSearcher reference
    final SolrIndexSearcher searcher = builder.req.getSearcher();
    // This NamedList will hold the component contribution (i.e. the component result).
    final NamedList<Double> contribution = new SimpleOrderedMap<Double>();
    for (final DocIterator it = builder.getResults().docList.iterator(); it.hasNext();) {
        // This is NOT the Solr ID of our records, but the Lucene internal document id,
        // which is different.
        int docId = it.nextDoc();
        final Document luceneDocument = searcher.doc(docId);
        // This is the Solr document id
        String id = luceneDocument.get("id");
        // Get the price of the item
        final Double itemPrice = getPrice(id);
        // Add the price of the item to the component contribution
        contribution.add(id, itemPrice);
    }
    // Add the component contribution to the response builder
    builder.rsp.add("prices", contribution);
}
Example 11: search
import org.apache.lucene.document.Document; // import the package/class that the method depends on
public static void search(String indexDir, String q) throws Exception {
    Directory dir = FSDirectory.open(Paths.get(indexDir));
    IndexReader reader = DirectoryReader.open(dir);
    IndexSearcher is = new IndexSearcher(reader);
    // Analyzer analyzer = new StandardAnalyzer(); // standard analyzer
    SmartChineseAnalyzer analyzer = new SmartChineseAnalyzer();
    QueryParser parser = new QueryParser("desc", analyzer);
    Query query = parser.parse(q);
    long start = System.currentTimeMillis();
    TopDocs hits = is.search(query, 10);
    long end = System.currentTimeMillis();
    System.out.println("Matched " + q + ", took " + (end - start) + " ms, found " + hits.totalHits + " records");
    QueryScorer scorer = new QueryScorer(query);
    Fragmenter fragmenter = new SimpleSpanFragmenter(scorer);
    SimpleHTMLFormatter simpleHTMLFormatter = new SimpleHTMLFormatter("<b><font color='red'>", "</font></b>");
    Highlighter highlighter = new Highlighter(simpleHTMLFormatter, scorer);
    highlighter.setTextFragmenter(fragmenter);
    for (ScoreDoc scoreDoc : hits.scoreDocs) {
        Document doc = is.doc(scoreDoc.doc);
        System.out.println(doc.get("city"));
        System.out.println(doc.get("desc"));
        String desc = doc.get("desc");
        if (desc != null) {
            TokenStream tokenStream = analyzer.tokenStream("desc", new StringReader(desc));
            System.out.println(highlighter.getBestFragment(tokenStream, desc));
        }
    }
    reader.close();
}
Example 12: convert
import org.apache.lucene.document.Document; // import the package/class that the method depends on
@Override
public String convert(Document p) {
    return p.get(name);
}
Example 13: prefetch
import org.apache.lucene.document.Document; // import the package/class that the method depends on
private void prefetch(int n) throws IOException
{
    NodeBulkLoader bulkLoader = config.getBulkLoader();
    if (!getBulkFetch() || (bulkLoader == null))
    {
        // No prefetching
        return;
    }
    if (prefetch.get(n))
    {
        // The document was already processed
        return;
    }
    // Start at 'n' and process the next bulk set
    int bulkFetchSize = getBulkFetchSize();
    List<NodeRef> fetchList = new ArrayList<NodeRef>(bulkFetchSize);
    int totalHits = hits.length();
    for (int i = 0; i < bulkFetchSize; i++)
    {
        int next = n + i;
        if (next >= totalHits)
        {
            // We've hit the end
            break;
        }
        if (prefetch.get(next))
        {
            // This one is in there already
            continue;
        }
        // We store the node and mark it as prefetched
        prefetch.set(next);
        Document doc = hits.doc(next);
        String nodeRefStr = doc.get("ID");
        try
        {
            NodeRef nodeRef = tenantService.getBaseName(new NodeRef(nodeRefStr));
            fetchList.add(nodeRef);
        }
        catch (AlfrescoRuntimeException e)
        {
            // Ignore IDs that don't parse as NodeRefs, e.g. FTSREF docs
        }
    }
    // Now bulk fetch
    if (fetchList.size() > 1)
    {
        bulkLoader.cacheNodes(fetchList);
    }
}
Example 14: searchByIndex
import org.apache.lucene.document.Document; // import the package/class that the method depends on
@Override
public Set<NitriteId> searchByIndex(String field, String searchString) {
    IndexReader indexReader = null;
    try {
        QueryParser parser = new QueryParser(Version.LUCENE_4_9, field, analyzer);
        parser.setAllowLeadingWildcard(true);
        Query query = parser.parse("*" + searchString + "*");
        indexReader = DirectoryReader.open(indexDirectory);
        IndexSearcher indexSearcher = new IndexSearcher(indexReader);
        TopScoreDocCollector collector = TopScoreDocCollector.create(MAX_SEARCH, true);
        indexSearcher.search(query, collector);
        TopDocs hits = collector.topDocs(0, MAX_SEARCH);
        Set<NitriteId> keySet = new LinkedHashSet<>();
        if (hits != null) {
            ScoreDoc[] scoreDocs = hits.scoreDocs;
            if (scoreDocs != null) {
                for (ScoreDoc scoreDoc : scoreDocs) {
                    Document document = indexSearcher.doc(scoreDoc.doc);
                    String jsonId = document.get(CONTENT_ID);
                    NitriteId nitriteId = keySerializer.readValue(jsonId, NitriteId.class);
                    keySet.add(nitriteId);
                }
            }
        }
        return keySet;
    } catch (IOException | ParseException e) {
        throw new IndexingException(errorMessage(
            "could not search on full-text index", 0), e);
    } finally {
        try {
            if (indexReader != null) indexReader.close();
        } catch (IOException ignored) {
            // ignored
        }
    }
}
Example 15: createResult
import org.apache.lucene.document.Document; // import the package/class that the method depends on
@Override
protected ActivationResult createResult(ItemIdKey key, Document doc, float relevance, boolean sortByRelevance)
{
    return new ActivationResult(key, doc.get(FreeTextQuery.FIELD_ACTIVATION_ID), relevance, sortByRelevance);
}