This article collects typical usage examples of the Python method lucene.QueryParser.parse. If you have been wondering what exactly QueryParser.parse does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage of the containing class, lucene.QueryParser.
Below are 15 code examples of QueryParser.parse, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
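Before the examples, here is a minimal sketch of the shared setup most of them assume: a running JVM, a StandardAnalyzer bound to the module-level name analyzer, and an IndexSearcher bound to searcher. This is only a sketch using the PyLucene 3.x flat-namespace API; the index path is a hypothetical placeholder.

import lucene
from lucene import (SimpleFSDirectory, File, IndexSearcher,
                    StandardAnalyzer, QueryParser, Version)

lucene.initVM()  # start the JVM before touching any Lucene class

directory = SimpleFSDirectory(File("/path/to/index"))  # hypothetical path
searcher = IndexSearcher(directory, True)              # read-only searcher
analyzer = StandardAnalyzer(Version.LUCENE_35)

# Parse a query against the "title" field and fetch the top 10 hits.
qp = QueryParser(Version.LUCENE_35, "title", analyzer)
query = qp.parse("lucene in action")
for hit in searcher.search(query, 10).scoreDocs:
    print searcher.doc(hit.doc).get("title")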
Example 1: testPrefixQuery
# Required import: from lucene import QueryParser [as alias]
# Or: from lucene.QueryParser import parse [as alias]
def testPrefixQuery(self):
    parser = QueryParser(Version.LUCENE_CURRENT, "category",
                         StandardAnalyzer(Version.LUCENE_CURRENT))
    parser.setLowercaseExpandedTerms(False)
    print parser.parse("/Computers/technology*").toString("category")
Example 2: does_line_exist
# Required import: from lucene import QueryParser [as alias]
# Or: from lucene.QueryParser import parse [as alias]
def does_line_exist(self, line, x, y):
    """
    Old, more complex check of whether a sentence already exists in the index.
    Not used at the moment: the early return below delegates everything to
    does_line_existNew, so the legacy body underneath is never reached.
    """
    return self.does_line_existNew(line, x, y)
    try:
        # Keep only word characters and whitespace from x.
        array = re.findall(r'[\w\s]+', x)
        x = ""
        for item in array:
            x += item
        qp = QueryParser(Version.LUCENE_35, "X", analyzer)
        qp.setDefaultOperator(qp.Operator.AND)
        query = qp.parse(x)
        MAX = 100000
        hits = searcher.search(query, MAX)
        # First check whether an x already exists.
        for hit in hits.scoreDocs:
            doc = searcher.doc(hit.doc)
            y_entry = doc["Y"]
            if y_entry == y:
                print "y found"
                print
                try:
                    array = re.findall(r'[\w\s]+', line)
                    string = ""
                    for item in array:
                        string += item
                    qp = QueryParser(Version.LUCENE_35, "Sentence", analyzer)
                    qp.setDefaultOperator(qp.Operator.AND)
                    query = qp.parse(string)
                    MAX = 10
                    hits = searcher.search(query, MAX)
                    if len(hits.scoreDocs) > 0:
                        return True
                except Exception:
                    s_tmp = str(sys.exc_info())
                    if "too many boolean clauses" in s_tmp:
                        print "too many boolean clauses"
                        return True
                    else:
                        print "Unexpected error:", sys.exc_info()[0]
                        print "in does line exist"
                        print s_tmp
        print 'nothing found'
        return False
    except:
        print("Fail (does line exists) in x:" + x + " y:" + y)
        print "Unexpected error:", sys.exc_info()[0]
        print
Example 3: search
# Required import: from lucene import QueryParser [as alias]
# Or: from lucene.QueryParser import parse [as alias]
def search(self, string, special=None):
    query = ""
    try:
        MAX = 100000
        # For dates such as 1931.08.06.
        string = string.replace(".", " ")
        array = re.findall(r'[\w\s]+', string)
        string = ""
        for item in array:
            string += item
        qp = QueryParser(Version.LUCENE_35, "title", analyzer)
        qp.setDefaultOperator(qp.Operator.AND)
        query = qp.parse(string)
        # print ("query", query)
        hits = searcher.search(query, MAX)
        sentence_list = []
        for hit in hits.scoreDocs:
            doc = searcher.doc(hit.doc)
            sentence_list.append(doc.get("title").encode("utf-8"))
        return sentence_list
    except:
        print("Fail in receiving sentence with term " + string)
        print ("query", query)
        print "Unexpected error:", sys.exc_info()[0]
        # raw_input("wait")
        print
        return []
Example 4: searchString
# Required import: from lucene import QueryParser [as alias]
# Or: from lucene.QueryParser import parse [as alias]
def searchString(self, string):
    'Searches for a string and returns an array of POS-tagged sentences.'
    query = ""
    # print("Input String: ", string)
    try:
        MAX = 100000
        # For dates such as 1931.08.06.
        string = string.replace(".", " ")
        array = re.findall(r'[\w\s]+', string)
        string = ""
        for item in array:
            string += item
        # print("Input String2: ", string)
        qp = QueryParser(Version.LUCENE_35, "sentence", analyzer)
        qp.setDefaultOperator(qp.Operator.AND)
        query = qp.parse(string)
        # print ("query", query)
        hits = searcher.search(query, MAX)
        # print len(hits)
        sentence_list = []
        for hit in hits.scoreDocs:
            doc = searcher.doc(hit.doc)
            # print doc.get("sentence")
            sentence_list.append(eval(doc.get("sentence").encode("utf-8")))
        return sentence_list
    except:
        print("Fail in receiving sentence with term " + string + " in search term")
        print ("query", query)
        print "Unexpected error:", sys.exc_info()[0]
        # raw_input("wait")
        print
        return []
Example 5: main
# Required import: from lucene import QueryParser [as alias]
# Or: from lucene.QueryParser import parse [as alias]
def main(cls, argv):
    allBooks = MatchAllDocsQuery()
    parser = QueryParser(Version.LUCENE_CURRENT, "contents",
                         StandardAnalyzer(Version.LUCENE_CURRENT))
    query = BooleanQuery()
    query.add(allBooks, BooleanClause.Occur.SHOULD)
    query.add(parser.parse("java OR action"), BooleanClause.Occur.SHOULD)

    indexDir = System.getProperty("index.dir")
    directory = SimpleFSDirectory(File(indexDir))
    example = SortingExample(directory)

    example.displayResults(query, Sort.RELEVANCE)
    example.displayResults(query, Sort.INDEXORDER)
    example.displayResults(query,
                           Sort(SortField("category", SortField.STRING)))
    example.displayResults(query,
                           Sort(SortField("pubmonth", SortField.INT, True)))
    example.displayResults(query,
                           Sort([SortField("category", SortField.STRING),
                                 SortField.FIELD_SCORE,
                                 SortField("pubmonth", SortField.INT, True)]))
    example.displayResults(query,
                           Sort([SortField.FIELD_SCORE,
                                 SortField("category", SortField.STRING)]))
    directory.close()
Example 6: searchForDbpediaURI
# Required import: from lucene import QueryParser [as alias]
# Or: from lucene.QueryParser import parse [as alias]
def searchForDbpediaURI(self, uri):
    """
    Returns all anchor texts related to the given DBpedia URI.
    For each anchor text, also returns the corresponding URI and how often
    the anchor appears in the English Wikipedia.
    """
    uri_old = uri
    uri = uri.replace("http://dbpedia.org/resource/", "")
    array = re.findall(r'[\w\s]+', uri)
    uri = ""
    for item in array:
        uri += item
    try:
        qp = QueryParser(Version.LUCENE_35, "dbpedia_uri", analyzer)
        qp.setDefaultOperator(qp.Operator.AND)
        query = qp.parse(uri)
        MAX = 10000
        result = []
        hits = searcher.search(query, MAX)
        for hit in hits.scoreDocs:
            doc = searcher.doc(hit.doc)
            dbpedia_uri = doc["dbpedia_uri"].encode("utf-8")
            if dbpedia_uri == uri_old:
                result.append([doc["anchor"].encode("utf-8"),
                               doc["anchor_uri"].encode("utf-8"),
                               dbpedia_uri,
                               doc["number"].encode("utf-8")])
        return result
    except:
        print("searchForDbpediaURI - Fail in uri: " + uri)
        return []
Example 7: searchXYPair
# Required import: from lucene import QueryParser [as alias]
# Or: from lucene.QueryParser import parse [as alias]
def searchXYPair(self, x, y):
    """
    Returns all sentences that are tagged with the given two entities (x, y).
    """
    tmp_hm = {}
    if x == "" or y == "":
        return []
    try:
        array = re.findall(r'[\w\s]+', x)
        x = ""
        for item in array:
            x += item
        qp = QueryParser(Version.LUCENE_35, "X", analyzer)
        qp.setDefaultOperator(qp.Operator.AND)
        query = qp.parse(x)
        MAX = 100000
        result_list = []
        hits = searcher.search(query, MAX)
        # Collect unique sentences whose Y field matches, then wrap them.
        for hit in hits.scoreDocs:
            doc = searcher.doc(hit.doc)
            y_entry = doc["Y"]
            if y_entry == y:
                tmp_hm[doc["Sentence"]] = ""
        for key in tmp_hm:
            result_list.append(IndexUtils.sentence_wrapper(key))
        tmp_hm = {}
        return result_list
    except:
        print("Fail (search XYPair) in x:" + x + " y:" + y)
        print "Unexpected error:", sys.exc_info()[0]
        print
        return []
Example 8: searchForDbpediaURI
# Required import: from lucene import QueryParser [as alias]
# Or: from lucene.QueryParser import parse [as alias]
def searchForDbpediaURI(self, uri):
    """
    Returns all sentences that are tagged with the given DBpedia URI.
    """
    print "in searchForDbpediaURI"
    uri_old = uri
    uri = uri.replace("http://dbpedia.org/ontology/", "")
    uri = uri.replace("http://dbpedia.org/property/", "")
    uri = uri.replace("http://dbpedia.org/resource/", "")
    array = re.findall(r'[\w\s]+', uri)
    uri = ""
    for item in array:
        uri += item
    # Initialized before the try block so the except clause can return it
    # even when the failure happens before the search runs.
    result = []
    try:
        qp = QueryParser(Version.LUCENE_35, "URI", analyzer)
        qp.setDefaultOperator(qp.Operator.AND)
        query = qp.parse(uri)
        print "query: " + str(query)
        MAX = 500000
        hits = searcher.search(query, MAX)
        for hit in hits.scoreDocs:
            doc = searcher.doc(hit.doc)
            dbpedia_uri = doc["URI"]
            if dbpedia_uri == uri_old:
                result.append([IndexUtils.sentence_wrapper(doc["Sentence"]),
                               doc["X"], doc["Y"], dbpedia_uri])
        return result
    except:
        print("Fail in uri: " + uri)
        print "Unexpected error:", sys.exc_info()[0]
        return result
Example 9: search
# Required import: from lucene import QueryParser [as alias]
# Or: from lucene.QueryParser import parse [as alias]
def search(r, keyword=""):
import logging
logger = logging.getLogger("search")
bench = Benchmark(logger)
from lucene import IndexSearcher, StandardAnalyzer, FSDirectory, QueryParser, File, Hit
import lucene, os
os.environ["JAVA_HOME"] = "/usr/local/jdk1.6.0_17"
lucene.initVM(lucene.CLASSPATH)
directory = FSDirectory.open(File(CONFIG.INDEX_PATH))
ROBOT_INDEX = IndexSearcher(directory, True)
ROBOT_ANALYZER = StandardAnalyzer()
keyword = keyword or r.GET["keyword"]
query = QueryParser("context", ROBOT_ANALYZER)
query = query.parse('"%s"' % keyword)
bench.start_mark("search")
hits = ROBOT_INDEX.search(query)
count = len(hits)
result = []
i = 0
for hit in hits:
i += 1
if i > 100:
break
doc = Hit.cast_(hit).getDocument()
result.append(SearchResult(doc, i, keyword))
ROBOT_INDEX.close()
et = bench.stop_mark()
return render_to_response("robot_search_result.html", {"result": result, "count": count, "elaspe": et})
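Note that this view recreates the JVM and reopens the index on every request, and lucene.initVM is meant to be called once per process. A sketch of hoisting the setup to module level instead, assuming the same old PyLucene 2.x-era API and the same hypothetical CONFIG object as above:

# Module-level setup: create the JVM, searcher, and analyzer once per process
# (PyLucene 2.x-era API; CONFIG.INDEX_PATH as in the example above).
import os
import lucene
from lucene import IndexSearcher, StandardAnalyzer, FSDirectory, File

os.environ["JAVA_HOME"] = "/usr/local/jdk1.6.0_17"
lucene.initVM(lucene.CLASSPATH)

ROBOT_INDEX = IndexSearcher(FSDirectory.open(File(CONFIG.INDEX_PATH)), True)
ROBOT_ANALYZER = StandardAnalyzer()

# In a threaded web server, each worker thread must attach to the JVM before
# touching Lucene classes:
#     lucene.getVMEnv().attachCurrentThread()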
Example 10: searchKey
# Required import: from lucene import QueryParser [as alias]
# Or: from lucene.QueryParser import parse [as alias]
def searchKey(self, key, rank=None):
    query = ""
    try:
        MAX = 100000
        qp = QueryParser(Version.LUCENE_35, "key", analyzer)
        qp.setDefaultOperator(qp.Operator.AND)
        query = qp.parse(key)
        # print ("query", query)
        hits = searcher.search(query, MAX)
        sentence_list = []
        for hit in hits.scoreDocs:
            doc = searcher.doc(hit.doc)
            try:
                sentence_list.append(eval(doc.get("sentence").encode("utf-8")))
            except:
                print doc.get("sentence")
        return sentence_list
    except:
        print("Fail in receiving sentence with term " + key)
        print ("query", query)
        print "Unexpected error:", sys.exc_info()[0]
        # raw_input("wait")
        print
        return []
Example 11: does_line_existNew
# Required import: from lucene import QueryParser [as alias]
# Or: from lucene.QueryParser import parse [as alias]
def does_line_existNew(self, line, x, y):
    """
    Checks whether the parsed sentence already exists in the index.
    """
    query = ""
    try:
        array = re.findall(r'[\w]+', line)
        string = ""
        for item in array:
            string += item + " "
        qp = QueryParser(Version.LUCENE_35, "Sentence", analyzer)
        qp.setDefaultOperator(qp.Operator.AND)
        query = qp.parse(string)
        MAX = 10
        hits = searcher.search(query, MAX)
        if len(hits.scoreDocs) > 0:
            return True
        else:
            return False
    except Exception:
        s_tmp = str(sys.exc_info())
        if "too many boolean clauses" in s_tmp:
            print "too many boolean clauses"
            # Return True so the sentence is not re-added each time, which
            # avoids further error messages. Only occurs with very large
            # sentences.
            return True
        else:
            print "Unexpected error:", sys.exc_info()[0]
            print "in does line exist"
            print s_tmp
            return False
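The "too many boolean clauses" branch corresponds to Lucene's BooleanQuery.TooManyClauses error, raised when a query exceeds the default limit of 1024 clauses; here, an AND query over every word of a very long sentence. If raising the limit is preferable to skipping the sentence, a minimal sketch of the standard knob, assuming the same flat PyLucene namespace used above:

from lucene import BooleanQuery

# Raise the maximum clause count (default 1024) before parsing very long
# sentences; the trade-off is higher memory use and slower queries.
BooleanQuery.setMaxClauseCount(4096)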
Example 12: query
# Required import: from lucene import QueryParser [as alias]
# Or: from lucene.QueryParser import parse [as alias]
def query(indexName, queryString):
    indSearcher = IndexSearcher(SimpleFSDirectory(File(indexName)))
    qp = QueryParser(Version.LUCENE_CURRENT, "content",
                     StandardAnalyzer(Version.LUCENE_CURRENT))
    qp.setDefaultOperator(qp.Operator.AND)

    query = qp.parse(queryString.replace("-", "_"))
    aux = indSearcher.search(query, 100)
    results = aux.scoreDocs
    hits = aux.totalHits
    ir = indSearcher.getIndexReader()
    # results = collector.topDocs()

    i = 0
    res = []
    for r in results:
        # Fetch each hit's own document (r.doc); the original sequential
        # counter would fetch the first documents of the index instead.
        doc = ir.document(r.doc)
        res.insert(i, doc.get('id'))
        i += 1
    return res
Example 13: testSlop
# Required import: from lucene import QueryParser [as alias]
# Or: from lucene.QueryParser import parse [as alias]
def testSlop(self):
    q = QueryParser(Version.LUCENE_CURRENT, "field",
                    self.analyzer).parse('"exact phrase"')
    self.assertEqual('"exact phrase"', q.toString("field"), "zero slop")

    qp = QueryParser(Version.LUCENE_CURRENT, "field", self.analyzer)
    qp.setPhraseSlop(5)
    q = qp.parse('"sloppy phrase"')
    self.assertEqual('"sloppy phrase"~5', q.toString("field"), "sloppy, implicitly")
Example 14: testLowercasing
# Required import: from lucene import QueryParser [as alias]
# Or: from lucene.QueryParser import parse [as alias]
def testLowercasing(self):
    q = QueryParser(Version.LUCENE_CURRENT, "field",
                    self.analyzer).parse("PrefixQuery*")
    self.assertEqual("prefixquery*", q.toString("field"), "lowercased")

    qp = QueryParser(Version.LUCENE_CURRENT, "field", self.analyzer)
    qp.setLowercaseExpandedTerms(False)
    q = qp.parse("PrefixQuery*")
    self.assertEqual("PrefixQuery*", q.toString("field"), "not lowercased")
Example 15: extractFeatureQueryWords
# Required import: from lucene import QueryParser [as alias]
# Or: from lucene.QueryParser import parse [as alias]
def extractFeatureQueryWords(query):
    import string
    from lucene import Document, TermQuery, Term

    # create analyzer
    aux_analyzer = StandardAnalyzer(Version.LUCENE_CURRENT)
    # Split the query up front so the except clause below can safely return it
    # even when the failure happens while reading the feature file.
    querywordlist = query.split()
    try:
        file = open('../features.txt', 'r')
        featurelist = []
        for line in file.readlines():
            words_in_line = line.split()
            featurelist += words_in_line

        featureQueryList = []
        productQueryList = []
        for word in querywordlist:
            if word in featurelist:
                featureQueryList.append(word)
            else:
                # create parser for word
                aux_parser = QueryParser(Version.LUCENE_CURRENT, "title", aux_analyzer)
                aux_query = aux_parser.parse(word)
                scoreDocs = searcher.search(aux_query, 50).scoreDocs
                if scoreDocs:
                    productQueryList.append(word)

        featureQuery = ""
        if featureQueryList:
            featureQuery = "("
            for i in range(len(featureQueryList)):
                if i == len(featureQueryList) - 1:
                    featureQuery += featureQueryList[i] + ")"
                else:
                    featureQuery += featureQueryList[i] + " AND "
        print featureQuery

        productQuery = ""
        if productQueryList:
            productQuery = "("
            for i in range(len(productQueryList)):
                if i == len(productQueryList) - 1:
                    productQuery += productQueryList[i] + ")"
                else:
                    productQuery += productQueryList[i] + " AND "

        return (featureQuery, productQuery, featureQueryList, productQueryList)
    except Exception, ex:
        print "Could not separate feature query words. Reason: ", ex
        return ("", "(" + query + ")", [], querywordlist)