本文整理汇总了Python中collective.solr.indexer.SolrIndexProcessor.index方法的典型用法代码示例。如果您正苦于以下问题:Python SolrIndexProcessor.index方法的具体用法?Python SolrIndexProcessor.index怎么用?Python SolrIndexProcessor.index使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类collective.solr.indexer.SolrIndexProcessor
的用法示例。
在下文中一共展示了SolrIndexProcessor.index方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: testTwoRequests
# 需要导入模块: from collective.solr.indexer import SolrIndexProcessor [as 别名]
# 或者: from collective.solr.indexer.SolrIndexProcessor import index [as 别名]
def testTwoRequests(self):
    """Indexing a single object must produce exactly two HTTP requests:
    one schema fetch followed by one document add."""
    mngr = SolrConnectionManager(active=True)
    proc = SolrIndexProcessor(mngr)
    # queue the fake responses: first the schema, then the add acknowledgement
    output = fakehttp(mngr.getConnection(), getData('schema.xml'),
                      getData('add_response.txt'))
    proc.index(self.foo)
    mngr.closeConnection()
    # exactly two requests were sent: schema fetch + document add
    self.assertEqual(len(output), 2)
    # `failUnless` is a deprecated unittest alias (removed in Python 3.12);
    # use `assertTrue` instead
    self.assertTrue(output.get().startswith(self.schema_request))
    self.assertEqual(sortFields(output.get()), getData('add_request.txt'))
示例2: RobustnessTests
# 需要导入模块: from collective.solr.indexer import SolrIndexProcessor [as 别名]
# 或者: from collective.solr.indexer.SolrIndexProcessor import index [as 别名]
class RobustnessTests(TestCase):
    """Ensure the index processor degrades gracefully (skips the operation
    and logs a warning) when the Solr schema has no unique key."""

    layer = COLLECTIVE_SOLR_MOCK_REGISTRY_FIXTURE

    def setUp(self):
        self.mngr = SolrConnectionManager()
        self.mngr.setHost(active=True)
        self.conn = self.mngr.getConnection()
        self.proc = SolrIndexProcessor(self.mngr)
        self.log = []  # catch log messages...

        def logger(*args):
            self.log.extend(args)

        # remember the original handler so tearDown can undo the monkey-patch
        # (the original code leaked the patch into every subsequent test)
        self._original_warning = logger_indexer.warning
        logger_indexer.warning = logger
        config = getConfig()
        config.atomic_updates = True

    def tearDown(self):
        # restore the patched logger so other tests are unaffected
        logger_indexer.warning = self._original_warning
        self.mngr.closeConnection()
        self.mngr.setHost(active=False)

    def testIndexingWithUniqueKeyMissing(self):
        """Indexing is skipped with a warning when the schema lacks
        a unique key."""
        # fake schema response
        fakehttp(self.conn, getData('simple_schema.xml'))
        # read and cache the schema
        self.mngr.getSchema()
        response = getData('add_response.txt')
        output = fakehttp(self.conn, response)  # fake add response
        foo = Foo(id='500', name='foo')
        # indexing sends data
        self.proc.index(foo)
        # nothing happened...
        self.assertEqual(len(output), 0)
        self.assertEqual(self.log, [
            'schema is missing unique key, skipping indexing of %r', foo])

    def testUnindexingWithUniqueKeyMissing(self):
        """Unindexing is skipped with a warning when the schema lacks
        a unique key."""
        # fake schema response
        fakehttp(self.conn, getData('simple_schema.xml'))
        # read and cache the schema
        self.mngr.getSchema()
        response = getData('delete_response.txt')
        # fake delete response
        output = fakehttp(self.conn, response)
        foo = Foo(id='500', name='foo')
        # unindexing sends data
        self.proc.unindex(foo)
        # nothing happened...
        self.assertEqual(len(output), 0)
        self.assertEqual(self.log, [
            'schema is missing unique key, skipping unindexing of %r', foo])
示例3: testExtraRequest
# 需要导入模块: from collective.solr.indexer import SolrIndexProcessor [as 别名]
# 或者: from collective.solr.indexer.SolrIndexProcessor import index [as 别名]
def testExtraRequest(self):
    """Basically the same as `testThreeRequests`, except it tests
    adding fake responses consecutively via `fakemore`."""
    mngr = SolrConnectionManager(active=True)
    proc = SolrIndexProcessor(mngr)
    conn = mngr.getConnection()
    output = fakehttp(conn, getData('schema.xml'))
    fakemore(conn, getData('add_response.txt'))
    proc.index(self.foo)
    fakemore(conn, getData('delete_response.txt'))
    proc.unindex(self.foo)
    mngr.closeConnection()
    # three requests were sent: schema fetch, document add, document delete
    self.assertEqual(len(output), 3)
    # `failUnless` is a deprecated unittest alias (removed in Python 3.12);
    # use `assertTrue` instead
    self.assertTrue(output.get().startswith(self.schema_request))
    self.assertEqual(sortFields(output.get()), getData('add_request.txt'))
    self.assertEqual(output.get(), getData('delete_request.txt'))
示例4: sync
# 需要导入模块: from collective.solr.indexer import SolrIndexProcessor [as 别名]
# 或者: from collective.solr.indexer.SolrIndexProcessor import index [as 别名]
def sync(self, batch=1000):
"""Sync the Solr index with the portal catalog. Records contained
in the catalog but not in Solr will be indexed and records not
contained in the catalog will be removed.
"""
manager = queryUtility(ISolrConnectionManager)
proc = SolrIndexProcessor(manager)
conn = manager.getConnection()
key = queryUtility(ISolrConnectionManager).getSchema().uniqueKey
zodb_conn = self.context._p_jar
catalog = getToolByName(self.context, "portal_catalog")
getIndex = catalog._catalog.getIndex
modified_index = getIndex("modified")
uid_index = getIndex(key)
log = self.mklog()
real = timer() # real time
lap = timer() # real lap time (for intermediate commits)
cpu = timer(clock) # cpu time
# get Solr status
query = "+%s:[* TO *]" % key
response = conn.search(q=query, rows=MAX_ROWS, fl="%s modified" % key)
# avoid creating DateTime instances
simple_unmarshallers = unmarshallers.copy()
simple_unmarshallers["date"] = parse_date_as_datetime
flares = SolrResponse(response, simple_unmarshallers)
response.close()
solr_results = {}
solr_uids = set()
def _utc_convert(value):
t_tup = value.utctimetuple()
return (((t_tup[0] * 12 + t_tup[1]) * 31 + t_tup[2]) * 24 + t_tup[3]) * 60 + t_tup[4]
for flare in flares:
uid = flare[key]
solr_uids.add(uid)
solr_results[uid] = _utc_convert(flare["modified"])
# get catalog status
cat_results = {}
cat_uids = set()
for uid, rid in uid_index._index.items():
cat_uids.add(uid)
cat_results[uid] = rid
# differences
index = cat_uids.difference(solr_uids)
solr_uids.difference_update(cat_uids)
unindex = solr_uids
processed = 0
flush = notimeout(lambda: conn.flush())
def checkPoint():
msg = "intermediate commit (%d items processed, " "last batch in %s)...\n" % (processed, lap.next())
log(msg)
logger.info(msg)
flush()
zodb_conn.cacheGC()
cpi = checkpointIterator(checkPoint, batch)
# Look up objects
uid_rid_get = cat_results.get
rid_path_get = catalog._catalog.paths.get
catalog_traverse = catalog.unrestrictedTraverse
def lookup(
uid, rid=None, uid_rid_get=uid_rid_get, rid_path_get=rid_path_get, catalog_traverse=catalog_traverse
):
if rid is None:
rid = uid_rid_get(uid)
if not rid:
return None
if not isinstance(rid, int):
rid = tuple(rid)[0]
path = rid_path_get(rid)
if not path:
return None
try:
obj = catalog_traverse(path)
except AttributeError:
return None
return obj
log('processing %d "unindex" operations next...\n' % len(unindex))
op = notimeout(lambda uid: conn.delete(id=uid))
for uid in unindex:
obj = lookup(uid)
if obj is None:
op(uid)
processed += 1
cpi.next()
else:
log("not unindexing existing object %r.\n" % uid)
log('processing %d "index" operations next...\n' % len(index))
op = notimeout(lambda obj: proc.index(obj))
for uid in index:
obj = lookup(uid)
if indexable(obj):
op(obj)
processed += 1
cpi.next()
else:
#.........这里部分代码省略.........
示例5: QueueIndexerTests
# 需要导入模块: from collective.solr.indexer import SolrIndexProcessor [as 别名]
# 或者: from collective.solr.indexer.SolrIndexProcessor import index [as 别名]
class QueueIndexerTests(TestCase):
def setUp(self):
    """Register a connection-config utility and prime the connection
    manager with a cached schema before each test."""
    provideUtility(SolrConnectionConfig(), ISolrConnectionConfig)
    self.mngr = SolrConnectionManager()
    self.mngr.setHost(active=True)
    connection = self.mngr.getConnection()
    # queue a fake schema response, then read and cache the schema
    fakehttp(connection, getData('schema.xml'))
    self.mngr.getSchema()
    self.proc = SolrIndexProcessor(self.mngr)
def tearDown(self):
    """Close the fake connection and deactivate the manager."""
    manager = self.mngr
    manager.closeConnection()
    manager.setHost(active=False)
def testPrepareData(self):
    """`prepareData` must mangle role/user tokens, replacing the ':'
    separator with '$'."""
    data = {'allowedRolesAndUsers': [
        'user:test_user_1_', 'user:portal_owner']}
    prepareData(data)
    expected = {'allowedRolesAndUsers': ['user$test_user_1_',
                                         'user$portal_owner']}
    self.assertEqual(data, expected)
def testLanguageParameterHandling(self):
    """Empty `Language` values are normalized to 'any'; values under
    any other index key are left untouched."""
    cases = [
        # empty strings inside a list are replaced...
        ({'Language': ['en', '']}, {'Language': ['en', 'any']}),
        # ...as is a bare empty string
        ({'Language': ''}, {'Language': 'any'}),
        # for other indices this shouldn't happen...
        ({'Foo': ['en', '']}, {'Foo': ['en', '']}),
    ]
    for data, expected in cases:
        prepareData(data)
        self.assertEqual(data, expected)
def testIndexObject(self):
    """Indexing an object sends an add request matching the canned
    request fixture."""
    # queue a fake add response
    output = fakehttp(self.mngr.getConnection(),
                      getData('add_response.txt'))
    # indexing sends data
    self.proc.index(Foo(id='500', name='python test doc'))
    self.assertEqual(sortFields(str(output)), getData('add_request.txt'))
def testIndexAccessorRaises(self):
    """An attribute accessor that raises must not prevent the other
    attributes from being indexed."""
    # queue a fake add response
    output = fakehttp(self.mngr.getConnection(),
                      getData('add_response.txt'))

    def broken_accessor():
        raise ValueError

    # indexing still sends data despite the raising accessor
    self.proc.index(Foo(id='500', name='python test doc',
                        text=broken_accessor))
    self.assertEqual(sortFields(str(output)), getData('add_request.txt'))
def testPartialIndexObject(self):
    """Partial (re)indexing currently still sends data for all schema
    attributes (pending SOLR-139)."""
    foo = Foo(id='500', name='foo', price=42.0)
    # first index all attributes...
    response = getData('add_response.txt')
    output = fakehttp(self.mngr.getConnection(), response)
    self.proc.index(foo)
    # `assert_` is a deprecated unittest alias (removed in Python 3.12);
    # use `assertTrue` instead
    self.assertTrue(str(output).find(
        '<field name="price">42.0</field>') > 0, '"price" data not found')
    # then only a subset...
    response = getData('add_response.txt')
    output = fakehttp(self.mngr.getConnection(), response)
    self.proc.index(foo, attributes=['id', 'name'])
    output = str(output)
    self.assertTrue(
        output.find('<field name="name">foo</field>') > 0,
        '"name" data not found'
    )
    # at this point we'd normally check for a partial update:
    # self.assertEqual(output.find('price'), -1, '"price" data found?')
    # self.assertEqual(output.find('42'), -1, '"price" data found?')
    # however, until SOLR-139 has been implemented (re)index operations
    # always need to provide data for all attributes in the schema...
    self.assertTrue(
        output.find('<field name="price">42.0</field>') > 0,
        '"price" data not found'
    )
def testDateIndexing(self):
    """Zope DateTime values must be serialized in Solr's ISO-8601
    format, with milliseconds and a trailing 'Z'."""
    foo = Foo(id='zeidler', name='andi', cat='nerd',
              timestamp=DateTime('May 11 1972 03:45 GMT'))
    response = getData('add_response.txt')
    # fake add response
    output = fakehttp(self.mngr.getConnection(), response)
    self.proc.index(foo)
    required = '<field name="timestamp">1972-05-11T03:45:00.000Z</field>'
    # `assert_` is a deprecated unittest alias (removed in Python 3.12);
    # use `assertTrue` instead
    self.assertTrue(str(output).find(required) > 0, '"date" data not found')
def testDateIndexingWithPythonDateTime(self):
foo = Foo(id='gerken', name='patrick', cat='nerd',
#.........这里部分代码省略.........