本文整理汇总了Python中pysolr.Solr._send_request方法的典型用法代码示例。如果您正苦于以下问题:Python Solr._send_request方法的具体用法?Python Solr._send_request怎么用?Python Solr._send_request使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pysolr.Solr
的用法示例。
在下文中一共展示了Solr._send_request方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: delete_site
# 需要导入模块: from pysolr import Solr [as 别名]
# 或者: from pysolr.Solr import _send_request [as 别名]
def delete_site(site_key):
    """Prepare to delete a site's data: connect to the database and Solr.

    Connects to the Stackdump database via SQLObject, then pings Solr to
    fail fast; aborts the whole process with exit code 2 if Solr is
    unreachable.

    Parameters:
        site_key: identifier of the site whose data is to be deleted.
    """
    # connect to the database
    print('Connecting to the database...')
    conn_str = settings.DATABASE_CONN_STR
    sqlhub.processConnection = connectionForURI(conn_str)
    print('Connected.\n')

    # connect to solr
    print('Connecting to solr...')
    solr = Solr(settings.SOLR_URL)
    # pysolr doesn't try to connect until a request is made, so we'll make a
    # ping request to detect a dead server immediately.
    try:
        solr._send_request('GET', '%s/admin/ping' % solr.path)
    # Fixed: `except socket.error, e:` is Python-2-only syntax (SyntaxError
    # on Python 3); `as` works on Python 2.6+ and 3.x.
    except socket.error as e:
        print('Failed to connect to solr - error was: %s' % str(e))
        print('Aborting.')
        sys.exit(2)
示例2: import_site
# 需要导入模块: from pysolr import Solr [as 别名]
# 或者: from pysolr.Solr import _send_request [as 别名]
def import_site(xml_root, site_name, dump_date, site_desc, site_key,
                site_base_url, answer_yes=False):
    """Prepare to import a site dump: validate the XML path and connect.

    Verifies that *xml_root* exists (exit code 1 otherwise), connects to the
    Stackdump database, then pings Solr to fail fast (exit code 2 if it is
    unreachable).

    Parameters:
        xml_root: directory containing the site's XML dump files.
        site_name, dump_date, site_desc, site_key, site_base_url: site
            metadata used later in the import (not consumed in this excerpt).
        answer_yes: when True, presumably suppresses interactive prompts
            later in the import — not used in this excerpt.
    """
    print('Using the XML root path: ' + xml_root + '\n')
    if not os.path.exists(xml_root):
        print('The given XML root path does not exist.')
        sys.exit(1)

    # connect to the database
    print('Connecting to the Stackdump database...')
    conn_str = settings.DATABASE_CONN_STR
    sqlhub.processConnection = connectionForURI(conn_str)
    print('Connected.\n')

    # connect to solr
    print('Connecting to solr...')
    solr = Solr(settings.SOLR_URL, assume_clean=True)
    # pysolr doesn't try to connect until a request is made, so we'll make a
    # ping request to detect a dead server immediately.
    try:
        solr._send_request('GET', 'admin/ping')
    # Fixed: `except socket.error, e:` is Python-2-only syntax (SyntaxError
    # on Python 3); `as` works on Python 2.6+ and 3.x.
    except socket.error as e:
        print('Failed to connect to solr - error was: %s' % str(e))
        print('Aborting.')
        sys.exit(2)
示例3: __init__
# 需要导入模块: from pysolr import Solr [as 别名]
# 或者: from pysolr.Solr import _send_request [as 别名]
class DocManager:
    """Mirrors documents into a Solr backend.

    Creates a connection to Solr and adds/removes documents; on rollback it
    searches for them. Documents are kept as id/doc pairs so repeated
    updates to one document always reflect the newest version instead of
    several slightly different copies.
    """

    def __init__(self, url, auto_commit_interval=DEFAULT_COMMIT_INTERVAL, unique_key="_id", **kwargs):
        """Verify Solr URL and establish a connection."""
        self.solr = Solr(url)
        self.unique_key = unique_key
        # pysolr expresses commit intervals in milliseconds.
        self.auto_commit_interval = (
            None if auto_commit_interval is None else auto_commit_interval * 1000
        )
        self.field_list = []
        self._build_fields()
def _parse_fields(self, result, field_name):
    """Return the field names listed under ``schema.<field_name>`` in *result*."""
    section = result.get("schema", {}).get(field_name, {})
    # Dict keys are unique by definition, so the original append-if-absent
    # loop reduces to listing the keys in their original order.
    return [name for name in section]
def _build_fields(self):
    """Build the list of schema-declared fields and dynamic-field regexes.

    Fetches the schema from Solr's admin endpoint, records the declared
    field names in ``self.field_list``, and compiles one regex per
    dynamic-field pattern into ``self._dynamic_field_regexes``.
    """
    declared_fields = self.solr._send_request("get", ADMIN_URL)
    result = decoder.decode(declared_fields)
    self.field_list = self._parse_fields(result, "fields")

    # Build regular expressions to match dynamic fields.
    # Dynamic field names may have exactly one wildcard, either at
    # the beginning or the end of the name.
    self._dynamic_field_regexes = []
    for wc_pattern in self._parse_fields(result, "dynamicFields"):
        if wc_pattern[0] == "*":
            # Fixed: use raw strings — "\w"/"\Z" in plain strings are invalid
            # escape sequences (SyntaxWarning since Python 3.12, future error).
            # Pattern text is unchanged, so matching behavior is identical.
            # NOTE(review): the leading "*" is interpolated verbatim, so it
            # ends up quantifying the preceding \w — looks accidental but is
            # preserved; compare the corrected form in other versions of this
            # class that use ".*%s\Z" % wc_pattern[1:].
            self._dynamic_field_regexes.append(re.compile(r"\w%s\Z" % wc_pattern))
        elif wc_pattern[-1] == "*":
            self._dynamic_field_regexes.append(re.compile(r"\A%s\w*" % wc_pattern[:-1]))
def _clean_doc(self, doc):
    """Reformats the given document before insertion into Solr.
    This method reformats the document in the following ways:
    - removes extraneous fields that aren't defined in schema.xml
    - unwinds arrays in order to find and later flatten sub-documents
    - flattens the document so that there are no sub-documents, and every
    value is associated with its dot-separated path of keys
    An example:
    {"a": 2,
    "b": {
    "c": {
    "d": 5
    }
    },
    "e": [6, 7, 8]
    }
    becomes:
    {"a": 2, "b.c.d": 5, "e.0": 6, "e.1": 7, "e.2": 8}
    """
    # SOLR cannot index fields within sub-documents, so flatten documents
    # with the dot-separated path to each value as the respective key
    def flattened(doc):
        # Depth-first walk yielding (dotted_path, scalar_value) pairs.
        # `path` is a shared list mutated in place: each key/index is
        # appended on the way down and popped on the way back up.
        def flattened_kernel(doc, path):
            for k, v in doc.items():
                path.append(k)
                if isinstance(v, dict):
                    # Recurse into sub-documents.
                    for inner_k, inner_v in flattened_kernel(v, path):
                        yield inner_k, inner_v
                elif isinstance(v, list):
                    # Lists contribute their element index to the path
                    # (e.g. "e.0", "e.1" in the docstring example).
                    for li, lv in enumerate(v):
                        path.append(str(li))
                        if isinstance(lv, dict):
                            for dk, dv in flattened_kernel(lv, path):
                                yield dk, dv
                        else:
                            yield ".".join(path), lv
                        path.pop()
                else:
                    yield ".".join(path), v
                path.pop()
        return dict(flattened_kernel(doc, []))
    # Translate the _id field to whatever unique key we're using
    doc[self.unique_key] = doc["_id"]
    flat_doc = flattened(doc)
    # Only include fields that are explicitly provided in the
    # ... remainder of this method omitted in the source excerpt ...
示例4: DocManager
# 需要导入模块: from pysolr import Solr [as 别名]
# 或者: from pysolr.Solr import _send_request [as 别名]
class DocManager(DocManagerBase):
    """Mirrors documents into a Solr backend.

    Creates a connection to Solr and adds/removes documents; on rollback it
    searches for them. Documents are kept as id/doc pairs so repeated
    updates to one document always reflect the newest version instead of
    several slightly different copies.
    """

    def __init__(self, url, auto_commit_interval=DEFAULT_COMMIT_INTERVAL,
                 unique_key='_id', chunk_size=DEFAULT_MAX_BULK, **kwargs):
        """Verify Solr URL and establish a connection."""
        self.solr = Solr(url)
        self.unique_key = unique_key
        # pysolr expresses commit intervals in milliseconds.
        if auto_commit_interval is None:
            self.auto_commit_interval = None
        else:
            self.auto_commit_interval = auto_commit_interval * 1000
        self.chunk_size = chunk_size
        self.field_list = []
        self._build_fields()
        self._formatter = DocumentFlattener()
def _parse_fields(self, result, field_name):
    """Return the field names found under ``schema.<field_name>`` in *result*."""
    schema_section = result.get('schema', {}).get(field_name, {})
    # Keys of a dict are already unique, so collecting them directly is
    # equivalent to the append-if-absent loop this replaces.
    return list(schema_section)
@wrap_exceptions
def _build_fields(self):
    """Build the list of schema-declared fields and dynamic-field regexes.

    Fetches the schema from Solr's admin endpoint, records the declared
    field names in ``self.field_list``, and compiles one regex per
    dynamic-field pattern into ``self._dynamic_field_regexes``.
    """
    declared_fields = self.solr._send_request('get', ADMIN_URL)
    result = decoder.decode(declared_fields)
    self.field_list = self._parse_fields(result, 'fields')

    # Build regular expressions to match dynamic fields.
    # Dynamic field names may have exactly one wildcard, either at
    # the beginning or the end of the name.
    self._dynamic_field_regexes = []
    for wc_pattern in self._parse_fields(result, 'dynamicFields'):
        if wc_pattern[0] == "*":
            # Fixed: raw strings — "\Z"/"\A" in plain strings are invalid
            # escape sequences (SyntaxWarning since Python 3.12, future
            # error). Pattern text unchanged, so matching is identical.
            self._dynamic_field_regexes.append(
                re.compile(r".*%s\Z" % wc_pattern[1:]))
        elif wc_pattern[-1] == "*":
            self._dynamic_field_regexes.append(
                re.compile(r"\A%s.*" % wc_pattern[:-1]))
def _clean_doc(self, doc):
    """Reformat *doc* for indexing into Solr.

    - Renames ``_id`` (when present) to the configured unique key.
    - Flattens nested dicts/lists into dot-separated key paths, e.g.
      ``{"a": {"b": {"c": 5}}, "e": [6, 7]}`` becomes
      ``{"a.b.c": 5, "e.0": 6, "e.1": 7}``.
    - Drops keys matching neither a schema-declared field nor any
      dynamic-field pattern, when schema information is available.
    """
    # Translate the _id field to the configured unique key. _id may be
    # absent when the doc was retrieved from Solr as part of an update.
    if '_id' in doc:
        doc[self.unique_key] = doc.pop("_id")
    # Solr cannot index fields inside sub-documents, so flatten the doc to
    # dot-separated key paths.
    flattened = self._formatter.format_document(doc)
    # Without any schema information there is nothing to filter against.
    if not self.field_list and not self._dynamic_field_regexes:
        return flattened

    def _keep(name):
        # A key survives if the schema declares it explicitly or it matches
        # one of the dynamic-field patterns.
        return name in self.field_list or any(
            rx.match(name) for rx in self._dynamic_field_regexes)

    return {key: value for key, value in flattened.items() if _keep(key)}
def stop(self):
#.........这里部分代码省略.........
示例5: SolrTestCase
# 需要导入模块: from pysolr import Solr [as 别名]
# 或者: from pysolr.Solr import _send_request [as 别名]
class SolrTestCase(unittest.TestCase):
    """Integration tests for the pysolr ``Solr`` client.

    NOTE(review): these tests talk to a live Solr server expected at
    http://localhost:8983/solr/core0 — they are integration tests, not unit
    tests, and will fail without that server running.
    """

    def setUp(self):
        super(SolrTestCase, self).setUp()
        # Two clients against the same core: library defaults vs. a
        # deliberately short timeout.
        self.default_solr = Solr('http://localhost:8983/solr/core0')
        # Short timeouts.
        self.solr = Solr('http://localhost:8983/solr/core0', timeout=2)
        # Fixture documents re-indexed before every test.
        self.docs = [
            {
                'id': 'doc_1',
                'title': 'Example doc 1',
                'price': 12.59,
                'popularity': 10,
            },
            {
                'id': 'doc_2',
                'title': 'Another example ☃ doc 2',
                'price': 13.69,
                'popularity': 7,
            },
            {
                'id': 'doc_3',
                'title': 'Another thing',
                'price': 2.35,
                'popularity': 8,
            },
            {
                'id': 'doc_4',
                'title': 'doc rock',
                'price': 99.99,
                'popularity': 10,
            },
            {
                'id': 'doc_5',
                'title': 'Boring',
                'price': 1.12,
                'popularity': 2,
            },
        ]
        # Clear it.
        self.solr.delete(q='*:*')
        # Index our docs. Yes, this leans on functionality we're going to test
        # later & if it's broken, everything will catastrophically fail.
        # Such is life.
        self.solr.add(self.docs)

    def tearDown(self):
        # Leave the core empty for whoever runs next.
        self.solr.delete(q='*:*')
        super(SolrTestCase, self).tearDown()

    def test_init(self):
        # Default client: stock URL, JSON decoder, 60-second default timeout.
        self.assertEqual(self.default_solr.url, 'http://localhost:8983/solr/core0')
        self.assertTrue(isinstance(self.default_solr.decoder, json.JSONDecoder))
        self.assertEqual(self.default_solr.timeout, 60)
        # Custom client keeps the timeout passed to the constructor.
        self.assertEqual(self.solr.url, 'http://localhost:8983/solr/core0')
        self.assertTrue(isinstance(self.solr.decoder, json.JSONDecoder))
        self.assertEqual(self.solr.timeout, 2)

    def test__create_full_url(self):
        # Nada.
        self.assertEqual(self.solr._create_full_url(path=''), 'http://localhost:8983/solr/core0')
        # Basic path.
        self.assertEqual(self.solr._create_full_url(path='pysolr_tests'), 'http://localhost:8983/solr/core0/pysolr_tests')
        # Leading slash (& making sure we don't touch the trailing slash).
        self.assertEqual(self.solr._create_full_url(path='/pysolr_tests/select/?whatever=/'), 'http://localhost:8983/solr/core0/pysolr_tests/select/?whatever=/')

    def test__send_request(self):
        # Test a valid request.
        resp_body = self.solr._send_request('GET', 'select/?q=doc&wt=json')
        self.assertTrue('"numFound":3' in resp_body)
        # Test a lowercase method & a body.
        xml_body = '<add><doc><field name="id">doc_12</field><field name="title">Whee! ☃</field></doc></add>'
        resp_body = self.solr._send_request('POST', 'update/?commit=true', body=xml_body, headers={
            'Content-type': 'text/xml; charset=utf-8',
        })
        self.assertTrue('<int name="status">0</int>' in resp_body)
        # Test a non-existent URL.
        old_url = self.solr.url
        self.solr.url = 'http://127.0.0.1:567898/wahtever'
        self.assertRaises(SolrError, self.solr._send_request, 'get', 'select/?q=doc&wt=json')
        self.solr.url = old_url

    def test__select(self):
        # Short params.
        resp_body = self.solr._select({'q': 'doc'})
        resp_data = json.loads(resp_body)
        self.assertEqual(resp_data['response']['numFound'], 3)
        # Long params.
        resp_body = self.solr._select({'q': 'doc' * 1024})
        resp_data = json.loads(resp_body)
        self.assertEqual(resp_data['response']['numFound'], 0)
        self.assertEqual(len(resp_data['responseHeader']['params']['q']), 3 * 1024)

    def test__mlt(self):
        resp_body = self.solr._mlt({'q': 'id:doc_1', 'mlt.fl': 'title'})
        # ... remainder of this method omitted in the source excerpt ...
示例6: DocManager
# 需要导入模块: from pysolr import Solr [as 别名]
# 或者: from pysolr.Solr import _send_request [as 别名]
class DocManager(DocManagerBase):
    """Mirrors documents into a Solr backend.

    Creates a connection to Solr and adds/removes documents; on rollback it
    searches for them. Documents are kept as id/doc pairs so repeated
    updates to one document always reflect the newest version instead of
    several slightly different copies.
    """

    def __init__(self, url, auto_commit_interval=DEFAULT_COMMIT_INTERVAL,
                 unique_key='_id', chunk_size=DEFAULT_MAX_BULK, **kwargs):
        """Verify Solr URL and establish a connection."""
        self.url = url
        # Extra client options may be passed through under 'clientOptions'.
        self.solr = Solr(url, **kwargs.get('clientOptions', {}))
        self.unique_key = unique_key
        # pysolr expresses commit intervals in milliseconds.
        self.auto_commit_interval = (
            auto_commit_interval * 1000 if auto_commit_interval is not None else None
        )
        self.chunk_size = chunk_size
        self.field_list = []
        self._build_fields()
        self._formatter = DocumentFlattener()
def _parse_fields(self, result, field_name):
    """Return the names listed under ``schema.<field_name>`` in *result*."""
    # A dict cannot hold duplicate keys, so the original append-if-absent
    # loop is exactly a listing of the keys, order preserved.
    return [field for field in result.get('schema', {}).get(field_name, {})]
@wrap_exceptions
def _build_fields(self):
    """Build the list of schema-declared fields and dynamic-field regexes.

    Fetches the schema from Solr's admin endpoint, records the declared
    field names in ``self.field_list``, and compiles one regex per
    dynamic-field pattern into ``self._dynamic_field_regexes``.
    """
    declared_fields = self.solr._send_request('get', ADMIN_URL)
    result = decoder.decode(declared_fields)
    self.field_list = self._parse_fields(result, 'fields')

    # Build regular expressions to match dynamic fields.
    # Dynamic field names may have exactly one wildcard, either at
    # the beginning or the end of the name.
    self._dynamic_field_regexes = []
    for wc_pattern in self._parse_fields(result, 'dynamicFields'):
        if wc_pattern[0] == "*":
            # Fixed: raw strings — "\Z"/"\A" in plain strings are invalid
            # escape sequences (SyntaxWarning since Python 3.12, future
            # error). Pattern text unchanged, so matching is identical.
            self._dynamic_field_regexes.append(
                re.compile(r".*%s\Z" % wc_pattern[1:]))
        elif wc_pattern[-1] == "*":
            self._dynamic_field_regexes.append(
                re.compile(r"\A%s.*" % wc_pattern[:-1]))
def _clean_doc(self, doc, namespace, timestamp):
    """Reformats the given document before insertion into Solr.
    This method reformats the document in the following ways:
    - removes extraneous fields that aren't defined in schema.xml
    - unwinds arrays in order to find and later flatten sub-documents
    - flattens the document so that there are no sub-documents, and every
    value is associated with its dot-separated path of keys
    - inserts namespace and timestamp metadata into the document in order
    to handle rollbacks
    An example:
    {"a": 2,
    "b": {
    "c": {
    "d": 5
    }
    },
    "e": [6, 7, 8]
    }
    becomes:
    {"a": 2, "b.c.d": 5, "e.0": 6, "e.1": 7, "e.2": 8}
    """
    # Translate the _id field to whatever unique key we're using.
    # _id may not exist in the doc, if we retrieved it from Solr
    # as part of update.
    if '_id' in doc:
        doc[self.unique_key] = u(doc.pop("_id"))
    # Update namespace and timestamp metadata
    if 'ns' in doc or '_ts' in doc:
        # Refuse to silently clobber caller data with rollback metadata.
        raise errors.OperationFailed(
            'Need to set "ns" and "_ts" fields, but these fields already '
            'exist in the document %r!' % doc)
    doc['ns'] = namespace
    doc['_ts'] = timestamp
    # SOLR cannot index fields within sub-documents, so flatten documents
    # with the dot-separated path to each value as the respective key
    flat_doc = self._formatter.format_document(doc)
    # Only include fields that are explicitly provided in the
    # ... remainder of this method omitted in the source excerpt ...
示例7: DocManager
# 需要导入模块: from pysolr import Solr [as 别名]
# 或者: from pysolr.Solr import _send_request [as 别名]
class DocManager():
    """The DocManager class creates a connection to the backend engine and
    adds/removes documents, and in the case of rollback, searches for them.
    The reason for storing id/doc pairs as opposed to doc's is so that multiple
    updates to the same doc reflect the most up to date version as opposed to
    multiple, slightly different versions of a doc.
    """

    def __init__(self, url, auto_commit=False, unique_key='_id', **kwargs):
        """Verify Solr URL and establish a connection.
        """
        self.solr = Solr(url)
        self.unique_key = unique_key
        self.auto_commit = auto_commit
        self.field_list = []
        self.dynamic_field_list = []
        self.build_fields()
        if auto_commit:
            # NOTE(review): run_auto_commit is not visible in this excerpt;
            # presumably it starts a periodic commit loop — confirm upstream.
            self.run_auto_commit()

    def _parse_fields(self, result, field_name):
        """ If Schema access, parse fields and build respective lists
        """
        field_list = []
        for key, value in result.get('schema', {}).get(field_name, {}).items():
            if key not in field_list:
                field_list.append(key)
        return field_list

    def build_fields(self):
        """ Builds a list of valid fields
        """
        declared_fields = self.solr._send_request('get', ADMIN_URL)
        result = decoder.decode(declared_fields)
        # NOTE(review): the trailing comma makes self.field_list a 1-tuple
        # wrapping the list; clean_doc compensates by indexing [0]. Looks
        # accidental, but the two methods are consistent with each other.
        self.field_list = self._parse_fields(result, 'fields'),
        self.dynamic_field_list = self._parse_fields(result, 'dynamicFields')

    def clean_doc(self, doc):
        """ Cleans a document passed in to be compliant with the Solr as
        used by Solr. This WILL remove fields that aren't in the schema, so
        the document may actually get altered.
        """
        # NOTE(review): after build_fields runs, field_list is a non-empty
        # tuple (even when the schema has no fields), so this early exit
        # only fires before schema retrieval — verify that is intended.
        if not self.field_list:
            return doc
        fixed_doc = {}
        doc[self.unique_key] = doc["_id"]
        for key, value in doc.items():
            if key in self.field_list[0]:
                fixed_doc[key] = value
            # Dynamic strings. * can occur only at beginning and at end
            else:
                for field in self.dynamic_field_list:
                    # NOTE(review): `field` still contains the '*' wildcard,
                    # so '*' acts as a regex quantifier on the preceding
                    # character rather than being translated to '.*' —
                    # looks accidental; verify against upstream.
                    if field[0] == '*':
                        regex = re.compile(r'\w%s\b' % (field))
                    else:
                        regex = re.compile(r'\b%s\w' % (field))
                    if regex.match(key):
                        fixed_doc[key] = value
        return fixed_doc

    def stop(self):
        """ Stops the instance
        """
        # Flag read by the auto-commit loop (not visible in this excerpt).
        self.auto_commit = False

    def upsert(self, doc):
        """Update or insert a document into Solr
        This method should call whatever add/insert/update method exists for
        the backend engine and add the document in there. The input will
        always be one mongo document, represented as a Python dictionary.
        """
        try:
            self.solr.add([self.clean_doc(doc)], commit=True)
        except SolrError:
            raise errors.OperationFailed(
                "Could not insert %r into Solr" % bsjson.dumps(doc))

    def bulk_upsert(self, docs):
        """Update or insert multiple documents into Solr
        docs may be any iterable
        """
        try:
            # Generator: documents are cleaned lazily as pysolr consumes them.
            cleaned = (self.clean_doc(d) for d in docs)
            self.solr.add(cleaned, commit=True)
        except SolrError:
            raise errors.OperationFailed(
                "Could not bulk-insert documents into Solr")

    def remove(self, doc):
        """Removes documents from Solr
        The input is a python dictionary that represents a mongo document.
        """
        # ... remainder of this method omitted in the source excerpt ...
示例8: SolrTestCase
# 需要导入模块: from pysolr import Solr [as 别名]
# 或者: from pysolr.Solr import _send_request [as 别名]
#.........这里部分代码省略.........
self.solr.delete(q='*:*')
# Index our docs. Yes, this leans on functionality we're going to test
# later & if it's broken, everything will catastrophically fail.
# Such is life.
self.solr.add(self.docs)
def tearDown(self):
    # Remove every document indexed during the test before delegating to
    # the superclass teardown, leaving the core empty for the next test.
    self.solr.delete(q='*:*')
    super(SolrTestCase, self).tearDown()
def test_init(self):
    """Constructor defaults: URL, decoder type, and timeout values."""
    # The default client gets the library's 60-second timeout; the second
    # client keeps the explicit timeout=2 from setUp.
    for client, expected_timeout in ((self.default_solr, 60), (self.solr, 2)):
        self.assertEqual(client.url, 'http://localhost:8983/solr/core0')
        self.assertTrue(isinstance(client.decoder, json.JSONDecoder))
        self.assertEqual(client.timeout, expected_timeout)
def assertSameIDs(self, docs, expected_ids):
    """Assert *docs* carry exactly the ids in *expected_ids*, order-insensitively."""
    self.assertEqual(frozenset(doc['id'] for doc in docs),
                     frozenset(expected_ids))
def test__create_full_url(self):
    """_create_full_url joins the base URL and a path, normalizing slashes."""
    base = 'http://localhost:8983/solr/core0'
    # An empty path leaves the base URL untouched.
    self.assertEqual(self.solr._create_full_url(path=''), base)
    # A simple relative path is appended with a single separator.
    self.assertEqual(self.solr._create_full_url(path='pysolr_tests'),
                     base + '/pysolr_tests')
    # A leading slash is collapsed; the trailing slash is left alone.
    self.assertEqual(
        self.solr._create_full_url(path='/pysolr_tests/select/?whatever=/'),
        base + '/pysolr_tests/select/?whatever=/')
def test__send_request(self):
    """Exercise _send_request: GET, POST with a body, and a dead URL."""
    # A valid GET request against the fixture data.
    body = self.solr._send_request('GET', 'select/?q=doc&wt=json')
    self.assertTrue('"numFound":3' in body)
    # A request carrying an XML body.
    update_xml = '<add><doc><field name="id">doc_12</field><field name="title">Whee!</field></doc></add>'
    body = self.solr._send_request('POST', 'update/?commit=true', body=update_xml, headers={
        'Content-type': 'text/xml; charset=utf-8',
    })
    self.assertTrue('<int name="status">0</int>' in body)
    # A non-existent URL must raise SolrError; restore the URL afterwards.
    saved_url = self.solr.url
    self.solr.url = 'http://127.0.0.1:567898/wahtever'
    self.assertRaises(SolrError, self.solr._send_request, 'get', 'select/?q=doc&wt=json')
    self.solr.url = saved_url
def test__select(self):
    """_select handles both short and very long parameter sets."""
    # Short params.
    payload = json.loads(self.solr._select({'q': 'doc'}))
    self.assertEqual(payload['response']['numFound'], 3)
    # A very long query string; the param must survive round-trip intact.
    payload = json.loads(self.solr._select({'q': 'doc' * 1024}))
    self.assertEqual(payload['response']['numFound'], 0)
    self.assertEqual(len(payload['responseHeader']['params']['q']), 3 * 1024)
def test__mlt(self):
resp_body = self.solr._mlt({'q': 'id:doc_1', 'mlt.fl': 'title'})
resp_data = json.loads(resp_body)
示例9: SolrTestCase
# 需要导入模块: from pysolr import Solr [as 别名]
# 或者: from pysolr.Solr import _send_request [as 别名]
class SolrTestCase(unittest.TestCase):
    """Integration tests for the pysolr ``Solr`` client.

    NOTE(review): requires a live Solr server at
    http://localhost:8983/solr/core0; these are integration tests.
    """

    def setUp(self):
        super(SolrTestCase, self).setUp()
        self.default_solr = Solr("http://localhost:8983/solr/core0")
        # Short timeouts.
        self.solr = Solr("http://localhost:8983/solr/core0", timeout=2)
        # Fixture documents re-indexed before every test.
        self.docs = [
            {"id": "doc_1", "title": "Example doc 1", "price": 12.59, "popularity": 10},
            {"id": "doc_2", "title": "Another example ☃ doc 2", "price": 13.69, "popularity": 7},
            {"id": "doc_3", "title": "Another thing", "price": 2.35, "popularity": 8},
            {"id": "doc_4", "title": "doc rock", "price": 99.99, "popularity": 10},
            {"id": "doc_5", "title": "Boring", "price": 1.12, "popularity": 2},
        ]
        # Clear it.
        self.solr.delete(q="*:*")
        # Index our docs. Yes, this leans on functionality we're going to test
        # later & if it's broken, everything will catastrophically fail.
        # Such is life.
        self.solr.add(self.docs)

    def tearDown(self):
        # Leave the core empty for whoever runs next.
        self.solr.delete(q="*:*")
        super(SolrTestCase, self).tearDown()

    def test_init(self):
        # Default client: stock URL, JSON decoder, 60-second default timeout.
        self.assertEqual(self.default_solr.url, "http://localhost:8983/solr/core0")
        self.assertTrue(isinstance(self.default_solr.decoder, json.JSONDecoder))
        self.assertEqual(self.default_solr.timeout, 60)
        # Custom client keeps the timeout passed to the constructor.
        self.assertEqual(self.solr.url, "http://localhost:8983/solr/core0")
        self.assertTrue(isinstance(self.solr.decoder, json.JSONDecoder))
        self.assertEqual(self.solr.timeout, 2)

    def test__create_full_url(self):
        # Nada.
        self.assertEqual(self.solr._create_full_url(path=""), "http://localhost:8983/solr/core0")
        # Basic path.
        self.assertEqual(
            self.solr._create_full_url(path="pysolr_tests"), "http://localhost:8983/solr/core0/pysolr_tests"
        )
        # Leading slash (& making sure we don't touch the trailing slash).
        self.assertEqual(
            self.solr._create_full_url(path="/pysolr_tests/select/?whatever=/"),
            "http://localhost:8983/solr/core0/pysolr_tests/select/?whatever=/",
        )

    def test__send_request(self):
        # Test a valid request.
        resp_body = self.solr._send_request("GET", "select/?q=doc&wt=json")
        self.assertTrue('"numFound":3' in resp_body)
        # Test a lowercase method & a body.
        xml_body = '<add><doc><field name="id">doc_12</field><field name="title">Whee! ☃</field></doc></add>'
        resp_body = self.solr._send_request(
            "POST", "update/?commit=true", body=xml_body, headers={"Content-type": "text/xml; charset=utf-8"}
        )
        self.assertTrue('<int name="status">0</int>' in resp_body)
        # Test a non-existent URL.
        old_url = self.solr.url
        self.solr.url = "http://127.0.0.1:567898/wahtever"
        self.assertRaises(SolrError, self.solr._send_request, "get", "select/?q=doc&wt=json")
        self.solr.url = old_url
        # Test bad core as well
        self.solr.url = "http://localhost:8983/solr/bad_core"
        try:
            self.assertRaises(SolrError, self.solr._send_request, "get", "select/?q=doc&wt=json")
        finally:
            # Always restore the good URL so later tests aren't poisoned.
            self.solr.url = old_url

    def test__select(self):
        # Short params.
        resp_body = self.solr._select({"q": "doc"})
        resp_data = json.loads(resp_body)
        self.assertEqual(resp_data["response"]["numFound"], 3)
        # Long params.
        resp_body = self.solr._select({"q": "doc" * 1024})
        resp_data = json.loads(resp_body)
        self.assertEqual(resp_data["response"]["numFound"], 0)
        self.assertEqual(len(resp_data["responseHeader"]["params"]["q"]), 3 * 1024)
        # Test Deep Pagination CursorMark
        resp_body = self.solr._select({"q": "*", "cursorMark": "*", "sort": "id desc", "start": 0, "rows": 2})
        resp_data = json.loads(resp_body)
        self.assertEqual(len(resp_data["response"]["docs"]), 2)
        self.assertIn("nextCursorMark", resp_data)

    def test__mlt(self):
        resp_body = self.solr._mlt({"q": "id:doc_1", "mlt.fl": "title"})
        resp_data = json.loads(resp_body)
        self.assertEqual(resp_data["response"]["numFound"], 0)

    def test__suggest_terms(self):
        # NOTE(review): this exercises _select, not a dedicated
        # _suggest_terms helper — possibly a copy-paste in the upstream
        # test suite; verify intent there.
        resp_body = self.solr._select({"terms.fl": "title"})
        resp_data = json.loads(resp_body)
        self.assertEqual(resp_data["response"]["numFound"], 0)
    # ... remainder of this class omitted in the source excerpt ...
示例10: SolrTestCase
# 需要导入模块: from pysolr import Solr [as 别名]
# 或者: from pysolr.Solr import _send_request [as 别名]
class SolrTestCase(unittest.TestCase):
    """Integration tests for the pysolr ``Solr`` client.

    NOTE(review): requires a live Solr server at
    http://localhost:8983/solr/core0; these are integration tests.
    """

    def setUp(self):
        super(SolrTestCase, self).setUp()
        self.default_solr = Solr('http://localhost:8983/solr/core0')
        # Short timeouts.
        self.solr = Solr('http://localhost:8983/solr/core0', timeout=2)
        # Fixture documents re-indexed before every test.
        self.docs = [
            {
                'id': 'doc_1',
                'title': 'Example doc 1',
                'price': 12.59,
                'popularity': 10,
            },
            {
                'id': 'doc_2',
                'title': 'Another example ☃ doc 2',
                'price': 13.69,
                'popularity': 7,
            },
            {
                'id': 'doc_3',
                'title': 'Another thing',
                'price': 2.35,
                'popularity': 8,
            },
            {
                'id': 'doc_4',
                'title': 'doc rock',
                'price': 99.99,
                'popularity': 10,
            },
            {
                'id': 'doc_5',
                'title': 'Boring',
                'price': 1.12,
                'popularity': 2,
            },
        ]
        # Clear it.
        self.solr.delete(q='*:*')
        # Index our docs. Yes, this leans on functionality we're going to test
        # later & if it's broken, everything will catastrophically fail.
        # Such is life.
        self.solr.add(self.docs)
        # Mock the _send_request method on the solr instance so that we can
        # test that custom handlers are called correctly.
        self.solr._send_request = Mock(wraps=self.solr._send_request)

    def tearDown(self):
        # Leave the core empty for whoever runs next.
        self.solr.delete(q='*:*')
        super(SolrTestCase, self).tearDown()

    def test_init(self):
        # Default client: stock URL, JSON decoder, 60-second default timeout.
        self.assertEqual(self.default_solr.url, 'http://localhost:8983/solr/core0')
        self.assertTrue(isinstance(self.default_solr.decoder, json.JSONDecoder))
        self.assertEqual(self.default_solr.timeout, 60)
        # Custom client keeps the timeout passed to the constructor.
        self.assertEqual(self.solr.url, 'http://localhost:8983/solr/core0')
        self.assertTrue(isinstance(self.solr.decoder, json.JSONDecoder))
        self.assertEqual(self.solr.timeout, 2)

    def test__create_full_url(self):
        # Nada.
        self.assertEqual(self.solr._create_full_url(path=''), 'http://localhost:8983/solr/core0')
        # Basic path.
        self.assertEqual(self.solr._create_full_url(path='pysolr_tests'), 'http://localhost:8983/solr/core0/pysolr_tests')
        # Leading slash (& making sure we don't touch the trailing slash).
        self.assertEqual(self.solr._create_full_url(path='/pysolr_tests/select/?whatever=/'), 'http://localhost:8983/solr/core0/pysolr_tests/select/?whatever=/')

    def test__send_request(self):
        # Test a valid request.
        resp_body = self.solr._send_request('GET', 'select/?q=doc&wt=json')
        self.assertTrue('"numFound":3' in resp_body)
        # Test a lowercase method & a body.
        xml_body = '<add><doc><field name="id">doc_12</field><field name="title">Whee! ☃</field></doc></add>'
        resp_body = self.solr._send_request('POST', 'update/?commit=true', body=xml_body, headers={
            'Content-type': 'text/xml; charset=utf-8',
        })
        self.assertTrue('<int name="status">0</int>' in resp_body)
        # Test a non-existent URL.
        old_url = self.solr.url
        self.solr.url = 'http://127.0.0.1:567898/wahtever'
        self.assertRaises(SolrError, self.solr._send_request, 'get', 'select/?q=doc&wt=json')
        self.solr.url = old_url
        # Test bad core as well
        self.solr.url = 'http://localhost:8983/solr/bad_core'
        try:
            self.assertRaises(SolrError, self.solr._send_request, 'get', 'select/?q=doc&wt=json')
        finally:
            # Always restore the good URL so later tests aren't poisoned.
            self.solr.url = old_url

    def test__select(self):
        # Short params.
        resp_body = self.solr._select({'q': 'doc'})
        # ... remainder of this method omitted in the source excerpt ...