This article collects typical usage examples of the Python method splunklib.results.ResultsReader: what the method does, how to call it, and how it is used in practice. The curated examples below may help; you can also explore further uses of the containing module, splunklib.results.
Shown below are 14 code examples of results.ResultsReader, ordered by popularity by default.
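Before the individual examples, here is a minimal end-to-end sketch of the typical pattern; the host and credentials are placeholders and the search query is an arbitrary example:

import splunklib.client as client
import splunklib.results as results

# Placeholder connection details -- replace with your own deployment's values.
service = client.connect(host='localhost', port=8089,
                         username='admin', password='changeme')

# exec_mode='blocking' makes jobs.create() return only once the job is done.
job = service.jobs.create('search index=_internal | head 5',
                          exec_mode='blocking')

for result in results.ResultsReader(job.results()):
    if isinstance(result, results.Message):
        # Diagnostic messages may be returned in the results.
        print('{}: {}'.format(result.type, result.message))
    elif isinstance(result, dict):
        # Normal events are returned as dicts.
        print(result)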
Example 1: export_report
# Module to import: from splunklib import results [as alias]
# Or: from splunklib.results import ResultsReader [as alias]
def export_report(self):
    job_obj = None
    for j in self.service.jobs:
        if j.sid == self.sid:
            job_obj = j
    if job_obj is None:
        print("Job SID {} not found. Did it expire?".format(self.sid))
        sys.exit()
    if not job_obj.is_ready():
        print("Job SID {} is still processing. "
              "Please wait to re-run".format(self.sid))
        sys.exit()
    export_data = []
    job_results = job_obj.results(rf=self.cols)
    for result in results.ResultsReader(job_results):
        export_data.append(result)
    self.write_csv(self.file, self.cols, export_data)
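The write_csv helper called on the last line is not part of this snippet. A minimal sketch of a compatible method on the same class (the signature follows the call above; the body is an assumption):

import csv

def write_csv(self, filename, cols, data):
    # Hypothetical helper: write each result dict as a CSV row,
    # keeping only the requested columns.
    with open(filename, 'w', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=cols, extrasaction='ignore')
        writer.writeheader()
        for row in data:
            writer.writerow(row)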
Example 2: get_results
# Module to import: from splunklib import results [as alias]
# Or: from splunklib.results import ResultsReader [as alias]
def get_results(self, job, count: int) -> list:  # pragma: no cover
    """Return events from a finished Job as an array of dictionaries.

    Parameters
    ----------
    job : Job
        Job object to pull results from.
    count : int
        Maximum number of results to retrieve.

    Returns
    -------
    list
        The results of the search.
    """
    import splunklib.results as results
    out = [result for result in results.ResultsReader(job.results(count=count))]
    job.cancel()
    return out
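A usage sketch, assuming self.service is an authenticated splunklib client.Service. Note that the method cancels the job after reading, so the results can only be fetched once:

# Run a blocking search so the job is finished when create() returns,
# then pull up to 100 events from it.
job = self.service.jobs.create('search index=main error | head 100',
                               exec_mode='blocking')
events = self.get_results(job, count=100)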
Example 3: get_current_splunk_time
# Module to import: from splunklib import results [as alias]
# Or: from splunklib.results import ResultsReader [as alias]
def get_current_splunk_time(splunk_service):
    t = datetime.utcnow() - timedelta(days=3)
    time = t.strftime(SPLUNK_TIME_FORMAT)
    kwargs_oneshot = {'count': 1, 'earliest_time': time}
    searchquery_oneshot = '| gentimes start=-1 | eval clock = strftime(time(), "%Y-%m-%dT%H:%M:%S")' \
                          ' | sort 1 -_time | table clock'
    oneshotsearch_results = splunk_service.jobs.oneshot(searchquery_oneshot, **kwargs_oneshot)
    reader = results.ResultsReader(oneshotsearch_results)
    for item in reader:
        if isinstance(item, results.Message):
            return item.message["clock"]
        if isinstance(item, dict):
            return item["clock"]
    raise ValueError('Error: Could not fetch Splunk time')
Example 4: parse_batch_of_results
# Module to import: from splunklib import results [as alias]
# Or: from splunklib.results import ResultsReader [as alias]
def parse_batch_of_results(current_batch_of_results, max_results_to_add, app):
    parsed_batch_results = []
    batch_dbot_scores = []
    results_reader = results.ResultsReader(io.BufferedReader(ResponseReaderWrapper(current_batch_of_results)))
    for item in results_reader:
        if isinstance(item, results.Message):
            if "Error in" in item.message:
                raise ValueError(item.message)
            parsed_batch_results.append(convert_to_str(item.message))
        elif isinstance(item, dict):
            if demisto.get(item, 'host'):
                batch_dbot_scores.append({'Indicator': item['host'], 'Type': 'hostname',
                                          'Vendor': 'Splunk', 'Score': 0, 'isTypedIndicator': True})
            if app:
                item['app'] = app
            # Normal events are returned as dicts
            parsed_batch_results.append(item)
        if len(parsed_batch_results) >= max_results_to_add:
            break
    return parsed_batch_results, batch_dbot_scores
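ResponseReaderWrapper is not defined in this snippet. A common pattern (assumed here) adapts the splunklib response reader to io.RawIOBase so that io.BufferedReader can buffer it:

import io

class ResponseReaderWrapper(io.RawIOBase):
    """Adapts a splunklib response reader to the io interface so that
    io.BufferedReader can wrap it (a common pattern, assumed here)."""

    def __init__(self, response_reader):
        self.response_reader = response_reader

    def readable(self):
        return True

    def close(self):
        self.response_reader.close()

    def read(self, n):
        return self.response_reader.read(n)

    def readinto(self, b):
        # Fill the caller-supplied buffer and report how many bytes were read.
        data = self.response_reader.read(len(b))
        b[:len(data)] = data
        return len(data)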
Example 5: write_results
# Module to import: from splunklib import results [as alias]
# Or: from splunklib.results import ResultsReader [as alias]
def write_results(job):
    """Writes results to a tempfile"""
    reader = results.ResultsReader(job.results())
    temp_filename = ""
    with tempfile.NamedTemporaryFile(delete=False) as temp_file:
        temp_filename = temp_file.name
        writer = None
        for result in reader:
            if isinstance(result, dict):
                if not writer:
                    writer = csv.DictWriter(temp_file, fieldnames=result.keys(), dialect='excel')
                    writer.writeheader()
                writer.writerow(result)
    return temp_filename
#end write_results
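A hypothetical caller, assuming an authenticated service and a blocking search so the job is finished before its results are written out:

job = service.jobs.create('search index=_internal | head 20',
                          exec_mode='blocking')
csv_path = write_results(job)
print('Results written to {}'.format(csv_path))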
Example 6: results
# Module to import: from splunklib import results [as alias]
# Or: from splunklib.results import ResultsReader [as alias]
def results(self, **query_params):
    """Returns a streaming handle to this job's search results. To get a
    nice, Pythonic iterator, pass the handle to :class:`splunklib.results.ResultsReader`,
    as in::

        import splunklib.client as client
        import splunklib.results as results
        from time import sleep
        service = client.connect(...)
        job = service.jobs.create("search * | head 5")
        while not job.is_done():
            sleep(.2)
        rr = results.ResultsReader(job.results())
        for result in rr:
            if isinstance(result, results.Message):
                # Diagnostic messages may be returned in the results
                print '%s: %s' % (result.type, result.message)
            elif isinstance(result, dict):
                # Normal events are returned as dicts
                print result
        assert rr.is_preview == False

    Results are not available until the job has finished. If called on
    an unfinished job, the result is an empty event set.

    This method makes a single roundtrip to the server, plus at most two
    additional round trips if the ``autologin`` field of :func:`connect`
    is set to ``True``.

    :param query_params: Additional parameters (optional). For a list of valid
        parameters, see `GET search/jobs/{search_id}/results
        <http://docs.splunk.com/Documentation/Splunk/latest/RESTAPI/RESTsearch#GET_search.2Fjobs.2F.7Bsearch_id.7D.2Fresults>`_.
    :type query_params: ``dict``

    :return: The ``InputStream`` IO handle to this job's results.
    """
    query_params['segmentation'] = query_params.get('segmentation', 'none')
    return self.get("results", **query_params).body
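The endpoint's paging parameters can be passed straight through ``query_params``; a minimal sketch of pulling a large result set in pages, assuming ``job`` is a finished Job and ``process`` is a hypothetical consumer:

import splunklib.results as results

offset, page_size = 0, 1000
while True:
    page = job.results(count=page_size, offset=offset)
    rows = [r for r in results.ResultsReader(page) if isinstance(r, dict)]
    if not rows:
        break
    process(rows)  # hypothetical consumer of one page of events
    offset += page_size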
Example 7: _parse_results
# Module to import: from splunklib import results [as alias]
# Or: from splunklib.results import ResultsReader [as alias]
def _parse_results(self, handle):
    """Wraps output from Splunk searches with the Splunk ResultsReader.
    Splunk typically returns events, debug statements, and errors through the same stream.
    Debug/info messages are logged, while actual results are yielded.
    :param handle: Splunk search job generator
    """
    result_reader = ResultsReader(handle)
    for result in result_reader:
        # Diagnostic messages may be returned in the results
        if isinstance(result, Message):
            logger.debug('[{}] {}'.format(result.type, result.message))
        # Normal events are returned as dicts
        elif isinstance(result, dict):
            result = dict(result)
            if '_time' in result:
                result['_time'] = SplunkAbstraction._to_datetime(result['_time'])
            yield {
                'time': result['_time'] if '_time' in result else '',
                'metadata': {k: v for k, v in result.items() if k.startswith('_')},
                'state': {k: v for k, v in result.items() if not k.startswith('_')}
            }
        else:
            logger.warning('Unknown result type in _parse_results: {}'.format(result))
    assert result_reader.is_preview is False
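SplunkAbstraction._to_datetime is not shown here. A plausible sketch that parses Splunk's ISO-8601 _time strings (the real helper may differ; the format string is an assumption):

from datetime import datetime

class SplunkAbstraction(object):
    @staticmethod
    def _to_datetime(timestamp):
        # Splunk renders _time like '2020-01-01T12:00:00.000+00:00';
        # parse the date/time portion and ignore the UTC offset for simplicity.
        return datetime.strptime(timestamp[:23], '%Y-%m-%dT%H:%M:%S.%f')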
Example 8: splunk_results_command
# Module to import: from splunklib import results [as alias]
# Or: from splunklib.results import ResultsReader [as alias]
def splunk_results_command(service):
    res = []
    sid = demisto.args().get('sid', '')
    try:
        job = service.job(sid)
    except HTTPError as error:
        if error.message == 'HTTP 404 Not Found -- Unknown sid.':
            demisto.results("Found no job for sid: {}".format(sid))
        else:
            return_error(error.message, error)
    else:
        for result in results.ResultsReader(job.results()):
            if isinstance(result, results.Message):
                demisto.results({"Type": 1, "ContentsFormat": "json", "Contents": json.dumps(result.message)})
            elif isinstance(result, dict):
                # Normal events are returned as dicts
                res.append(result)
        demisto.results({"Type": 1, "ContentsFormat": "json", "Contents": json.dumps(res)})
Example 9: run
# Module to import: from splunklib import results [as alias]
# Or: from splunklib.results import ResultsReader [as alias]
def run(urlencoded, resp):
    HOST = "splunkIP"
    PORT = 8089
    USERNAME = "abhishekratan"
    PASSWORD = "abhishekratan"
    # Create a Service instance and log in
    service = client.connect(
        host=HOST,
        port=PORT,
        username=USERNAME,
        password=PASSWORD)
    kwargs_oneshot = {"earliest_time": "-30d@d",
                      "count": 10}
    url = unquote(urlencoded)
    oneshotsearch_results = service.jobs.oneshot(url, **kwargs_oneshot)
    reader = results.ResultsReader(oneshotsearch_results)
    result = []
    for item in reader:
        print(item)
        result.append(item)
    return_result = result
    result = {"result": result}
    print(return_result)
    resp.body = json.dumps(result)
    return return_result
Example 10: _retrieve_parallel_worker
# Module to import: from splunklib import results [as alias]
# Or: from splunklib.results import ResultsReader [as alias]
def _retrieve_parallel_worker(self, job, offset_queue, page_size, search_results):
    while not offset_queue.empty():
        offset = offset_queue.get()
        paginate_args = dict(
            count=page_size,
            offset=offset
        )
        page_results = job.results(**paginate_args)
        for result in results.ResultsReader(page_results):
            if isinstance(result, dict):
                search_results.append(result)
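A sketch of a driver that could feed this worker; everything except _retrieve_parallel_worker itself is an assumption:

from queue import Queue
from threading import Thread

def _retrieve_parallel(self, job, page_size=1000, thread_count=4):
    # Enqueue one offset per page of the finished job, then let several
    # threads drain the queue concurrently.
    offset_queue = Queue()
    for offset in range(0, int(job['resultCount']), page_size):
        offset_queue.put(offset)
    search_results = []  # list.append is atomic under CPython's GIL
    threads = [
        Thread(target=self._retrieve_parallel_worker,
               args=(job, offset_queue, page_size, search_results))
        for _ in range(thread_count)
    ]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    return search_results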
Example 11: get_results
# Module to import: from splunklib import results [as alias]
# Or: from splunklib.results import ResultsReader [as alias]
def get_results(job, limit):
    """Return a collection of results"""
    reader = results.ResultsReader(job.results(count=limit))
    return {"results": [row for row in reader]}
#end get_results
Example 12: preview
# Module to import: from splunklib import results [as alias]
# Or: from splunklib.results import ResultsReader [as alias]
def preview(self, **query_params):
    """Returns a streaming handle to this job's preview search results.

    Unlike the :meth:`results` method, which requires a job to be finished
    to return any results, the ``preview`` method returns any results that
    have been generated so far, whether the job is running or not. The
    returned search results are the raw data from the server. Pass
    the handle returned to :class:`splunklib.results.ResultsReader` to get a
    nice, Pythonic iterator over objects, as in::

        import splunklib.client as client
        import splunklib.results as results
        service = client.connect(...)
        job = service.jobs.create("search * | head 5")
        rr = results.ResultsReader(job.preview())
        for result in rr:
            if isinstance(result, results.Message):
                # Diagnostic messages may be returned in the results
                print '%s: %s' % (result.type, result.message)
            elif isinstance(result, dict):
                # Normal events are returned as dicts
                print result
        if rr.is_preview:
            print "Preview of a running search job."
        else:
            print "Job is finished. Results are final."

    This method makes one roundtrip to the server, plus at most two more
    if the ``autologin`` field of :func:`connect` is set to ``True``.

    :param query_params: Additional parameters (optional). For a list of valid
        parameters, see `GET search/jobs/{search_id}/results_preview
        <http://docs.splunk.com/Documentation/Splunk/latest/RESTAPI/RESTsearch#GET_search.2Fjobs.2F.7Bsearch_id.7D.2Fresults_preview>`_
        in the REST API documentation.
    :type query_params: ``dict``

    :return: The ``InputStream`` IO handle to this job's preview results.
    """
    query_params['segmentation'] = query_params.get('segmentation', 'none')
    return self.get("results_preview", **query_params).body
Example 13: export
# Module to import: from splunklib import results [as alias]
# Or: from splunklib.results import ResultsReader [as alias]
def export(self, query, **params):
    """Runs a search and immediately starts streaming preview events.

    This method returns a streaming handle to this job's events as an XML
    document from the server. To parse this stream into usable Python objects,
    pass the handle to :class:`splunklib.results.ResultsReader`::

        import splunklib.client as client
        import splunklib.results as results
        service = client.connect(...)
        rr = results.ResultsReader(service.jobs.export("search * | head 5"))
        for result in rr:
            if isinstance(result, results.Message):
                # Diagnostic messages may be returned in the results
                print '%s: %s' % (result.type, result.message)
            elif isinstance(result, dict):
                # Normal events are returned as dicts
                print result
        assert rr.is_preview == False

    Running an export search is more efficient as it streams the results
    directly to you, rather than having to write them out to disk and make
    them available later. As soon as results are ready, you will receive
    them.

    The ``export`` method makes a single roundtrip to the server (as opposed
    to two for :meth:`create` followed by :meth:`preview`), plus at most two
    more if the ``autologin`` field of :func:`connect` is set to ``True``.

    :raises `ValueError`: Raised for invalid queries.

    :param query: The search query.
    :type query: ``string``
    :param params: Additional arguments (optional). For a list of valid
        parameters, see `GET search/jobs/export
        <http://docs.splunk.com/Documentation/Splunk/latest/RESTAPI/RESTsearch#search.2Fjobs.2Fexport>`_
        in the REST API documentation.
    :type params: ``dict``

    :return: The ``InputStream`` IO handle to raw XML returned from the server.
    """
    if "exec_mode" in params:
        raise TypeError("Cannot specify an exec_mode to export.")
    params['segmentation'] = params.get('segmentation', 'none')
    return self.post(path_segment="export",
                     search=query,
                     **params).body
Example 14: oneshot
# Module to import: from splunklib import results [as alias]
# Or: from splunklib.results import ResultsReader [as alias]
def oneshot(self, query, **params):
    """Runs a oneshot search and returns a streaming handle to the results.

    The ``InputStream`` object streams XML fragments from the server. To
    parse this stream into usable Python objects,
    pass the handle to :class:`splunklib.results.ResultsReader`::

        import splunklib.client as client
        import splunklib.results as results
        service = client.connect(...)
        rr = results.ResultsReader(service.jobs.oneshot("search * | head 5"))
        for result in rr:
            if isinstance(result, results.Message):
                # Diagnostic messages may be returned in the results
                print '%s: %s' % (result.type, result.message)
            elif isinstance(result, dict):
                # Normal events are returned as dicts
                print result
        assert rr.is_preview == False

    The ``oneshot`` method makes a single roundtrip to the server (as opposed
    to two for :meth:`create` followed by :meth:`results`), plus at most two more
    if the ``autologin`` field of :func:`connect` is set to ``True``.

    :raises ValueError: Raised for invalid queries.

    :param query: The search query.
    :type query: ``string``
    :param params: Additional arguments (optional):

        - "output_mode": Specifies the output format of the results (XML,
          JSON, or CSV).
        - "earliest_time": Specifies the earliest time in the time range to
          search. The time string can be a UTC time (with fractional seconds),
          a relative time specifier (to now), or a formatted time string.
        - "latest_time": Specifies the latest time in the time range to
          search. The time string can be a UTC time (with fractional seconds),
          a relative time specifier (to now), or a formatted time string.
        - "rf": Specifies one or more fields to add to the search.

    :type params: ``dict``

    :return: The ``InputStream`` IO handle to raw XML returned from the server.
    """
    if "exec_mode" in params:
        raise TypeError("Cannot specify an exec_mode to oneshot.")
    params['segmentation'] = params.get('segmentation', 'none')
    return self.post(search=query,
                     exec_mode="oneshot",
                     **params).body