本文整理汇总了Python中intelmq.lib.message.Report类的典型用法代码示例。如果您正苦于以下问题:Python Report类的具体用法?Python Report怎么用?Python Report使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Report类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: process
def process(self):
    """Collect reports from files in a directory.

    Scans ``self.parameters.path`` for regular files whose names end
    with ``self.parameters.postfix``, sends each file's content as a
    report, and optionally deletes the file afterwards
    (``self.parameters.delete_file``).  If a processed file cannot be
    deleted, the bot stops to avoid reading the same file again.
    """
    self.logger.debug("Started looking for files.")
    if not os.path.isdir(self.parameters.path):
        return
    p = os.path.abspath(self.parameters.path)
    # iterate over all files in dir
    for entry in os.listdir(p):
        filename = os.path.join(p, entry)
        if not os.path.isfile(filename):
            continue
        if not fnmatch.fnmatch(entry, '*' + self.parameters.postfix):
            continue
        self.logger.info("Processing file %r." % filename)
        # Bug fix: the original opened the file "as f", shadowing the
        # directory-listing loop variable of the same name; use a
        # distinct name for the file handle.
        with open(filename, 'r') as handle:
            report = Report()
            report.add("raw", handle.read())
            report.add("feed.url", "file://localhost%s" % filename)
            self.send_message(report)
        if self.parameters.delete_file:
            try:
                os.remove(filename)
                self.logger.debug("Deleted file: %r." % filename)
            except PermissionError:
                self.logger.error("Could not delete file %r." % filename)
                self.logger.info("Maybe I don't have sufficient rights on that file?")
                self.logger.error("Stopping now, to prevent reading this file again.")
                self.stop()
示例2: process
def process(self):
    """Fetch tagged events from MISP and emit one report per event.

    Searches MISP for events carrying the 'to be processed' tag,
    sends each event as its own report, then swaps that tag for the
    'processed' tag so the events are not collected again.
    """
    # Grab the events from MISP
    misp_result = self.misp.search(
        tags=self.parameters.misp_tag_to_process
    )
    # Process the response and events
    if 'response' in misp_result:
        # Extract the MISP event details
        for e in misp_result['response']:
            misp_event = e['Event']
            # Send the results to the parser
            report = Report()
            report.add('raw', json.dumps(misp_event, sort_keys=True))
            report.add('feed.url', self.parameters.misp_url)
            self.send_message(report)
        # Finally, update the tags on the processed MISP events.
        # NOTE(review): unlike the loop above, this one passes the
        # wrapper dict ({'Event': ...}) rather than the inner 'Event'
        # to remove_tag/add_tag — presumably what the PyMISP helpers
        # expect; confirm against the PyMISP API.
        for misp_event in misp_result['response']:
            # Remove the 'to be processed' tag
            self.misp.remove_tag(misp_event,
                                 self.parameters.misp_tag_to_process)
            # Add a 'processed' tag to the event
            self.misp.add_tag(misp_event,
                              self.parameters.misp_tag_processed)
示例3: process
def process(self):
    """Poll an IMAP folder for unread mails, follow report URLs found
    in their plain-text bodies and forward the downloads as reports."""
    mailbox = imbox.Imbox(self.parameters.mail_host,
                          self.parameters.mail_user,
                          self.parameters.mail_password,
                          self.parameters.mail_ssl)
    messages = mailbox.messages(folder=self.parameters.folder, unread=True)
    if not messages:
        return
    for uid, mail in messages:
        # Skip mails whose subject does not match the configured filter.
        if self.parameters.subject_regex and \
                not re.search(self.parameters.subject_regex, mail.subject):
            continue
        self.logger.info("Reading email report")
        for text_part in mail.body['plain']:
            url_match = re.search(self.parameters.url_regex, text_part)
            if not url_match:
                continue
            url = url_match.group()
            self.logger.info("Downloading report from %s" % url)
            raw_report = fetch_url(url, timeout=60.0, chunk_size=16384)
            self.logger.info("Report downloaded.")
            report = Report()
            report.add("raw", raw_report, sanitize=True)
            self.send_message(report)
        # Only mark the mail read once every body part was handled.
        mailbox.mark_seen(uid)
        self.logger.info("Email report read")
示例4: process
def process(self):
    """Download a report via HTTP(S); if the body is a zip archive,
    emit one report per member file, otherwise emit it as-is."""
    self.logger.info("Downloading report from %s" %
                     self.parameters.http_url)
    response = requests.get(url=self.parameters.http_url, auth=self.auth,
                            proxies=self.proxy, headers=self.http_header,
                            verify=self.http_verify_cert)
    if response.status_code // 100 != 2:
        raise ValueError('HTTP response status code was {}.'
                         ''.format(response.status_code))
    self.logger.info("Report downloaded.")
    try:
        archive = zipfile.ZipFile(io.BytesIO(response.content), "r")
    except zipfile.BadZipfile:
        # Not a zip archive: forward the decoded body as one report.
        raw_reports = [response.text]
    else:
        self.logger.info('Downloaded zip file, extracting following files:'
                         ' ' + ', '.join(archive.namelist()))
        raw_reports = [archive.read(member) for member in archive.namelist()]
    for raw_report in raw_reports:
        report = Report()
        report.add("raw", raw_report)
        report.add("feed.url", self.parameters.http_url)
        self.send_message(report)
示例5: process
def process(self):
    """Collect unread mails from an IMAP folder and forward the first
    plain-text body part of each subject-matching mail as a report."""
    mailbox = imbox.Imbox(self.parameters.mail_host,
                          self.parameters.mail_user,
                          self.parameters.mail_password,
                          self.parameters.mail_ssl)
    messages = mailbox.messages(folder=self.parameters.mail_folder,
                                unread=True)
    # Honour the optional case-insensitive subject matching switch.
    ignorecase = getattr(self.parameters, "mail_subject_ignorecase", False)
    regex_flags = re.IGNORECASE if ignorecase else 0
    if not messages:
        return
    for uid, mail in messages:
        subject_regex = self.parameters.mail_subject_regex
        if subject_regex and not re.search(subject_regex, mail.subject,
                                           flags=regex_flags):
            self.logger.info("Subject regex not matched: '%s' in '%s'",
                             subject_regex, mail.subject)
            continue
        self.logger.info("Reading email report")
        report = Report()
        report.add("raw", mail.body['plain'][0], sanitize=True)
        report.add("feed.name", self.parameters.feed, sanitize=True)
        self.send_message(report)
        mailbox.mark_seen(uid)
        self.logger.info("Email report read")
示例6: test_generate_reports_with_chunking_and_copying_header
def test_generate_reports_with_chunking_and_copying_header(self):
    """Test generate_reports with chunking and copying the header"""
    chunk_size = 1000
    # The test is only meaningful when the test data is longer than
    # a single chunk.
    self.assertTrue(chunk_size < len(csv_test_data))
    base_report = Report(harmonization=HARM)
    base_report.add("feed.name",
                    "test_generate_reports_with_chunking_and_header")
    observation_time = base_report["time.observation"]
    header_line = io.BytesIO(csv_test_data).readline()
    reassembled = [header_line]
    chunk_reports = generate_reports(base_report, io.BytesIO(csv_test_data),
                                     chunk_size=chunk_size,
                                     copy_header_line=True)
    for chunk_report in chunk_reports:
        # Template fields must be carried over into every chunk report.
        self.assertEqual(chunk_report["feed.name"],
                         "test_generate_reports_with_chunking_and_header")
        self.assertEqual(chunk_report["time.observation"], observation_time)
        payload = io.BytesIO(base64.b64decode(chunk_report["raw"]))
        # Every chunk must begin with a copy of the original header line.
        self.assertEqual(header_line, payload.readline())
        reassembled.append(payload.read())
    # Dropping the repeated headers must reproduce the input exactly.
    self.assertEqual(b"".join(reassembled), csv_test_data)
示例7: process
def process(self):
    """Download the last matching file from an FTPS server and emit its
    content (each member, if it is a zip archive) as reports.

    Bug fix: the original read ``ftp_host``/``ftp_port`` in the first
    log call and ``ftp_directory``/``ftp_file`` in the debug logs,
    while every functional access uses the ``ftps_*`` parameter names;
    a bot configured only with ``ftps_*`` parameters therefore crashed
    with AttributeError.  All accesses now use ``ftps_*``.
    """
    self.logger.info("Downloading report from %s." %
                     (self.parameters.ftps_host + ':' +
                      str(self.parameters.ftps_port)))
    ftps = FTPS()
    ftps.connect(host=self.parameters.ftps_host,
                 port=self.parameters.ftps_port)
    # Credentials are optional; otherwise connect anonymously.
    if hasattr(self.parameters, 'ftps_username') \
            and hasattr(self.parameters, 'ftps_password'):
        ftps.login(user=self.parameters.ftps_username,
                   passwd=self.parameters.ftps_password)
    ftps.prot_p()  # secure the data connection as well
    cwd = '/'
    if hasattr(self.parameters, 'ftps_directory'):
        self.logger.debug('Changing working directory to: %r.'
                          '' % self.parameters.ftps_directory)
        cwd = self.parameters.ftps_directory
    ftps.cwd(cwd)
    filemask = '*'
    if hasattr(self.parameters, 'ftps_file'):
        self.logger.debug('Setting filemask to: %r.'
                          '' % self.parameters.ftps_file)
        filemask = self.parameters.ftps_file
    mem = io.BytesIO()
    files = fnmatch.filter(ftps.nlst(), filemask)
    if not files:
        self.logger.error("No file found, terminating download")
        return
    # Retrieve only the last matching entry of the listing.
    self.logger.info('Retrieving file: ' + files[-1])
    ftps.retrbinary("RETR " + files[-1], mem.write)
    self.logger.info("Report downloaded.")
    raw_reports = []
    try:
        zfp = zipfile.ZipFile(mem, "r")
    except zipfile.BadZipfile:
        # Not a zip archive: forward the raw download as one report.
        raw_reports.append(mem.getvalue())
    else:
        self.logger.info('Downloaded zip file, extracting following files: %r'
                         '' % zfp.namelist())
        for filename in zfp.namelist():
            raw_reports.append(zfp.read(filename))
    for raw_report in raw_reports:
        report = Report()
        report.add("raw", raw_report, sanitize=True)
        report.add("feed.url", 'ftps://' + self.parameters.ftps_host + ':' +
                   str(self.parameters.ftps_port), sanitize=True)
        self.send_message(report)
示例8: test_generate_reports_no_chunking
def test_generate_reports_no_chunking(self):
    """Test generate_reports with chunking disabled"""
    base_report = Report(harmonization=HARM)
    base_report.add("feed.name", "test_generate_reports_no_chunking")
    # With chunking disabled exactly one report must be produced.
    [only_report] = list(generate_reports(base_report,
                                          io.BytesIO(csv_test_data),
                                          chunk_size=None,
                                          copy_header_line=False))
    self.assertEqual(only_report["feed.name"],
                     "test_generate_reports_no_chunking")
    # The raw field holds the whole input, base64 encoded.
    self.assertEqual(base64.b64decode(only_report["raw"]), csv_test_data)
示例9: on_message
def on_message(self, headers, message):
    """Wrap the body of a received STOMP frame in a report and pass it
    on through the owning bot."""
    bot = self.n6stomper
    bot.logger.debug('Receive message '
                     '{!r}...'.format(message[:500]))
    # Record which STOMP endpoint the data came from.
    feed_url = "stomp://{}:{}/{}".format(bot.parameters.server,
                                         bot.parameters.port,
                                         bot.parameters.exchange)
    report = Report()
    report.add("raw", message.rstrip())
    report.add("feed.url", feed_url)
    bot.send_message(report)
示例10: on_receive
def on_receive(self, data):
    """Emit one report per non-empty line of the received payload."""
    for raw_line in data.decode().splitlines():
        stripped = raw_line.strip()
        if not stripped:
            continue
        report = Report()
        report.add("raw", stripped)
        report.add("feed.url", self.parameters.http_url)
        self.send_message(report)
示例11: process
def process(self):
    """Download the configured report URL and forward its content."""
    url = self.parameters.url
    self.logger.info("Downloading report from %s" % url)
    raw_report = fetch_url(
        url,
        timeout=60.0,
        chunk_size=16384,
        http_proxy=self.parameters.http_proxy,
        https_proxy=self.parameters.https_proxy,
    )
    self.logger.info("Report downloaded.")
    report = Report()
    report.add("raw", raw_report, sanitize=True)
    self.send_message(report)
示例12: process
def process(self):
    """Fetch all OTX pulses through the API and send them as one report."""
    self.logger.info("Downloading report through API")
    pulses = OTXv2(self.parameters.api_key).getall()
    self.logger.info("Report downloaded.")
    report = Report()
    # The whole pulse list is serialized into a single raw payload.
    report.add("raw", json.dumps(pulses), sanitize=True)
    report.add("feed.name", self.parameters.feed, sanitize=True)
    report.add("feed.accuracy", self.parameters.accuracy, sanitize=True)
    # Stamp the report with the current observation time.
    report.add('time.observation', DateTime().generate_datetime_now(),
               sanitize=True)
    self.send_message(report)
示例13: process
def process(self):
    """Fetch tagged MISP events, emit them as one report, then re-tag.

    All events carrying the 'to be processed' tag are collected into a
    single report; afterwards that tag is replaced by the 'processed'
    tag through direct calls to the MISP HTTP API.
    """
    # Grab the events from MISP
    misp_result = self.misp.search(
        tags=self.parameters.misp_tag_to_process
    )
    # Process the response and events
    if 'response' in misp_result:
        # Extract the MISP event details
        misp_events = list()
        for result in misp_result['response']:
            misp_events.append(result['Event'])
        # Send the results to the parser
        report = Report()
        report.add('raw', json.dumps(misp_events, sort_keys=True))
        report.add('feed.name', self.parameters.feed)
        report.add('feed.url', self.parameters.misp_url)
        report.add('feed.accuracy', self.parameters.accuracy)
        self.send_message(report)
        # Finally, update the tags on the MISP events.
        # Note PyMISP does not currently support this so we use
        # the API URLs directly with the requests module.
        session = requests.Session()
        # NOTE(review): self.misp.key presumably is the MISP API key
        # used for authentication -- confirm against the MISP setup.
        session.headers.update({
            'Authorization': self.misp.key,
            'Accept': 'application/json',
            'Content-Type': 'application/json',
        })
        # Template payload; 'id' and 'tag' are filled in per event below.
        post_data = {
            'request': {
                'Event': {
                    'tag': None,
                    'id': None,
                }}}
        for misp_event in misp_events:
            post_data['request']['Event']['id'] = misp_event['id']
            # Remove the 'to be processed' tag
            tag = self.parameters.misp_tag_to_process
            post_data['request']['Event']['tag'] = tag
            session.post(self.misp_del_tag_url, data=json.dumps(post_data))
            # Add a 'processed' tag to the event
            tag = self.parameters.misp_tag_processed
            post_data['request']['Event']['tag'] = tag
            session.post(self.misp_add_tag_url, data=json.dumps(post_data))
示例14: generate_reports
def generate_reports(report_template: Report, infile: BinaryIO, chunk_size: Optional[int],
                     copy_header_line: bool) -> Generator[Report, None, None]:
    """Yield reports built from report_template with data from infile.

    With chunk_size=None the entire content of infile becomes the raw
    field of a single report (an empty file yields nothing).  Otherwise
    the data is split at newline boundaries into chunks of at most
    chunk_size bytes (see read_delimited_chunks) and one copy of
    report_template is yielded per chunk, with the chunk as its raw
    value.

    When copy_header_line is true, the first line of infile is read up
    front and prepended to every chunk — particularly useful for CSV
    input where each chunk should keep the column header.

    infile only needs to provide read() — plus readline(), called at
    most once, when copy_header_line is true — both returning bytes.

    Params:
        report_template: report used as template for all yielded copies
        infile: stream to read from
        chunk_size: maximum size of each chunk
        copy_header_line: copy the first line of the infile to each chunk
    Yields:
        report: a Report object holding the chunk in the raw field
    """
    if chunk_size is None:
        payload = infile.read()
        if payload:
            single = report_template.copy()
            single.add("raw", payload, overwrite=True)
            yield single
        return
    header = infile.readline() if copy_header_line else b""
    for piece in read_delimited_chunks(infile, chunk_size):
        chunked = report_template.copy()
        chunked.add("raw", header + piece, overwrite=True)
        yield chunked
示例15: on_receive
def on_receive(self, data):
    """Emit one enriched report per non-empty line of received data."""
    for raw_line in data.split("\n"):
        stripped = raw_line.strip()
        if not stripped:
            continue
        report = Report()
        report.add("raw", str(stripped), sanitize=True)
        report.add("feed.name", self.parameters.feed, sanitize=True)
        report.add("feed.accuracy", self.parameters.accuracy, sanitize=True)
        # Stamp each report with the current observation time.
        report.add("time.observation", DateTime().generate_datetime_now(),
                   sanitize=True)
        self.send_message(report)