This article collects typical usage examples of the Python class elastalert.elastalert.ElastAlerter. If you are unsure what the ElastAlerter class does, how to use it, or want to see it in real code, the hand-picked examples below may help.
The following presents 11 code examples of the ElastAlerter class, sorted by popularity by default.
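Nearly every example below follows the same pattern: ElastAlerter reads its configuration through elastalert.elastalert.load_rules, so tests patch that loader to return an in-memory dict and pass argv-style flags to the constructor. The following is a minimal sketch of that shared pattern, condensed from the examples on this page; the conf values are illustrative, and whether a given ElastAlert version tolerates an empty rules list is an assumption (the examples below always supply one rule).

import datetime

import mock  # the examples use the standalone mock package; unittest.mock works the same way

from elastalert.elastalert import ElastAlerter

# A throwaway config: just enough keys for ElastAlerter to start
conf = {'rules_folder': 'rules',
        'run_every': datetime.timedelta(minutes=10),
        'buffer_time': datetime.timedelta(minutes=5),
        'alert_time_limit': datetime.timedelta(hours=24),
        'es_host': 'es',
        'es_port': 14900,
        'writeback_index': 'wb',
        'rules': [],
        'max_query_size': 100000,
        'old_query_limit': datetime.timedelta(weeks=1)}

# Patch config loading so no files are read and no rule hashing happens
with mock.patch('elastalert.elastalert.get_rule_hashes'):
    with mock.patch('elastalert.elastalert.load_rules') as load_conf:
        load_conf.return_value = conf
        client = ElastAlerter(['--debug'])  # argv-style flags, exactly as in the examples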
Example 1: ea
def ea():
    rules = [{'es_host': '',
              'es_port': '',
              'name': 'anytest',
              'index': 'idx',
              'filter': [],
              'include': ['@timestamp'],
              'aggregation': datetime.timedelta(0),
              'realert': datetime.timedelta(0),
              'processed_hits': {},
              'timestamp_field': '@timestamp',
              'match_enhancements': []}]
    conf = {'rules_folder': 'rules',
            'run_every': datetime.timedelta(minutes=10),
            'buffer_time': datetime.timedelta(minutes=5),
            'alert_time_limit': datetime.timedelta(hours=24),
            'es_host': 'es',
            'es_port': 14900,
            'writeback_index': 'wb',
            'rules': rules,
            'max_query_size': 100000,
            'old_query_limit': datetime.timedelta(weeks=1)}
    elasticsearch.client.Elasticsearch = mock_es_client
    with mock.patch('elastalert.elastalert.get_rule_hashes'):
        with mock.patch('elastalert.elastalert.load_rules') as load_conf:
            load_conf.return_value = conf
            ea = ElastAlerter(['--pin_rules'])
    ea.rules[0]['type'] = mock_ruletype()
    ea.rules[0]['alert'] = [mock_alert()]
    ea.writeback_es = mock_es_client()
    ea.writeback_es.search.return_value = {'hits': {'hits': []}}
    ea.writeback_es.create.return_value = {'_id': 'ABCD'}
    ea.current_es = mock_es_client('', '')
    return ea
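This is evidently a pytest fixture body whose @pytest.fixture decorator was stripped by extraction. A hedged sketch of how it would be consumed, with a hypothetical test name (pytest injects the fixture by matching the argument name):

@pytest.fixture
def ea():
    ...  # body as in Example 1 above

def test_fixture_wiring(ea):  # hypothetical test; `ea` is the fixture's return value
    assert ea.rules[0]['name'] == 'anytest'
    assert ea.writeback_es.create.return_value == {'_id': 'ABCD'}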
Example 2: check_files
def check_files():
    print("Note: This tool is for testing filters and config syntax. It will not process data or alert.\n")
    parser = argparse.ArgumentParser(description='Validate a rule configuration')
    parser.add_argument('files', metavar='file', type=str, nargs='+', help='rule configuration filename')
    parser.add_argument('--schema-only', action='store_true', help='Show only schema errors; do not run query')
    parser.add_argument('--days', type=int, default=[1, 7], nargs='+', help='Query the previous N days with this rule')
    args = parser.parse_args()

    for filename in args.files:
        with open(filename) as fh:
            conf = yaml.safe_load(fh)  # safe_load: yaml.load without a Loader is an error in PyYAML 6+
        load_options(conf)
        print("Successfully loaded %s\n" % (conf['name']))

        if args.schema_only:
            continue

        es_client = Elasticsearch(host=conf['es_host'], port=conf['es_port'])

        for days in args.days:
            start_time = ts_now() - datetime.timedelta(days=days)
            end_time = ts_now()
            ts = conf.get('timestamp_field', '@timestamp')
            query = ElastAlerter.get_query(conf['filter'], starttime=start_time, endtime=end_time, timestamp_field=ts)
            index = ElastAlerter.get_index(conf, start_time, end_time)

            try:
                res = es_client.search(index, size=1000, body=query)
            except Exception as e:
                print("Error running your filter:")
                print(repr(e)[:2048])
                exit(1)

            num_hits = len(res['hits']['hits'])
            print("Got %s hits from the last %s day%s" % (num_hits if num_hits != 1000 else '1000+', days,
                                                          's' if days > 1 else ''))

            if num_hits:
                print("\nAvailable terms in first hit:")
                terms = res['hits']['hits'][0]['_source']
                print_terms(terms, '')

                # Check for missing keys
                pk = conf.get('primary_key')
                ck = conf.get('compare_key')
                if pk and not lookup_es_key(terms, pk):
                    print("Warning: primary key %s is either missing or null!" % (pk))  # supply the missing format argument
                if ck and not lookup_es_key(terms, ck):
                    print("Warning: compare key %s is either missing or null!" % (ck))
                include = conf.get('include')
                if include:
                    for term in include:
                        if not lookup_es_key(terms, term) and '*' not in term:
                            print("Included term %s may be missing or null" % (term))
                for term in conf.get('top_count_keys', []):
                    # If the index starts with 'logstash', fields with .raw will be available but won't be in _source
                    if term not in terms and not (term.endswith('.raw') and term[:-4] in terms and index.startswith('logstash')):
                        print("top_count_key %s may be missing" % (term))
            print('')
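Since check_files builds its own argparse parser from sys.argv, wiring it up only takes a standard entry point; a minimal sketch, assuming the function lives in a script that is run directly:

if __name__ == '__main__':
    check_files()
    # e.g.: python check_rule.py my_rule.yaml other_rule.yaml --schema-only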
Example 3: ea
def ea():
    rules = [
        {
            "es_host": "",
            "es_port": "",
            "name": "anytest",
            "index": "idx",
            "filter": [],
            "not_filter": [],
            "include": ["@timestamp"],
            "aggregation": datetime.timedelta(0),
            "realert": datetime.timedelta(0),
            "processed_hits": {},
            "timestamp_field": "@timestamp",
            "match_enhancements": [],
            "rule_file": "blah.yaml",
            "ts_to_dt": ts_to_dt,
            "dt_to_ts": dt_to_ts,
            "_source_enabled": True,
        }
    ]
    conf = {
        "rules_folder": "rules",
        "run_every": datetime.timedelta(minutes=10),
        "buffer_time": datetime.timedelta(minutes=5),
        "alert_time_limit": datetime.timedelta(hours=24),
        "es_host": "es",
        "es_port": 14900,
        "writeback_index": "wb",
        "rules": rules,
        "max_query_size": 100000,
        "old_query_limit": datetime.timedelta(weeks=1),
        "disable_rules_on_error": False,
    }
    elasticsearch.client.Elasticsearch = mock_es_client
    with mock.patch("elastalert.elastalert.get_rule_hashes"):
        with mock.patch("elastalert.elastalert.load_rules") as load_conf:
            load_conf.return_value = conf
            ea = ElastAlerter(["--pin_rules"])
    ea.rules[0]["type"] = mock_ruletype()
    ea.rules[0]["alert"] = [mock_alert()]
    ea.writeback_es = mock_es_client()
    ea.writeback_es.search.return_value = {"hits": {"hits": []}}
    ea.writeback_es.create.return_value = {"_id": "ABCD"}
    ea.current_es = mock_es_client("", "")
    return ea
Example 4: mock_hits
def mock_hits(self, rule, start, end, index):
    """ Mocks the effects of get_hits using global data instead of Elasticsearch. """
    docs = []
    for doc in self.data:
        if start <= ts_to_dt(doc[rule['timestamp_field']]) < end:
            docs.append(doc)

    # Remove all fields which don't match 'include'
    for doc in docs:
        for field in list(doc):  # iterate over a copy: popping while iterating the dict raises RuntimeError
            if field != '_id':
                if not any([re.match(incl.replace('*', '.*'), field) for incl in rule['include']]):
                    doc.pop(field)

    # Separate _source and _id, convert timestamps
    resp = [{'_source': doc, '_id': doc['_id']} for doc in docs]
    for doc in resp:
        doc['_source'].pop('_id')
    ElastAlerter.process_hits(rule, resp)
    return resp
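The run_elastalert examples later on this page install this method through a self.mock_elastalert(client) helper that is not shown here. One plausible, purely hypothetical implementation of that wiring simply rebinds the client's hit-fetching entry point to the mock:

def mock_elastalert(self, client):
    # Hypothetical wiring sketch: route the client's Elasticsearch queries
    # through mock_hits. The exact set of get_hits_* methods to replace
    # depends on the ElastAlert version.
    client.get_hits = self.mock_hits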
Example 5: mock_hits
def mock_hits(self, rule, start, end, index):
    """ Mocks the effects of get_hits using global data instead of Elasticsearch. """
    docs = []
    for doc in self.data:
        if start <= ts_to_dt(doc[rule["timestamp_field"]]) < end:
            docs.append(doc)

    # Remove all fields which don't match 'include'
    for doc in docs:
        fields_to_remove = []
        for field in doc:
            if field != "_id":
                if not any([re.match(incl.replace("*", ".*"), field) for incl in rule["include"]]):
                    fields_to_remove.append(field)
        for field in fields_to_remove:  # explicit loop: map() is lazy in Python 3, so map(doc.pop, ...) would do nothing
            doc.pop(field)

    # Separate _source and _id, convert timestamps
    resp = [{"_source": doc, "_id": doc["_id"]} for doc in docs]
    for doc in resp:
        doc["_source"].pop("_id")
    ElastAlerter.process_hits(rule, resp)
    return resp
Example 6: test_file
def test_file(self, conf, args):
    """ Loads a rule config file, performs a query over the last day (args.days), lists available keys
    and prints the number of results. """
    load_options(conf, {})
    print("Successfully loaded %s\n" % (conf['name']))
    if args.schema_only:
        return []

    # Set up elasticsearch client and query
    es_config = ElastAlerter.build_es_conn_config(conf)
    es_client = ElastAlerter.new_elasticsearch(es_config)
    start_time = ts_now() - datetime.timedelta(days=args.days)
    end_time = ts_now()
    ts = conf.get('timestamp_field', '@timestamp')
    query = ElastAlerter.get_query(conf['filter'], starttime=start_time, endtime=end_time, timestamp_field=ts)
    index = ElastAlerter.get_index(conf, start_time, end_time)

    # Get one document for schema
    try:
        res = es_client.search(index, size=1, body=query, ignore_unavailable=True)
    except Exception as e:
        print("Error running your filter:", file=sys.stderr)
        print(repr(e)[:2048], file=sys.stderr)
        return None
    num_hits = len(res['hits']['hits'])
    if not num_hits:
        return []

    terms = res['hits']['hits'][0]['_source']
    doc_type = res['hits']['hits'][0]['_type']

    # Get a count of all docs
    count_query = ElastAlerter.get_query(conf['filter'], starttime=start_time, endtime=end_time, timestamp_field=ts, sort=False)
    count_query = {'query': {'filtered': count_query}}
    try:
        res = es_client.count(index, doc_type=doc_type, body=count_query, ignore_unavailable=True)
    except Exception as e:
        print("Error querying Elasticsearch:", file=sys.stderr)
        print(repr(e)[:2048], file=sys.stderr)
        return None

    num_hits = res['count']
    print("Got %s hits from the last %s day%s" % (num_hits, args.days, 's' if args.days > 1 else ''))
    print("\nAvailable terms in first hit:")
    print_terms(terms, '')

    # Check for missing keys
    pk = conf.get('primary_key')
    ck = conf.get('compare_key')
    if pk and not lookup_es_key(terms, pk):
        print("Warning: primary key %s is either missing or null!" % (pk), file=sys.stderr)  # supply the missing format argument
    if ck and not lookup_es_key(terms, ck):
        print("Warning: compare key %s is either missing or null!" % (ck), file=sys.stderr)
    include = conf.get('include')
    if include:
        for term in include:
            if not lookup_es_key(terms, term) and '*' not in term:
                print("Included term %s may be missing or null" % (term), file=sys.stderr)
    for term in conf.get('top_count_keys', []):
        # If the index starts with 'logstash', fields with .raw will be available but won't be in _source
        if term not in terms and not (term.endswith('.raw') and term[:-4] in terms and index.startswith('logstash')):
            print("top_count_key %s may be missing" % (term), file=sys.stderr)
    print('')  # Newline

    # Download up to 10,000 documents to save
    if args.save and not args.count:
        try:
            res = es_client.search(index, size=10000, body=query, ignore_unavailable=True)
        except Exception as e:
            print("Error running your filter:", file=sys.stderr)
            print(repr(e)[:2048], file=sys.stderr)
            return None
        num_hits = len(res['hits']['hits'])
        print("Downloaded %s documents to save" % (num_hits))
        return res['hits']['hits']
    return None
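When args.save is set, test_file returns the raw hit list rather than writing it out itself. A hedged sketch of what a caller might do with that return value; `tester`, args.save as a filename, and the JSON dump are assumptions, not shown on this page:

import json

hits = tester.test_file(conf, args)  # `tester` is the object that owns test_file
if args.save and hits:
    with open(args.save, 'w') as fh:
        # default=str keeps datetime-typed timestamps serializable
        json.dump(hits, fh, default=str)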
Example 7: run_elastalert
def run_elastalert(self, rule, args):
    """ Creates an ElastAlert instance and runs it over a time range for a specific rule, using either real or mock data. """
    # Mock configuration. Nothing here is used except run_every
    conf = {'rules_folder': 'rules',
            'run_every': datetime.timedelta(minutes=5),
            'buffer_time': datetime.timedelta(minutes=45),
            'alert_time_limit': datetime.timedelta(hours=24),
            'es_host': 'es',
            'es_port': 14900,
            'writeback_index': 'wb',
            'max_query_size': 100000,
            'old_query_limit': datetime.timedelta(weeks=1),
            'disable_rules_on_error': False}

    # Load and instantiate rule
    load_options(rule, conf)
    load_modules(rule)
    conf['rules'] = [rule]

    # If using mock data, make sure it's sorted and find the appropriate time range
    timestamp_field = rule.get('timestamp_field', '@timestamp')
    if args.json:
        if not self.data:
            return
        try:
            self.data.sort(key=lambda x: x[timestamp_field])
            starttime = ts_to_dt(self.data[0][timestamp_field])
            endtime = self.data[-1][timestamp_field]
            endtime = ts_to_dt(endtime) + datetime.timedelta(seconds=1)
        except KeyError as e:
            print("All documents must have a timestamp and _id: %s" % (e), file=sys.stderr)
            return

        # Create a mock _id for documents if it's missing
        used_ids = []

        def get_id():
            # string.letters is Python 2 only; ascii_letters works on both 2 and 3
            _id = ''.join([random.choice(string.ascii_letters) for i in range(16)])
            if _id in used_ids:
                return get_id()
            used_ids.append(_id)
            return _id

        for doc in self.data:
            doc.update({'_id': doc.get('_id', get_id())})
    else:
        endtime = ts_now()
        starttime = endtime - datetime.timedelta(days=args.days)

    # Set run_every to cover the entire time range unless use_count_query or use_terms_query is set
    # This is to prevent query segmenting which unnecessarily slows down tests
    if not rule.get('use_terms_query') and not rule.get('use_count_query'):
        conf['run_every'] = endtime - starttime

    # Instantiate ElastAlert with the mock config and the rule under test
    with mock.patch('elastalert.elastalert.get_rule_hashes'):
        with mock.patch('elastalert.elastalert.load_rules') as load_conf:
            load_conf.return_value = conf
            if args.alert:
                client = ElastAlerter(['--verbose'])
            else:
                client = ElastAlerter(['--debug'])

    # Replace the get_hits_* functions to use mock data
    if args.json:
        self.mock_elastalert(client)

    # Mock writeback for both real data and json data
    client.writeback_es = None
    with mock.patch.object(client, 'writeback') as mock_writeback:
        client.run_rule(rule, endtime, starttime)
        if mock_writeback.call_count:
            print("\nWould have written the following documents to elastalert_status:\n")
            for call in mock_writeback.call_args_list:
                print("%s - %s\n" % (call[0][0], call[0][1]))
Example 8: run_elastalert
def run_elastalert(self, rule, conf, args):
    """ Creates an ElastAlert instance and runs it over a time range for a specific rule, using either real or mock data. """
    # Load and instantiate rule
    load_modules(rule)
    conf['rules'] = [rule]

    # If using mock data, make sure it's sorted and find the appropriate time range
    timestamp_field = rule.get('timestamp_field', '@timestamp')
    if args.json:
        if not self.data:
            return None
        try:
            self.data.sort(key=lambda x: x[timestamp_field])
            starttime = ts_to_dt(self.data[0][timestamp_field])
            endtime = self.data[-1][timestamp_field]
            endtime = ts_to_dt(endtime) + datetime.timedelta(seconds=1)
        except KeyError as e:
            print("All documents must have a timestamp and _id: %s" % (e), file=sys.stderr)
            return None

        # Create a mock _id for documents if it's missing
        used_ids = []

        def get_id():
            # string.letters is Python 2 only; ascii_letters works on both 2 and 3
            _id = ''.join([random.choice(string.ascii_letters) for i in range(16)])
            if _id in used_ids:
                return get_id()
            used_ids.append(_id)
            return _id

        for doc in self.data:
            doc.update({'_id': doc.get('_id', get_id())})
    else:
        endtime = ts_now()
        starttime = endtime - datetime.timedelta(days=args.days)

    # Set run_every to cover the entire time range unless a count, terms or aggregation query is used
    # This is to prevent query segmenting which unnecessarily slows down tests
    if not rule.get('use_terms_query') and not rule.get('use_count_query') and not rule.get('aggregation_query_element'):
        conf['run_every'] = endtime - starttime

    # Instantiate ElastAlert with the mock config and the rule under test
    with mock.patch('elastalert.elastalert.get_rule_hashes'):
        with mock.patch('elastalert.elastalert.load_rules') as load_conf:
            load_conf.return_value = conf
            if args.alert:
                client = ElastAlerter(['--verbose'])
            else:
                client = ElastAlerter(['--debug'])

    # Replace the get_hits_* functions to use mock data
    if args.json:
        self.mock_elastalert(client)

    # Mock writeback to return empty results
    client.writeback_es = mock.MagicMock()
    client.writeback_es.search.return_value = {"hits": {"hits": []}}

    with mock.patch.object(client, 'writeback') as mock_writeback:
        client.run_rule(rule, endtime, starttime)
        if mock_writeback.call_count:
            print("\nWould have written the following documents to writeback index (default is elastalert_status):\n")
            for call in mock_writeback.call_args_list:
                print("%s - %s\n" % (call[0][0], call[0][1]))
Example 9: test_file
def test_file(self, conf, args):
    """ Loads a rule config file, performs a query over the last day (args.days), lists available keys
    and prints the number of results. """
    if args.schema_only:
        return []

    # Set up Elasticsearch client and query
    es_client = elasticsearch_client(conf)
    try:
        ElastAlerter.modify_rule_for_ES5(conf)
    except Exception as e:
        print("Error connecting to ElasticSearch:", file=sys.stderr)
        print(repr(e)[:2048], file=sys.stderr)
        if args.stop_error:
            exit(1)
        return None

    start_time = ts_now() - datetime.timedelta(days=args.days)
    end_time = ts_now()
    ts = conf.get('timestamp_field', '@timestamp')
    query = ElastAlerter.get_query(
        conf['filter'],
        starttime=start_time,
        endtime=end_time,
        timestamp_field=ts,
        to_ts_func=conf['dt_to_ts'],
        five=conf['five']
    )
    index = ElastAlerter.get_index(conf, start_time, end_time)

    # Get one document for schema
    try:
        res = es_client.search(index, size=1, body=query, ignore_unavailable=True)
    except Exception as e:
        print("Error running your filter:", file=sys.stderr)
        print(repr(e)[:2048], file=sys.stderr)
        if args.stop_error:
            exit(1)
        return None

    num_hits = len(res['hits']['hits'])
    if not num_hits:
        return []

    terms = res['hits']['hits'][0]['_source']
    doc_type = res['hits']['hits'][0]['_type']

    # Get a count of all docs
    count_query = ElastAlerter.get_query(
        conf['filter'],
        starttime=start_time,
        endtime=end_time,
        timestamp_field=ts,
        to_ts_func=conf['dt_to_ts'],
        sort=False,
        five=conf['five']
    )
    try:
        res = es_client.count(index, doc_type=doc_type, body=count_query, ignore_unavailable=True)
    except Exception as e:
        print("Error querying Elasticsearch:", file=sys.stderr)
        print(repr(e)[:2048], file=sys.stderr)
        if args.stop_error:
            exit(1)
        return None

    num_hits = res['count']
    if args.formatted_output:
        self.formatted_output['hits'] = num_hits
        self.formatted_output['days'] = args.days
        self.formatted_output['terms'] = terms.keys()
        self.formatted_output['result'] = terms
    else:
        print("Got %s hits from the last %s day%s" % (num_hits, args.days, 's' if args.days > 1 else ''))
        print("\nAvailable terms in first hit:")
        print_terms(terms, '')

    # Check for missing keys
    pk = conf.get('primary_key')
    ck = conf.get('compare_key')
    if pk and not lookup_es_key(terms, pk):
        print("Warning: primary key %s is either missing or null!" % (pk), file=sys.stderr)  # supply the missing format argument
    if ck and not lookup_es_key(terms, ck):
        print("Warning: compare key %s is either missing or null!" % (ck), file=sys.stderr)
    include = conf.get('include')
    if include:
        for term in include:
            if not lookup_es_key(terms, term) and '*' not in term:
                print("Included term %s may be missing or null" % (term), file=sys.stderr)
    for term in conf.get('top_count_keys', []):
        # If the index starts with 'logstash', fields with .raw will be available but won't be in _source
        if term not in terms and not (term.endswith('.raw') and term[:-4] in terms and index.startswith('logstash')):
            print("top_count_key %s may be missing" % (term), file=sys.stderr)
    if not args.formatted_output:
        print('')  # Newline

    # Download up to max_query_size (defaults to 10,000) documents to save
    # ......... the rest of this example's code is omitted here .........
Example 10: run_elastalert
def run_elastalert(self, rule, conf, args):
    """ Creates an ElastAlert instance and runs it over a time range for a specific rule, using either real or mock data. """
    # Load and instantiate rule
    # Pass an args containing the context of whether we're alerting or not
    # It is needed to prevent unnecessary initialization of unused alerters
    load_modules_args = argparse.Namespace()
    load_modules_args.debug = not args.alert
    load_modules(rule, load_modules_args)
    conf['rules'] = [rule]

    # If using mock data, make sure it's sorted and find the appropriate time range
    timestamp_field = rule.get('timestamp_field', '@timestamp')
    if args.json:
        if not self.data:
            return None
        try:
            self.data.sort(key=lambda x: x[timestamp_field])
            starttime = ts_to_dt(self.data[0][timestamp_field])
            endtime = self.data[-1][timestamp_field]
            endtime = ts_to_dt(endtime) + datetime.timedelta(seconds=1)
        except KeyError as e:
            print("All documents must have a timestamp and _id: %s" % (e), file=sys.stderr)
            if args.stop_error:
                exit(1)
            return None

        # Create a mock _id for documents if it's missing
        used_ids = []

        def get_id():
            # string.letters is Python 2 only; ascii_letters works on both 2 and 3
            _id = ''.join([random.choice(string.ascii_letters) for i in range(16)])
            if _id in used_ids:
                return get_id()
            used_ids.append(_id)
            return _id

        for doc in self.data:
            doc.update({'_id': doc.get('_id', get_id())})
    else:
        if args.end:
            if args.end == 'NOW':
                endtime = ts_now()
            else:
                try:
                    endtime = ts_to_dt(args.end)
                except (TypeError, ValueError):
                    self.handle_error("%s is not a valid ISO8601 timestamp (YYYY-MM-DDTHH:MM:SS+XX:00)" % (args.end))
                    exit(1)
        else:
            endtime = ts_now()
        if args.start:
            try:
                starttime = ts_to_dt(args.start)
            except (TypeError, ValueError):
                self.handle_error("%s is not a valid ISO8601 timestamp (YYYY-MM-DDTHH:MM:SS+XX:00)" % (args.start))
                exit(1)
        else:
            if args.days > 0:
                # Days given as a command line argument
                starttime = endtime - datetime.timedelta(days=args.days)
            elif 'timeframe' in rule:
                # Timeframe given in the rule
                starttime = endtime - datetime.timedelta(seconds=rule['timeframe'].total_seconds() * 1.01)
            else:
                # Default is 1 day / 24 hours
                starttime = endtime - datetime.timedelta(days=1)

    # Set run_every to cover the entire time range unless a count, terms or aggregation query is used
    # This is to prevent query segmenting which unnecessarily slows down tests
    if not rule.get('use_terms_query') and not rule.get('use_count_query') and not rule.get('aggregation_query_element'):
        conf['run_every'] = endtime - starttime

    # Instantiate ElastAlert with the mock config and the rule under test
    with mock.patch('elastalert.elastalert.get_rule_hashes'):
        with mock.patch('elastalert.elastalert.load_rules') as load_conf:
            load_conf.return_value = conf
            if args.alert:
                client = ElastAlerter(['--verbose'])
            else:
                client = ElastAlerter(['--debug'])

    # Replace the get_hits_* functions to use mock data
    if args.json:
        self.mock_elastalert(client)

    # Mock writeback to return empty results
    client.writeback_es = mock.MagicMock()
    client.writeback_es.search.return_value = {"hits": {"hits": []}}

    with mock.patch.object(client, 'writeback') as mock_writeback:
        client.run_rule(rule, endtime, starttime)
        if mock_writeback.call_count:
            if args.formatted_output:
                self.formatted_output['writeback'] = {}
            else:
                print("\nWould have written the following documents to writeback index (default is elastalert_status):\n")
    # ......... the rest of this example's code is omitted here .........
Example 11: test_file
def test_file(self, args):
    """ Loads a rule config file, performs a query over the last day (args.days), lists available keys
    and prints the number of results. """
    filename = args.file
    with open(filename) as fh:
        conf = yaml.safe_load(fh)  # safe_load: yaml.load without a Loader is an error in PyYAML 6+
    load_options(conf)
    print("Successfully loaded %s\n" % (conf["name"]))
    if args.schema_only:
        return []

    # Set up elasticsearch client and query
    es_client = Elasticsearch(host=conf["es_host"], port=conf["es_port"])
    start_time = ts_now() - datetime.timedelta(days=args.days)
    end_time = ts_now()
    ts = conf.get("timestamp_field", "@timestamp")
    query = ElastAlerter.get_query(conf["filter"], starttime=start_time, endtime=end_time, timestamp_field=ts)
    index = ElastAlerter.get_index(conf, start_time, end_time)

    # Get one document for schema
    try:
        res = es_client.search(index, size=1, body=query, ignore_unavailable=True)
    except Exception as e:
        print("Error running your filter:", file=sys.stderr)
        print(repr(e)[:2048], file=sys.stderr)
        return None
    num_hits = len(res["hits"]["hits"])
    if not num_hits:
        return []

    terms = res["hits"]["hits"][0]["_source"]
    doc_type = res["hits"]["hits"][0]["_type"]

    # Get a count of all docs
    count_query = ElastAlerter.get_query(
        conf["filter"], starttime=start_time, endtime=end_time, timestamp_field=ts, sort=False
    )
    count_query = {"query": {"filtered": count_query}}
    try:
        res = es_client.count(index, doc_type=doc_type, body=count_query, ignore_unavailable=True)
    except Exception as e:
        print("Error querying Elasticsearch:", file=sys.stderr)
        print(repr(e)[:2048], file=sys.stderr)
        return None

    num_hits = res["count"]
    print("Got %s hits from the last %s day%s" % (num_hits, args.days, "s" if args.days > 1 else ""))
    print("\nAvailable terms in first hit:")
    print_terms(terms, "")

    # Check for missing keys
    pk = conf.get("primary_key")
    ck = conf.get("compare_key")
    if pk and not lookup_es_key(terms, pk):
        print("Warning: primary key %s is either missing or null!" % (pk), file=sys.stderr)  # supply the missing format argument
    if ck and not lookup_es_key(terms, ck):
        print("Warning: compare key %s is either missing or null!" % (ck), file=sys.stderr)
    include = conf.get("include")
    if include:
        for term in include:
            if not lookup_es_key(terms, term) and "*" not in term:
                print("Included term %s may be missing or null" % (term), file=sys.stderr)
    for term in conf.get("top_count_keys", []):
        # If the index starts with 'logstash', fields with .raw will be available but won't be in _source
        if term not in terms and not (
            term.endswith(".raw") and term[:-4] in terms and index.startswith("logstash")
        ):
            print("top_count_key %s may be missing" % (term), file=sys.stderr)
    print("")  # Newline

    # Download up to 10,000 documents to save
    if args.save and not args.count:
        try:
            res = es_client.search(index, size=10000, body=query, ignore_unavailable=True)
        except Exception as e:
            print("Error running your filter:", file=sys.stderr)
            print(repr(e)[:2048], file=sys.stderr)
            return None
        num_hits = len(res["hits"]["hits"])
        print("Downloaded %s documents to save" % (num_hits))
        return res["hits"]["hits"]
    return None