本文整理汇总了Python中util.dt_to_ts函数的典型用法代码示例。如果您正苦于以下问题:Python dt_to_ts函数的具体用法?Python dt_to_ts怎么用?Python dt_to_ts使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了dt_to_ts函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_all_terms
def get_all_terms(self, args):
    """ Run a terms aggregation for each configured field and cache every
    term seen inside the lookback window in self.seen_values. """
    self.es = Elasticsearch(host=self.rules["es_host"], port=self.rules["es_port"])
    lookback = datetime.timedelta(**self.rules.get("terms_window_size", {"days": 30}))
    # Shared dict: mutating "field" below also re-targets the query built from it.
    terms_body = {"field": "", "size": 2147483647}  # Integer.MAX_VALUE
    agg_template = {"aggs": {"values": {"terms": terms_body}}}

    # Window ends at --start when given, otherwise now, and spans the lookback.
    if args and args.start:
        window_end = ts_to_dt(args.start)
    else:
        window_end = ts_now()
    window_start = window_end - lookback

    if self.rules.get("use_strftime_index"):
        index = format_index(self.rules["index"], window_start, window_end)
    else:
        index = self.rules["index"]

    time_filter = {self.rules["timestamp_field"]: {"lte": dt_to_ts(window_end), "gte": dt_to_ts(window_start)}}
    agg_template["filter"] = {"bool": {"must": [{"range": time_filter}]}}
    query = {"aggs": {"filtered": agg_template}}

    for field in self.fields:
        terms_body["field"] = field  # re-point the shared terms agg at this field
        res = self.es.search(body=query, index=index, ignore_unavailable=True, timeout=50)
        if "aggregations" not in res:
            self.seen_values[field] = []
            elastalert_logger.info("Found no values for %s" % (field))
            continue
        buckets = res["aggregations"]["filtered"]["values"]["buckets"]
        keys = [bucket["key"] for bucket in buckets]
        self.seen_values[field] = keys
        elastalert_logger.info("Found %s unique values for %s" % (len(keys), field))
示例2: get_all_terms
def get_all_terms(self, args):
    """ Performs a terms aggregation for each field to get every existing term.

    Populates self.seen_values: scalar fields are keyed by the field name,
    composite (list) fields by a tuple of their sub-fields.

    :param args: Parsed CLI args; when args.start is set, the lookback
        window ends there instead of at the current time.
    """
    self.es = Elasticsearch(host=self.rules['es_host'], port=self.rules['es_port'], timeout=self.rules.get('es_conn_timeout', 50))
    # Lookback window over which baseline terms are collected (default 30 days).
    window_size = datetime.timedelta(**self.rules.get('terms_window_size', {'days': 30}))
    # NOTE: field_name is shared by reference with query_template, so mutating
    # it below re-targets the query without rebuilding it.
    field_name = {"field": "", "size": 2147483647}  # Integer.MAX_VALUE
    query_template = {"aggs": {"values": {"terms": field_name}}}
    if args and args.start:
        end = ts_to_dt(args.start)
    else:
        end = ts_now()
    start = end - window_size
    if self.rules.get('use_strftime_index'):
        index = format_index(self.rules['index'], start, end)
    else:
        index = self.rules['index']
    # Restrict the aggregation to documents inside the lookback window.
    time_filter = {self.rules['timestamp_field']: {'lte': dt_to_ts(end), 'gte': dt_to_ts(start)}}
    query_template['filter'] = {'bool': {'must': [{'range': time_filter}]}}
    query = {'aggs': {'filtered': query_template}}
    for field in self.fields:
        # For composite keys, we will need to perform sub-aggregations
        if type(field) == list:
            level = query_template['aggs']
            # Iterate on each part of the composite key and add a sub aggs clause to the elastic search query
            for i, sub_field in enumerate(field):
                level['values']['terms']['field'] = sub_field
                if i < len(field) - 1:
                    # If we have more fields after the current one, then set up the next nested structure
                    level['values']['aggs'] = {'values': {'terms': copy.deepcopy(field_name)}}
                    level = level['values']['aggs']
        else:
            # For non-composite keys, only a single agg is needed
            field_name['field'] = field
        # NOTE(review): nested 'aggs' added for a composite field remain on
        # query_template for subsequent scalar fields — presumably harmless,
        # but verify against the ES response shape.
        res = self.es.search(body=query, index=index, ignore_unavailable=True, timeout='50s')
        if 'aggregations' in res:
            buckets = res['aggregations']['filtered']['values']['buckets']
            if type(field) == list:
                # For composite keys, make the lookup based on all fields
                # Make it a tuple since it can be hashed and used in dictionary lookups
                self.seen_values[tuple(field)] = []
                for bucket in buckets:
                    # We need to walk down the hierarchy and obtain the value at each level
                    self.seen_values[tuple(field)] += self.flatten_aggregation_hierarchy(bucket)
                # If we don't have any results, it could either be because of the absence of any baseline data
                # OR it may be because the composite key contained a non-primitive type. Either way, give the
                # end-users a heads up to help them debug what might be going on.
                if not self.seen_values[tuple(field)]:
                    elastalert_logger.warning((
                        'No results were found from all sub-aggregations. This can either indicate that there is '
                        'no baseline data OR that a non-primitive field was used in a composite key.'
                    ))
            else:
                keys = [bucket['key'] for bucket in buckets]
                self.seen_values[field] = keys
                elastalert_logger.info('Found %s unique values for %s' % (len(keys), field))
        else:
            self.seen_values[field] = []
            elastalert_logger.info('Found no values for %s' % (field))
示例3: silence
def silence(self):
    """ Silence an alert for a period of time. --silence and --rule must be passed as args. """
    if self.debug:
        logging.error('--silence not compatible with --debug')
        exit(1)
    if not self.args.rule:
        logging.error('--silence must be used with --rule')
        exit(1)
    # With --rule, self.rules will only contain that specific rule
    target_rule = self.rules[0]['name']
    try:
        time_unit, amount = self.args.silence.split('=')
        duration = datetime.timedelta(**{time_unit: int(amount)})
        # Round-trip through a timestamp string so the datetime carries tzinfo
        until = ts_to_dt(dt_to_ts(duration + datetime.datetime.utcnow()))
    except (ValueError, TypeError):
        logging.error('%s is not a valid time period' % (self.args.silence))
        exit(1)
    if not self.set_realert(target_rule, until, 0):
        logging.error('Failed to save silence command to elasticsearch')
        exit(1)
    logging.info('Success. %s will be silenced until %s' % (target_rule, until))
示例4: get_match_str
def get_match_str(self, match):
    """ Describe a frequency match as a human-readable time window. """
    use_local = self.rules.get('use_local_time')
    # The window ends at the match timestamp and starts one timeframe earlier.
    window_start = pretty_ts(dt_to_ts(ts_to_dt(match[self.ts_field]) - self.rules['timeframe']), use_local)
    window_end = pretty_ts(match[self.ts_field], use_local)
    return 'At least %d events occurred between %s and %s\n\n' % (self.rules['num_events'],
                                                                  window_start,
                                                                  window_end)
示例5: find_recent_pending_alerts
def find_recent_pending_alerts(self, time_limit):
    """ Queries writeback_es to find alerts that did not send
    and are newer than time_limit.

    :param time_limit: A datetime.timedelta; only alerts whose alert_time is
        within this much of now are returned.
    :return: A list of elasticsearch hit dicts (up to 1000), or [] on error
        or when there are no pending alerts.
    """
    query = {'query': {'query_string': {'query': 'alert_sent:false'}},
             'filter': {'range': {'alert_time': {'from': dt_to_ts(ts_now() - time_limit),
                                                 'to': dt_to_ts(ts_now())}}}}
    if self.writeback_es:
        try:
            res = self.writeback_es.search(index=self.writeback_index,
                                           doc_type='elastalert',
                                           body=query,
                                           size=1000)
            if res['hits']['hits']:
                return res['hits']['hits']
        # Narrowed from a bare except (which also swallowed SystemExit /
        # KeyboardInterrupt). Lookup stays best-effort: pending alerts are
        # simply retried on the next loop.
        except ElasticsearchException as e:
            logging.exception('Error finding recent pending alerts: %s' % (e))
    return []
示例6: get_match_str
def get_match_str(self, match):
    """ Describe a cardinality match as a human-readable time window. """
    use_local = self.rules.get('use_local_time')
    # Window runs from one timeframe before the match up to the match itself.
    window_start = pretty_ts(dt_to_ts(ts_to_dt(match[self.ts_field]) - self.rules['timeframe']), use_local)
    window_end = pretty_ts(match[self.ts_field], use_local)
    return ('A maximum of %d unique %s(s) occurred since last alert or '
            'between %s and %s\n\n' % (self.rules['max_cardinality'],
                                       self.rules['cardinality_field'],
                                       window_start, window_end))
示例7: get_match_str
def get_match_str(self, match):
    """ Summarize how many events occurred during the match window.

    :param match: The match dictionary; its timestamp is read via lookup_es_key.
    :return: A human-readable message string.
    """
    lt = self.rules.get('use_local_time')
    match_ts = lookup_es_key(match, self.ts_field)
    starttime = pretty_ts(dt_to_ts(ts_to_dt(match_ts) - self.rules['timeframe']), lt)
    # BUG FIX: endtime was referenced below but never assigned, which raised
    # a NameError at runtime. The window ends at the match timestamp.
    endtime = pretty_ts(match_ts, lt)
    message = 'At least %d(%d) events occurred between %s and %s\n\n' % (self.rules['num_events'],
                                                                         match['count'],
                                                                         starttime,
                                                                         endtime)
    return message
示例8: writeback
def writeback(self, doc_type, body):
    """ Persist a document to the writeback index, first serializing any
    datetime values (and stamping '@timestamp' if absent). Returns the ES
    response, or None in debug mode / when writing is unavailable. """
    # Convert any datetime objects to timestamps
    for key, value in list(body.items()):
        if isinstance(value, datetime.datetime):
            body[key] = dt_to_ts(value)
    if self.debug:
        elastalert_logger.info("Skipping writing to ES: %s" % (body))
        return None
    if '@timestamp' not in body:
        body['@timestamp'] = dt_to_ts(ts_now())
    if not self.writeback_es:
        return None
    try:
        return self.writeback_es.create(index=self.writeback_index,
                                        doc_type=doc_type, body=body)
    except ElasticsearchException as e:
        logging.exception("Error writing alert info to elasticsearch: %s" % (e))
        # Drop the connection so callers stop attempting writes until reset.
        self.writeback_es = None
示例9: get_match_str
def get_match_str(self, match):
    """ Build the alert text for a max-cardinality match window. """
    use_local = self.rules.get("use_local_time")
    window_start = pretty_ts(dt_to_ts(ts_to_dt(match[self.ts_field]) - self.rules["timeframe"]), use_local)
    window_end = pretty_ts(match[self.ts_field], use_local)
    # Implicit string-literal concatenation keeps the template readable.
    return "A maximum of %d unique %s(s) occurred since last alert or " "between %s and %s\n\n" % (
        self.rules["max_cardinality"],
        self.rules["cardinality_field"],
        window_start,
        window_end,
    )
示例10: add_match
def add_match(self, event):
    """ Record a matching event on self.matches.

    Event is a dictionary of terms straight from elasticsearch; alerts will
    report all of its contents.
    :param event: The matching event, a dictionary of terms.
    """
    # Serialize the timestamp back to a string before storing the match.
    if '@timestamp' in event:
        event['@timestamp'] = dt_to_ts(event['@timestamp'])
    self.matches.append(event)
示例11: get_query
def get_query(filters, starttime=None, endtime=None, sort=True, timestamp_field='@timestamp'):
    """ Returns a query dict that will apply a list of filters, filter by
    start and end time, and sort results by timestamp.
    :param filters: A list of elasticsearch filters to use.
    :param starttime: A timestamp to use as the start time of the query, or None.
    :param endtime: A timestamp to use as the end time of the query, or None.
    :param sort: If true, sort results by timestamp. (Default True)
    :param timestamp_field: The document field holding the event timestamp.
    :return: A query dictionary to pass to elasticsearch.
    """
    # Only serialize the bounds when they were actually provided; dt_to_ts
    # logs an error for non-datetime input such as the None defaults.
    if starttime is not None:
        starttime = dt_to_ts(starttime)
    if endtime is not None:
        endtime = dt_to_ts(endtime)
    # Shallow-copy so appending the range clause doesn't mutate the caller's list.
    filters = copy.copy(filters)
    query = {'filter': {'bool': {'must': filters}}}
    if starttime and endtime:
        query['filter']['bool']['must'].append({'range': {timestamp_field: {'from': starttime,
                                                                            'to': endtime}}})
    if sort:
        query['sort'] = [{timestamp_field: {'order': 'asc'}}]
    return query
示例12: add_match
def add_match(self, event):
    """ Record a matching event on self.matches.

    Event is a dictionary of terms straight from elasticsearch; alerts will
    report all of its contents.
    :param event: The matching event, a dictionary of terms.
    """
    # Convert datetime's back to timestamps
    ts_field = self.rules.get('timestamp_field')
    if ts_field in event:
        event[ts_field] = dt_to_ts(event[ts_field])
    self.matches.append(event)
示例13: find_recent_pending_alerts
def find_recent_pending_alerts(self, time_limit):
    """ Queries writeback_es to find alerts that did not send
    and are newer than time_limit.

    :param time_limit: A datetime.timedelta bounding how old an alert may be.
    :return: Up to 1000 hit dicts, oldest first; [] on error or no hits.
    """
    # XXX only fetches 1000 results. If limit is reached, next loop will catch them
    # unless there is constantly more than 1000 alerts to send.
    # Fetch recent, unsent alerts that aren't part of an aggregate, earlier alerts first.
    query = {'query': {'query_string': {'query': '!_exists_:aggregate_id AND alert_sent:false'}},
             'filter': {'range': {'alert_time': {'from': dt_to_ts(ts_now() - time_limit),
                                                 'to': dt_to_ts(ts_now())}}},
             'sort': {'alert_time': {'order': 'asc'}}}
    if self.writeback_es:
        try:
            res = self.writeback_es.search(index=self.writeback_index,
                                           doc_type='elastalert',
                                           body=query,
                                           size=1000)
            if res['hits']['hits']:
                return res['hits']['hits']
        # Narrowed from the bare except flagged by the original TODO; the
        # lookup stays best-effort — pending alerts are retried next loop.
        except ElasticsearchException as e:
            logging.exception('Error finding recent pending alerts: %s' % (e))
    return []
示例14: get_match_str
def get_match_str(self, match):
    """ Summarize a stat/threshold match: the time window plus a sample of
    up to five of the most recent matching values.

    :param match: The match dictionary.
    :return: A human-readable message string.
    """
    ts = match[self.rules['timestamp_field']]
    lt = self.rules.get('use_local_time')
    try:
        match_value = self.match_value[-1][:5]
    # Narrowed from a bare except (which also caught SystemExit /
    # KeyboardInterrupt): no recorded values yet, or an unsliceable entry,
    # just means we omit the sample list.
    except (AttributeError, IndexError, TypeError):
        match_value = []
    message = "Between %s and %s\n" % (pretty_ts(dt_to_ts(ts_to_dt(ts) - self.rules['timeframe']), lt), pretty_ts(ts, lt))
    message += "%s(%s) %s %s\nmatch value:\n\t%s...\n\n" % (
        self.rules['stat'],
        self.rules['stat_field'],
        self.rules['stat_type'],
        self.rules['threshold'],
        '\n\t'.join(match_value)
    )
    return message
示例15: add_match
def add_match(self, event):
    """ Record a matching event, optionally attaching it to the first match.

    When the rule's related_events flag (self.attach_related) is set and a
    match already exists, the event is appended to that match's
    'related_events' list instead of creating a new match.
    :param event: The matching event, a dictionary of terms.
    """
    # Convert datetime's back to timestamps
    ts_field = self.rules.get('timestamp_field')
    if ts_field in event:
        event[ts_field] = dt_to_ts(event[ts_field])
    if self.attach_related and self.matches:
        first_match = self.matches[0]
        # Reset a missing-or-falsy list before appending (preserves the
        # original handling of e.g. related_events == None).
        if not first_match.get('related_events'):
            first_match['related_events'] = []
        first_match['related_events'].append(event)
    else:
        self.matches.append(event)