本文整理汇总了Python中mozdef_util.utilities.toUTC.toUTC函数的典型用法代码示例。如果您正苦于以下问题:Python toUTC函数的具体用法?Python toUTC怎么用?Python toUTC使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了toUTC函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: verify_defaults
def verify_defaults(self, result):
    """Check that a parsed suricata event carries the expected default fields."""
    # Fixed-value defaults every suricata/nsm event must carry.
    expected_values = {
        'category': 'suricata',
        'eventsource': 'nsm',
        'severity': 'INFO',
    }
    for key, value in expected_values.items():
        assert result[key] == value
    # These keys only need to be present.
    for key in ('event_type', 'source'):
        assert key in result
    # Timestamp fields must already be normalized: running them back
    # through toUTC() must be a no-op.
    for key in ('receivedtimestamp', 'timestamp', 'utctimestamp'):
        assert toUTC(result[key]).isoformat() == result[key]
示例2: esCloseIndices
def esCloseIndices():
    """Close any 'events' Elasticsearch indices older than options.index_age days.

    Index names are expected to end in '-YYYYMMDD'; only daily indices
    (an 8-character date suffix) are considered for closing.
    """
    logger.debug('started')
    try:
        es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
        indices = es.get_indices()
    except Exception as e:
        logger.error("Unhandled exception while connecting to ES, terminating: %r" % (e))
        # Bug fix: actually terminate on connection failure. Previously
        # execution fell through and raised a NameError on the unbound
        # 'indices' variable below.
        return
    # examine each index pulled from get_indices
    # to determine if it meets aging criteria
    month_ago_date = toUTC(datetime.now()) - timedelta(days=int(options.index_age))
    # strptime() below yields naive datetimes, so compare naive-to-naive
    month_ago_date = month_ago_date.replace(tzinfo=None)
    for index in indices:
        if 'events' in index:
            index_date = index.rsplit('-', 1)[1]
            logger.debug("Checking to see if Index: %s can be closed." % (index))
            if len(index_date) == 8:
                index_date_obj = datetime.strptime(index_date, '%Y%m%d')
                try:
                    if month_ago_date > index_date_obj:
                        logger.debug("Index: %s will be closed." % (index))
                        es.close_index(index)
                    else:
                        logger.debug("Index: %s does not meet aging criteria and will not be closed." % (index))
                except Exception as e:
                    logger.error("Unhandled exception while closing indices, terminating: %r" % (e))
示例3: esPruneIndexes
def esPruneIndexes():
    """Delete Elasticsearch indices that have aged out of their pruning window.

    For each configured index, computes the dated index name that falls
    outside the retention period ('daily' or 'monthly' rotation) and
    deletes it if it still exists. A pruning value of '0' disables
    pruning for that index.
    """
    if options.output == 'syslog':
        logger.addHandler(SysLogHandler(address=(options.sysloghostname, options.syslogport)))
    else:
        sh = logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)
    logger.debug('started')
    try:
        es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
        indices = es.get_indices()
        # do the pruning
        for (index, dobackup, rotation, pruning) in zip(options.indices, options.dobackup, options.rotation, options.pruning):
            try:
                if pruning != '0':
                    index_to_prune = index
                    if rotation == 'daily':
                        idate = date.strftime(toUTC(datetime.now()) - timedelta(days=int(pruning)), '%Y%m%d')
                        index_to_prune += '-%s' % idate
                    elif rotation == 'monthly':
                        # Consistency fix: use the same toUTC(datetime.now())
                        # clock as the daily branch instead of the naive
                        # datetime.utcnow() — both yield the UTC date, but the
                        # codebase convention is toUTC().
                        idate = date.strftime(toUTC(datetime.now()) - timedelta(days=31 * int(pruning)), '%Y%m')
                        index_to_prune += '-%s' % idate
                    if index_to_prune in indices:
                        logger.debug('Deleting index: %s' % index_to_prune)
                        es.delete_index(index_to_prune, True)
                    else:
                        logger.error('Error deleting index %s, index missing' % index_to_prune)
            except Exception as e:
                logger.error("Unhandled exception while deleting %s, terminating: %r" % (index_to_prune, e))
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)
示例4: createAlertDict
def createAlertDict(
    self,
    summary,
    category,
    tags,
    events,
    severity="NOTICE",
    url=None,
    ircchannel=None,
):
    """
    Create an alert dict
    """
    # Reduce each ES hit down to the metadata the alert needs to carry.
    event_entries = [
        {
            "documentindex": hit["_index"],
            "documentsource": hit["_source"],
            "documentid": hit["_id"],
        }
        for hit in events
    ]
    alert = {
        "utctimestamp": toUTC(datetime.now()).isoformat(),
        "severity": severity,
        "summary": summary,
        "category": category,
        "tags": tags,
        "events": event_entries,
        "ircchannel": ircchannel,
    }
    # url is optional; only attach it when provided.
    if url:
        alert["url"] = url
    self.log.debug(alert)
    return alert
示例5: on_message
def on_message(self, body, message):
    """Consume one alert off the queue and post it to slack.

    body may arrive as a dict or as a JSON-encoded str/bytes payload;
    anything else is logged and dropped. Honors the alert's
    notify_mozdefbot flag and optional ircchannel routing, throttles
    posts to avoid flooding, and truncates oversized summaries.
    """
    try:
        # just to be safe..check what we were sent.
        if isinstance(body, dict):
            body_dict = body
        elif isinstance(body, (str, bytes)):
            # Bug fix: the old check referenced the Python-2-only name
            # 'unicode', which raises NameError under Python 3.
            try:
                body_dict = json.loads(body)  # lets assume it's json
            except ValueError:
                # not json..ack but log the message
                logger.exception("mozdefbot_slack exception: unknown body type received %r" % body)
                return
        else:
            logger.exception("mozdefbot_slack exception: unknown body type received %r" % body)
            return
        if 'notify_mozdefbot' in body_dict and body_dict['notify_mozdefbot'] is False:
            # If the alert tells us to not notify, then don't post message
            message.ack()
            return
        # process valid message
        # see where we send this alert
        channel = options.default_alert_channel
        if 'ircchannel' in body_dict:
            if body_dict['ircchannel'] in options.channels:
                channel = body_dict['ircchannel']
        # see if we need to delay a bit before sending the alert, to avoid
        # flooding the channel
        if self.lastalert is not None:
            delta = toUTC(datetime.now()) - self.lastalert
            logger.info('new alert, delta since last is {}\n'.format(delta))
            if delta.seconds < 2:
                logger.info('throttling before writing next alert\n')
                time.sleep(1)
        self.lastalert = toUTC(datetime.now())
        if len(body_dict['summary']) > 450:
            logger.info('alert is more than 450 bytes, truncating\n')
            body_dict['summary'] = body_dict['summary'][:450] + ' truncated...'
        logger.info("Posting alert: {0}".format(body_dict['summary']))
        self.bot.post_alert_message(body_dict, channel)
        message.ack()
    except ValueError as e:
        logger.exception("mozdefbot_slack exception while processing events queue %r" % e)
示例6: main
def main(self):
    """Alert on aggregated events whose utctimestamp is more than a day old."""
    cutoff = toUTC(datetime.now() - timedelta(days=1)).isoformat()
    query = SearchQuery(hours=6)
    query.add_must(LessThanMatch('utctimestamp', cutoff))
    self.filtersManual(query)
    self.searchEventsAggregated('mozdefhostname', samplesLimit=1000)
    self.walkAggregations(threshold=1)
示例7: genNewAttacker
def genNewAttacker():
    """Build a new, empty attacker document with default fields.

    Note the three timestamp fields are captured independently, so they
    may differ by microseconds.
    """
    return {
        '_id': genMeteorID(),
        'lastseentimestamp': toUTC(datetime.now()),
        'firstseentimestamp': toUTC(datetime.now()),
        'eventscount': 0,
        'alerts': [],
        'alertscount': 0,
        'category': 'unknown',
        'score': 0,
        'geocoordinates': {'countrycode': '', 'longitude': 0, 'latitude': 0},
        'tags': [],
        'notes': [],
        'indicators': [],
        'attackphase': 'unknown',
        'datecreated': toUTC(datetime.now()),
        'creator': sys.argv[0],
    }
示例8: test_add_required_fields_default
def test_add_required_fields_default(self):
    """add_required_fields() must populate every default field."""
    # Patch gethostname so the mozdefhostname value is deterministic.
    socket.gethostname = MockHostname().hostname
    self.event.add_required_fields()
    # Timestamp fields: present and already normalized to UTC ISO format.
    for ts_key in ('receivedtimestamp', 'utctimestamp', 'timestamp'):
        assert self.event[ts_key] is not None
        assert toUTC(self.event[ts_key]).isoformat() == self.event[ts_key]
    assert self.event['mozdefhostname'] == 'randomhostname'
    # Placeholder defaults for fields the caller did not supply.
    for unknown_key in ('category', 'hostname', 'processid', 'processname', 'severity', 'source'):
        assert self.event[unknown_key] == 'UNKNOWN'
    assert self.event['summary'] == 'example summary'
    assert self.event['tags'] == []
    assert self.event['details'] == {}
示例9: execute
def execute(self, elasticsearch_client, indices=['events', 'events-previous'], size=1000, request_timeout=30):
    """Run this query against elasticsearch and return the results.

    When aggregations were added, an aggregated search is performed;
    otherwise a plain search. The 'indices' default is mutable but is
    never modified here.

    Raises AttributeError when no query terms or aggregations were added.
    """
    if self.must == [] and self.must_not == [] and self.should == [] and self.aggregation == []:
        raise AttributeError('Must define a must, must_not, should query, or aggregation')
    if self.date_timedelta:
        # Constrain the search window: accept events whose utctimestamp
        # OR receivedtimestamp falls inside the configured timedelta.
        end_date = toUTC(datetime.now())
        begin_date = toUTC(datetime.now() - timedelta(**self.date_timedelta))
        utc_range_query = RangeMatch('utctimestamp', begin_date, end_date)
        received_range_query = RangeMatch('receivedtimestamp', begin_date, end_date)
        range_query = utc_range_query | received_range_query
        self.add_must(range_query)
    # Dead 'search_query = None' and 'results = []' pre-assignments removed;
    # both were immediately overwritten on every path.
    search_query = BooleanMatch(must=self.must, must_not=self.must_not, should=self.should)
    if len(self.aggregation) == 0:
        results = elasticsearch_client.search(search_query, indices, size, request_timeout)
    else:
        results = elasticsearch_client.aggregated_search(search_query, indices, self.aggregation, size, request_timeout)
    return results
示例10: run
def run(self):
    """Poll papertrail for new log records forever, converting each record
    into a mozdef-style event dict and handing it to self.on_message().
    """
    while True:
        try:
            # Query the window from the last request time up to now minus a
            # configured backoff, then advance the window for the next pass.
            curRequestTime = toUTC(datetime.now()) - timedelta(seconds=options.ptbackoff)
            records = self.ptrequestor.request(options.ptquery, self.lastRequestTime, curRequestTime)
            # update last request time for the next request
            self.lastRequestTime = curRequestTime
            for msgid in records:
                msgdict = records[msgid]
                # strip any line feeds from the message itself, we just convert them
                # into spaces
                msgdict['message'] = msgdict['message'].replace('\n', ' ').replace('\r', '')
                event = dict()
                event['tags'] = ['papertrail', options.ptacctname]
                event['details'] = msgdict
                # Promote well-known papertrail fields to top-level event fields.
                if 'generated_at' in event['details']:
                    event['utctimestamp'] = toUTC(event['details']['generated_at']).isoformat()
                if 'hostname' in event['details']:
                    event['hostname'] = event['details']['hostname']
                if 'message' in event['details']:
                    event['summary'] = event['details']['message']
                if 'severity' in event['details']:
                    event['severity'] = event['details']['severity']
                if 'source_ip' in event['details']:
                    event['sourceipaddress'] = event['details']['source_ip']
                else:
                    # NOTE(review): this else pairs with the source_ip check, so
                    # the INFO default only applies when source_ip is absent —
                    # it looks like it was intended for the severity check above.
                    # Confirm before changing.
                    event['severity'] = 'INFO'
                event['category'] = 'syslog'
                # process message
                self.on_message(event, msgdict)
            # Sleep between polls so we don't hammer the papertrail API.
            time.sleep(options.ptinterval)
        except ValueError as e:
            logger.exception('Exception while handling message: %r' % e)
示例11: convert_key_date_format
def convert_key_date_format(self, needle, haystack):
    """Convert the value at dotted path *needle* inside dict *haystack*
    to a UTC ISO-8601 string, in place.

    If any component of the path is missing, haystack is returned
    unmodified. Returns the (possibly modified) haystack.
    """
    # str.split never returns an empty list, so the old
    # 'len(...) == 0: return False' branch was dead code and is removed.
    key_path = needle.split(".")
    current_pointer = haystack
    for key in key_path:
        if key == key_path[-1]:
            # Bug fix: a missing leaf key used to raise KeyError; now the
            # event is returned untouched, matching the behavior for a
            # missing intermediate key below.
            if key in current_pointer:
                current_pointer[key] = toUTC(current_pointer[key]).isoformat()
            return haystack
        if key in current_pointer:
            current_pointer = current_pointer[key]
        else:
            return haystack
示例12: updateMongo
def updateMongo(mozdefdb, esAlerts):
    """Mirror elasticsearch alert hits into the mongo 'alerts' collection.

    Only alerts not already present in mongo (matched on esmetadata.id)
    are inserted; existing records are left untouched.
    """
    alerts = mozdefdb['alerts']
    for a in esAlerts['hits']:
        # insert alert into mongo if we don't already have it
        alertrecord = alerts.find_one({'esmetadata.id': a['_id']})
        if alertrecord is None:
            # new record
            mrecord = a['_source']
            # generate a meteor-compatible ID
            mrecord['_id'] = genMeteorID()
            # capture the elastic search meta data (index/id/type)
            # set the date back to a datetime from unicode, so mongo/meteor can properly sort, select.
            mrecord['utctimestamp']=toUTC(mrecord['utctimestamp'])
            # also set an epoch time field so minimongo can sort
            mrecord['utcepoch'] = calendar.timegm(mrecord['utctimestamp'].utctimetuple())
            mrecord['esmetadata'] = dict()
            mrecord['esmetadata']['id'] = a['_id']
            mrecord['esmetadata']['index'] = a['_index']
            alerts.insert(mrecord)
示例13: esSearch
def esSearch(es):
    """Aggregate recent event counts per category and wrap them in a
    mozdef stats event dict.
    """
    query = SearchQuery(minutes=options.aggregationminutes)
    query.add_aggregation(Aggregation('category'))
    agg_results = query.execute(es)
    # One {category: count} entry per aggregation bucket.
    counts = [
        {bucket['key']: bucket['count']}
        for bucket in agg_results['aggregations']['category']['terms']
    ]
    hostname = socket.gethostname()
    return {
        'utctimestamp': toUTC(datetime.now()).isoformat(),
        'category': 'stats',
        'hostname': hostname,
        'mozdefhostname': hostname,
        'severity': 'INFO',
        'source': 'mozdef',
        'tags': ['mozdef', 'stats'],
        'summary': 'Aggregated category counts',
        'processid': os.getpid(),
        'processname': sys.argv[0],
        'details': {'counts': counts},
    }
示例14: createAlertDict
def createAlertDict(self, summary, category, tags, events, severity='NOTICE', url=None, ircchannel=None):
    """
    Create an alert dict
    """
    alert = {
        'utctimestamp': toUTC(datetime.now()).isoformat(),
        'severity': severity,
        'summary': summary,
        'category': category,
        'tags': tags,
        'events': [],
        'ircchannel': ircchannel,
    }
    # url is optional; only attach it when provided.
    if url:
        alert['url'] = url
    # Carry only the ES metadata each event needs.
    for hit in events:
        entry = {
            'documentindex': hit['_index'],
            'documentsource': hit['_source'],
            'documentid': hit['_id'],
        }
        alert['events'].append(entry)
    self.log.debug(alert)
    return alert
示例15: onMessage
def onMessage(self, message, metadata):
    """Flatten a github webhook event (tagged 'githubeventsqs') into the
    mozdef event schema; pass everything else through unchanged.
    """
    # Only touch messages explicitly tagged as github webhook events.
    if 'tags' not in message or 'githubeventsqs' not in message['tags']:
        return (message, metadata)
    details = message['details']
    newmessage = {
        'details': {'request_id': details.get('request_id', 'UNKNOWN')},
        'category': 'github',
        'tags': ['github', 'webhook'],
        'eventsource': 'githubeventsqs',
        'source': details.get('event', 'UNKNOWN'),
    }
    # iterate through top level keys - push, etc
    if newmessage['source'] in self.eventtypes:
        for field, path in self.yap[newmessage['source']].items():
            mapped = jmespath.search(path, message)
            # JMESPath likes to silently return a None object
            if mapped is not None:
                newmessage['details'][field] = mapped
        commit_ts = newmessage['details'].get('commit_ts')
        if commit_ts is not None:
            newmessage['timestamp'] = commit_ts
            newmessage['utctimestamp'] = toUTC(commit_ts).isoformat()
    else:
        # Unrecognized event type: drop the message entirely.
        newmessage = None
    return (newmessage, metadata)