本文整理汇总了Python中pyflag.pyflaglog.log函数的典型用法代码示例。如果您正苦于以下问题:Python log函数的具体用法?Python log怎么用?Python log使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了log函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: external_process
def external_process(self, fd):
    """ Parse a Mozilla Mork database and insert its records.

    Depending on the column types found in the file the records are
    treated either as form history (inserted into
    mozilla_form_history) or as URL history (mozilla_history).
    """
    ## Read all the events from the file:
    pyflaglog.log(pyflaglog.DEBUG, "Processing %s as mork" % self.fd.inode)
    dbh = DB.DBO(self.case)
    inode_id = self.fd.lookup_id()
    h = MozHist.MozHist(fd=fd)

    ## Decided lazily from the first complete event: 'form' or 'history'
    context = None

    ## Optional history columns: (mork column name, result key)
    optional_columns = [ ('Typed',      'Typed'),
                         ('Referrer',   'Referrer'),
                         ('VisitCount', 'VisitCount'),
                         ('Name',       'name'),
                         ('Hostname',   'host') ]

    while 1:
        token = h.next_token()
        if not token: break

        if token=='EVENT_END':
            e = h.event
            if not context:
                ## Column type '80' tells us what kind of mork file this is
                if "formhistory" in h.types['80']:
                    context = 'form'
                else:
                    context = 'history'

            if context == 'form':
                try:
                    dbh.insert('mozilla_form_history',
                               inode_id = inode_id,
                               id = e['id'],
                               name = e['Name'],
                               value = e['Value'])
                except KeyError:
                    ## Incomplete record - skip it
                    continue
            else:
                try:
                    result = dict(
                        inode_id = inode_id,
                        ## Timestamps are microseconds - keep the seconds part
                        _LastVisitDate = "from_unixtime('%s')" % e['LastVisitDate'][:10],
                        _FirstVisitDate = "from_unixtime('%s')" % e['FirstVisitDate'][:10],
                        url = e['URL'],
                        )
                except KeyError:
                    ## A record missing these fields is not a history entry
                    continue

                ## id must also be numeric to be usable:
                try:
                    result['id'] = int(e['id'])
                except (KeyError, ValueError):
                    pass

                ## Copy whichever optional columns are present (the
                ## original used bare try/except pass for each one):
                for column, key in optional_columns:
                    if column in e:
                        result[key] = e[column]

                dbh.insert('mozilla_history', **result)
示例2: reset_all
def reset_all(**query):
""" This searchs for all executed reports with the provided parameters in them and resets them all.
Callers need to provide at least a report name, case and a family or an exception is raised.
"""
flag = GLOBAL_FLAG_OBJ
report =Registry.REPORTS.dispatch(query['family'],query['report'])
dbh=DB.DBO(query['case'])
family=query['family'].replace(" ","%20")
dbh.execute("select value from meta where property='report_executed' and value like '%%family=%s%%'" % family)
for row in dbh:
import cgi
q = query_type(string=row['value'],case=query['case'])
try:
for k in query.keys():
if k=='case': continue
if q[k]!=query[k]:
raise KeyError()
## This report should now be reset:
pyflaglog.log(pyflaglog.DEBUG, "Will now reset %s" % row['value'])
print "Resetting %s" % query
try:
report=report(flag)
except:
pass
report.reset(q)
dbh2 = DB.DBO(query['case'])
dbh2.execute("delete from meta where property='report_executed' and value=%r",row['value'])
except KeyError:
pass
示例3: put
def put(self, object, prefix='', key=None):
    """ Stores an object in the Store. Returns the key for the
    object. If key is already supplied we use that instead - Note
    that we do not check that it doesnt already exist.
    """
    self.mutex.acquire()
    try:
        ## Make room for the new entry if the store is full:
        self.check_full()

        timestamp = time.time()

        ## Derive a fresh key unless the caller supplied one:
        if not key:
            key = "%s%s" % (prefix, self.id)

        self.creation_times.append([timestamp, key, object])
        self.id += 1
    finally:
        self.mutex.release()

    pyflaglog.log(pyflaglog.VERBOSE_DEBUG,
                  "Stored key %s: %s" % (key, ("%r" % (object,))[:100]))

    return key
示例4: external_process
def external_process(self, fd):
    """ Process a Hotmail page.

    Runs each of the specialised page parsers over the file in turn;
    each parser is presumably expected to ignore page types it does
    not recognise (TODO confirm against the parser implementations).
    """
    pyflaglog.log(pyflaglog.DEBUG,"Opening %s for Hotmail processing" % self.fd.inode)
    ## Now we should be able to parse the data out:
    self.process_send_message(fd)
    self.process_editread(fd)
    self.process_readmessage(fd)
    self.process_mail_listing()
示例5: read
def read(self, length=None):
    """ Read from the inflated stream.

    First try the base File.read implementation; if it raises IOError
    we fall back to decompressing the raw stream ourselves, salvaging
    as much data as possible from a possibly-corrupt file.
    """
    try:
        return File.read(self,length)
    except IOError:
        pass

    if not self.gz:
        self.fd.seek(0)
        ## wbits=-15: raw deflate stream, no zlib/gzip header expected
        self.gz = gzip.zlib.decompressobj(-15)

    count = 0
    step = 1024
    result = ''

    ## Copy ourself into the file - This is in case we have errors
    ## in the file, we try to read as much as possible:
    while 1:
        try:
            data=self.gz.decompress(self.fd.read(step))
        except IOError,e:
            ## Decompression hiccup - retry with smaller chunks to
            ## recover as many bytes as possible before giving up:
            step /= 2
            if step<10:
                pyflaglog.log(pyflaglog.DEBUG, "Error reading from %s, could only get %s bytes" % (self.fd.inode, count));
                break
            else:
                continue
        except Exception, e:
            pyflaglog.log(pyflaglog.WARNING, "Unable to decompress inode %s" % e)
            break
        ## NOTE(review): in the visible excerpt `data` is never
        ## appended to `result` and nothing is returned after the
        ## loop - this snippet appears to be truncated.
示例6: add_inode
def add_inode(self, fd, offset, factories):
    """ We think we have a zip file here.

    Try to parse a zip local file header at offset. On success create
    a VFS inode covering the compressed data and return its size; on
    any parse failure return a small skip distance (10 bytes) so the
    carver can continue scanning.
    """
    b = Zip.Buffer(fd=fd)[offset:]
    try:
        header = Zip.ZipFileHeader(b)
        size = int(header['uncompr_size'])
        compressed_length = int(header['compr_size'])

        ## Some zip programs seem to leave this at 0 - because its
        ## already in the central directory. Unfortunately the
        ## carver currently does not look at the central directory
        ## - so we just make it a reasonable value
        if compressed_length==0:
            compressed_length = 100*1024

        name = header['zip_path'].get_value()
        if len(name)==0 or invalid_filename.search(name):
            pyflaglog.log(pyflaglog.DEBUG, "Thought the name %r is invalid - skipping file" % name[:10])
            return 10

        header_offset = header['data'].buffer.offset
    except Exception:
        ## Not a parsable zip header after all - advance a little and
        ## keep carving (was a bare except in the original):
        return 10

    new_inode = "%s|Z%s:%s" % (fd.inode, offset, compressed_length)
    self._add_inode(new_inode, size, name, fd, factories)

    return size
示例7: get
def get(self, key, remove=False):
    """ Retrieve the key from the store.
    If remove is specified we remove it from the Store altogether.
    """
    ## FIXME: This is slow for large stores... use a dict for
    ## quick reference:
    self.mutex.acquire()
    try:
        ## Linear scan for the requested key:
        for position, (timestamp, stored_key, stored_obj) in enumerate(self.creation_times):
            if stored_key != key:
                continue

            ## Take the entry out of the store:
            timestamp, stored_key, stored_obj = self.creation_times.pop(position)

            ## Unless the caller asked for removal, re-insert it at
            ## the most recent time (LRU style):
            if not remove:
                self.creation_times.append([time.time(), stored_key, stored_obj])
                self.check_full()

            pyflaglog.log(pyflaglog.VERBOSE_DEBUG,
                          "Got key %s: %s" % (key,
                                              ("%r" % (stored_obj,))[:100]))
            return stored_obj

        ## If we are here we could not find the key:
        pyflaglog.log(pyflaglog.VERBOSE_DEBUG, "Key %s not found" % (key,))
        raise KeyError("Key not found %s" % (key,))
    finally:
        self.mutex.release()
示例8: scan
def scan(self, fd, scanners, type, mime, cookie, scores=None, **args):
    """ Scan a Gmail page (html or javascript) for AJAX message data.

    Only runs when the GmailStreamMagic magic scored a hit. HTML
    pages have each <script> segment parsed individually; javascript
    responses are parsed whole. Every parsed tree is handed to
    process_js().
    """
    ## Guard against a missing scores dict as well as a zero score
    ## (the original crashed with AttributeError when scores was None):
    if not scores or scores.get('GmailStreamMagic',0) == 0:
        return

    pyflaglog.log(pyflaglog.DEBUG,"Opening %s for Gmail processing" % fd.inode_id)
    self.current_time = None
    self.current_box = 'Unknown'

    if "html" in mime:
        html_parser = HTML.HTMLParser()
        html_parser.parse_fd(fd)
        html_parser.close()

        ## Process all script segments
        for script_tag in html_parser.root.search("script"):
            script = script_tag.innerHTML()
            try:
                j=Javascript.JSParser()
                j.feed(script)
                j.close()
            except Exception:
                ## Malformed script segment - ignore it
                continue

            self.process_js(j.root, fd)

    elif "javascript" in mime:
        ## Make a new parser
        j=Javascript.JSParser()
        j.parse_fd(fd)
        j.close()

        self.process_js(j.root, fd)
示例9: execute
def execute(self,string):
    """ Execute the query, optionally guarded by a watchdog timer.

    Keeps a short rolling history of recently executed queries (used
    by _warning_check for error reporting). If self.timeout is set, a
    Timer thread kills the connection when the query takes too long.
    """
    self.py_row_cache = []
    self.py_cache_size = 10
    self._last_executed = string
    self._last_executed_sequence.append(string)
    ## Keep only the most recent 3 queries. The original sliced with
    ## [:-3], which *discards* the newest entries (including the one
    ## just appended) instead of keeping them:
    self._last_executed_sequence = self._last_executed_sequence[-3:]

    def cancel():
        ## Watchdog fired - the query took longer than self.timeout:
        pyflaglog.log(pyflaglog.WARNINGS, "Killing query in thread %s because it took too long" % self.connection.thread_id())
        self.kill_connection('query')

    if self.timeout:
        t = threading.Timer(self.timeout, cancel)
        t.start()
        try:
            pyflaglog.log(pyflaglog.VERBOSE_DEBUG, string)
            MySQLdb.cursors.SSDictCursor.execute(self,string)
        finally:
            ## Always disarm the watchdog, even if the query raised:
            t.cancel()
            t.join()
    else:
        if self.logged:
            pyflaglog.log(pyflaglog.VERBOSE_DEBUG, string)
        MySQLdb.cursors.SSDictCursor.execute(self,string)
示例10: process_stream
def process_stream(self, stream, factories):
    """ Scan a reassembled TCP stream for SMTP traffic.

    The stream is opened combined with its reverse half (the "S.../..."
    inode notation), parsed with the SMTP parser, and a VFS node is
    created for every message found so later scanners (e.g. RFC2822)
    can process them.
    """
    combined_inode = "I%s|S%s/%s" % (stream.fd.name, stream.inode_id, stream.reverse)
    pyflaglog.log(pyflaglog.DEBUG,"Openning %s for SMTP" % combined_inode)

    ## We open the file and scan it for emails:
    fd = self.fsfd.open(inode=combined_inode)
    dbh=DB.DBO(self.case)
    p=SMTP(fd,dbh,self.fsfd)

    ## Iterate over all the messages in this connection
    for f in p.parse():
        if not f: continue

        ## message number and its offset:
        count, offset, length = f

        ## Create the VFS node:
        path, combined_inode, inode_id =self.fsfd.lookup(inode=combined_inode)
        ## Climb out of the stream's directory so the SMTP folder sits
        ## higher up in the VFS tree:
        path=posixpath.normpath(path+"/../../../../../")

        new_inode="%s|o%s:%s" % (combined_inode,offset,length)
        ## Group messages by the day the stream was captured:
        ds_timestamp = Time.convert(stream.ts_sec, case=self.case, evidence_tz="UTC")
        date_str = ds_timestamp.split(" ")[0]

        self.fsfd.VFSCreate(None, new_inode,
                            "%s/SMTP/%s/Message_%s" % (path,
                                                       date_str,
                                                       count),
                            mtime = stream.ts_sec, size=length
                            )

        ## Scan the new file using the scanner train. If
        ## the user chose the RFC2822 scanner, we will be
        ## able to understand this:
        self.scan_as_file(new_inode, factories)
示例11: _warning_check
def _warning_check(self):
    """ We need to override this because for some cases it issues
    a SHOW WARNINGS query. Which will raise an 'out of sync
    error' when we operate in SS. This is a most sane approach -
    when warnings are detected, we simply try to drain the
    resultsets and then read the warnings.
    """
    if self.ignore_warnings: return

    ## We have warnings to show
    if self._warnings:
        ## Truncate each remembered query for readable log output:
        last_executed = [ x[:500] for x in self._last_executed_sequence]

        results = list(self._fetch_row(1000))
        if len(results)<1000:
            ## Result set fully drained - now safe to ask for warnings:
            self.execute("SHOW WARNINGS")
            while 1:
                a=self.fetchone()
                if not a: break
                pyflaglog.log(pyflaglog.DEBUG,"Mysql warnings: query %r: %s" % (last_executed,a))
        else:
            ## Rows remain - issuing SHOW WARNINGS now would go out
            ## of sync, so we can only note that warnings occurred:
            pyflaglog.log(pyflaglog.DEBUG,"Mysql issued warnings but we are unable to drain result queue")

        ## If we have strict SQL we abort on warnings:
        ## NOTE(review): at this point `a` is either the falsy loop
        ## terminator or - if the else branch ran - not defined at all
        ## (NameError). Looks like a bug; confirm the intended payload.
        if config.STRICTSQL:
            raise DBError(a)

        ## Keep the drained rows so callers can still fetch them:
        self.py_row_cache.extend(results)
示例12: check_index
def check_index(self, table, key, idx_type='', length=None):
    """ This checks the database to ensure that the said table has an index on said key.
    If an index is missing, we create it here, so we always ensure an index exists once we return. """
    ## We implement a local cache to ensure that we dont hit the
    ## DB all the time:
    cache_key = "%s/%s" % (self.case,table)
    try:
        ## These should be the fields with the indexes on them:
        fields = DBIndex_Cache.get(cache_key)
    except KeyError:
        ## Cache miss - ask the database and remember the answer:
        self.execute("show index from `%s`",table)
        fields = [ row['Key_name'] for row in self]
        DBIndex_Cache.put(fields, key=cache_key)

    ## Now fields is an array stored in the Store - we can append
    ## to it directly because we also hold a reference here and it
    ## will affect the next value gotten from the Store:
    if key in fields:
        return

    ## Build the column specification for the new index:
    if length:
        sql="(`%s`(%s))" % (key,length)
    else:
        sql="(`%s`)" % (key)

    pyflaglog.log(pyflaglog.VERBOSE_DEBUG,"Oops... No index found in table %s on field %s - Generating index, this may take a while" %(table,key))

    ## Index not found, we make it here:
    self.execute("Alter table `%s` add index %s %s",(table,idx_type,sql))

    ## Add to cache:
    fields.append(key)
示例13: external_process
def external_process(self, fd):
    """ Collect windows event-log message metadata.

    For a registry hive: record in the case-less pyflag database which
    DLL supplies the event messages for each service that has an
    EventMessageFile key. For any other file (treated as a PE binary):
    extract its message table into the EventMessages table.
    """
    if self.mime_type == "application/x-winnt-registry":
        print "Grabbing message sources from %s" % self.fd.inode
        ## populate the EventMessageSources table from the registry
        dbh=DB.DBO(self.case)
        ## Case-less handle - EventMessageSources is a global table:
        pydbh = DB.DBO()
        inode_id = self.fd.lookup_id()
        dbh.execute("select * from reg where reg_key='EventMessageFile' and inode_id=%r", inode_id)
        for row in dbh:
            ## The service name is the last component of the key path:
            service = os.path.basename(os.path.normpath(row['path']))
            pydbh.execute("select * from EventMessageSources where source=%r limit 1",service)
            pyrow=pydbh.fetch()
            if not pyrow:
                ## First time we see this service - remember its dll:
                filename = row['value'].split("\\")[-1].lower()
                pydbh.execute("insert ignore into EventMessageSources set filename=%r, source=%r" , (filename, service))
        return

    filename, inode, inode_id = self.ddfs.lookup(inode=self.inode)
    b = Buffer(fd=fd)

    pyflaglog.log(pyflaglog.VERBOSE_DEBUG, "Opening %s to extract messages" % self.inode)
    pydbh = DB.DBO()
    pydbh.mass_insert_start('EventMessages')
    try:
        ## Parse the PE resource section for message tables:
        m=PElib.get_messages(b)
        for k,v in m.messages.items():
            pydbh.mass_insert(filename = os.path.basename(filename),
                              message_id = k,
                              message = v['Message'],
                              offset = v.buffer.offset,
                              )
    except (IndexError, IOError, AttributeError):
        ## Not a PE file, or it has no message table:
        pyflaglog.log(pyflaglog.VERBOSE_DEBUG, "%s does not contain messages" % filename)
示例14: drop_table
def drop_table(case, name):
    """ Drops the log table tablename """
    if not name:
        return

    dbh = DB.DBO(case)
    pyflaglog.log(pyflaglog.DEBUG, "Dropping log table %s in case %s" % (name, case))

    dbh.execute("select * from log_tables where table_name = %r limit 1" , name)
    row = dbh.fetch()
    ## Table not found
    if not row:
        return

    ## Get the driver for this table and ask it to remove its table:
    log = load_preset(case, row['preset'])
    log.drop(name)

    ## Forget about the table ourselves:
    dbh.delete("log_tables",
               where= DB.expand("table_name = %r ", name));

    ## Make sure that the reports get all reset
    FlagFramework.reset_all(family='Load Data', report="Load Preset Log File",
                            table = name, case=case)
示例15: external_process
def external_process(self, fd):
    """ Process an MMS message.

    Parses the file with the mms library, creates a webmail message
    row for it, concatenates text/* parts into the message body and
    attaches every other non-SMIL data part as a VFS file.
    """
    pyflaglog.log(pyflaglog.DEBUG, "Opening %s for MMS Processing" % self.fd.inode)
    try:
        message = mms.MMSMessage.fromFile(fd.name)
    except Exception:
        ## Anything the mms library throws means we cant parse it
        ## (was a bare except in the original):
        pyflaglog.log(pyflaglog.DEBUG, "Error parsing %s" % self.fd.inode)
        return

    result = {'type': 'Sent', 'message': ''}

    ## Copy whichever standard headers are present:
    for k,v in [ ('From', 'From'),
                 ('To', 'To'),
                 ('Data', 'sent'),
                 ('Subject', 'subject')
                 ]:
        try:
            result[v] = message.headers[k]
        except KeyError:
            pass

    ## Create a new webmail message:
    inode_id = self.insert_message(result)

    dbh = DB.DBO(self.fd.case)
    count = 0
    for part in message.dataParts:
        count +=1
        if part.contentType.startswith('text/'):
            ## Text parts accumulate into the message body:
            result['message'] += part.data
            dbh.update('webmail_messages', where='inode_id="%s"' % inode_id,
                       message = result['message'])

        elif not part.contentType.endswith('smil'):
            ## Binary attachment - write it to the cache. Use a
            ## distinct name so we dont shadow our fd parameter, and
            ## close the handle even if the write fails:
            new_inode = self.fd.inode + "|m%s" % count
            filename = CacheManager.MANAGER.get_temp_path(self.fd.case, new_inode)
            out_fd = open(filename,"wb")
            try:
                out_fd.write(part.data)
            finally:
                out_fd.close()

            ## Add Attachment
            path, inode, inode_id = self.ddfs.lookup(inode_id = inode_id)
            attachment_id = self.ddfs.VFSCreate(None,
                                                new_inode,
                                                "%s/Message %s" % (path,count),
                                                size = len(part.data))

            ## Find a filename for the attachment in the part headers
            ## (header values may be (value, parameters) tuples):
            parameters = {}
            for hdr in part.headers:
                value = part.headers[hdr]
                if type(value) == tuple:
                    if len(value[1]) > 0:
                        parameters = value[1]

            filename = parameters.get("Filename", parameters.get("Name","output.bin"))
            dbh.insert("webmail_attachments",
                       inode_id = inode_id,
                       attachment = attachment_id,
                       url = filename)