This page collects typical usage examples of the Python method haystack.config.Config.getCacheFilename. If you are unsure what Config.getCacheFilename does, how to call it, or where it is useful, the curated examples below should help. You can also look at the containing class, haystack.config.Config, for more context.
The following shows 15 code examples of Config.getCacheFilename, sorted by popularity by default.
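All of the examples share the same pattern: derive a per-dump cache file path with Config.getCacheFilename(<cache key>, <dump name>), try to load a previously saved integer array from it, and recompute and persist the data only on a cache miss. The sketch below isolates that pattern. It is a minimal illustration rather than code from haystack itself; the cached_int_array helper is hypothetical, and the int_array_cache/int_array_save import path and the idea that getCacheFilename simply builds a path under the dump's cache directory are assumptions based on how the examples use it.

from haystack.config import Config
# assumed import path for the helpers the examples call as bare names
from haystack.reverse.utils import int_array_cache, int_array_save

def cached_int_array(dumpfilename, cache_key, compute):
    ''' Load an integer array from the dump's cache, or compute and save it. '''
    fname = Config.getCacheFilename(cache_key, dumpfilename)  # e.g. Config.CACHE_HEAP_ADDRS
    data = int_array_cache(fname)      # returns None when no cache file exists yet
    if data is None:
        data = compute()               # the expensive step, e.g. a pointer scan
        int_array_save(fname, data)    # persist the result for the next run
    return data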
Example 1: getAllPointers
# Required import: from haystack.config import Config [as alias]
# Or: from haystack.config.Config import getCacheFilename [as alias]
def getAllPointers(dumpfilename, mappings):
    ''' Search all mmap pointer values in the heap.
    Records values and pointer addresses found in the heap.
    '''
    import pointerfinder
    F_HEAP_O = Config.getCacheFilename(Config.CACHE_ALL_PTRS_ADDRS, dumpfilename)
    F_HEAP_V = Config.getCacheFilename(Config.CACHE_ALL_PTRS_VALUES, dumpfilename)
    heap_addrs = int_array_cache(F_HEAP_O)
    heap_values = int_array_cache(F_HEAP_V)
    if heap_addrs is None or heap_values is None:
        log.info('[+] Making new cache - all pointers')
        heap_enumerator = pointerfinder.PointerEnumerator(mappings.getHeap())
        heap_enumerator.setTargetMapping(mappings)  # all pointers
        heap_enum = heap_enumerator.search()
        if len(heap_enum) > 0:
            heap_addrs, heap_values = zip(*heap_enum)
        else:
            heap_addrs, heap_values = (), ()
        log.info('\t[-] got %d pointers' % (len(heap_enum)))
        # merge
        int_array_save(F_HEAP_O, heap_addrs)
        int_array_save(F_HEAP_V, heap_values)
    else:
        log.info('[+] Loading from cache %d pointers %d unique' % (len(heap_values), len(set(heap_values))))
    return heap_addrs, heap_values
Example 2: getAllocations
# Required import: from haystack.config import Config [as alias]
# Or: from haystack.config.Config import getCacheFilename [as alias]
def getAllocations(dumpfilename, mappings, heap, get_user_alloc=None):
    ''' Search malloc_chunks in the heap.
    Records addresses and sizes.
    '''
    # TODO if linux
    # TODO from haystack.reverse import heapwalker
    import libc.ctypes_malloc
    f_addrs = Config.getCacheFilename('%x.%s' % (heap.start, Config.CACHE_MALLOC_CHUNKS_ADDRS), dumpfilename)
    f_sizes = Config.getCacheFilename('%x.%s' % (heap.start, Config.CACHE_MALLOC_CHUNKS_SIZES), dumpfilename)
    log.debug('reading from %s' % (f_addrs))
    addrs = int_array_cache(f_addrs)
    sizes = int_array_cache(f_sizes)
    if addrs is None or sizes is None:
        log.info('[+] Making new cache - getting malloc_chunks from heap')
        ### TODO: HeapWalker + order addresses ASC ...
        # allocations = sorted(heapwalker.get_user_allocations(mappings, heap))
        ## TODO 2: allocations should be triaged by mmapping (heap.start) before write2disk.
        ## Or heap.start should be removed from the cache name.. it has no impact.
        ## heapwalker.getuserAllocations should parse ALL mmappings to get all user allocations.
        ### But in that case, there will/could be a problem when using utils.closestFloorValue...
        ### in case of a pointer (bad allocation) out of a mmapping space.
        ### But that is not possible, because we are reporting factual references to existing address space.
        ### OK. heap.start should be deleted from the cache name.
        allocations = mappings.get_user_allocations(mappings, heap)
        addrs, sizes = zip(*allocations)
        int_array_save(f_addrs, addrs)
        int_array_save(f_sizes, sizes)
    else:
        log.info('[+] Loading from cache')
    log.info('\t[-] we have %d malloc_chunks' % (len(addrs)))
    return addrs, sizes
Example 3: getHeapPointers
# Required import: from haystack.config import Config [as alias]
# Or: from haystack.config.Config import getCacheFilename [as alias]
def getHeapPointers(dumpfilename, mappings):
    ''' Search heap pointer values in the stack and heap.
    Records values and pointer addresses found in the heap.
    '''
    import pointerfinder
    #F_VALUES = Config.getCacheFilename(Config.CACHE_HS_POINTERS_VALUES, dumpfilename)
    F_HEAP_O = Config.getCacheFilename(Config.CACHE_HEAP_ADDRS, dumpfilename)
    F_HEAP_V = Config.getCacheFilename(Config.CACHE_HEAP_VALUES, dumpfilename)
    #F_STACK_O = Config.getCacheFilename(Config.CACHE_STACK_ADDRS, dumpfilename)
    #F_STACK_V = Config.getCacheFilename(Config.CACHE_STACK_VALUES, dumpfilename)
    #log.debug('reading from %s' % (F_VALUES))
    #values = int_array_cache(F_VALUES)
    heap_addrs = int_array_cache(F_HEAP_O)
    heap_values = int_array_cache(F_HEAP_V)
    #stack_addrs = int_array_cache(F_STACK_O)
    #stack_values = int_array_cache(F_STACK_V)
    if heap_addrs is None or heap_values is None:
        log.info('[+] Making new cache - heap pointers')  #- getting pointers values from stack')
        #stack_enumerator = pointerfinder.PointerEnumerator(mappings.getStack())
        #stack_enumerator.setTargetMapping(mappings.getHeap())  # only interested in heap pointers
        #stack_enum = stack_enumerator.search()
        #if len(stack_enum) > 0:
        #    stack_offsets, stack_values = zip(*stack_enum)
        #else:
        #    stack_offsets, stack_values = (), ()
        #log.info('\t[-] got %d pointers' % (len(stack_enum)))
        #log.info('\t[-] merging pointers from heap')
        heap_enum = pointerfinder.PointerEnumerator(mappings.getHeap()).search()
        if len(heap_enum) > 0:
            heap_addrs, heap_values = zip(*heap_enum)
        else:
            heap_addrs, heap_values = (), ()
        log.info('\t[-] got %d pointers' % (len(heap_enum)))
        # merge
        #values = sorted(set(heap_values + stack_values))
        #int_array_save(F_VALUES, values)
        int_array_save(F_HEAP_O, heap_addrs)
        int_array_save(F_HEAP_V, heap_values)
        #int_array_save(F_STACK_O, stack_addrs)
        #int_array_save(F_STACK_V, stack_values)
        #log.info('\t[-] we have %d unique pointers values out of %d orig.' % (len(values), len(heap_values) + len(stack_values)))
    else:
        log.info('[+] Loading from cache %d pointers %d unique' % (len(heap_values), len(set(heap_values))))
        #log.info('\t[-] we have %d unique pointers values, and %d pointers in heap.' % (len(values), len(heap_addrs)))
        #aligned = numpy.asarray(filter(lambda x: (x % 4) == 0, values))
        #not_aligned = numpy.asarray(sorted(set(values) ^ set(aligned)))
        #log.info('\t[-] only %d are aligned values.' % (len(aligned)))
    return heap_addrs, heap_values  #, stack_addrs, stack_values  # values, aligned, not_aligned
Example 4: _reverse
# Required import: from haystack.config import Config [as alias]
# Or: from haystack.config.Config import getCacheFilename [as alias]
def _reverse(self, context):
    import networkx
    #import code
    #code.interact(local=locals())
    graph = networkx.DiGraph()
    graph.add_nodes_from(['%x' % k for k in context.listStructuresAddresses()])  # we only need the addresses...
    log.info('[+] Graph - added %d nodes' % (graph.number_of_nodes()))
    t0 = time.time()
    tl = t0
    for i, ptr_value in enumerate(context.listStructuresAddresses()):
        struct = context.getStructureForAddr(ptr_value)
        #targets = set(('%x' % ptr_value, '%x' % child.target_struct_addr) for child in struct.getPointerFields())  # target_struct_addr
        targets = set(('%x' % ptr_value, '%x' % child._child_addr) for child in struct.getPointerFields())  # target_struct_addr
        ## DEBUG
        if len(struct.getPointerFields()) > 0:
            if len(targets) == 0:
                raise ValueError
        ## DEBUG
        graph.add_edges_from(targets)
        if time.time() - tl > 30:
            tl = time.time()
            rate = ((tl - t0) / (i))  #if decoded else ((tl-t0)/(fromcache))
            log.info('%2.2f seconds to go (g:%d)' % (
                (len(graph) - (i)) * rate, i))
    log.info('[+] Graph - added %d edges' % (graph.number_of_edges()))
    networkx.readwrite.gexf.write_gexf(graph, Config.getCacheFilename(Config.CACHE_GRAPH, context.dumpname))
    context.parsed.add(str(self))
    return
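Since _reverse persists the pointer graph as a GEXF file in the cache directory, a later analysis session can reload it with networkx. A minimal sketch, assuming the same Config.CACHE_GRAPH key and a dumpname variable naming the memory dump:

import networkx
from haystack.config import Config

# Reload the pointer graph that _reverse() wrote to the cache directory.
fname = Config.getCacheFilename(Config.CACHE_GRAPH, dumpname)  # dumpname is assumed to be defined
graph = networkx.readwrite.gexf.read_gexf(fname)
print '%d nodes, %d edges' % (graph.number_of_nodes(), graph.number_of_edges())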
Example 5: reverseLocalFonctionPointerNames
# Required import: from haystack.config import Config [as alias]
# Or: from haystack.config.Config import getCacheFilename [as alias]
def reverseLocalFonctionPointerNames(context):
    ''' Reverse function pointer names by trying to rebase the pointer value to a local ld_open.
    Load the local memdump,
    map all libraries,
    go through all pointers in the libraries,
    and try to dladdr the pointers by rebasing.
    '''
    fsave = Config.getCacheFilename(Config.CACHE_FUNCTION_NAMES, context.dumpname)
    if os.access(fsave, os.F_OK):
        import pickle
        vtable = pickle.load(file(fsave, 'rb'))
        for x in vtable.items():
            yield x
        raise StopIteration
    import ctypes
    IGNORES = ['None', '[heap]', '[stack]', '[vdso]']
    # XXX this is not portable.
    libdl = ctypes.CDLL('libdl.so')
    def getname(fnaddr):
        info = Dl_info()
        ret = libdl.dladdr(fnaddr, ctypes.byref(info))
        return info.dli_sname.string, info.dli_saddr
    mappings = context.mappings
    ldso = dict()
    for m in mappings:
        if m.pathname not in IGNORES and m.pathname not in ldso:
            try:
                ldso[m.pathname] = ctypes.CDLL(m.pathname)
            except OSError, e:
                IGNORES.append(m.pathname)
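The snippet above uses a Dl_info structure that is not defined in the excerpt. A ctypes declaration matching the standard <dlfcn.h> layout used by dladdr(3) would look like the sketch below; this is my reconstruction, not part of the original example. Note that with plain c_char_p fields, dli_sname reads back directly as a string, so the getname helper above would return info.dli_sname rather than info.dli_sname.string.

import ctypes

class Dl_info(ctypes.Structure):
    # Layout of the Dl_info struct filled in by dladdr(3).
    _fields_ = [('dli_fname', ctypes.c_char_p),  # pathname of the shared object
                ('dli_fbase', ctypes.c_void_p),  # base address of that object
                ('dli_sname', ctypes.c_char_p),  # name of the nearest symbol
                ('dli_saddr', ctypes.c_void_p)]  # address of that symbol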
Example 6: cacheSizes
# Required import: from haystack.config import Config [as alias]
# Or: from haystack.config.Config import getCacheFilename [as alias]
def cacheSizes(self):
    """Find the number of different sizes, and create that many numpy arrays"""
    # if not os.access
    outdir = Config.getCacheFilename(
        Config.CACHE_SIGNATURE_SIZES_DIR,
        self._context.dumpname)
    if not os.path.isdir(outdir):
        os.mkdir(outdir)
    if not os.access(outdir, os.W_OK):
        raise IOError('cant write to %s' % (outdir))
    #
    sizes = map(int, set(self._context._malloc_sizes))
    arrays = dict([(s, []) for s in sizes])
    # sort all addrs into their size buckets
    [arrays[self._context._malloc_sizes[i]].append(
        long(addr)) for i, addr in enumerate(self._context._malloc_addresses)]
    # saving all sizes dictionaries to files...
    for size, lst in arrays.items():
        fout = os.path.sep.join([outdir, 'size.%0.4x' % (size)])
        arrays[size] = utils.int_array_save(fout, lst)
    # saved all sizes dictionaries.
    # tag it as done
    file(
        os.path.sep.join([outdir, Config.CACHE_SIGNATURE_SIZES_DIR_TAG]), 'w')
    self._sizes = arrays
    return
Example 7: load
# Required import: from haystack.config import Config [as alias]
# Or: from haystack.config.Config import getCacheFilename [as alias]
def load(self):
    outdir = Config.getCacheFilename(
        Config.CACHE_SIGNATURE_GROUPS_DIR,
        self._context.dumpname)
    inname = os.path.sep.join([outdir, self._name])
    self._similarities = utils.int_array_cache(inname)
    return
Example 8: _loadCache
# Required import: from haystack.config import Config [as alias]
# Or: from haystack.config.Config import getCacheFilename [as alias]
def _loadCache(self):
    outdir = Config.getCacheFilename(Config.CACHE_SIGNATURE_SIZES_DIR, self._context.dumpname)
    fdone = os.path.sep.join([outdir, Config.CACHE_SIGNATURE_SIZES_DIR_TAG])
    if not os.access(fdone, os.R_OK):
        return False
    for myfile in os.listdir(outdir):
        try:
            addr = int(myfile.split('_')[1], 16)
        except IndexError, e:
            continue  # ignore file
Example 9: persist
# Required import: from haystack.config import Config [as alias]
# Or: from haystack.config.Config import getCacheFilename [as alias]
def persist(self):
    outdir = Config.getCacheFilename(Config.CACHE_SIGNATURE_GROUPS_DIR, self._context.dumpname)
    if not os.path.isdir(outdir):
        os.mkdir(outdir)
    if not os.access(outdir, os.W_OK):
        raise IOError('cant write to %s' % (outdir))
    #
    outname = os.path.sep.join([outdir, self._name])
    ar = utils.int_array_save(outname, self._similarities)
    return
Example 10: cacheLoad
# Required import: from haystack.config import Config [as alias]
# Or: from haystack.config.Config import getCacheFilename [as alias]
def cacheLoad(cls, mappings):
    from haystack.reverse.reversers import ReverserContext
    dumpname = os.path.normpath(mappings.name)
    context_cache = Config.getCacheFilename(Config.CACHE_CONTEXT, dumpname)
    try:
        context = pickle.load(file(context_cache, 'r'))
    except EOFError, e:
        os.remove(context_cache)
        log.error('Error in the context file. File cleaned. Please restart.')
        raise e
Example 11: getAllocations
# Required import: from haystack.config import Config [as alias]
# Or: from haystack.config.Config import getCacheFilename [as alias]
def getAllocations(dumpfilename, mappings, heap):
    ''' Search malloc_chunks in the heap.
    Records addresses and sizes.
    '''
    # TODO if linux
    import libc.ctypes_malloc
    f_addrs = Config.getCacheFilename(Config.CACHE_MALLOC_CHUNKS_ADDRS, dumpfilename + '.%x' % (heap.start))
    f_sizes = Config.getCacheFilename(Config.CACHE_MALLOC_CHUNKS_SIZES, dumpfilename + '.%x' % (heap.start))
    log.debug('reading from %s' % (f_addrs))
    addrs = int_array_cache(f_addrs)
    sizes = int_array_cache(f_sizes)
    if addrs is None or sizes is None:
        log.info('[+] Making new cache - getting malloc_chunks from heap')
        allocations = libc.ctypes_malloc.getUserAllocations(mappings, heap, filterInuse=True)
        addrs, sizes = zip(*allocations)
        int_array_save(f_addrs, addrs)
        int_array_save(f_sizes, sizes)
    else:
        log.info('[+] Loading from cache')
    log.info('\t[-] we have %d malloc_chunks' % (len(addrs)))
    return addrs, sizes
Example 12: saveSignatures
# Required import: from haystack.config import Config [as alias]
# Or: from haystack.config.Config import getCacheFilename [as alias]
def saveSignatures(cache, structCache, dumpname):
    ''' cache is a dict of {signature: [structs]} '''
    fout = file(Config.getCacheFilename(Config.CACHE_GENERATED_PY_HEADERS, dumpname), 'w')
    towrite = []
    tuples = [(len(structs), sig, structs) for sig, structs in cache.items()]
    tuples.sort(reverse=True)
    for l, sig, structs in tuples:
        values = ''
        s = '''
# %d structs
#class %s
%s
''' % (len(structs), sig, structs[0].toString())
        fout.write(s)
    fout.close()
Example 13: _loadCache
# Required import: from haystack.config import Config [as alias]
# Or: from haystack.config.Config import getCacheFilename [as alias]
def _loadCache(self):
    outdir = Config.getCacheFilename(
        Config.CACHE_SIGNATURE_SIZES_DIR,
        self._context.dumpname)
    fdone = os.path.sep.join(
        [outdir, Config.CACHE_SIGNATURE_SIZES_DIR_TAG])
    if not os.access(fdone, os.R_OK):
        return False
    for myfile in os.listdir(outdir):
        try:
            # FIXME: not sure its -
            # and what that section is about in general.
            addr = int(myfile.split('-')[1], 16)
        except IndexError as e:
            continue  # ignore file
Example 14: save_headers
# Required import: from haystack.config import Config [as alias]
# Or: from haystack.config.Config import getCacheFilename [as alias]
def save_headers(context, addrs=None):
    ''' structs_addrs is sorted '''
    log.info('[+] saving headers')
    fout = file(Config.getCacheFilename(Config.CACHE_GENERATED_PY_HEADERS_VALUES, context.dumpname), 'w')
    towrite = []
    if addrs is None:
        addrs = iter(context.listStructuresAddresses())
    for vaddr in addrs:
        #anon = context._get_structures()[vaddr]
        anon = context.getStructureForAddr(vaddr)
        towrite.append(anon.toString())
        if len(towrite) >= 10000:
            try:
                fout.write('\n'.join(towrite))
            except UnicodeDecodeError, e:
                print 'ERROR on ', anon
            towrite = []
            fout.flush()
Example 15: rewrite
# Required import: from haystack.config import Config [as alias]
# Or: from haystack.config.Config import getCacheFilename [as alias]
def rewrite(structs_addrs, structCache, dumpname):
    ''' structs_addrs is sorted '''
    structs_addrs.sort()
    fout = file(Config.getCacheFilename(Config.CACHE_GENERATED_PY_HEADERS_VALUES, dumpname), 'w')
    towrite = []
    for vaddr in structs_addrs:
        ## debug
        if vaddr in DEBUG_ADDRS:
            logging.getLogger('progressive').setLevel(logging.DEBUG)
        else:
            logging.getLogger('progressive').setLevel(logging.INFO)
        anon = structCache[vaddr]
        anon.resolvePointers()
        towrite.append(anon.toString())
        if len(towrite) >= 10000:
            fout.write('\n'.join(towrite))
            towrite = []
    fout.write('\n'.join(towrite))
    fout.close()
    return