本文整理汇总了Python中lib.cuckoo.core.database.Database.view_sample方法的典型用法代码示例。如果您正苦于以下问题:Python Database.view_sample方法的具体用法?Python Database.view_sample怎么用?Python Database.view_sample使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类lib.cuckoo.core.database.Database的用法示例。
在下文中一共展示了Database.view_sample方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: index
# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import view_sample [as 别名]
def index(request):
    """Render the analysis index page.

    Shows the 50 most recent non-pending file analyses (with sample
    metadata and a short file name) and the 50 most recent non-pending
    URL analyses.
    """
    db = Database()
    file_tasks = db.list_tasks(limit=50, category="file", not_status=TASK_PENDING)
    url_tasks = db.list_tasks(limit=50, category="url", not_status=TASK_PENDING)

    file_rows = []
    if file_tasks:
        for task in file_tasks:
            row = task.to_dict()
            # Attach the submitted sample's metadata and a display name.
            row["sample"] = db.view_sample(row["sample_id"]).to_dict()
            row["filename"] = os.path.basename(row["target"])
            if db.view_errors(task.id):
                row["errors"] = True
            file_rows.append(row)

    url_rows = []
    if url_tasks:
        for task in url_tasks:
            row = task.to_dict()
            if db.view_errors(task.id):
                row["errors"] = True
            url_rows.append(row)

    return render(request, "analysis/index.html", {
        "files": file_rows,
        "urls": url_rows,
    })
示例2: index
# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import view_sample [as 别名]
def index(request):
    """Render the analysis index with recent file and URL tasks.

    Lists up to 50 non-pending tasks per category; file tasks carry the
    submitted sample's metadata, and any task with recorded errors is
    flagged with ``errors``.
    """
    db = Database()
    recent_files = db.list_tasks(limit=50, category="file", not_status=TASK_PENDING)
    recent_urls = db.list_tasks(limit=50, category="url", not_status=TASK_PENDING)

    files, urls = [], []

    if recent_files:
        for task in recent_files:
            entry = task.to_dict()
            entry["sample"] = db.view_sample(entry["sample_id"]).to_dict()
            if db.view_errors(task.id):
                entry["errors"] = True
            files.append(entry)

    if recent_urls:
        for task in recent_urls:
            entry = task.to_dict()
            if db.view_errors(task.id):
                entry["errors"] = True
            urls.append(entry)

    return render_to_response("analysis/index.html",
                              {"files": files, "urls": urls},
                              context_instance=RequestContext(request))
示例3: autoprocess
# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import view_sample [as 别名]
def autoprocess(parallel=1):
    """Continuously generate reports for completed tasks.

    Polls the database for tasks in the TASK_COMPLETED state and dispatches
    report generation to a pool of *parallel* worker processes, keeping at
    most *parallel* results in flight. Runs until
    ``cfg.cuckoo.max_analysis_count`` tasks have been queued (forever when
    the limit is 0/unset).

    @param parallel: number of worker processes in the pool.
    """
    maxcount = cfg.cuckoo.max_analysis_count
    count = 0
    db = Database()
    pool = multiprocessing.Pool(parallel)
    pending_results = []

    # CAUTION - big ugly loop ahead.
    while count < maxcount or not maxcount:
        # Pending_results maintenance: reap finished workers.
        for ar, tid, target, copy_path in list(pending_results):
            if ar.ready():
                if ar.successful():
                    log.info("Task #%d: reports generation completed", tid)
                else:
                    try:
                        ar.get()
                    # BUGFIX: narrowed from a bare "except:" so SystemExit /
                    # KeyboardInterrupt are not silently swallowed here.
                    except Exception:
                        log.exception("Exception when processing task ID %u.", tid)
                    db.set_status(tid, TASK_FAILED_PROCESSING)

                pending_results.remove((ar, tid, target, copy_path))

        # If still full, don't add more (necessary despite pool).
        if len(pending_results) >= parallel:
            time.sleep(1)
            continue

        # If we're here, getting parallel tasks should at least
        # have one we don't know.
        tasks = db.list_tasks(status=TASK_COMPLETED, limit=parallel,
                              order_by="completed_on asc")

        # For loop to add only one, nice.
        for task in tasks:
            # Not-so-efficient lock: skip tasks already being processed.
            if task.id in [tid for ar, tid, target, copy_path
                           in pending_results]:
                continue

            log.info("Processing analysis data for Task #%d", task.id)
            sample = db.view_sample(task.sample_id)
            copy_path = os.path.join(CUCKOO_ROOT, "storage",
                                     "binaries", sample.sha256)

            args = task.id, task.target, copy_path
            kwargs = dict(report=True, auto=True)
            result = pool.apply_async(process, args, kwargs)
            pending_results.append((result, task.id, task.target, copy_path))

            count += 1
            break

        # If there wasn't anything to add, sleep tight.
        if not tasks:
            time.sleep(5)
示例4: instance
# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import view_sample [as 别名]
def instance(instance):
    """Report completed tasks assigned to one processing instance.

    Repeatedly fetches processing-task records for *instance*, reports each
    TASK_COMPLETED task (copying file samples from the binaries store), and
    deletes the processing record. Stops when ``max_analysis_count`` tasks
    have been handled (forever when unset) or when the instance disappears
    from the scheduler's records.

    @param instance: name of this processing instance.
    """
    maxcount = cfg.cuckoo.max_analysis_count
    count = 0
    db = Database()
    try:
        while not maxcount or count != maxcount:
            # Never fetch more tasks than we are still allowed to process.
            if maxcount:
                limit = min(maxcount - count, 32)
            else:
                limit = 32

            tps = db.list_processing_tasks(instance=instance, count=limit)

            # No new tasks, we can wait a small while before we query again
            # for new tasks.
            if not tps:
                # Just make sure this instance is still available - it is not
                # if the scheduler has been restarted. In that case there will
                # be no records at all for this processing task.
                if not db.count_processing_tasks(instance):
                    log.info("This instance (%s) is not available anymore, "
                             "stopping.", instance)
                    break

                time.sleep(1)
                continue

            for tp in tps:
                task = db.view_task(tp.task_id)

                if task.status != TASK_COMPLETED:
                    log.warning("Task #%d: status (%s) is not completed, "
                                "ignoring", task.id, task.status)
                    continue

                log.info("Task #%d: reporting task", task.id)

                if task.category == "file":
                    sample = db.view_sample(task.sample_id)
                    copy_path = os.path.join(CUCKOO_ROOT, "storage",
                                             "binaries", sample.sha256)
                else:
                    copy_path = None

                try:
                    process(task.target, copy_path, task=task.to_dict(),
                            report=True, auto=True)
                    db.set_status(task.id, TASK_REPORTED)
                except Exception as e:
                    log.exception("Task #%d: error reporting: %s", task.id, e)
                    db.set_status(task.id, TASK_FAILED_PROCESSING)

                db.delete_processing_task(tp)

                # BUGFIX: the counter was never advanced, so the
                # max_analysis_count limit (used both in the while condition
                # and the `limit` computation above) could never take effect.
                count += 1
    except KeyboardInterrupt:
        raise
    except Exception as e:
        log.exception("Caught unknown exception: %s", e)
示例5: experiment
# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import view_sample [as 别名]
def experiment(request, experiment_id=None):
    """Show the tasks of one experiment, or the list of all experiments.

    With *experiment_id*: renders the analysis index for up to 50 file
    tasks of that experiment, attaching sample metadata and PCAP info
    from the results database. Without it: lists every experiment with a
    human-readable last-task timeout.
    """
    db = Database()

    if not experiment_id:
        # List all experiments; format the timeout as H:MM:SS for display.
        experiments = db.list_experiments()
        for exp in experiments:
            exp.last_task.timeout = str(
                datetime.timedelta(seconds=exp.last_task.timeout))
        return render_to_response("analysis/experiment.html",
                                  {"experiments": experiments},
                                  context_instance=RequestContext(request))

    # Get tasks for the provided experiment.
    rows = []
    tasks = db.list_tasks(limit=50, category="file", experiment=experiment_id)
    if tasks:
        for task in tasks:
            entry = task.to_dict()
            entry["timeout"] = time.strftime('%H:%M:%S', time.gmtime(entry["timeout"]))
            entry["target"] = os.path.basename(entry["target"])
            entry["sample"] = db.view_sample(entry["sample_id"]).to_dict()
            entry["pcap_file_id"] = ""
            entry["pcap_file_length"] = 0

            # Most recent report for this task; expose PCAP download info
            # when the report references a stored capture.
            report = results_db.analysis.find({"info.id": int(task.id)}, sort=[("_id", pymongo.DESCENDING)])
            if report.count() and "pcap_id" in report[0]["network"]:
                file_object = results_db.fs.files.find_one({"_id": ObjectId(report[0]["network"]["pcap_id"])})
                file_item = fs.get(ObjectId(file_object["_id"]))
                entry["pcap_file_id"] = report[0]["network"]["pcap_id"]
                entry["pcap_file_length"] = file_item.length

            if db.view_errors(task.id):
                entry["errors"] = True
            entry["experiment"] = task.experiment
            rows.append(entry)

    return render_to_response("analysis/index.html",
                              {"files": rows},
                              context_instance=RequestContext(request))
示例6: index
# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import view_sample [as 别名]
def index(request):
    """Render the analysis index.

    Lists up to 50 file and 50 URL tasks that have progressed past the
    pending/scheduling states; file rows additionally carry sample
    metadata and PCAP download info pulled from the results database.
    """
    db = Database()
    skip_states = [TASK_PENDING, TASK_SCHEDULED, TASK_UNSCHEDULED]
    file_tasks = db.list_tasks(limit=50, category="file", not_status=skip_states)
    url_tasks = db.list_tasks(limit=50, category="url", not_status=skip_states)

    file_rows = []
    if file_tasks:
        for task in file_tasks:
            entry = task.to_dict()
            entry["target"] = os.path.basename(entry["target"])
            entry["sample"] = db.view_sample(entry["sample_id"]).to_dict()
            entry["pcap_file_id"] = ""
            entry["pcap_file_length"] = 0

            # Latest report for this task; expose the stored PCAP when present.
            report = results_db.analysis.find({"info.id": int(task.id)}, sort=[("_id", pymongo.DESCENDING)])
            if report.count() and "pcap_id" in report[0]["network"]:
                file_object = results_db.fs.files.find_one({"_id": ObjectId(report[0]["network"]["pcap_id"])})
                file_item = fs.get(ObjectId(file_object["_id"]))
                entry["pcap_file_id"] = report[0]["network"]["pcap_id"]
                entry["pcap_file_length"] = file_item.length

            if db.view_errors(task.id):
                entry["errors"] = True
            entry["experiment"] = task.experiment
            file_rows.append(entry)

    url_rows = []
    if url_tasks:
        for task in url_tasks:
            entry = task.to_dict()
            if db.view_errors(task.id):
                entry["errors"] = True
            entry["experiment"] = task.experiment
            url_rows.append(entry)

    return render_to_response("analysis/index.html",
                              {"files": file_rows, "urls": url_rows},
                              context_instance=RequestContext(request))
示例7: instance
# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import view_sample [as 别名]
def instance(instance):
    """Fetch and report tasks queued for one processing instance.

    Uses the database's per-instance task queue (PostgreSQL only) to pull
    task IDs, generates reports for each, and marks them reported or
    failed. Stops after ``max_analysis_count`` tasks (forever when unset).

    @param instance: name of this processing instance.
    """
    maxcount = cfg.cuckoo.max_analysis_count
    count = 0
    db = Database()

    # There's a good chance MySQL also works, though.
    if db.engine.name != "postgresql":
        sys.exit("Due to SQL limitations utils/process2.py currently only "
                 "supports PostgreSQL.")

    try:
        while not maxcount or count != maxcount:
            task_id = db.processing_get_task(instance)

            # Wait a small while before trying to fetch a new task.
            if task_id is None:
                time.sleep(1)
                continue

            task = db.view_task(task_id)

            log.info("Task #%d: reporting task", task.id)

            if task.category == "file":
                sample = db.view_sample(task.sample_id)
                copy_path = os.path.join(CUCKOO_ROOT, "storage",
                                         "binaries", sample.sha256)
            else:
                copy_path = None

            try:
                process(task.target, copy_path, task=task.to_dict(),
                        report=True, auto=True)
                db.set_status(task.id, TASK_REPORTED)
            except Exception as e:
                log.exception("Task #%d: error reporting: %s", task.id, e)
                db.set_status(task.id, TASK_FAILED_PROCESSING)

            # BUGFIX: advance the counter so the max_analysis_count limit
            # can actually terminate the loop.
            count += 1
    except Exception as e:
        log.exception("Caught unknown exception: %s", e)
示例8: index
# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import view_sample [as 别名]
def index(request):
    """Render the analysis index, deriving station/file names from the
    target path of each file task.

    Lists up to 50 non-pending file and URL tasks; file rows carry the
    sample metadata plus ``file``/``station`` fields parsed from the
    target path.
    """
    db = Database()
    file_tasks = db.list_tasks(limit=50, category="file", not_status=TASK_PENDING)
    url_tasks = db.list_tasks(limit=50, category="url", not_status=TASK_PENDING)

    file_rows = []
    if file_tasks:
        for task in file_tasks:
            entry = task.to_dict()
            entry["sample"] = db.view_sample(entry["sample_id"]).to_dict()
            if db.view_errors(task.id):
                entry["errors"] = True

            # Obtain station and file name from the target path.
            # Assumes a layout like /root/<station>/<file> (deep) or
            # /root/<file> (shallow) — TODO confirm against the submitter.
            parts = entry["target"].split('/')
            if len(parts) > 3:
                entry["file"] = parts[3]
                entry["station"] = parts[2]
            else:
                entry["file"] = parts[2]
                entry["station"] = ""
            file_rows.append(entry)

    url_rows = []
    if url_tasks:
        for task in url_tasks:
            entry = task.to_dict()
            if db.view_errors(task.id):
                entry["errors"] = True
            url_rows.append(entry)

    return render_to_response("analysis/index.html",
                              {"files": file_rows, "urls": url_rows},
                              context_instance=RequestContext(request))
示例9: search
# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import view_sample [as 别名]
def search(request):
if "search" not in request.POST:
return render_to_response("analysis/search.html",
{"analyses": None,
"term": None,
"error": None},
context_instance=RequestContext(request))
search = request.POST["search"].strip()
if ":" in search:
term, value = search.split(":", 1)
else:
term, value = "", search
if term:
# Check on search size.
if len(value) < 3:
return render_to_response("analysis/search.html",
{"analyses": None,
"term": request.POST["search"],
"error": "Search term too short, minimum 3 characters required"},
context_instance=RequestContext(request))
# name:foo or name: foo
value = value.lstrip()
# Search logic.
if term == "name":
records = results_db.analysis.find({"target.file.name": {"$regex": value, "$options": "-i"}}).sort([["_id", -1]])
elif term == "type":
records = results_db.analysis.find({"target.file.type": {"$regex": value, "$options": "-i"}}).sort([["_id", -1]])
elif term == "string":
records = results_db.analysis.find({"strings": {"$regex": value, "$options": "-1"}}).sort([["_id", -1]])
elif term == "ssdeep":
records = results_db.analysis.find({"target.file.ssdeep": {"$regex": value, "$options": "-i"}}).sort([["_id", -1]])
elif term == "crc32":
records = results_db.analysis.find({"target.file.crc32": value}).sort([["_id", -1]])
elif term == "file":
records = results_db.analysis.find({"behavior.summary.files": {"$regex": value, "$options": "-i"}}).sort([["_id", -1]])
elif term == "key":
records = results_db.analysis.find({"behavior.summary.keys": {"$regex": value, "$options": "-i"}}).sort([["_id", -1]])
elif term == "mutex":
records = results_db.analysis.find({"behavior.summary.mutexes": {"$regex": value, "$options": "-i"}}).sort([["_id", -1]])
elif term == "domain":
records = results_db.analysis.find({"network.domains.domain": {"$regex": value, "$options": "-i"}}).sort([["_id", -1]])
elif term == "ip":
records = results_db.analysis.find({"network.hosts": value}).sort([["_id", -1]])
elif term == "signature":
records = results_db.analysis.find({"signatures.description": {"$regex": value, "$options": "-i"}}).sort([["_id", -1]])
elif term == "url":
records = results_db.analysis.find({"target.url": value}).sort([["_id", -1]])
elif term == "imphash":
records = results_db.analysis.find({"static.pe_imphash": value}).sort([["_id", -1]])
else:
return render_to_response("analysis/search.html",
{"analyses": None,
"term": request.POST["search"],
"error": "Invalid search term: %s" % term},
context_instance=RequestContext(request))
else:
value = value.lower()
if re.match(r"^([a-fA-F\d]{32})$", value):
records = results_db.analysis.find({"target.file.md5": value}).sort([["_id", -1]])
elif re.match(r"^([a-fA-F\d]{40})$", value):
records = results_db.analysis.find({"target.file.sha1": value}).sort([["_id", -1]])
elif re.match(r"^([a-fA-F\d]{64})$", value):
records = results_db.analysis.find({"target.file.sha256": value}).sort([["_id", -1]])
elif re.match(r"^([a-fA-F\d]{128})$", value):
records = results_db.analysis.find({"target.file.sha512": value}).sort([["_id", -1]])
else:
return render_to_response("analysis/search.html",
{"analyses": None,
"term": None,
"error": "Unable to recognize the search syntax"},
context_instance=RequestContext(request))
# Get data from cuckoo db.
db = Database()
analyses = []
for result in records:
new = db.view_task(result["info"]["id"])
if not new:
continue
new = new.to_dict()
if result["info"]["category"] == "file":
if new["sample_id"]:
sample = db.view_sample(new["sample_id"])
if sample:
new["sample"] = sample.to_dict()
analyses.append(new)
return render_to_response("analysis/search.html",
{"analyses": analyses,
"term": request.POST["search"],
"error": None},
#.........这里部分代码省略.........
示例10: autoprocess
# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import view_sample [as 别名]
def autoprocess(parallel=1):
    """Generate reports for completed tasks using a pool of workers.

    Keeps at most QUEUE_THRESHOLD report jobs in flight, marking each task
    TASK_REPORTED or TASK_FAILED_PROCESSING as its worker finishes. Stops
    once ``cfg.cuckoo.max_analysis_count`` reports have been produced
    (runs forever when the limit is unset).

    @param parallel: number of worker processes in the pool.
    """
    maxcount = cfg.cuckoo.max_analysis_count
    count = 0
    db = Database()
    pending_results = {}

    # Respawn a worker process every 1000 tasks just in case we
    # have any memory leaks.
    pool = multiprocessing.Pool(processes=parallel, initializer=init_worker,
                                maxtasksperchild=1000)

    try:
        while True:
            # Pending results maintenance. BUGFIX: iterate over a snapshot
            # because we pop entries while walking the mapping (dict.items()
            # is a live view on Python 3; the snapshot is harmless on 2).
            for tid, ar in list(pending_results.items()):
                if not ar.ready():
                    continue

                if ar.successful():
                    log.info("Task #%d: reports generation completed", tid)
                    db.set_status(tid, TASK_REPORTED)
                else:
                    try:
                        ar.get()
                    except Exception as e:
                        log.critical("Task #%d: exception in reports generation: %s", tid, e)
                        if hasattr(e, "traceback"):
                            log.info(e.traceback)

                    db.set_status(tid, TASK_FAILED_PROCESSING)

                pending_results.pop(tid)
                count += 1

            # Make sure our queue has plenty of tasks in it.
            if len(pending_results) >= QUEUE_THRESHOLD:
                time.sleep(1)
                continue

            # End of processing?
            if maxcount and count == maxcount:
                break

            # No need to submit further tasks for reporting as we've already
            # gotten to our maximum.
            if maxcount and count + len(pending_results) == maxcount:
                time.sleep(1)
                continue

            # Get at most queue threshold new tasks. We skip the first N tasks
            # where N is the amount of entries in the pending results list.
            # Given we update a tasks status right before we pop it off the
            # pending results list it is guaranteed that we skip over all of
            # the pending tasks in the database and no further.
            if maxcount:
                limit = maxcount - count - len(pending_results)
            else:
                limit = QUEUE_THRESHOLD

            tasks = db.list_tasks(status=TASK_COMPLETED,
                                  offset=len(pending_results),
                                  limit=min(limit, QUEUE_THRESHOLD),
                                  order_by=Task.completed_on)

            # No new tasks, we can wait a small while before we query again
            # for new tasks.
            if not tasks:
                time.sleep(5)
                continue

            for task in tasks:
                # Ensure that this task is not already in the pending list.
                # This is really mostly for debugging and should never happen.
                assert task.id not in pending_results

                log.info("Task #%d: queueing for reporting", task.id)

                if task.category == "file":
                    sample = db.view_sample(task.sample_id)
                    copy_path = os.path.join(CUCKOO_ROOT, "storage",
                                             "binaries", sample.sha256)
                else:
                    copy_path = None

                args = task.target, copy_path
                kwargs = dict(report=True, auto=True, task=task.to_dict())
                result = pool.apply_async(process_wrapper, args, kwargs)
                pending_results[task.id] = result
    except KeyboardInterrupt:
        pool.terminate()
        raise
    # BUGFIX: narrowed from a bare "except:" so SystemExit is not swallowed.
    except Exception:
        log.exception("Caught unknown exception")
    finally:
        # BUGFIX: Pool.join() requires a prior close() or terminate(); on
        # the normal-exit path the pool was never closed, making join()
        # raise ValueError. close() is a no-op after terminate().
        pool.close()
        pool.join()
示例11: AnalysisManager
# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import view_sample [as 别名]
class AnalysisManager(threading.Thread):
"""Analysis Manager.
This class handles the full analysis process for a given task. It takes
care of selecting the analysis machine, preparing the configuration and
interacting with the guest agent and analyzer components to launch and
complete the analysis and store, process and report its results.
"""
def __init__(self, task, error_queue):
"""Initialize the analysis manager thread.

@param task: task object containing the details for the analysis.
@param error_queue: queue the manager can push errors onto — presumably
consumed by the scheduler; confirm against the caller.
"""
threading.Thread.__init__(self)
# Task under analysis and the error-reporting channel.
self.task = task
self.errors = error_queue
# Cuckoo configuration handle.
self.cfg = Config()
# Filled in later: analysis results folder (init_storage) and the
# stored copy of the sample (store_file).
self.storage = ""
self.binary = ""
# Analysis machine acquired from the pool (see acquire_machine).
self.machine = None
self.db = Database()
# Network routing state; set up elsewhere — not visible in this view.
self.interface = None
self.rt_table = None
def init_storage(self):
    """Create the per-task analysis results folder.

    Returns False (aborting the analysis) when the folder already exists
    or cannot be created; True on success.
    """
    self.storage = os.path.join(CUCKOO_ROOT,
                                "storage",
                                "analyses",
                                str(self.task.id))

    # An existing folder means previous results would be overwritten and
    # lost, so the analysis must be aborted.
    if os.path.exists(self.storage):
        log.error("Task #{0}: Analysis results folder already exists at path '{1}', "
                  "analysis aborted".format(self.task.id, self.storage))
        return False

    # If we're not able to create the analysis storage folder, we have to
    # abort the analysis.
    try:
        create_folder(folder=self.storage)
    except CuckooOperationalError:
        log.error("Task #{0}: Unable to create analysis folder {1}".format(self.task.id, self.storage))
        return False

    return True
def check_file(self):
    """Check that the target file was not modified after submission.

    Recomputes the target's SHA-256 and compares it with the hash stored
    for the sample; returns False on mismatch, True otherwise.
    """
    expected = self.db.view_sample(self.task.sample_id)
    current_sha256 = File(self.task.target).get_sha256()

    if current_sha256 != expected.sha256:
        log.error("Task #{0}: Target file has been modified after submission: "
                  "'{1}'".format(self.task.id, self.task.target))
        return False

    return True
def store_file(self):
    """Store a copy of the file being analyzed.

    Copies the target into the binaries store (keyed by SHA-256) unless a
    copy already exists, then links/copies it into the analysis folder as
    "binary". Returns False (aborting the analysis) when the target is
    missing or cannot be copied.
    """
    if not os.path.exists(self.task.target):
        log.error("Task #{0}: The file to analyze does not exist at path '{1}', "
                  "analysis aborted".format(self.task.id, self.task.target))
        return False

    sha256 = File(self.task.target).get_sha256()
    self.binary = os.path.join(CUCKOO_ROOT, "storage", "binaries", sha256)

    if os.path.exists(self.binary):
        log.info("Task #{0}: File already exists at '{1}'".format(self.task.id, self.binary))
    else:
        # TODO: do we really need to abort the analysis in case we are not
        # able to store a copy of the file?
        try:
            shutil.copy(self.task.target, self.binary)
        except (IOError, shutil.Error) as e:
            log.error("Task #{0}: Unable to store file from '{1}' to '{2}', "
                      "analysis aborted".format(self.task.id, self.task.target, self.binary))
            return False

    # Expose the stored binary inside the analysis folder; prefer a
    # symlink where the platform supports it. A failure here is logged
    # but does not abort the analysis.
    try:
        new_binary_path = os.path.join(self.storage, "binary")
        if hasattr(os, "symlink"):
            os.symlink(self.binary, new_binary_path)
        else:
            shutil.copy(self.binary, new_binary_path)
    except (AttributeError, OSError) as e:
        log.error("Task #{0}: Unable to create symlink/copy from '{1}' to "
                  "'{2}': {3}".format(self.task.id, self.binary, self.storage, e))

    return True
def acquire_machine(self):
"""Acquire an analysis machine from the pool of available ones."""
machine = None
# Start a loop to acquire the a machine to run the analysis on.
while True:
#.........这里部分代码省略.........
示例12: search
# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import view_sample [as 别名]
#.........这里部分代码省略.........
records = results_db.analysis.find({"behavior.summary.mutexes": {"$regex": value, "$options": "-i"}}).sort([["_id", -1]])
elif term == "domain":
records = results_db.analysis.find({"network.domains.domain": {"$regex": value, "$options": "-i"}}).sort([["_id", -1]])
elif term == "ip":
records = results_db.analysis.find({"network.hosts.ip": value}).sort([["_id", -1]])
elif term == "signature":
records = results_db.analysis.find({"signatures.description": {"$regex": value, "$options": "-i"}}).sort([["_id", -1]])
elif term == "signame":
records = results_db.analysis.find({"signatures.name": {"$regex": value, "$options": "-i"}}).sort([["_id", -1]])
elif term == "url":
records = results_db.analysis.find({"target.url": value}).sort([["_id", -1]])
elif term == "imphash":
records = results_db.analysis.find({"static.pe_imphash": value}).sort([["_id", -1]])
elif term == "surialert":
records = results_db.analysis.find({"suricata.alerts": {"$regex" : value, "$options" : "-1"}}).sort([["_id", -1]])
elif term == "surihttp":
records = results_db.analysis.find({"suricata.http": {"$regex" : value, "$options" : "-1"}}).sort([["_id", -1]])
elif term == "suritls":
records = results_db.analysis.find({"suricata.tls": {"$regex" : value, "$options" : "-1"}}).sort([["_id", -1]])
elif term == "clamav":
records = results_db.analysis.find({"target.file.clamav": {"$regex": value, "$options": "-i"}}).sort([["_id", -1]])
elif term == "yaraname":
records = results_db.analysis.find({"target.file.yara.name": {"$regex": value, "$options": "-i"}}).sort([["_id", -1]])
elif term == "strings":
records = results_db.analysis.find({"strings": {"$regex": value, "$options": "-i"}}).sort([["_id", -1]])
elif term == "virustotal":
records = results_db.analysis.find({"virustotal.results.sig": {"$regex": value, "$options": "-i"}}).sort([["_id", -1]])
else:
return render_to_response("analysis/search.html",
{"analyses": None,
"term": request.POST["search"],
"error": "Invalid search term: %s" % term},
context_instance=RequestContext(request))
else:
# hash matching is lowercase and case sensitive
value = value.lower()
if re.match(r"^([a-fA-F\d]{32})$", value):
records = results_db.analysis.find({"target.file.md5": value}).sort([["_id", -1]])
elif re.match(r"^([a-fA-F\d]{40})$", value):
records = results_db.analysis.find({"target.file.sha1": value}).sort([["_id", -1]])
elif re.match(r"^([a-fA-F\d]{64})$", value):
records = results_db.analysis.find({"target.file.sha256": value}).sort([["_id", -1]])
elif re.match(r"^([a-fA-F\d]{128})$", value):
records = results_db.analysis.find({"target.file.sha512": value}).sort([["_id", -1]])
else:
return render_to_response("analysis/search.html",
{"analyses": None,
"term": None,
"error": "Unable to recognize the search syntax"},
context_instance=RequestContext(request))
# Get data from cuckoo db.
db = Database()
analyses = []
for result in records:
new = db.view_task(result["info"]["id"])
if not new:
continue
new = new.to_dict()
if result["info"]["category"] == "file":
if new["sample_id"]:
sample = db.view_sample(new["sample_id"])
if sample:
new["sample"] = sample.to_dict()
filename = os.path.basename(new["target"])
new.update({"filename": filename})
rtmp = results_db.analysis.find_one({"info.id": int(new["id"])},{"virustotal_summary": 1, "suri_tls_cnt": 1, "suri_alert_cnt": 1, "suri_http_cnt": 1, "suri_file_cnt": 1, "mlist_cnt": 1},sort=[("_id", pymongo.DESCENDING)])
if rtmp:
if rtmp.has_key("virustotal_summary") and rtmp["virustotal_summary"]:
new["virustotal_summary"] = rtmp["virustotal_summary"]
if rtmp.has_key("suri_tls_cnt") and rtmp["suri_tls_cnt"]:
new["suri_tls_cnt"] = rtmp["suri_tls_cnt"]
if rtmp.has_key("suri_alert_cnt") and rtmp["suri_alert_cnt"]:
new["suri_alert_cnt"] = rtmp["suri_alert_cnt"]
if rtmp.has_key("suri_file_cnt") and rtmp["suri_file_cnt"]:
new["suri_file_cnt"] = rtmp["suri_file_cnt"]
if rtmp.has_key("suri_http_cnt") and rtmp["suri_http_cnt"]:
new["suri_http_cnt"] = rtmp["suri_http_cnt"]
if rtmp.has_key("mlist_cnt") and rtmp["mlist_cnt"]:
new["mlist_cnt"] = rtmp["mlist_cnt"]
if settings.MOLOCH_ENABLED:
if settings.MOLOCH_BASE[-1] != "/":
settings.MOLOCH_BASE = settings.MOLOCH_BASE + "/"
new["moloch_url"] = settings.MOLOCH_BASE + "?date=-1&expression=tags" + quote("\x3d\x3d\x22%s\x3a%s\x22" % (settings.MOLOCH_NODE,new["id"]),safe='')
analyses.append(new)
return render_to_response("analysis/search.html",
{"analyses": analyses,
"term": request.POST["search"],
"error": None},
context_instance=RequestContext(request))
else:
return render_to_response("analysis/search.html",
{"analyses": None,
"term": None,
"error": None},
context_instance=RequestContext(request))
示例13: index
# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import view_sample [as 别名]
def index(request, page=1):
page = int(page)
db = Database()
if page == 0:
page = 1
off = (page - 1) * TASK_LIMIT
tasks_files = db.list_tasks(limit=TASK_LIMIT, offset=off, category="file", not_status=TASK_PENDING)
tasks_urls = db.list_tasks(limit=TASK_LIMIT, offset=off, category="url", not_status=TASK_PENDING)
analyses_files = []
analyses_urls = []
# Vars to define when to show Next/Previous buttons
paging = dict()
paging["show_file_next"] = "show"
paging["show_url_next"] = "show"
paging["next_page"] = str(page + 1)
paging["prev_page"] = str(page - 1)
# On a fresh install, we need handle where there are 0 tasks.
buf = db.list_tasks(limit=1, category="file", not_status=TASK_PENDING, order_by="added_on asc")
if len(buf) == 1:
first_file = db.list_tasks(limit=1, category="file", not_status=TASK_PENDING, order_by="added_on asc")[0].to_dict()["id"]
paging["show_file_prev"] = "show"
else:
paging["show_file_prev"] = "hide"
buf = db.list_tasks(limit=1, category="url", not_status=TASK_PENDING, order_by="added_on asc")
if len(buf) == 1:
first_url = db.list_tasks(limit=1, category="url", not_status=TASK_PENDING, order_by="added_on asc")[0].to_dict()["id"]
paging["show_url_prev"] = "show"
else:
paging["show_url_prev"] = "hide"
if tasks_files:
for task in tasks_files:
new = task.to_dict()
if new["id"] == first_file:
paging["show_file_next"] = "hide"
if page <= 1:
paging["show_file_prev"] = "hide"
new["sample"] = db.view_sample(new["sample_id"]).to_dict()
filename = os.path.basename(new["target"])
new.update({"filename": filename})
if db.view_errors(task.id):
new["errors"] = True
rtmp = results_db.analysis.find_one({"info.id": int(new["id"])},{"virustotal_summary": 1, "suri_tls_cnt": 1, "suri_alert_cnt": 1, "suri_http_cnt": 1, "suri_file_cnt": 1},sort=[("_id", pymongo.DESCENDING)])
if rtmp:
if rtmp.has_key("virustotal_summary") and rtmp["virustotal_summary"]:
new["virustotal_summary"] = rtmp["virustotal_summary"]
if rtmp.has_key("suri_tls_cnt") and rtmp["suri_tls_cnt"]:
new["suri_tls_cnt"] = rtmp["suri_tls_cnt"]
if rtmp.has_key("suri_alert_cnt") and rtmp["suri_alert_cnt"]:
new["suri_alert_cnt"] = rtmp["suri_alert_cnt"]
if rtmp.has_key("suri_file_cnt") and rtmp["suri_file_cnt"]:
new["suri_file_cnt"] = rtmp["suri_file_cnt"]
if rtmp.has_key("suri_http_cnt") and rtmp["suri_http_cnt"]:
new["suri_http_cnt"] = rtmp["suri_http_cnt"]
if settings.MOLOCH_ENABLED:
if settings.MOLOCH_BASE[-1] != "/":
settings.MOLOCH_BASE = settings.MOLOCH_BASE + "/"
new["moloch_url"] = settings.MOLOCH_BASE + "?date=-1&expression=tags" + quote("\x3d\x3d\x22%s\x3a%s\x22" % (settings.MOLOCH_NODE,new["id"]),safe='')
analyses_files.append(new)
else:
paging["show_file_next"] = "hide"
if tasks_urls:
for task in tasks_urls:
new = task.to_dict()
if new["id"] == first_url:
paging["show_url_next"] = "hide"
if page <= 1:
paging["show_url_prev"] = "hide"
if db.view_errors(task.id):
new["errors"] = True
rtmp = results_db.analysis.find_one({"info.id": int(new["id"])},{"virustotal_summary": 1, "suri_tls_cnt": 1, "suri_alert_cnt": 1, "suri_http_cnt": 1, "suri_file_cnt": 1},sort=[("_id", pymongo.DESCENDING)])
if rtmp:
if rtmp.has_key("virustotal_summary") and rtmp["virustotal_summary"]:
new["virustotal_summary"] = rtmp["virustotal_summary"]
if rtmp.has_key("suri_tls_cnt") and rtmp["suri_tls_cnt"]:
new["suri_tls_cnt"] = rtmp["suri_tls_cnt"]
if rtmp.has_key("suri_alert_cnt") and rtmp["suri_alert_cnt"]:
new["suri_alert_cnt"] = rtmp["suri_alert_cnt"]
if rtmp.has_key("suri_file_cnt") and rtmp["suri_file_cnt"]:
new["suri_file_cnt"] = rtmp["suri_file_cnt"]
if rtmp.has_key("suri_http_cnt") and rtmp["suri_http_cnt"]:
new["suri_http_cnt"] = rtmp["suri_http_cnt"]
if settings.MOLOCH_ENABLED:
if settings.MOLOCH_BASE[-1] != "/":
settings.MOLOCH_BASE = settings.MOLOCH_BASE + "/"
new["moloch_url"] = settings.MOLOCH_BASE + "?date=-1&expression=tags" + quote("\x3d\x3d\x22%s\x3a%s\x22" % (settings.MOLOCH_NODE,new["id"]),safe='')
analyses_urls.append(new)
else:
#.........这里部分代码省略.........
示例14: search
# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import view_sample [as 别名]
#.........这里部分代码省略.........
"term": request.POST["search"],
"error": "Invalid search term: %s" % term},
context_instance=RequestContext(request))
else:
value = value.lower()
if re.match(r"^([a-fA-F\d]{32})$", value):
result = es.search(
index="cuckoo",
doc_type="analysis",
q='target.file.md5 : "' + value + '"'
)
records = []
records.append(result['hits']['hits'][0]['_source'])
elif re.match(r"^([a-fA-F\d]{40})$", value):
result = es.search(
index="cuckoo",
doc_type="analysis",
q='target.file.sha1 : "' + value + '"'
)
records = []
records.append(result['hits']['hits'][0]['_source'])
elif re.match(r"^([a-fA-F\d]{64})$", value):
result = es.search(
index="cuckoo",
doc_type="analysis",
q='target.file.sha256 : "' + value + '"'
)
records = []
records.append(result['hits']['hits'][0]['_source'])
elif re.match(r"^([a-fA-F\d]{128})$", value):
result = es.search(
index="cuckoo",
doc_type="analysis",
body={"query":
{
"term" : {
"target.file.sha512" : value
}
}
}
)
records = []
records.append(result['hits']['hits'][0]['_source'])
else:
return render_to_response("analysis/search.html",
{"analyses": None,
"term": None,
"error": "Unable to recognize the search syntax"},
context_instance=RequestContext(request))
# Get data from cuckoo db.
db = Database()
analyses = []
print result
for result in records:
new = db.view_task(result["info"]["id"])
if not new:
continue
new = new.to_dict()
if result["info"]["category"] == "file":
if new["sample_id"]:
sample = db.view_sample(new["sample_id"])
if sample:
new["sample"] = sample.to_dict()
analyses.append(new)
return render_to_response("analysis/search.html",
{"analyses": analyses,
"term": request.POST["search"],
"error": None},
context_instance=RequestContext(request))
else:
return render_to_response("analysis/search.html",
{"analyses": None,
"term": None,
"error": None},
context_instance=RequestContext(request))
示例15: index
# 需要导入模块: from lib.cuckoo.core.database import Database [as 别名]
# 或者: from lib.cuckoo.core.database.Database import view_sample [as 别名]
# Mongo projection: only the summary fields the index page displays.
_INDEX_PROJECTION = {
    "virustotal_summary": 1,
    "suri_tls_cnt": 1,
    "suri_alert_cnt": 1,
    "suri_http_cnt": 1,
    "suri_file_cnt": 1,
    "suricata.http_log_id": 1,
    "suricata.tls_log_id": 1,
    "suricata.fast_log_id": 1,
    "suricata.file_log_id": 1,
    "mlist_cnt": 1,
    "network.pcap_id": 1,
}

# Flat report fields copied verbatim into the task dict when present.
_FLAT_REPORT_KEYS = (
    "virustotal_summary",
    "suri_tls_cnt",
    "suri_alert_cnt",
    "suri_file_cnt",
    "suri_http_cnt",
    "mlist_cnt",
)

# Suricata log-id fields, exposed as "suricata_<name>" on the task dict.
_SURICATA_LOG_KEYS = ("http_log_id", "tls_log_id", "fast_log_id", "file_log_id")


def _attach_report_meta(new):
    """Enrich the task dict *new* in place with report metadata from Mongo.

    Looks up the newest analysis document for this task id and copies the
    summary counters, Suricata log ids, pcap GridFS id and — when Moloch
    integration is enabled — a pivot URL. Missing/falsy fields are simply
    skipped, matching the page template's optional rendering.
    """
    rtmp = results_db.analysis.find_one(
        {"info.id": int(new["id"])},
        _INDEX_PROJECTION,
        sort=[("_id", pymongo.DESCENDING)],
    )
    if rtmp:
        for key in _FLAT_REPORT_KEYS:
            if rtmp.get(key):
                new[key] = rtmp[key]

        suricata = rtmp.get("suricata")
        if suricata:
            for key in _SURICATA_LOG_KEYS:
                if suricata.get(key):
                    new["suricata_%s" % key] = suricata[key]

        network = rtmp.get("network")
        if network and network.get("pcap_id"):
            new["pcap_id"] = network["pcap_id"]

    if settings.MOLOCH_ENABLED:
        # Build the base URL locally instead of mutating the global
        # settings object from a request handler (the old code rewrote
        # settings.MOLOCH_BASE on every request).
        base = settings.MOLOCH_BASE
        if base[-1] != "/":
            base += "/"
        new["moloch_url"] = base + "?date=-1&expression=tags" + quote(
            "\x3d\x3d\x22%s\x3a%s\x22" % (settings.MOLOCH_NODE, new["id"]),
            safe='')


def index(request):
    """Render the analysis index page.

    Lists the 50 most recent non-pending file and URL tasks, each enriched
    with its sample info (files only), an error flag, and report metadata
    pulled from the Mongo results collection via _attach_report_meta().
    """
    db = Database()
    tasks_files = db.list_tasks(limit=50, category="file", not_status=TASK_PENDING)
    tasks_urls = db.list_tasks(limit=50, category="url", not_status=TASK_PENDING)

    analyses_files = []
    analyses_urls = []

    for task in tasks_files or []:
        new = task.to_dict()
        # view_sample() may return None (e.g. the sample row was pruned);
        # guard instead of 500ing the whole index page on AttributeError.
        sample = db.view_sample(new["sample_id"]) if new["sample_id"] else None
        if sample:
            new["sample"] = sample.to_dict()
        if db.view_errors(task.id):
            new["errors"] = True
        _attach_report_meta(new)
        analyses_files.append(new)

    for task in tasks_urls or []:
        new = task.to_dict()
        if db.view_errors(task.id):
            new["errors"] = True
        _attach_report_meta(new)
        analyses_urls.append(new)

    return render_to_response("analysis/index.html",
                              {"files": analyses_files, "urls": analyses_urls},
                              context_instance=RequestContext(request))