本文整理汇总了Python中cuckoo.core.database.Database类的典型用法代码示例。如果您正苦于以下问题:Python Database类的具体用法?Python Database怎么用?Python Database使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Database类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: process_tasks
def process_tasks(instance, maxcount, timeout):
    """Poll the processing queue for *instance* and report finished tasks
    until process_check_stop() signals a stop (count/maxcount/deadline).

    @param instance: processing instance name to fetch tasks for.
    @param maxcount: maximum number of tasks to process (falsy = unlimited).
    @param timeout: optional overall timeout in seconds.
    """
    processed = 0
    # A timeout of 0/None means "no deadline"; process_check_stop() treats
    # endtime == 0 accordingly.
    deadline = int(time.time() + timeout) if timeout else 0
    db = Database()
    try:
        while process_check_stop(processed, maxcount, deadline):
            task_id = db.processing_get_task(instance)
            if task_id is None:
                # No work available; back off briefly before polling again.
                time.sleep(1)
                continue
            task = db.view_task(task_id)
            log.info("Task #%d: reporting task", task.id)
            process_task(task.to_dict())
            processed += 1
    except Exception as e:
        log.exception("Caught unknown exception: %s", e)
示例2: test_import_confirm
def test_import_confirm(self, p):
    """Import a legacy (1.1) analysis directory after the user confirms,
    then verify the database engine, migrated logs, and task count.

    Falls back to asserting the "manual SQL dump" error on non-Linux
    platforms, where the automated import is unsupported.
    """
    set_cwd(tempfile.mkdtemp())
    p.return_value = True
    dirpath = init_legacy_analyses()
    os.makedirs(os.path.join(dirpath, "lib", "cuckoo", "common"))
    # Use context managers so file handles are closed deterministically
    # (the original bare open(...).write()/read() calls leaked handles).
    with open(os.path.join(
            dirpath, "lib", "cuckoo", "common", "constants.py"), "wb") as f:
        f.write(constants_11_py)
    shutil.copytree(
        "tests/files/conf/110_plain", os.path.join(dirpath, "conf")
    )
    filepath = os.path.join(dirpath, "conf", "cuckoo.conf")
    with open(filepath, "rb") as f:
        buf = f.read()
    with open(filepath, "wb") as f:
        f.write(buf.replace(
            "connection =", "connection = %s" % self.URI
        ))
    try:
        main.main(
            ("--cwd", cwd(), "import", dirpath), standalone_mode=False
        )
    except CuckooOperationalError as e:
        # On non-Linux platforms the import requires a manual SQL dump.
        assert "SQL database dump as the command" in e.message
        assert not is_linux()
        return
    db = Database()
    db.connect()
    assert db.engine.name == self.ENGINE
    # The legacy analysis log must have been copied into the new CWD.
    with open(cwd("logs", "a.txt", analysis=1), "rb") as f:
        assert f.read() == "a"
    assert config("cuckoo:database:connection") == self.URI
    assert db.count_tasks() == 2
示例3: test_import_noconfirm
def test_import_noconfirm(self, p):
    """Import a legacy (1.1) analysis directory with mixed prompt answers
    (first True, then False) and verify the migrated state.
    """
    set_cwd(tempfile.mkdtemp())
    p.side_effect = True, False
    dirpath = init_legacy_analyses()
    os.makedirs(os.path.join(dirpath, "lib", "cuckoo", "common"))
    # Use context managers so file handles are closed deterministically
    # (the original bare open(...).write()/read() calls leaked handles).
    with open(os.path.join(
            dirpath, "lib", "cuckoo", "common", "constants.py"), "wb") as f:
        f.write(constants_11_py)
    shutil.copytree(
        "tests/files/conf/110_plain", os.path.join(dirpath, "conf")
    )
    filepath = os.path.join(dirpath, "conf", "cuckoo.conf")
    with open(filepath, "rb") as f:
        buf = f.read()
    with open(filepath, "wb") as f:
        f.write(buf.replace(
            "connection =", "connection = %s" % self.URI
        ))
    main.main(
        ("--cwd", cwd(), "import", dirpath), standalone_mode=False
    )
    db = Database()
    db.connect()
    assert db.engine.name == self.ENGINE
    # The legacy analysis log must have been copied into the new CWD.
    with open(cwd("logs", "a.txt", analysis=1), "rb") as f:
        assert f.read() == "a"
    assert config("cuckoo:database:connection") == self.URI
    assert db.count_tasks() == 2
示例4: process_task_range
def process_task_range(tasks):
    """Process a comma-separated list of task IDs and/or ID ranges
    (e.g. "1,3,5-10") for which an analysis directory exists on disk.

    @param tasks: string of comma-separated IDs and "start-end" ranges.
    """
    db = Database()
    task_ids = set()
    for entry in tasks.split(","):
        if entry.isdigit():
            task_ids.add(int(entry))
        elif entry.count("-") == 1:
            start, end = entry.split("-")
            if start.isdigit() and end.isdigit():
                # Ranges are inclusive on both ends.
                task_ids.update(range(int(start), int(end)+1))
            else:
                log.warning("Invalid range provided: %s", entry)
        elif entry:
            # Non-empty entries that are neither a digit nor a range.
            log.warning("Invalid range provided: %s", entry)
    for task_id in sorted(task_ids):
        task = db.view_task(task_id)
        if task:
            task = task.to_dict()
        else:
            # Tasks missing from the database get a minimal stub dict so
            # their on-disk analysis can still be (re)processed.
            task = {
                "id": task_id,
                "category": "file",
                "target": "",
                "options": {},
                "package": None,
                "custom": None,
            }
        if os.path.isdir(cwd(analysis=task_id)):
            process_task(Dictionary(task))
示例5: test_connect_default
def test_connect_default(p, q):
    """Connecting with a default configuration must use the sqlite file in
    the CWD and the default 60 second pool timeout."""
    set_cwd(tempfile.mkdtemp())
    cuckoo_create()
    database = Database()
    database.connect(create=False)
    expected_uri = "sqlite:///%s" % cwd("cuckoo.db")
    q.assert_called_once_with(
        expected_uri,
        connect_args={"check_same_thread": False}
    )
    assert database.engine.pool_timeout == 60
示例6: __init__
def __init__(self):
    """Initialize the machinery manager state and reset the machines
    table so it can be repopulated from the configuration file."""
    # Configuration options are supplied later via set_options().
    self.options = None
    self.db = Database()
    # Machine table is cleaned to be filled from configuration file
    # at each start.
    self.db.clean_machines()
示例7: test_connect_pg
def test_connect_pg(p, q):
    """A configured PostgreSQL connection string must be passed through
    verbatim, with SSL disabled and the configured pool timeout applied."""
    set_cwd(tempfile.mkdtemp())
    uri = "postgresql://foo:[email protected]/foobar"
    cuckoo_create(cfg={
        "cuckoo": {
            "database": {
                "connection": uri,
                "timeout": 120,
            },
        },
    })
    database = Database()
    database.connect(create=False)
    q.assert_called_once_with(
        uri, connect_args={"sslmode": "disable"}
    )
    assert database.engine.pool_timeout == 120
示例8: process_tasks
def process_tasks(instance, maxcount):
    """Keep fetching and reporting tasks for *instance* until *maxcount*
    tasks have been processed (run forever when maxcount is falsy).

    @param instance: processing instance name to fetch tasks for.
    @param maxcount: number of tasks to process before returning.
    """
    db = Database()
    done = 0
    try:
        while not maxcount or done != maxcount:
            task_id = db.processing_get_task(instance)
            if task_id is None:
                # Nothing queued yet; back off briefly before retrying.
                time.sleep(1)
                continue
            task = db.view_task(task_id)
            log.info("Task #%d: reporting task", task.id)
            process_task(task.to_dict())
            done += 1
    except Exception as e:
        log.exception("Caught unknown exception: %s", e)
示例9: process_task
def process_task(task):
    """Generate reports for a single task and record the outcome.

    Runs the processing/reporting pipeline for *task* and sets its database
    status to TASK_REPORTED on success or TASK_FAILED_PROCESSING on error.

    @param task: task dictionary (id, target, category, package, options,
        custom, and optionally sample_id).
    """
    db = Database()
    try:
        # Route this task's log records to its per-task log file.
        task_log_start(task["id"])
        logger(
            "Starting task reporting",
            action="task.report", status="pending",
            target=task["target"], category=task["category"],
            package=task["package"], options=emit_options(task["options"]),
            custom=task["custom"]
        )
        if task["category"] == "file" and task.get("sample_id"):
            # File tasks keep a copy of the sample in the binaries store,
            # addressed by its sha256 hash.
            sample = db.view_sample(task["sample_id"])
            copy_path = cwd("storage", "binaries", sample.sha256)
        else:
            copy_path = None
        try:
            process(task["target"], copy_path, task)
            db.set_status(task["id"], TASK_REPORTED)
        except Exception as e:
            # Processing failures are recorded on the task rather than
            # propagated, so one bad task doesn't stop the instance.
            log.exception("Task #%d: error reporting: %s", task["id"], e)
            db.set_status(task["id"], TASK_FAILED_PROCESSING)
        log.info("Task #%d: reports generation completed", task["id"], extra={
            "action": "task.report", "status": "success",
        })
    except Exception as e:
        log.exception("Caught unknown exception: %s", e)
    finally:
        # Always detach the per-task log handler, even on failure.
        task_log_stop(task["id"])
示例10: __init__
def __init__(self, task_id, error_queue):
    """Initialize the analysis manager thread for one task.

    @param task_id: ID of the task to analyze; resolved to a task object
        through the database.
    @param error_queue: queue used to propagate errors back to the caller.
    """
    threading.Thread.__init__(self)
    self.errors = error_queue
    self.cfg = Config()
    # Filesystem paths for this analysis; filled in later during setup.
    self.storage = ""
    self.binary = ""
    self.storage_binary = ""
    # The machine assigned to this analysis (selected later).
    self.machine = None
    self.db = Database()
    self.task = self.db.view_task(task_id)
    self.guest_manager = None
    # Network routing state for this analysis (route name, network
    # interface, routing table) — configured later.
    self.route = None
    self.interface = None
    self.rt_table = None
示例11: cuckoo_machine
def cuckoo_machine(vmname, action, ip, platform, options, tags,
                   interface, snapshot, resultserver):
    """Add or delete a virtual machine, updating both the database and the
    machinery section of the configuration files.

    @param vmname: name (and label) of the virtual machine.
    @param action: "add" or "delete".
    @param ip: IP address of the VM (required for "add").
    @param resultserver: optional "ip:port" result server override;
        falls back to the global resultserver configuration.
    """
    db = Database()
    cfg = Config.from_confdir(cwd("conf"))
    machinery = cfg["cuckoo"]["cuckoo"]["machinery"]
    machines = cfg[machinery][machinery]["machines"]
    if action == "add":
        if not ip:
            sys.exit("You have to specify a legitimate IP address for --add.")
        # Reject duplicates both in the database and in the config file.
        if db.view_machine(vmname):
            sys.exit("A Virtual Machine with this name already exists!")
        if vmname in machines:
            sys.exit("A Virtual Machine with this name already exists!")
        if resultserver and resultserver.count(":") == 1:
            resultserver_ip, resultserver_port = resultserver.split(":")
            resultserver_port = int(resultserver_port)
        else:
            resultserver_ip = cfg["cuckoo"]["resultserver"]["ip"]
            resultserver_port = cfg["cuckoo"]["resultserver"]["port"]
        machines.append(vmname)
        cfg[machinery][vmname] = {
            "label": vmname,
            "platform": platform,
            "ip": ip,
            "options": options,
            "snapshot": snapshot,
            "interface": interface,
            "resultserver_ip": resultserver_ip,
            "resultserver_port": resultserver_port,
            "tags": tags,
        }
        db.add_machine(
            vmname, vmname, ip, platform, options, tags, interface, snapshot,
            resultserver_ip, int(resultserver_port)
        )
        # A freshly added machine starts out unlocked / available.
        db.unlock_machine(vmname)
    if action == "delete":
        # TODO Add a db.del_machine() function for runtime modification.
        if vmname not in machines:
            sys.exit("A Virtual Machine with this name doesn't exist!")
        machines.remove(vmname)
        cfg[machinery].pop(vmname)
    # Persist the updated configuration for both add and delete.
    write_cuckoo_conf(cfg=cfg)
示例12: init_tasks
def init_tasks():
    """Check tasks and reschedule uncompleted ones."""
    db = Database()
    log.debug("Checking for locked tasks..")
    # Tasks still marked "running" were interrupted by a previous shutdown.
    for task in db.list_tasks(status=TASK_RUNNING):
        if not config("cuckoo:cuckoo:reschedule"):
            # Rescheduling disabled: mark the interrupted task as failed.
            db.set_status(task.id, TASK_FAILED_ANALYSIS)
            log.info(
                "Updated running task ID %s status to failed_analysis",
                task.id
            )
        else:
            new_task_id = db.reschedule(task.id)
            log.info(
                "Rescheduled task with ID %s and target %s: task #%s",
                task.id, task.target, new_task_id
            )
    log.debug("Checking for pending service tasks..")
    # Leftover pending service tasks can never run again; fail them.
    for task in db.list_tasks(status=TASK_PENDING, category="service"):
        db.set_status(task.id, TASK_FAILED_ANALYSIS)
示例13: Scheduler
class Scheduler(object):
"""Tasks Scheduler.
This class is responsible for the main execution loop of the tool. It
prepares the analysis machines and keep waiting and loading for new
analysis tasks.
Whenever a new task is available, it launches AnalysisManager which will
take care of running the full analysis process and operating with the
assigned analysis machine.
"""
def __init__(self, maxcount=None):
    """Initialize scheduler state.

    @param maxcount: maximum number of analyses to run before stopping
        (None means run indefinitely).
    """
    # Main-loop flag; cleared to request a shutdown.
    self.running = True
    self.cfg = Config()
    self.db = Database()
    self.maxcount = maxcount
    self.total_analysis_count = 0
def initialize(self):
    """Initialize the machine manager: create the VM startup lock, load and
    initialize the configured machinery plugin, sanity-check the available
    machines, and drop stale packet forwarding rules."""
    global machinery, machine_lock
    machinery_name = self.cfg.cuckoo.machinery
    max_vmstartup_count = self.cfg.cuckoo.max_vmstartup_count
    # Limit concurrent VM startups: a Semaphore when a maximum is
    # configured, otherwise a plain Lock (one VM booting at a time).
    if max_vmstartup_count:
        machine_lock = threading.Semaphore(max_vmstartup_count)
    else:
        machine_lock = threading.Lock()
    log.info("Using \"%s\" as machine manager", machinery_name, extra={
        "action": "init.machinery",
        "status": "success",
        "machinery": machinery_name,
    })
    # Initialize the machine manager.
    machinery = cuckoo.machinery.plugins[machinery_name]()
    # Provide a dictionary with the configuration options to the
    # machine manager instance.
    machinery.set_options(Config(machinery_name))
    # Initialize the machine manager.
    try:
        machinery.initialize(machinery_name)
    except CuckooMachineError as e:
        raise CuckooCriticalError("Error initializing machines: %s" % e)
    # At this point all the available machines should have been identified
    # and added to the list. If none were found, Cuckoo aborts the
    # execution. TODO In the future we'll probably want get rid of this.
    if not machinery.machines():
        raise CuckooCriticalError("No machines available.")
    log.info("Loaded %s machine/s", len(machinery.machines()), extra={
        "action": "init.machines",
        "status": "success",
        "count": len(machinery.machines()),
    })
    if len(machinery.machines()) > 1 and self.db.engine.name == "sqlite":
        log.warning("As you've configured Cuckoo to execute parallel "
                    "analyses, we recommend you to switch to a MySQL or "
                    "a PostgreSQL database as SQLite might cause some "
                    "issues.")
    if len(machinery.machines()) > 4 and self.cfg.cuckoo.process_results:
        log.warning("When running many virtual machines it is recommended "
                    "to process the results in separate 'cuckoo process' "
                    "instances to increase throughput and stability. "
                    "Please read the documentation about the "
                    "`Processing Utility`.")
    # Drop all existing packet forwarding rules for each VM. Just in case
    # Cuckoo was terminated for some reason and various forwarding rules
    # have thus not been dropped yet.
    for machine in machinery.machines():
        if not machine.interface:
            log.info("Unable to determine the network interface for VM "
                     "with name %s, Cuckoo will not be able to give it "
                     "full internet access or route it through a VPN! "
                     "Please define a default network interface for the "
                     "machinery or define a network interface for each "
                     "VM.", machine.name)
            continue
        # Drop forwarding rule to each VPN.
        if config("routing:vpn:enabled"):
            for vpn in config("routing:vpn:vpns"):
                rooter(
                    "forward_disable", machine.interface,
                    config("routing:%s:interface" % vpn), machine.ip
                )
        # Drop forwarding rule to the internet / dirty line.
        if config("routing:routing:internet") != "none":
            rooter(
                "forward_disable", machine.interface,
                config("routing:routing:internet"), machine.ip
            )
#.........这里部分代码省略.........
示例14: cuckoo_clean
def cuckoo_clean():
    """Clean up cuckoo setup.
    It deletes logs, all stored data from file system and configured
    databases (SQL and MongoDB) and, when enabled, ElasticSearch indices.
    """
    # Init logging (without writing to file).
    init_console_logging()
    try:
        # Initialize the database connection. Skip the schema check so a
        # half-migrated database can still be dropped.
        db = Database()
        db.connect(schema_check=False)
        # Drop all tables.
        db.drop()
    except (CuckooDependencyError, CuckooDatabaseError) as e:
        # If something is screwed due to incorrect database migrations or bad
        # database SqlAlchemy would be unable to connect and operate.
        log.warning("Error connecting to database: it is suggested to check "
                    "the connectivity, apply all migrations if needed or purge "
                    "it manually. Error description: %s", e)
    # Check if MongoDB reporting is enabled and drop the database if it is.
    if mongo.init():
        try:
            mongo.connect()
            mongo.drop()
            mongo.close()
        except Exception as e:
            # Best-effort: a failed Mongo drop shouldn't abort the cleanup.
            log.warning("Unable to drop MongoDB database: %s", e)
    # Check if ElasticSearch reporting is enabled and drop its data if it is.
    if elastic.init():
        elastic.connect()
        # TODO This should be moved to the elastic abstract.
        # TODO We should also drop historic data, i.e., from previous days,
        # months, and years.
        date_index = datetime.datetime.utcnow().strftime({
            "yearly": "%Y",
            "monthly": "%Y-%m",
            "daily": "%Y-%m-%d",
        }[elastic.index_time_pattern])
        dated_index = "%s-%s" % (elastic.index, date_index)
        # ignore=[400, 404]: the index may not exist; that's fine.
        elastic.client.indices.delete(
            index=dated_index, ignore=[400, 404]
        )
        template_name = "%s_template" % dated_index
        if elastic.client.indices.exists_template(template_name):
            elastic.client.indices.delete_template(template_name)
    # Paths to clean.
    paths = [
        cwd("cuckoo.db"),
        cwd("log"),
        cwd("storage", "analyses"),
        cwd("storage", "baseline"),
        cwd("storage", "binaries"),
    ]
    # Delete the various files and directories. In case of directories, keep
    # the parent directories, so to keep the state of the CWD in tact.
    for path in paths:
        if os.path.isdir(path):
            try:
                shutil.rmtree(path)
                os.mkdir(path)
            except (IOError, OSError) as e:
                log.warning("Error removing directory %s: %s", path, e)
        elif os.path.isfile(path):
            try:
                os.unlink(path)
            except (IOError, OSError) as e:
                log.warning("Error removing file %s: %s", path, e)
示例15: remove
def remove(request, task_id):
    """Remove an analysis.

    Deletes the analysis document(s) for *task_id* from MongoDB together
    with the GridFS files they reference (sample, screenshots, pcaps,
    dropped files, calls) — but only files no other analysis still uses —
    and finally deletes the task from the SQL database.
    @todo: remove folder from storage.
    """
    analyses = results_db.analysis.find({"info.id": int(task_id)})
    # Checks if more analysis found with the same ID, like if process.py
    # was run manually.
    if analyses.count() > 1:
        message = (
            "Multiple tasks with this ID deleted, thanks for all the fish "
            "(the specified analysis was present multiple times in mongo)."
        )
    elif analyses.count() == 1:
        message = "Task deleted, thanks for all the fish."
    if not analyses.count():
        return view_error(request, "The specified analysis does not exist")
    for analysis in analyses:
        # Delete sample if not used.
        # The count() == 1 checks below mean "only this analysis references
        # the file", so the GridFS object is safe to delete.
        if "file_id" in analysis["target"]:
            if results_db.analysis.find({"target.file_id": ObjectId(analysis["target"]["file_id"])}).count() == 1:
                fs.delete(ObjectId(analysis["target"]["file_id"]))
        # Delete screenshots.
        for shot in analysis["shots"]:
            if isinstance(shot, dict):
                # Newer reports store each shot as {"small": ..., "original": ...}.
                if "small" in shot:
                    if results_db.analysis.find({
                        "shots": ObjectId(shot["small"]),
                    }).count() == 1:
                        fs.delete(ObjectId(shot["small"]))
                if "original" in shot:
                    if results_db.analysis.find({
                        "shots": ObjectId(shot["original"]),
                    }).count() == 1:
                        fs.delete(ObjectId(shot["original"]))
                continue
            # Legacy reports store the shot as a bare ObjectId.
            if results_db.analysis.find({"shots": ObjectId(shot)}).count() == 1:
                fs.delete(ObjectId(shot))
        # Delete network pcap.
        if "pcap_id" in analysis["network"] and results_db.analysis.find({"network.pcap_id": ObjectId(analysis["network"]["pcap_id"])}).count() == 1:
            fs.delete(ObjectId(analysis["network"]["pcap_id"]))
        # Delete sorted pcap
        if "sorted_pcap_id" in analysis["network"] and results_db.analysis.find({"network.sorted_pcap_id": ObjectId(analysis["network"]["sorted_pcap_id"])}).count() == 1:
            fs.delete(ObjectId(analysis["network"]["sorted_pcap_id"]))
        # Delete mitmproxy dump.
        if "mitmproxy_id" in analysis["network"] and results_db.analysis.find({"network.mitmproxy_id": ObjectId(analysis["network"]["mitmproxy_id"])}).count() == 1:
            fs.delete(ObjectId(analysis["network"]["mitmproxy_id"]))
        # Delete dropped.
        for drop in analysis.get("dropped", []):
            if "object_id" in drop and results_db.analysis.find({"dropped.object_id": ObjectId(drop["object_id"])}).count() == 1:
                fs.delete(ObjectId(drop["object_id"]))
        # Delete calls.
        for process in analysis.get("behavior", {}).get("processes", []):
            for call in process["calls"]:
                results_db.calls.remove({"_id": ObjectId(call)})
        # Delete analysis data.
        results_db.analysis.remove({"_id": ObjectId(analysis["_id"])})
    # Delete from SQL db.
    db = Database()
    db.delete_task(task_id)
    return render_template(request, "success.html", **{
        "message": message,
    })