This page collects typical usage examples of the Python class globaleaks.settings.GLSettings. If you are wondering what GLSettings is for or how to use it, the curated class examples below may help.
A total of 15 code examples of the GLSettings class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
Example 1: process_mail_creation
def process_mail_creation(self, store, data):
    receiver_id = data['receiver']['id']

    # Do not spool emails if the receiver has opted out of notifications for this tip.
    if not data['tip']['enable_notifications']:
        log.debug("Discarding emails for %s due to receiver's preference." % receiver_id)
        return

    # https://github.com/globaleaks/GlobaLeaks/issues/798
    # TODO: the current solution is global and configurable only by the admin
    sent_emails = GLSettings.get_mail_counter(receiver_id)
    if sent_emails >= GLSettings.memory_copy.notification_threshold_per_hour:
        log.debug("Discarding emails for receiver %s due to threshold already exceeded for the current hour" %
                  receiver_id)
        return

    GLSettings.increment_mail_counter(receiver_id)
    if sent_emails >= GLSettings.memory_copy.notification_threshold_per_hour:
        log.info("Reached threshold of %d emails with limit of %d for receiver %s" % (
                 sent_emails,
                 GLSettings.memory_copy.notification_threshold_per_hour,
                 receiver_id))

        # simply changing the type of the notification causes
        # the notification_limit_reached template to be sent instead
        data['type'] = u'receiver_notification_limit_reached'

    data['notification'] = db_get_notification(store, data['receiver']['language'])
    data['node'] = db_admin_serialize_node(store, data['receiver']['language'])

    if not data['node']['allow_unencrypted'] and data['receiver']['pgp_key_status'] != u'enabled':
        return

    subject, body = Templating().get_mail_subject_and_body(data)

    # If the receiver has encryption enabled, encrypt the mail body
    if data['receiver']['pgp_key_status'] == u'enabled':
        gpob = GLBPGP()
        try:
            gpob.load_key(data['receiver']['pgp_key_public'])
            body = gpob.encrypt_message(data['receiver']['pgp_key_fingerprint'], body)
        except Exception as excep:
            log.err("Error in PGP interface object (for %s: %s)! (notification+encryption)" %
                    (data['receiver']['username'], str(excep)))
            return
        finally:
            # the finally block always runs, even when the except
            # clause contains a return or a raise
            gpob.destroy_environment()

    mail = models.Mail({
        'address': data['receiver']['mail_address'],
        'subject': subject,
        'body': body
    })

    store.add(mail)
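The heart of Example 1 is a check-then-increment rate limit: read the per-receiver counter, bail out above the threshold, otherwise bump it. Below is a minimal self-contained sketch of the same idea, using a sliding one-hour window over a plain dict instead of GLSettings' counter API; MAX_PER_HOUR and should_send are invented names, and the real GLSettings counters may be reset differently.

import time
from collections import defaultdict

MAX_PER_HOUR = 20          # stand-in for memory_copy.notification_threshold_per_hour
_sent = defaultdict(list)  # receiver_id -> timestamps of mails already sent

def should_send(receiver_id, now=None):
    """Return True and record the send if the receiver is under the hourly cap."""
    now = time.time() if now is None else now
    # Keep only timestamps from the last hour so the window slides.
    _sent[receiver_id] = [t for t in _sent[receiver_id] if now - t < 3600]
    if len(_sent[receiver_id]) >= MAX_PER_HOUR:
        return False
    _sent[receiver_id].append(now)
    return True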
Example 2: migrate_model
def migrate_model(self, model_name):
    objs_count = self.store_old.find(self.model_from[model_name]).count()

    specific_migration_function = getattr(self, 'migrate_%s' % model_name, None)
    if specific_migration_function is not None:
        GLSettings.print_msg(' ł %s [#%d]' % (model_name, objs_count))
        specific_migration_function()
    else:
        GLSettings.print_msg(' * %s [#%d]' % (model_name, objs_count))
        self.generic_migration_function(model_name)
Example 3: setUp
def setUp(self):
    GLSettings.set_devel_mode()
    GLSettings.logging = None
    GLSettings.scheduler_threadpool = FakeThreadPool()
    GLSettings.sessions = {}
    GLSettings.failed_login_attempts = 0

    if os.path.isdir('/dev/shm'):
        GLSettings.working_path = '/dev/shm/globaleaks'
        GLSettings.ramdisk_path = '/dev/shm/globaleaks/ramdisk'
    else:
        GLSettings.working_path = './working_path'
        GLSettings.ramdisk_path = './working_path/ramdisk'

    GLSettings.eval_paths()
    GLSettings.remove_directories()
    GLSettings.create_directories()

    self.setUp_dummy()

    yield db.create_tables(self.create_node)

    for fixture in getattr(self, 'fixtures', []):
        yield import_fixture(fixture)

    yield import_memory_variables()

    # override of imported memory variables
    GLSettings.memory_copy.allow_unencrypted = True

    anomaly.Alarm.reset()
    event.EventTrackQueue.reset()
    statistics_sched.StatisticsSchedule.reset()

    self.internationalized_text = load_appdata()['node']['whistleblowing_button']
Example 4: __init__
def __init__(self, migration_mapping, start_version, store_old, store_new):
    self.appdata = load_appdata()

    self.migration_mapping = migration_mapping
    self.start_version = start_version

    self.store_old = store_old
    self.store_new = store_new

    self.model_from = {}
    self.model_to = {}
    self.entries_count = {}
    self.fail_on_count_mismatch = {}

    for model_name, model_history in migration_mapping.iteritems():
        length = DATABASE_VERSION + 1 - FIRST_DATABASE_VERSION_SUPPORTED
        if len(model_history) != length:
            raise TypeError('Expecting a table with {} statuses ({})'.format(length, model_name))

        self.fail_on_count_mismatch[model_name] = True

        self.model_from[model_name] = self.get_right_model(model_name, start_version)
        self.model_to[model_name] = self.get_right_model(model_name, start_version + 1)

        if self.model_from[model_name] is not None and self.model_to[model_name] is not None:
            self.entries_count[model_name] = self.store_old.find(self.model_from[model_name]).count()
        else:
            self.entries_count[model_name] = 0

    if self.start_version + 1 == DATABASE_VERSION:
        # we are there!
        if not os.access(GLSettings.db_schema, os.R_OK):
            GLSettings.print_msg("Unable to access %s" % GLSettings.db_schema)
            raise IOError('Unable to access db schema file')

        with open(GLSettings.db_schema) as f:
            queries = ''.join(f).split(';')

        for query in queries:
            self.execute_query(query)
    else:  # manage the migration here
        for k, _ in self.migration_mapping.iteritems():
            query = self.get_right_sql_version(k, self.start_version + 1)
            if not query:
                # the table has been removed
                continue

            self.execute_query(query)

    self.store_new.commit()
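The final-version branch of Example 4 loads the whole SQL schema file and executes it one statement at a time, splitting on ';'. Here is a standalone sketch of that pattern using only the stdlib sqlite3 module; the schema contents are made up for illustration.

import sqlite3

SCHEMA = """
CREATE TABLE receiver (id INTEGER PRIMARY KEY, name TEXT);
CREATE TABLE mail (id INTEGER PRIMARY KEY, address TEXT, body TEXT);
"""

conn = sqlite3.connect(':memory:')
# Naive ';' splitting, as in the example above; it would break if a
# statement embedded a literal ';', which schema files usually avoid.
for query in SCHEMA.split(';'):
    if query.strip():
        conn.execute(query)
conn.commit()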
Example 5: setUp
def setUp(self):
    helpers.init_glsettings_for_unit_tests()

    GLSettings.db_path = os.path.join(GLSettings.ramdisk_path, 'db_test')
    os.mkdir(GLSettings.db_path)
    db_name = 'glbackend-%d.db' % DATABASE_VERSION
    db_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'db', 'populated', db_name)
    shutil.copyfile(db_path, os.path.join(GLSettings.db_path, db_name))
    self.db_file = os.path.join(GLSettings.db_path, db_name)
    GLSettings.db_uri = GLSettings.make_db_uri(self.db_file)

    # place a dummy version in the current db
    store = Store(create_database(GLSettings.db_uri))
    prv = config.PrivateFactory(store)
    self.dummy_ver = '2.XX.XX'
    prv.set_val('version', self.dummy_ver)
    self.assertEqual(prv.get_val('version'), self.dummy_ver)
    store.commit()
    store.close()

    # back up various mocks that we will use
    self._bck_f = config.is_cfg_valid
    GLConfig['private']['xx_smtp_password'] = GLConfig['private'].pop('smtp_password')
    self.dp = u'yes_you_really_should_change_me'
Example 6: init_glsettings_for_unit_tests
def init_glsettings_for_unit_tests():
    GLSettings.testing = True
    GLSettings.set_devel_mode()
    GLSettings.logging = None
    GLSettings.scheduler_threadpool = FakeThreadPool()
    GLSettings.sessions.clear()
    GLSettings.failed_login_attempts = 0

    GLSettings.working_path = './working_path'
    GLSettings.ramdisk_path = os.path.join(GLSettings.working_path, 'ramdisk')

    GLSettings.eval_paths()
    GLSettings.remove_directories()
    GLSettings.create_directories()
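Examples 5, 8, and 9 all start by calling this helper, so a typical test module built on it looks roughly like the skeleton below. This is a hedged sketch: it assumes the globaleaks package and its test helpers are importable from globaleaks.tests, and TestMyFeature with its assertion is a made-up placeholder.

from twisted.trial import unittest

from globaleaks.settings import GLSettings
from globaleaks.tests import helpers  # assumed test-helper module shipped with globaleaks


class TestMyFeature(unittest.TestCase):  # hypothetical test case
    def setUp(self):
        # Switches GLSettings to devel mode and recreates working_path/ramdisk_path.
        helpers.init_glsettings_for_unit_tests()

    def test_working_path_configured(self):
        # After setUp, the paths configured by the helper are in place.
        self.assertEqual(GLSettings.working_path, './working_path')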
Example 7: _wrap
def _wrap(self, function, *args, **kwargs):
    """
    Wrap the provided function, calling it inside a thread and
    passing the store to it.
    """
    self.store = self.get_store()

    try:
        if self.instance:
            result = function(self.instance, self.store, *args, **kwargs)
        else:
            result = function(self.store, *args, **kwargs)
    except exceptions.DisconnectionError as e:
        transaction.abort()
        # we print the exception here because we do not propagate it
        GLSettings.log_debug(e)
        result = None
    except exceptions.IntegrityError as e:
        transaction.abort()
        raise DatabaseIntegrityError(str(e))
    except HTTPError as excep:
        transaction.abort()
        raise excep
    except:
        transaction.abort()
        self.store.close()
        # propagate the exception
        raise
    else:
        if not self.readonly:
            self.store.commit()
        else:
            self.store.flush()
            self.store.invalidate()
    finally:
        self.store.close()

    return result
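The control flow of _wrap is the classic try/except/else/finally transaction shape: abort (and usually re-raise) on failure, commit only when the body succeeded, and close the store no matter what. Here is a dependency-free sketch of that shape; Tx and run_in_tx are invented stand-ins, not GlobaLeaks APIs.

class Tx:
    """Toy transaction object standing in for the Storm store."""
    def __init__(self):
        self.log = []
    def commit(self):
        self.log.append('commit')
    def abort(self):
        self.log.append('abort')
    def close(self):
        self.log.append('close')

def run_in_tx(function, *args, **kwargs):
    tx = Tx()
    try:
        result = function(tx, *args, **kwargs)
    except Exception:
        tx.abort()   # roll back on any failure...
        raise        # ...then propagate, as _wrap does for unknown errors
    else:
        tx.commit()  # commit only if the body raised nothing
    finally:
        tx.close()   # always release the connection
    return result

# usage sketch:
# run_in_tx(lambda tx: 42)  # -> 42, after 'commit' then 'close' on the Tx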
Example 8: _test
def _test(self, path, f):
    helpers.init_glsettings_for_unit_tests()
    GLSettings.db_path = os.path.join(GLSettings.ramdisk_path, 'db_test')
    final_db_file = os.path.abspath(os.path.join(GLSettings.db_path, 'glbackend-%d.db' % DATABASE_VERSION))
    GLSettings.db_uri = GLSettings.make_db_uri(final_db_file)

    os.mkdir(GLSettings.db_path)
    dbpath = os.path.join(path, f)
    dbfile = os.path.join(GLSettings.db_path, f)
    shutil.copyfile(dbpath, dbfile)

    ret = perform_system_update()

    shutil.rmtree(GLSettings.db_path)

    self.assertNotEqual(ret, -1)
Example 9: _initStartDB
def _initStartDB(self, target_ver):
    helpers.init_glsettings_for_unit_tests()

    GLSettings.db_path = os.path.join(GLSettings.ramdisk_path, 'db_test')
    os.mkdir(GLSettings.db_path)
    db_name = 'glbackend-%d.db' % target_ver
    db_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'db', 'populated', db_name)
    shutil.copyfile(db_path, os.path.join(GLSettings.db_path, db_name))

    self.db_file = os.path.join(GLSettings.db_path, db_name)
    GLSettings.db_uri = GLSettings.make_db_uri(self.db_file)

    self.store = Store(create_database(GLSettings.db_uri))
Example 10: start_globaleaks
def start_globaleaks(self):
    try:
        GLSettings.fix_file_permissions()
        GLSettings.drop_privileges()
        GLSettings.check_directories()

        # Check for an existing database and, if needed, perform its migration
        check = check_db_files()
        if check == -1:
            self._reactor.stop()
        elif check == 0:
            yield init_db()
        else:
            yield update_version()
            yield init_appdata()

        yield clean_untracked_files()
        yield refresh_memory_variables()

        if GLSettings.cmdline_options:
            yield apply_cmdline_options()

        self.start_asynchronous_jobs()

        log.msg("GLBackend is now running")
        for ip in GLSettings.bind_addresses:
            log.msg("Visit http://%s:%d to interact with me" % (ip, GLSettings.bind_port))

        for host in GLSettings.accepted_hosts:
            if host not in GLSettings.bind_addresses:
                log.msg("Visit http://%s:%d to interact with me" % (host, GLSettings.bind_port))

        for other in GLSettings.configured_hosts:
            if other:
                log.msg("Visit %s to interact with me" % other)

        log.msg("Reminder: GlobaLeaks is not accessible from other URLs, this is strictly enforced")
        log.msg("Check the documentation at https://github.com/globaleaks/GlobaLeaks/wiki/ for further details")
    except Exception as excep:
        log.err("ERROR: Cannot start GlobaLeaks; please manually check the error.")
        log.err("EXCEPTION: %s" % excep)
        self._reactor.stop()
Example 11: start_globaleaks
def start_globaleaks(self):
    try:
        GLSettings.fix_file_permissions()
        GLSettings.drop_privileges()
        GLSettings.check_directories()

        GLSettings.orm_tp.start()
        self._reactor.addSystemEventTrigger('after', 'shutdown', GLSettings.orm_tp.stop)

        if GLSettings.initialize_db:
            yield init_db()

        yield clean_untracked_files()
        yield refresh_memory_variables()

        self.start_asynchronous_jobs()
    except Exception as excep:
        log.err("ERROR: Cannot start GlobaLeaks; please manually check the error.")
        log.err("EXCEPTION: %s" % excep)
        self._reactor.stop()
Example 12: start_globaleaks
def start_globaleaks(self):
    try:
        GLSettings.fix_file_permissions()
        GLSettings.drop_privileges()
        GLSettings.check_directories()

        if GLSettings.initialize_db:
            yield init_db()
        else:
            yield update_version()
            yield init_appdata()

        yield clean_untracked_files()
        yield refresh_memory_variables()

        self.start_asynchronous_jobs()
    except Exception as excep:
        log.err("ERROR: Cannot start GlobaLeaks; please manually check the error.")
        log.err("EXCEPTION: %s" % excep)
        self._reactor.stop()
Example 13: globaleaks_start
def globaleaks_start():
    GLSettings.fix_file_permissions()
    GLSettings.drop_privileges()
    GLSettings.check_directories()

    if not GLSettings.accepted_hosts:
        log.err("Missing a list of hosts usable to contact GLBackend, abort")
        return False

    d = create_tables()
    d.addCallback(clean_untracked_files)

    @d.addCallback
    @defer.inlineCallbacks
    def cb(res):
        start_asynchronous()
        yield import_memory_variables()
        tor_configured_hosts = yield apply_cli_options()
        log.msg("GLBackend is now running")

        for ip in GLSettings.bind_addresses:
            log.msg("Visit http://%s:%d to interact with me" % (ip, GLSettings.bind_port))

        for host in GLSettings.accepted_hosts:
            if host not in GLSettings.bind_addresses:
                log.msg("Visit http://%s:%d to interact with me" % (host, GLSettings.bind_port))

        if tor_configured_hosts:
            for other in tor_configured_hosts:
                if other:
                    log.msg("Visit %s to interact with me" % other)

        log.msg("Reminder: GlobaLeaks is not accessible from other URLs, this is strictly enforced")
        log.msg("Check the documentation at https://github.com/globaleaks/GlobaLeaks/wiki/ for further details")

    return True
Example 14: perform_version_update
def perform_version_update(version):
    """
    @param version: the current DB version to start the update from
    @return: None
    """
    to_delete_on_fail = []
    to_delete_on_success = []

    if version < FIRST_DATABASE_VERSION_SUPPORTED:
        GLSettings.print_msg("Migrations from DB versions lower than %d are no longer supported!" % FIRST_DATABASE_VERSION_SUPPORTED)
        GLSettings.print_msg("If you can't create your Node from scratch, contact us asking for support.")
        quit()

    tmpdir = os.path.abspath(os.path.join(GLSettings.db_path, 'tmp'))
    orig_db_file = os.path.abspath(os.path.join(GLSettings.db_path, 'glbackend-%d.db' % version))
    final_db_file = os.path.abspath(os.path.join(GLSettings.db_path, 'glbackend-%d.db' % DATABASE_VERSION))

    shutil.rmtree(tmpdir, True)
    os.mkdir(tmpdir)
    shutil.copy2(orig_db_file, tmpdir)

    try:
        while version < DATABASE_VERSION:
            old_db_file = os.path.abspath(os.path.join(tmpdir, 'glbackend-%d.db' % version))
            new_db_file = os.path.abspath(os.path.join(tmpdir, 'glbackend-%d.db' % (version + 1)))

            GLSettings.db_file = new_db_file
            GLSettings.enable_input_length_checks = False

            to_delete_on_fail.append(new_db_file)
            to_delete_on_success.append(old_db_file)

            GLSettings.print_msg("Updating DB from version %d to version %d" % (version, version + 1))

            store_old = Store(create_database('sqlite:' + old_db_file))
            store_new = Store(create_database('sqlite:' + new_db_file))

            # Here the migration script is instantiated
            MigrationModule = importlib.import_module("globaleaks.db.migrations.update_%d" % (version + 1))
            migration_script = MigrationModule.MigrationScript(migration_mapping, version, store_old, store_new)

            GLSettings.print_msg("Migrating table:")

            try:
                try:
                    migration_script.prologue()
                except Exception as exception:
                    GLSettings.print_msg("Failure while executing migration prologue: %s" % exception)
                    raise exception

                for model_name, _ in migration_mapping.iteritems():
                    if migration_script.model_from[model_name] is not None and migration_script.model_to[model_name] is not None:
                        try:
                            migration_script.migrate_model(model_name)

                            # Commit after every table migration in order to be able
                            # to detect the precise migration that may fail.
                            migration_script.commit()
                        except Exception as exception:
                            GLSettings.print_msg("Failure while migrating table %s: %s" % (model_name, exception))
                            raise exception

                try:
                    migration_script.epilogue()
                    migration_script.commit()
                except Exception as exception:
                    GLSettings.print_msg("Failure while executing migration epilogue: %s" % exception)
                    raise exception
            finally:
                # the database should always be closed before leaving the application
                # in order to not keep leaking journal files
                migration_script.close()

            GLSettings.print_msg("Migration stats:")

            # we open a new db in order to verify the integrity of the generated file
            store_verify = Store(create_database('sqlite:' + new_db_file))

            for model_name, _ in migration_mapping.iteritems():
                if model_name == 'ApplicationData':
                    continue

                if migration_script.model_from[model_name] is not None and migration_script.model_to[model_name] is not None:
                    count = store_verify.find(migration_script.model_to[model_name]).count()
                    if migration_script.entries_count[model_name] != count:
                        if migration_script.fail_on_count_mismatch[model_name]:
                            raise AssertionError("Integrity check failed on count equality for table %s: %d != %d" %
                                                 (model_name, count, migration_script.entries_count[model_name]))
                        else:
                            GLSettings.print_msg(" * %s table migrated (entries count changed from %d to %d)" %
                                                 (model_name, migration_script.entries_count[model_name], count))
                    else:
                        GLSettings.print_msg(" * %s table migrated (%d entry(s))" %
                                             (model_name, migration_script.entries_count[model_name]))

            version += 1

            store_verify.close()
    except Exception as exception:
        # ... (the remainder of this example is omitted) ...
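Example 14's key idea is stepwise migration: rather than jumping from version N straight to the latest, it copies the database and applies one update_<N+1> module per hop, checking row counts after each step. Below is a stripped-down, dependency-free sketch of that loop; DATABASE_VERSION, MIGRATIONS, and perform_update here are invented placeholders, not the real GlobaLeaks machinery.

import shutil

DATABASE_VERSION = 3       # hypothetical target version
MIGRATIONS = {
    2: lambda path: None,  # would apply "update_2" to the copy at `path`
    3: lambda path: None,  # would apply "update_3"
}

def perform_update(db_file, version):
    """Walk the version chain one step at a time, always on copies of the DB."""
    while version < DATABASE_VERSION:
        old_file = '%s.v%d' % (db_file, version)
        new_file = '%s.v%d' % (db_file, version + 1)
        shutil.copy2(old_file, new_file)   # migrate a copy, keep the original intact
        MIGRATIONS[version + 1](new_file)  # one migration module per step
        version += 1
    return '%s.v%d' % (db_file, version)

# usage sketch:
# open('mydb.v1', 'wb').close()      # pretend this is the version-1 database
# final = perform_update('mydb', 1)  # -> 'mydb.v3'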
Example 15: filter_notification_event
def filter_notification_event(notifque):
    """
    :param notifque: the current notification event queue
    :return: a modified queue, in case some emails are not to be sent

    Basically it performs two filters; they are defined in:
      1) issue #444
      2) issue #798
    """
    # Here we collect the Storm events of Files, keyed by Tip
    files_event_by_tip = {}

    _tmp_list = []
    return_filtered_list = []
    # Storm ids of the events to be skipped
    orm_id_to_be_skipped = []

    for ne in notifque:
        if ne['trigger'] != u'Tip':
            continue

        files_event_by_tip.update({ne['tip_info']['id']: []})

    log.debug("Filtering function: iterating over %d Tip" % len(files_event_by_tip.keys()))
    # now files_event_by_tip contains N keys with an empty list;
    # we loop a second time because dicts have no guaranteed ordering
    for ne in notifque:
        if GLSettings.memory_copy.disable_receiver_notification_emails:
            orm_id_to_be_skipped.append(ne['orm_id'])
            continue

        if ne['trigger'] != u'File':
            _tmp_list.append(ne)
            continue

        if ne['tip_info']['id'] in files_event_by_tip:
            orm_id_to_be_skipped.append(ne['orm_id'])
        else:
            _tmp_list.append(ne)

    if len(orm_id_to_be_skipped):
        if GLSettings.memory_copy.disable_receiver_notification_emails:
            log.debug("All the %d mails will be marked as sent because the admin has disabled receiver notifications" %
                      len(orm_id_to_be_skipped))
        else:
            log.debug("Filtering function: marked %d File notifications to be suppressed because they are part of a submission" %
                      len(orm_id_to_be_skipped))

    for ne in _tmp_list:
        receiver_id = ne['receiver_info']['id']

        sent_emails = GLSettings.get_mail_counter(receiver_id)

        if sent_emails >= GLSettings.memory_copy.notification_threshold_per_hour:
            log.debug("Discarding email for receiver %s due to threshold already exceeded for the current hour" %
                      receiver_id)
            orm_id_to_be_skipped.append(ne['orm_id'])
            continue

        GLSettings.increment_mail_counter(receiver_id)
        if sent_emails + 1 >= GLSettings.memory_copy.notification_threshold_per_hour:
            log.info("Reached threshold of %d emails with limit of %d for receiver %s" % (
                     sent_emails,
                     GLSettings.memory_copy.notification_threshold_per_hour,
                     receiver_id))

            # append a receiver_notification_limit_reached event in its place
            anomalyevent = OD()
            anomalyevent.type = u'receiver_notification_limit_reached'
            anomalyevent.notification_settings = ne.notification_settings
            anomalyevent.node_info = ne.node_info
            anomalyevent.context_info = None
            anomalyevent.receiver_info = ne.receiver_info
            anomalyevent.tip_info = None
            anomalyevent.subevent_info = None
            anomalyevent.orm_id = '0'

            return_filtered_list.append(anomalyevent)
            orm_id_to_be_skipped.append(ne['orm_id'])
            continue

        return_filtered_list.append(ne)

    log.debug("Mail filtering completed, going from #%d to #%d events" %
              (len(notifque), len(return_filtered_list)))

    # return the new list of events and the list of Storm ids to skip
    return return_filtered_list, orm_id_to_be_skipped
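Both filters in Example 15 work on plain event dicts, so the duplicate-File rule is easy to exercise in isolation. Below is a self-contained toy version of that first filter; toy_filter and the flattened event shape are made up for illustration (the real events carry tip_info, receiver_info, and more).

def toy_filter(queue):
    """Suppress File events whose Tip also has a Tip event in the queue."""
    tips_with_event = {e['tip_id'] for e in queue if e['trigger'] == 'Tip'}
    kept, skipped = [], []
    for e in queue:
        if e['trigger'] == 'File' and e['tip_id'] in tips_with_event:
            skipped.append(e['id'])  # the Tip mail already covers this File
        else:
            kept.append(e)
    return kept, skipped

queue = [
    {'id': 1, 'trigger': 'Tip',  'tip_id': 'T1'},
    {'id': 2, 'trigger': 'File', 'tip_id': 'T1'},  # suppressed
    {'id': 3, 'trigger': 'File', 'tip_id': 'T2'},  # kept: no Tip event for T2
]
kept, skipped = toy_filter(queue)
assert [e['id'] for e in kept] == [1, 3] and skipped == [2]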