This article collects typical usage examples of the Python class socorro.lib.util.DotDict. If you are wondering how exactly the DotDict class is used, or how DotDict works in practice, the curated class code examples below may help you.
The following presents 15 code examples of the DotDict class, sorted by popularity by default. You can upvote examples you like or find useful; your ratings help the system recommend better Python code samples.
Example 1: test_blocking_start
def test_blocking_start(self):
    """blocking_start should keep invoking task_func until the manager
    decides to quit, and must never call the waiting_func callback."""
    config = DotDict()
    config.logger = self.logger
    config.idle_delay = 1
    config.quit_on_empty_queue = False

    class QuittingTaskManager(TaskManager):
        # replace the real sleep with a call counter that requests
        # shutdown after a few invocations
        def _responsive_sleep(self, seconds, wait_log_interval=0,
                              wait_reason=''):
            if hasattr(self, 'count'):
                if self.count >= 2:
                    self.quit = True
                self.count += 1
            else:
                # first call: the counter attribute does not exist yet
                self.count = 0

    task_manager = QuittingTaskManager(config, task_func=Mock())
    waiting_func = Mock()
    task_manager.blocking_start(waiting_func=waiting_func)
    eq_(task_manager.task_func.call_count, 10)
    eq_(waiting_func.call_count, 0)
Example 2: setup_mocked_s3_storage
def setup_mocked_s3_storage(
    self,
    executor=TransactionExecutor,
    executor_for_gets=TransactionExecutor,
    storage_class='BotoS3CrashStorage',
    host='',
    port=0,
    resource_class=S3ConnectionContext,
    **extra
):
    """Build an S3 connection context whose network-facing pieces are
    all replaced by mocks, suitable for exercising storage logic
    without touching a real endpoint."""
    config = DotDict({
        'resource_class': resource_class,
        'logger': mock.Mock(),
        'host': host,
        'port': port,
        'access_key': 'this is the access key',
        'secret_access_key': 'secrets',
        'bucket_name': 'silliness',
        'prefix': 'dev',
        'calling_format': mock.Mock(),
    })
    # let callers override or extend any of the defaults above
    config.update(extra)

    s3_conn = resource_class(config)
    # stub out everything that would reach the network
    s3_conn._connect_to_endpoint = mock.Mock()
    s3_conn._mocked_connection = s3_conn._connect_to_endpoint.return_value
    s3_conn._calling_format.return_value = mock.Mock()
    s3_conn._CreateError = mock.Mock()
    s3_conn.ResponseError = mock.Mock()
    s3_conn._open = mock.MagicMock()
    return s3_conn
Example 3: _add_process_type_to_processed_crash
def _add_process_type_to_processed_crash(self, raw_crash):
    """ Electrolysis Support - Optional - raw_crash may contain a
    ProcessType of plugin. In the future this value would be default,
    content, maybe even Jetpack... This indicates which process was the
    crashing process.
    """
    additions = DotDict()
    process_type = self._get_truncate_or_none(
        raw_crash, 'ProcessType', 10
    )
    if not process_type:
        # no ProcessType present in the raw crash: nothing to add
        return additions
    additions.process_type = process_type
    if process_type == 'plugin':
        # Bug#543776 We actually will are relaxing the non-null policy...
        # a null filename, name, and version is OK. We'll use empty strings
        additions.PluginFilename = raw_crash.get('PluginFilename', '')
        additions.PluginName = raw_crash.get('PluginName', '')
        additions.PluginVersion = raw_crash.get('PluginVersion', '')
    return additions
Example 4: test_doing_work_with_two_workers_and_generator
def test_doing_work_with_two_workers_and_generator(self):
    """two worker threads draining a ten-item generator should leave
    all ten items collected, in some order."""
    config = DotDict()
    config.logger = self.logger
    config.number_of_threads = 2
    config.maximum_queue_size = 2
    collected = []

    def collector(an_item):
        collected.append(an_item)

    task_manager = ThreadedTaskManager(
        config,
        task_func=collector,
        job_source_iterator=(((value,), {}) for value in xrange(10)),
    )
    try:
        task_manager.start()
        time.sleep(0.2)
        assert len(task_manager.thread_list) == 2
        assert len(collected) == 10
        assert sorted(collected) == list(range(10))
    except Exception:
        # make sure the worker threads get joined before re-raising
        task_manager.wait_for_completion()
        raise
Example 5: test_stuff_missing
def test_stuff_missing(self):
    """CPUInfoRule should still populate cpu_info and cpu_name when the
    system_info section lacks its cpu_count field."""
    config = self.get_basic_config()
    raw_crash = copy.copy(canonical_standard_raw_crash)
    raw_dumps = {}
    system_info = copy.copy(
        canonical_processed_crash['json_dump']['system_info']
    )
    del system_info['cpu_count']
    processed_crash = DotDict()
    processed_crash.json_dump = {'system_info': system_info}
    processor_meta = self.get_basic_processor_meta()

    rule = CPUInfoRule(config)
    # the call to be tested
    rule.act(raw_crash, raw_dumps, processed_crash, processor_meta)

    eq_(processed_crash.cpu_name, 'x86')
    eq_(
        processed_crash.cpu_info,
        "GenuineIntel family 6 model 42 stepping 7"
    )
    # the rule must not have modified the raw crash
    eq_(raw_crash, canonical_standard_raw_crash)
Example 6: test_save_raw_crash_no_legacy
def test_save_raw_crash_no_legacy(self):
    """With filter_on_legacy_processing disabled, save_raw_crash must
    queue every crash regardless of the crash's legacy_processing value
    (or its absence)."""
    config = self._setup_config()
    config.filter_on_legacy_processing = False
    crash_store = RabbitMQCrashStorage(config)

    # test for "legacy_processing" missing from crash
    crash_store.save_raw_crash(
        raw_crash=DotDict(), dumps=DotDict(), crash_id='crash_id')
    crash_store.transaction.assert_called_with(
        crash_store._save_raw_crash_transaction, 'crash_id')
    config.logger.reset_mock()

    # test for normal save
    raw_crash = DotDict()
    raw_crash.legacy_processing = 0
    # FIX: the original passed the DotDict *class* as dumps; an empty
    # instance is what the API expects (matching the first call above)
    crash_store.save_raw_crash(
        raw_crash=raw_crash, dumps=DotDict(), crash_id='crash_id')
    crash_store.transaction.assert_called_with(
        crash_store._save_raw_crash_transaction, 'crash_id')
    crash_store.transaction.reset_mock()

    # test for save without regard to "legacy_processing" value
    raw_crash = DotDict()
    raw_crash.legacy_processing = 5
    # FIX: same class-vs-instance typo as above
    crash_store.save_raw_crash(
        raw_crash=raw_crash, dumps=DotDict(), crash_id='crash_id')
    crash_store.transaction.assert_called_with(
        crash_store._save_raw_crash_transaction, 'crash_id')
Example 7: test_action_case_1
def test_action_case_1(self):
    """sentinel exists in stack, but no secondaries"""
    processed = DotDict()
    processed.process_type = 'plugin'
    processed.json_dump = copy.deepcopy(cannonical_json_dump)
    # plant the sentinel frame in the crashing thread
    processed.json_dump['crashing_thread']['frames'][2]['function'] = (
        'NtUserSetWindowPos'
    )
    processed.upload_file_minidump_flash2 = DotDict()
    processed.upload_file_minidump_flash2.json_dump = copy.deepcopy(
        cannonical_json_dump
    )
    fake_processor = create_basic_fake_processor()
    raw = DotDict()

    rule = SetWindowPos()
    result = rule.action(raw, processed, fake_processor)

    ok_(result)
    ok_('classifications' in processed)
    ok_('skunk_works' in processed.classifications)
    eq_(
        processed.classifications.skunk_works.classification,
        'NtUserSetWindowPos | other'
    )
Example 8: test_doing_work_with_one_worker
def test_doing_work_with_one_worker(self):
    """a single worker thread should perform all ten default inserts,
    in order."""
    config = DotDict()
    config.logger = self.logger
    config.number_of_threads = 1
    config.maximum_queue_size = 1
    collected = []

    def collector(an_item):
        collected.append(an_item)

    task_manager = ThreadedTaskManager(config, task_func=collector)
    try:
        task_manager.start()
        time.sleep(0.2)
        ok_(
            len(collected) == 10,
            'expected to do 10 inserts, '
            'but %d were done instead' % len(collected)
        )
        ok_(
            collected == range(10),
            'expected %s, but got %s' % (range(10), collected)
        )
        task_manager.stop()
    except Exception:
        # make sure the worker threads get joined before re-raising
        task_manager.wait_for_completion()
        raise
Example 9: test_add_classification_to_processed_crash
def test_add_classification_to_processed_crash(self):
    """_add_classification should record the classification, its data,
    and a default version under classifications.skunk_works."""
    raw = DotDict()
    processed = DotDict()
    processed.classifications = DotDict()
    processor = None

    skunk_rule = SkunkClassificationRule()
    skunk_rule._add_classification(processed, 'stupid', 'extra stuff')

    self.assertTrue('classifications' in processed)
    self.assertTrue('skunk_works' in processed.classifications)
    skunk_works = processed.classifications.skunk_works
    self.assertEqual('stupid', skunk_works.classification)
    self.assertEqual('extra stuff', skunk_works.classification_data)
    self.assertEqual('0.0', skunk_works.classification_version)
Example 10: test_get_iterator
def test_get_iterator(self):
    """_get_iterator should accept a sequence, a generator function,
    or an iterable object as job_source_iterator."""
    config = DotDict()
    config.logger = self.logger
    config.quit_on_empty_queue = False

    # a plain sequence is passed through unchanged
    manager = TaskManager(config, job_source_iterator=range(1))
    eq_(manager._get_iterator(), [0])

    # a generator function is called to produce the iterator
    def an_iter(self):
        for value in range(5):
            yield value

    manager = TaskManager(config, job_source_iterator=an_iter)
    eq_(list(manager._get_iterator()), [0, 1, 2, 3, 4])

    # an iterable instance is used directly
    class ConfigKeyIterator(object):
        def __init__(self, config):
            self.config = config

        def __iter__(self):
            for key in self.config:
                yield key

    manager = TaskManager(
        config, job_source_iterator=ConfigKeyIterator(config)
    )
    eq_(list(manager._get_iterator()), list(config.keys()))
Example 11: test_action_case_1
def test_action_case_1(self):
    """success - both targets found in top 5 frames of stack"""
    processed = DotDict()
    processed.upload_file_minidump_flash2 = DotDict()
    processed.upload_file_minidump_flash2.json_dump = copy.deepcopy(
        cannonical_json_dump
    )
    # plant both signature frames near the top of the crashing thread
    frames = processed.upload_file_minidump_flash2.json_dump[
        'crashing_thread'
    ]['frames']
    frames[1]['function'] = 'NtUserPeekMessage'
    frames[2]['function'] = 'F849276792______________________________'
    fake_processor = create_basic_fake_processor()
    raw = DotDict()
    raw_dumps = {}

    rule = Bug812318()
    result = rule.action(raw, raw_dumps, processed, fake_processor)

    ok_(result)
    ok_('classifications' in processed)
    eq_(
        processed.classifications.skunk_works.classification,
        'bug812318-PeekMessage'
    )
Example 12: test_action_wrong_order
def test_action_wrong_order(self):
    """the signature frames appear in the wrong order in the stack, so
    the rule must decline to classify the crash."""
    dump = copy.deepcopy(cannonical_json_dump)
    frames = dump['crashing_thread']['frames']
    frames[4]['function'] = (
        "F_1152915508___________________________________"
    )
    frames[3]['function'] = (
        "mozilla::plugins::PluginInstanceChild::UpdateWindowAttributes"
        "(bool)"
    )
    frames[5]['function'] = (
        "mozilla::ipc::RPCChannel::Call(IPC::Message*, IPC::Message*)"
    )
    processed = DotDict()
    processed.dump = DotDict()
    processed.dump.json_dump = dump
    fake_processor = create_basic_fake_processor()
    raw = DotDict()
    raw_dumps = {}

    rule = UpdateWindowAttributes()
    result = rule.action(raw, raw_dumps, processed, fake_processor)

    ok_(not result)
    ok_('classifications' not in processed)
Example 13: test_get_iterator
def test_get_iterator(self):
    """_get_iterator must handle a sequence, a generator function, and
    an iterable object as job_source_iterator."""
    config = DotDict()
    config.logger = self.logger
    config.quit_on_empty_queue = False

    # a plain sequence passes straight through
    manager = TaskManager(config, job_source_iterator=range(1))
    assert manager._get_iterator() == [0]

    # a generator function is invoked to build the iterator
    def an_iter(self):
        for value in range(5):
            yield value

    manager = TaskManager(config, job_source_iterator=an_iter)
    assert list(manager._get_iterator()) == [0, 1, 2, 3, 4]

    # an iterable instance is consumed as-is
    class ConfigKeyIterator(object):
        def __init__(self, config):
            self.config = config

        def __iter__(self):
            for key in self.config:
                yield key

    manager = TaskManager(
        config, job_source_iterator=ConfigKeyIterator(config)
    )
    assert list(manager._get_iterator()) == list(config.keys())
Example 14: test_new_crash_duplicate_discovered
def test_new_crash_duplicate_discovered(self):
    """ Tests queue with standard queue items only
    """
    config = self._setup_config()
    config.transaction_executor_class = TransactionExecutor
    crash_store = RabbitMQCrashStorage(config)
    # give the mocked rabbitmq connection realistic queue names
    crash_store.rabbitmq.config.standard_queue_name = "socorro.normal"
    crash_store.rabbitmq.config.reprocessing_queue_name = "socorro.reprocessing"
    crash_store.rabbitmq.config.priority_queue_name = "socorro.priority"
    # a fake pika method-frame carrying the delivery tag used for acks
    faked_methodframe = DotDict()
    faked_methodframe.delivery_tag = "delivery_tag"
    # one real crash sandwiched between two empty basic_get results;
    # items are served via pop() from the right end of this list
    test_queue = [(None, None, None), (faked_methodframe, "1", "normal_crash_id"), (None, None, None)]
    def basic_get(queue="socorro.priority"):
        # end iteration once the fake queue is exhausted
        if len(test_queue) == 0:
            raise StopIteration
        return test_queue.pop()
    # wire the fake basic_get into the mocked channel of the rabbitmq
    # connection context manager
    crash_store.rabbitmq.return_value.__enter__.return_value.channel.basic_get = MagicMock(side_effect=basic_get)
    transaction_connection = crash_store.transaction.db_conn_context_source.return_value.__enter__.return_value
    # load the cache as if this crash had already been seen
    crash_store.acknowledgement_token_cache["normal_crash_id"] = faked_methodframe
    for result in crash_store.new_crashes():
        # new crash should be suppressed as a duplicate
        eq_(None, result)
    # the duplicate crash should still be acked even though it was
    # suppressed rather than handed out for processing
    transaction_connection.channel.basic_ack.assert_called_with(delivery_tag=faked_methodframe.delivery_tag)
Example 15: test_action_case_4
def test_action_case_4(self):
    """nothing in 1st dump, sentinel but no secondary in
    upload_file_minidump_flash2 dump"""
    processed = DotDict()
    processed.dump = DotDict()
    processed.dump.json_dump = copy.deepcopy(cannonical_json_dump)
    processed.upload_file_minidump_flash2 = DotDict()
    processed.upload_file_minidump_flash2.json_dump = copy.deepcopy(
        cannonical_json_dump
    )
    # plant the sentinel frame only in the flash2 dump
    frames = processed.upload_file_minidump_flash2.json_dump[
        'crashing_thread'
    ]['frames']
    frames[2]['function'] = 'NtUserSetWindowPos'
    fake_processor = create_basic_fake_processor()
    raw = DotDict()

    rule = SetWindowPos()
    result = rule.action(raw, processed, fake_processor)

    ok_(result)
    ok_('classifications' in processed)
    ok_('skunk_works' in processed.classifications)
    eq_(
        processed.classifications.skunk_works.classification,
        'NtUserSetWindowPos | other'
    )