This article collects typical usage examples of the Python function socorro.external.postgresql.dbapi2_util.execute_no_results. If you are wondering what execute_no_results does, how to call it, or what real uses of it look like, the curated examples here may help.
Fifteen code examples of execute_no_results are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
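Before the examples, here is a minimal sketch of the calling convention. The connection string, table, and crash_id below are illustrative assumptions rather than values from the Socorro project, and the explicit commit/rollback stands in for Socorro's own transaction machinery.

# Minimal usage sketch (illustrative values, not Socorro's actual wiring).
import psycopg2

from socorro.external.postgresql.dbapi2_util import execute_no_results

# hypothetical DSN; Socorro builds its connections through a factory instead
connection = psycopg2.connect("dbname=breakpad user=breakpad")
try:
    # run a statement whose result set we do not need
    execute_no_results(
        connection,
        "insert into priorityjobs (uuid) values (%s)",
        ("00000000-0000-0000-0000-000000000000",)  # placeholder crash_id
    )
    connection.commit()
except Exception:
    connection.rollback()
    raise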
Example 1: _queue_priorty_job_transaction
def _queue_priorty_job_transaction(self, connection, crash_id,
candidate_processor_iter):
"""this method implements a transaction, inserting a crash to both
the 'jobs' table (via the '_queue_standard_job_transaction' method)
and the 'priority_jobs_XXX' table associated with the target
processor"""
assigned_processor = self._queue_standard_job_transaction(
connection,
crash_id,
candidate_processor_iter
)
if assigned_processor is None:
return None
execute_no_results(
connection,
"insert into priority_jobs_%d (uuid) values (%%s)"
% assigned_processor,
(crash_id,)
)
execute_no_results(
connection,
"delete from priorityjobs where uuid = %s",
(crash_id,)
)
return assigned_processor
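A note on the string formatting in Example 1 above: the table name cannot be sent as a bound parameter, so it is interpolated with Python's % operator, while the uuid stays a DB-API placeholder. Writing %%s ensures the driver still sees %s after the interpolation. The processor id below is a made-up value.

# Sketch of the placeholder escaping, with a made-up processor id.
assigned_processor = 42
sql = "insert into priority_jobs_%d (uuid) values (%%s)" % assigned_processor
print(sql)  # -> insert into priority_jobs_42 (uuid) values (%s)
# execute_no_results(connection, sql, (crash_id,)) then binds crash_id safely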
Example 2: _remember_failure
def _remember_failure(
self,
connection,
class_,
duration,
exc_type,
exc_value,
exc_tb
):
exc_traceback = ''.join(traceback.format_tb(exc_tb))
app_name = class_.app_name
execute_no_results(
connection,
"""INSERT INTO crontabber_log (
app_name,
duration,
exc_type,
exc_value,
exc_traceback
) VALUES (
%s,
%s,
%s,
%s,
%s
)""",
(
app_name,
'%.5f' % duration,
repr(exc_type),
repr(exc_value),
exc_traceback
)
)
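The exc_type, exc_value, and exc_tb arguments in Example 2 are the standard exception triple. Below is a hedged sketch of how a caller might capture and pass them; the run_job_and_log wrapper and the job interface are assumptions for illustration, not the actual crontabber code.

# Hypothetical caller (not the real crontabber code): the exc_* arguments
# come straight from sys.exc_info(), and duration is seconds as a float.
import sys
import time

def run_job_and_log(job_app, connection, class_):
    # job_app is assumed to be the crontabber app exposing _remember_failure
    start = time.time()
    try:
        class_().run()  # assumed job interface
    except Exception:
        exc_type, exc_value, exc_tb = sys.exc_info()
        job_app._remember_failure(
            connection, class_, time.time() - start,
            exc_type, exc_value, exc_tb
        )
        raise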
Example 3: _database_transaction
def _database_transaction(
self,
connection,
raw_adi_logs_pathname,
target_date
):
with codecs.open(raw_adi_logs_pathname, 'r', 'utf-8') as f:
pgcursor = connection.cursor()
pgcursor.copy_from(
f,
'raw_adi_logs',
null='None',
columns=[
'report_date',
'product_name',
'product_os_platform',
'product_os_version',
'product_version',
'build',
'build_channel',
'product_guid',
'count'
]
)
pgcursor.execute(_RAW_ADI_QUERY, (target_date,))
# for Bug 1159993
execute_no_results(connection, _FENNEC38_ADI_CHANNEL_CORRECTION_SQL)
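Example 3 mixes a raw cursor (for the bulk COPY and the large query) with execute_no_results for the follow-up correction. Below is a standalone sketch of the copy_from step; the DSN, file path, and abbreviated column list are assumptions for illustration.

# Standalone sketch of the bulk-load step (illustrative DSN, path, columns).
import codecs
import psycopg2

connection = psycopg2.connect("dbname=breakpad")  # hypothetical DSN
with codecs.open("/tmp/raw_adi.tsv", "r", "utf-8") as f:  # hypothetical path
    cursor = connection.cursor()
    # tab-separated rows; the literal string "None" is loaded as SQL NULL
    cursor.copy_from(
        f,
        "raw_adi_logs",
        null="None",
        columns=["report_date", "product_name", "count"],  # abbreviated
    )
connection.commit()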
Example 4: _prioritize_previously_enqueued_jobs_transaction
def _prioritize_previously_enqueued_jobs_transaction(self, connection,
crash_id):
"""priorty jobs come into the system at random times. A given crash_id
may already be queued for processing when a priority request comes in
for it. To avoid repeating processing, a priority crash_id is checked
to see if it is already queued. If it is, the processor already
assigned to it is told to expedite processing. This done just by
entering the crash_id into the processors private 'priority_jobs_XXX'
table."""
try:
job_owner = single_value_sql(
connection,
"select owner from jobs where uuid = %s",
(crash_id,)
)
except SQLDidNotReturnSingleValue:
return False
priority_job_table_name = 'priority_jobs_%d' % job_owner
self.config.logger.debug(
"priority job %s was already in the queue, assigned to %d",
crash_id,
job_owner
)
try:
# detect if the found job was assigned to a processor that was
# dead by checking to see if the priority jobs table exists or
# not. If it doesn't exist, wait for the job to get reassigned
# to a live processor. In the future, it may be better to
# just reassign the job immediately.
single_value_sql( # return value intentionally ignored
connection,
"select 1 from pg_stat_user_tables where relname = %s",
(priority_job_table_name,)
)
except SQLDidNotReturnSingleValue:
self.config.logger.debug(
"%s assigned to dead processor %d - "
"wait for reassignment",
crash_id,
job_owner
)
# the job is likely assigned to a dead processor;
# skip processing it this time around - by next time,
# hopefully, it will have been
# reassigned to a live processor
return False
execute_no_results(
connection,
"insert into %s (uuid) values (%%s)" %
priority_job_table_name,
(crash_id,)
)
execute_no_results(
connection,
"delete from priorityjobs where uuid = %s",
(crash_id,)
)
return True
Example 5: _save_plugins
def _save_plugins(self, connection, processed_crash, report_id):
""" Electrolysis Support - Optional - processed_crash may contain a
ProcessType of plugin. In the future this value would be default,
content, maybe even Jetpack... This indicates which process was the
crashing process.
plugin - When set to plugin, the jsonDocument MUST calso contain
PluginFilename, PluginName, and PluginVersion
"""
process_type = processed_crash['process_type']
if not process_type:
return
if process_type == "plugin":
# Bug#543776 We actually are relaxing the non-null policy... a
# null filename, name, and version is OK. We'll use empty strings
try:
plugin_filename = processed_crash['PluginFilename']
plugin_name = processed_crash['PluginName']
plugin_version = processed_crash['PluginVersion']
except KeyError as x:
self.config.logger.error(
'the crash is missing a required field: %s', str(x)
)
return
find_plugin_sql = ('select id from plugins '
'where filename = %s '
'and name = %s')
try:
plugin_id = single_value_sql(connection,
find_plugin_sql,
(plugin_filename,
plugin_name))
except SQLDidNotReturnSingleValue:
insert_plugsins_sql = ("insert into plugins (filename, name) "
"values (%s, %s) returning id")
plugin_id = single_value_sql(connection,
insert_plugsins_sql,
(plugin_filename,
plugin_name))
crash_id = processed_crash['uuid']
table_suffix = self._table_suffix_for_crash_id(crash_id)
plugin_reports_table_name = 'plugins_reports_%s' % table_suffix
plugins_reports_insert_sql = (
'insert into %s '
' (report_id, plugin_id, date_processed, version) '
'values '
' (%%s, %%s, %%s, %%s)' % plugin_reports_table_name
)
values_tuple = (report_id,
plugin_id,
processed_crash['date_processed'],
plugin_version)
execute_no_results(connection,
plugins_reports_insert_sql,
values_tuple)
Example 6: _create_priority_jobs
def _create_priority_jobs(self, connection):
self.processor_id = single_value_sql(
connection, "select id from processors where name = %s", (self.processor_name,)
)
priority_jobs_table_name = "priority_jobs_%d" % self.processor_id
execute_no_results(connection, "drop table if exists %s" % priority_jobs_table_name)
execute_no_results(
connection, "create table %s (uuid varchar(50) not null primary key)" % priority_jobs_table_name
)
self.config.logger.info("created priority jobs table: %s", priority_jobs_table_name)
return priority_jobs_table_name
Example 7: _save_processed_transaction
def _save_processed_transaction(self, connection, processed_crash):
report_id = self._save_processed_report(connection, processed_crash)
self._save_plugins(connection, processed_crash, report_id)
self._save_extensions(connection, processed_crash, report_id)
crash_id = processed_crash['uuid']
processed_crashes_table_name = (
'processed_crashes_%s' % self._table_suffix_for_crash_id(crash_id)
)
insert_sql = """insert into %s (uuid, processed_crash, date_processed) values
(%%s, %%s, %%s)""" % processed_crashes_table_name
savepoint_name = threading.currentThread().getName().replace('-', '')
value_list = (
crash_id,
json.dumps(processed_crash, cls=JsonDTEncoder),
processed_crash["date_processed"]
)
execute_no_results(connection, "savepoint %s" % savepoint_name)
try:
execute_no_results(connection, insert_sql, value_list)
execute_no_results(
connection,
"release savepoint %s" % savepoint_name
)
except self.config.database_class.IntegrityError:
# report already exists
execute_no_results(
connection,
"rollback to savepoint %s" % savepoint_name
)
Example 8: _save_processed_report
def _save_processed_report(self, connection, processed_crash):
column_list = []
placeholder_list = []
value_list = []
for pro_crash_name, report_name in self._reports_table_mappings:
column_list.append(report_name)
placeholder_list.append('%s')
value_list.append(processed_crash[pro_crash_name])
crash_id = processed_crash['uuid']
reports_table_name = (
'reports_%s' % self._table_suffix_for_crash_id(crash_id)
)
insert_sql = "insert into %s (%s) values (%s) returning id" % (
reports_table_name,
', '.join(column_list),
', '.join(placeholder_list)
)
# we want to insert directly into the reports table. There is a
# chance, however, that the record already exists. If it does, then
# the insert would fail and the connection would fall into a "broken"
# state. To avoid this, we set a savepoint to which we can roll back
# if the record already exists - essentially a nested transaction.
# We use the name of the executing thread as the savepoint name;
# alternatively, we could use a uuid.
# (a standalone sketch of this savepoint pattern follows this example)
savepoint_name = threading.currentThread().getName().replace('-', '')
execute_no_results(connection, "savepoint %s" % savepoint_name)
try:
report_id = single_value_sql(connection, insert_sql, value_list)
execute_no_results(
connection,
"release savepoint %s" % savepoint_name
)
except self.config.database_class.IntegrityError:
# report already exists
execute_no_results(
connection,
"rollback to savepoint %s" % savepoint_name
)
execute_no_results(
connection,
"release savepoint %s" % savepoint_name
)
execute_no_results(
connection,
"delete from %s where uuid = %%s" % reports_table_name,
(processed_crash.uuid,)
)
report_id = single_value_sql(connection, insert_sql, value_list)
return report_id
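Examples 7 and 8 implement a poor-man's nested transaction: a SQL savepoint is set before the insert, released on success, and rolled back to on a duplicate-key failure so the outer transaction stays usable. The sketch below isolates that pattern; psycopg2.IntegrityError stands in for self.config.database_class.IntegrityError, and the function and savepoint names are made up for illustration.

# Sketch of the savepoint pattern from Examples 7 and 8 (illustrative names;
# psycopg2.IntegrityError substitutes for the configured database class).
import psycopg2

from socorro.external.postgresql.dbapi2_util import (
    execute_no_results,
    single_value_sql,
)

def insert_with_savepoint(connection, insert_sql, value_list):
    savepoint_name = "sp_worker1"  # must be a bare SQL identifier
    execute_no_results(connection, "savepoint %s" % savepoint_name)
    try:
        report_id = single_value_sql(connection, insert_sql, value_list)
        # success: discard the savepoint and keep the new row
        execute_no_results(connection, "release savepoint %s" % savepoint_name)
        return report_id
    except psycopg2.IntegrityError:
        # duplicate row: undo only the failed insert; the outer
        # transaction remains usable
        execute_no_results(connection, "rollback to savepoint %s" % savepoint_name)
        return None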
Example 9: _remember_success
def _remember_success(self, connection, class_, success_date, duration):
app_name = class_.app_name
execute_no_results(
connection,
"""INSERT INTO crontabber_log (
app_name,
success,
duration
) VALUES (
%s,
%s,
%s
)""",
(app_name, success_date, '%.5f' % duration)
)
Example 10: _kick_stalled_jobs_transaction
def _kick_stalled_jobs_transaction(self, connection):
"""try to restart stalled jobs by changing the startteddatetime to
NULL. This should get the attention of the assigned processor"""
self.config.logger.debug("restart stalled jobs in queue")
execute_no_results(
connection,
"update jobs "
" set starteddatetime = NULL "
"where"
" success is NULL"
" and completeddatetime is NULL"
" and starteddatetime < now() - %s - %s",
(self.config.registrar.check_in_frequency,
self.config.registrar.processor_grace_period)
)
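The "now() - %s - %s" arithmetic in Example 10 works because psycopg2 adapts Python datetime.timedelta parameters to PostgreSQL interval values. The sketch below assumes the two registrar settings are timedeltas, with made-up durations.

# Sketch (not project code): timedelta parameters are sent as intervals,
# so the subtraction happens server-side. Durations below are assumptions.
import datetime

from socorro.external.postgresql.dbapi2_util import execute_no_results

check_in_frequency = datetime.timedelta(minutes=5)      # assumed value
processor_grace_period = datetime.timedelta(minutes=2)  # assumed value

def kick_stalled_jobs(connection):
    execute_no_results(
        connection,
        "update jobs set starteddatetime = NULL "
        "where success is NULL "
        "  and completeddatetime is NULL "
        "  and starteddatetime < now() - %s - %s",
        (check_in_frequency, processor_grace_period)
    )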
Example 11: _clean_jobs_table_transaction
def _clean_jobs_table_transaction(self, connection):
"""go through the jobs table and remove jobs that are complete"""
self.config.logger.debug("removing completed jobs from queue")
self.config.logger.debug("starting deletion")
execute_no_results(
connection,
"delete from jobs "
"where"
" uuid in (select "
" uuid"
" from"
" jobs j"
" where"
" j.success is not null)"
)
Example 12: run
def run(self, connection):
select_sql = """
SELECT crash_id FROM reprocessing_jobs LIMIT 10000
"""
crash_ids = []
for (crash_id,) in execute_query_iter(connection, select_sql):
crash_ids.append(crash_id)
delete_sql = """
DELETE from reprocessing_jobs WHERE crash_id = %(crash_id)s
"""
for crash_id in crash_ids:
self.queuing_connection_factory.save_raw_crash(DotDict({"legacy_processing": 0}), [], crash_id)
execute_no_results(connection, delete_sql, {"crash_id": crash_id})
connection.commit()
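Example 12 passes its parameters as a dict and uses the named "pyformat" placeholder style, %(crash_id)s, rather than positional %s; psycopg2 cursors accept both, so execute_no_results does too. A small side-by-side sketch (connection and crash_id are assumed to be in scope):

# Both placeholder styles, side by side (sketch; connection and crash_id
# are assumed to be defined elsewhere).
from socorro.external.postgresql.dbapi2_util import execute_no_results

# positional placeholders with a sequence of parameters
execute_no_results(
    connection,
    "delete from reprocessing_jobs where crash_id = %s",
    (crash_id,)
)

# named ("pyformat") placeholders with a mapping -- the style in Example 12
execute_no_results(
    connection,
    "delete from reprocessing_jobs where crash_id = %(crash_id)s",
    {"crash_id": crash_id}
)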
Example 13: test_execute_no_results
def test_execute_no_results(self):
m_execute = Mock()
m_cursor = Mock()
m_cursor.execute = m_execute
conn = Mock()
conn.cursor.return_value = m_cursor
dbapi2_util.execute_no_results(
conn,
"insert into table (a, b, c) values (%s, %s, %s)",
(1, 2, 3)
)
eq_(conn.cursor.call_count, 1)
eq_(m_cursor.execute.call_count, 1)
m_cursor.execute.assert_called_once_with(
"insert into table (a, b, c) values (%s, %s, %s)",
(1, 2, 3)
)
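The test above pins down the whole contract: take a connection, obtain one cursor, call execute exactly once with the SQL and its parameters, and return nothing. One implementation satisfying that test would look like the sketch below; the real dbapi2_util code may differ in details such as argument names or cursor cleanup.

# A sketch that satisfies test_execute_no_results (the real module may differ).
def execute_no_results(connection, sql, parameters=None):
    cursor = connection.cursor()
    cursor.execute(sql, parameters)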
Example 14: _save_extensions
def _save_extensions(self, connection, processed_crash, report_id):
extensions = processed_crash['addons']
if not extensions:
return
crash_id = processed_crash['uuid']
table_suffix = self._table_suffix_for_crash_id(crash_id)
extensions_table_name = 'extensions_%s' % table_suffix
extensions_insert_sql = (
"insert into %s "
" (report_id, date_processed, extension_key, extension_id, "
" extension_version)"
"values (%%s, %%s, %%s, %%s, %%s)" % extensions_table_name
)
# why are we deleting first? This might be a reprocessing job and
# the extensions data might already be in the table: a straight insert
# might fail. Why not check to see if there is data already there
# and then just not insert if data is there? We may be reprocessing
# to deal with missing extensions data, so just because there is
# already data there doesn't mean that we can skip this.
# What about using "upsert" sql? That would be fine and result in one
# fewer round trip between client and database, but "upsert" sql is
# opaque and not easy to understand at a glance. This was faster to
# implement. What about using "transaction check points"?
# Too many round trips between the client and the server.
# (a sketch of the "upsert" alternative follows this example)
clear_extensions_sql = (
"delete from %s where report_id = %%s" % extensions_table_name
)
execute_no_results(connection, clear_extensions_sql, (report_id,))
for i, x in enumerate(extensions):
try:
execute_no_results(connection, extensions_insert_sql,
(report_id,
processed_crash['date_processed'],
i,
x[0][:100],
x[1]))
except IndexError:
self.config.logger.warning(
'"%s" is deficient as a name and version for an addon',
str(x[0])
)
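For comparison with the comment in Example 14, the "upsert" it mentions could be written with PostgreSQL 9.5+ ON CONFLICT, as in the sketch below. This is purely illustrative: it assumes a unique constraint on (report_id, extension_key), which the real extensions tables may not have, and the project deliberately chose the delete-then-insert approach instead.

# Illustrative only: an ON CONFLICT "upsert" (PostgreSQL 9.5+), assuming a
# unique constraint on (report_id, extension_key) that may not exist in the
# real schema. extensions_table_name is the partitioned table name from above.
upsert_sql = (
    "insert into %s "
    "  (report_id, date_processed, extension_key, extension_id, "
    "   extension_version) "
    "values (%%s, %%s, %%s, %%s, %%s) "
    "on conflict (report_id, extension_key) do update "
    "  set extension_id = excluded.extension_id, "
    "      extension_version = excluded.extension_version"
    % extensions_table_name
)
# execute_no_results(connection, upsert_sql, values_tuple) would then replace
# the delete + insert pair in Example 14.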
Example 15: __delitem__
def __delitem__(self, connection, key):
"""remove the item by key or raise KeyError"""
try:
# result intentionally ignored
single_value_sql(
connection,
"""SELECT app_name
FROM crontabber
WHERE
app_name = %s""",
(key,)
)
except SQLDidNotReturnSingleValue:
raise KeyError(key)
# item exists
execute_no_results(
connection,
"""DELETE FROM crontabber
WHERE app_name = %s""",
(key,)
)